// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
* NETC NTMP (NETC Table Management Protocol) 2.0 Library
* Copyright 2025 NXP
*/

#include <linux/dma-mapping.h>
#include <linux/fsl/netc_global.h>
#include <linux/iopoll.h>

#include "ntmp_private.h"

#define NETC_CBDR_TIMEOUT	1000 /* us */
#define NETC_CBDR_DELAY_US	10
#define NETC_CBDR_MR_EN		BIT(31)

#define NTMP_BASE_ADDR_ALIGN	128
#define NTMP_DATA_ADDR_ALIGN	32

/* Define NTMP Table ID */
#define NTMP_MAFT_ID		1
#define NTMP_RSST_ID		3

/* Generic Update Actions for most tables */
#define NTMP_GEN_UA_CFGEU	BIT(0)
#define NTMP_GEN_UA_STSEU	BIT(1)

#define NTMP_ENTRY_ID_SIZE	4
#define RSST_ENTRY_NUM		64
#define RSST_STSE_DATA_SIZE(n)	((n) * 8)
#define RSST_CFGE_DATA_SIZE(n)	(n)

int ntmp_init_cbdr(struct netc_cbdr *cbdr, struct device *dev,
const struct netc_cbdr_regs *regs)
{
int cbd_num = NETC_CBDR_BD_NUM;
size_t size;
size = cbd_num * sizeof(union netc_cbd) + NTMP_BASE_ADDR_ALIGN;
cbdr->addr_base = dma_alloc_coherent(dev, size, &cbdr->dma_base,
GFP_KERNEL);
if (!cbdr->addr_base)
return -ENOMEM;
cbdr->dma_size = size;
cbdr->bd_num = cbd_num;
cbdr->regs = *regs;
cbdr->dev = dev;
	/* The base address of the Control BD Ring must be 128-byte aligned */
cbdr->dma_base_align = ALIGN(cbdr->dma_base, NTMP_BASE_ADDR_ALIGN);
cbdr->addr_base_align = PTR_ALIGN(cbdr->addr_base,
NTMP_BASE_ADDR_ALIGN);
cbdr->next_to_clean = 0;
cbdr->next_to_use = 0;
spin_lock_init(&cbdr->ring_lock);
/* Step 1: Configure the base address of the Control BD Ring */
netc_write(cbdr->regs.bar0, lower_32_bits(cbdr->dma_base_align));
netc_write(cbdr->regs.bar1, upper_32_bits(cbdr->dma_base_align));
/* Step 2: Configure the producer index register */
netc_write(cbdr->regs.pir, cbdr->next_to_clean);
/* Step 3: Configure the consumer index register */
netc_write(cbdr->regs.cir, cbdr->next_to_use);
	/* Step 4: Configure the number of BDs of the Control BD Ring */
netc_write(cbdr->regs.lenr, cbdr->bd_num);
/* Step 5: Enable the Control BD Ring */
netc_write(cbdr->regs.mr, NETC_CBDR_MR_EN);
return 0;
}
EXPORT_SYMBOL_GPL(ntmp_init_cbdr);

void ntmp_free_cbdr(struct netc_cbdr *cbdr)
{
/* Disable the Control BD Ring */
netc_write(cbdr->regs.mr, 0);
dma_free_coherent(cbdr->dev, cbdr->dma_size, cbdr->addr_base,
cbdr->dma_base);
memset(cbdr, 0, sizeof(*cbdr));
}
EXPORT_SYMBOL_GPL(ntmp_free_cbdr);
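
/*
 * Example (illustrative only, not part of this library): a minimal sketch of
 * how a driver might bring up and tear down a command BD ring. The register
 * offset names and the 'priv' layout are assumptions; a real caller fills
 * struct netc_cbdr_regs with the __iomem addresses of its own command BD
 * ring registers.
 *
 *	struct netc_cbdr_regs regs = {
 *		.bar0 = priv->base + CBDR_BAR0_OFF,	// ring base, low 32 bits
 *		.bar1 = priv->base + CBDR_BAR1_OFF,	// ring base, high 32 bits
 *		.pir  = priv->base + CBDR_PIR_OFF,	// producer index
 *		.cir  = priv->base + CBDR_CIR_OFF,	// consumer index
 *		.mr   = priv->base + CBDR_MR_OFF,	// mode register
 *		.lenr = priv->base + CBDR_LENR_OFF,	// ring length
 *	};
 *	int err = ntmp_init_cbdr(&priv->ntmp_user.ring[0], dev, &regs);
 *	...
 *	ntmp_free_cbdr(&priv->ntmp_user.ring[0]);
 */
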
static int ntmp_get_free_cbd_num(struct netc_cbdr *cbdr)
{
return (cbdr->next_to_clean - cbdr->next_to_use - 1 +
cbdr->bd_num) % cbdr->bd_num;
}
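
/*
 * Worked example of the free-slot arithmetic above: with bd_num = 8,
 * next_to_clean = 2 and next_to_use = 5, slots 2..4 are still outstanding,
 * so (2 - 5 - 1 + 8) % 8 = 4 slots are free. One slot is deliberately kept
 * unused so that next_to_use == next_to_clean always means "empty" rather
 * than being ambiguous with "full".
 */
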
static union netc_cbd *ntmp_get_cbd(struct netc_cbdr *cbdr, int index)
{
return &((union netc_cbd *)(cbdr->addr_base_align))[index];
}

static void ntmp_clean_cbdr(struct netc_cbdr *cbdr)
{
union netc_cbd *cbd;
int i;
i = cbdr->next_to_clean;
while (netc_read(cbdr->regs.cir) != i) {
cbd = ntmp_get_cbd(cbdr, i);
memset(cbd, 0, sizeof(*cbd));
i = (i + 1) % cbdr->bd_num;
}
cbdr->next_to_clean = i;
}

static int netc_xmit_ntmp_cmd(struct ntmp_user *user, union netc_cbd *cbd)
{
union netc_cbd *cur_cbd;
struct netc_cbdr *cbdr;
int i, err;
u16 status;
u32 val;
/* Currently only i.MX95 ENETC is supported, and it only has one
* command BD ring
*/
cbdr = &user->ring[0];
spin_lock_bh(&cbdr->ring_lock);
if (unlikely(!ntmp_get_free_cbd_num(cbdr)))
ntmp_clean_cbdr(cbdr);
i = cbdr->next_to_use;
cur_cbd = ntmp_get_cbd(cbdr, i);
*cur_cbd = *cbd;
dma_wmb();
/* Update producer index of both software and hardware */
i = (i + 1) % cbdr->bd_num;
cbdr->next_to_use = i;
netc_write(cbdr->regs.pir, i);
err = read_poll_timeout_atomic(netc_read, val, val == i,
NETC_CBDR_DELAY_US, NETC_CBDR_TIMEOUT,
true, cbdr->regs.cir);
if (unlikely(err))
goto cbdr_unlock;
dma_rmb();
/* Get the writeback command BD, because the caller may need
* to check some other fields of the response header.
*/
*cbd = *cur_cbd;
/* Check the writeback error status */
status = le16_to_cpu(cbd->resp_hdr.error_rr) & NTMP_RESP_ERROR;
if (unlikely(status)) {
err = -EIO;
dev_err(user->dev, "Command BD error: 0x%04x\n", status);
}
ntmp_clean_cbdr(cbdr);
dma_wmb();
cbdr_unlock:
spin_unlock_bh(&cbdr->ring_lock);
return err;
}
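
/*
 * Shape of one command round-trip through the ring, with example indices:
 * if next_to_use is 3, the request BD is copied into slot 3, the producer
 * index register is written with 4, and the poll loop then waits for the
 * consumer index register to read back 4 as well, at which point slot 3
 * holds the hardware's writeback response that is copied back to the
 * caller's BD.
 */
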
static int ntmp_alloc_data_mem(struct ntmp_dma_buf *data, void **buf_align)
{
void *buf;
buf = dma_alloc_coherent(data->dev, data->size + NTMP_DATA_ADDR_ALIGN,
&data->dma, GFP_KERNEL);
if (!buf)
return -ENOMEM;
data->buf = buf;
*buf_align = PTR_ALIGN(buf, NTMP_DATA_ADDR_ALIGN);
return 0;
}
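
/*
 * The NTMP_DATA_ADDR_ALIGN bytes of slack in the allocation above guarantee
 * that a 32-byte-aligned window of data->size bytes fits inside the buffer:
 * the aligned start is at most 31 bytes past 'buf'. ntmp_fill_request_hdr()
 * applies the same ALIGN() to the DMA address, which matches the PTR_ALIGN()
 * done here because the CPU and DMA addresses of a coherent allocation share
 * the same low-order bits.
 */
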
static void ntmp_free_data_mem(struct ntmp_dma_buf *data)
{
dma_free_coherent(data->dev, data->size + NTMP_DATA_ADDR_ALIGN,
data->buf, data->dma);
}

static void ntmp_fill_request_hdr(union netc_cbd *cbd, dma_addr_t dma,
int len, int table_id, int cmd,
int access_method)
{
dma_addr_t dma_align;
memset(cbd, 0, sizeof(*cbd));
dma_align = ALIGN(dma, NTMP_DATA_ADDR_ALIGN);
cbd->req_hdr.addr = cpu_to_le64(dma_align);
cbd->req_hdr.len = cpu_to_le32(len);
cbd->req_hdr.cmd = cmd;
cbd->req_hdr.access_method = FIELD_PREP(NTMP_ACCESS_METHOD,
access_method);
cbd->req_hdr.table_id = table_id;
cbd->req_hdr.ver_cci_rr = FIELD_PREP(NTMP_HDR_VERSION,
NTMP_HDR_VER2);
	/* For NTMP version 2.0 and later */
cbd->req_hdr.npf = cpu_to_le32(NTMP_NPF);
}

static void ntmp_fill_crd(struct ntmp_cmn_req_data *crd, u8 tblv,
u8 qa, u16 ua)
{
crd->update_act = cpu_to_le16(ua);
crd->tblv_qact = NTMP_TBLV_QACT(tblv, qa);
}

static void ntmp_fill_crd_eid(struct ntmp_req_by_eid *rbe, u8 tblv,
u8 qa, u16 ua, u32 entry_id)
{
ntmp_fill_crd(&rbe->crd, tblv, qa, ua);
rbe->entry_id = cpu_to_le32(entry_id);
}

static const char *ntmp_table_name(int tbl_id)
{
switch (tbl_id) {
case NTMP_MAFT_ID:
return "MAC Address Filter Table";
case NTMP_RSST_ID:
return "RSS Table";
default:
return "Unknown Table";
	}
}

static int ntmp_delete_entry_by_id(struct ntmp_user *user, int tbl_id,
u8 tbl_ver, u32 entry_id, u32 req_len,
u32 resp_len)
{
struct ntmp_dma_buf data = {
.dev = user->dev,
.size = max(req_len, resp_len),
};
struct ntmp_req_by_eid *req;
union netc_cbd cbd;
int err;
err = ntmp_alloc_data_mem(&data, (void **)&req);
if (err)
return err;
ntmp_fill_crd_eid(req, tbl_ver, 0, 0, entry_id);
ntmp_fill_request_hdr(&cbd, data.dma, NTMP_LEN(req_len, resp_len),
tbl_id, NTMP_CMD_DELETE, NTMP_AM_ENTRY_ID);
err = netc_xmit_ntmp_cmd(user, &cbd);
if (err)
		dev_err(user->dev,
			"Failed to delete entry 0x%x of %s, err: %pe\n",
			entry_id, ntmp_table_name(tbl_id), ERR_PTR(err));
ntmp_free_data_mem(&data);
return err;
}

static int ntmp_query_entry_by_id(struct ntmp_user *user, int tbl_id,
u32 len, struct ntmp_req_by_eid *req,
dma_addr_t dma, bool compare_eid)
{
struct ntmp_cmn_resp_query *resp;
int cmd = NTMP_CMD_QUERY;
union netc_cbd cbd;
u32 entry_id;
int err;
entry_id = le32_to_cpu(req->entry_id);
if (le16_to_cpu(req->crd.update_act))
cmd = NTMP_CMD_QU;
/* Request header */
ntmp_fill_request_hdr(&cbd, dma, len, tbl_id, cmd, NTMP_AM_ENTRY_ID);
err = netc_xmit_ntmp_cmd(user, &cbd);
if (err) {
dev_err(user->dev,
"Failed to query entry 0x%x of %s, err: %pe\n",
entry_id, ntmp_table_name(tbl_id), ERR_PTR(err));
return err;
}
	/* For a few tables, the first field of the response data is not the
	 * entry ID, so return success directly.
	 */
if (!compare_eid)
return 0;
resp = (struct ntmp_cmn_resp_query *)req;
if (unlikely(le32_to_cpu(resp->entry_id) != entry_id)) {
dev_err(user->dev,
"%s: query EID 0x%x doesn't match response EID 0x%x\n",
ntmp_table_name(tbl_id), entry_id, le32_to_cpu(resp->entry_id));
return -EIO;
}
return 0;
}

int ntmp_maft_add_entry(struct ntmp_user *user, u32 entry_id,
struct maft_entry_data *maft)
{
struct ntmp_dma_buf data = {
.dev = user->dev,
.size = sizeof(struct maft_req_add),
};
struct maft_req_add *req;
union netc_cbd cbd;
int err;
err = ntmp_alloc_data_mem(&data, (void **)&req);
if (err)
return err;
	/* Set the MAC address filter table request data buffer */
ntmp_fill_crd_eid(&req->rbe, user->tbl.maft_ver, 0, 0, entry_id);
req->keye = maft->keye;
req->cfge = maft->cfge;
ntmp_fill_request_hdr(&cbd, data.dma, NTMP_LEN(data.size, 0),
NTMP_MAFT_ID, NTMP_CMD_ADD, NTMP_AM_ENTRY_ID);
err = netc_xmit_ntmp_cmd(user, &cbd);
if (err)
dev_err(user->dev, "Failed to add MAFT entry 0x%x, err: %pe\n",
entry_id, ERR_PTR(err));
ntmp_free_data_mem(&data);
return err;
}
EXPORT_SYMBOL_GPL(ntmp_maft_add_entry);

int ntmp_maft_query_entry(struct ntmp_user *user, u32 entry_id,
struct maft_entry_data *maft)
{
struct ntmp_dma_buf data = {
.dev = user->dev,
.size = sizeof(struct maft_resp_query),
};
struct maft_resp_query *resp;
struct ntmp_req_by_eid *req;
int err;
err = ntmp_alloc_data_mem(&data, (void **)&req);
if (err)
return err;
ntmp_fill_crd_eid(req, user->tbl.maft_ver, 0, 0, entry_id);
err = ntmp_query_entry_by_id(user, NTMP_MAFT_ID,
NTMP_LEN(sizeof(*req), data.size),
req, data.dma, true);
if (err)
goto end;
resp = (struct maft_resp_query *)req;
maft->keye = resp->keye;
maft->cfge = resp->cfge;
end:
ntmp_free_data_mem(&data);
return err;
}
EXPORT_SYMBOL_GPL(ntmp_maft_query_entry);

int ntmp_maft_delete_entry(struct ntmp_user *user, u32 entry_id)
{
return ntmp_delete_entry_by_id(user, NTMP_MAFT_ID, user->tbl.maft_ver,
entry_id, NTMP_EID_REQ_LEN, 0);
}
EXPORT_SYMBOL_GPL(ntmp_maft_delete_entry);
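
/*
 * Example (illustrative only): a hypothetical caller installing and later
 * removing a MAC address filter entry. The 'mac_addr' and 'si_bitmap' field
 * names are assumptions about the KEYE/CFGE layout in maft_entry_data, and
 * 'ntmp', 'mac', 'si_idx' and 'entry_id' come from the caller's context.
 *
 *	struct maft_entry_data maft = {};
 *
 *	ether_addr_copy(maft.keye.mac_addr, mac);	// address to match
 *	maft.cfge.si_bitmap = cpu_to_le16(BIT(si_idx));	// destination SI
 *	err = ntmp_maft_add_entry(ntmp, entry_id, &maft);
 *	...
 *	err = ntmp_maft_delete_entry(ntmp, entry_id);
 */
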
int ntmp_rsst_update_entry(struct ntmp_user *user, const u32 *table,
int count)
{
struct ntmp_dma_buf data = {.dev = user->dev};
struct rsst_req_update *req;
union netc_cbd cbd;
int err, i;
	if (count != RSST_ENTRY_NUM)
		/* HW only accepts a full 64-entry table */
		return -EINVAL;
data.size = struct_size(req, groups, count);
err = ntmp_alloc_data_mem(&data, (void **)&req);
if (err)
return err;
/* Set the request data buffer */
ntmp_fill_crd_eid(&req->rbe, user->tbl.rsst_ver, 0,
NTMP_GEN_UA_CFGEU | NTMP_GEN_UA_STSEU, 0);
for (i = 0; i < count; i++)
req->groups[i] = (u8)(table[i]);
ntmp_fill_request_hdr(&cbd, data.dma, NTMP_LEN(data.size, 0),
NTMP_RSST_ID, NTMP_CMD_UPDATE, NTMP_AM_ENTRY_ID);
err = netc_xmit_ntmp_cmd(user, &cbd);
if (err)
dev_err(user->dev, "Failed to update RSST entry, err: %pe\n",
ERR_PTR(err));
ntmp_free_data_mem(&data);
return err;
}
EXPORT_SYMBOL_GPL(ntmp_rsst_update_entry);

int ntmp_rsst_query_entry(struct ntmp_user *user, u32 *table, int count)
{
struct ntmp_dma_buf data = {.dev = user->dev};
struct ntmp_req_by_eid *req;
union netc_cbd cbd;
int err, i;
u8 *group;
	if (count != RSST_ENTRY_NUM)
		/* HW only accepts a full 64-entry table */
		return -EINVAL;
data.size = NTMP_ENTRY_ID_SIZE + RSST_STSE_DATA_SIZE(count) +
RSST_CFGE_DATA_SIZE(count);
err = ntmp_alloc_data_mem(&data, (void **)&req);
if (err)
return err;
/* Set the request data buffer */
ntmp_fill_crd_eid(req, user->tbl.rsst_ver, 0, 0, 0);
ntmp_fill_request_hdr(&cbd, data.dma, NTMP_LEN(sizeof(*req), data.size),
NTMP_RSST_ID, NTMP_CMD_QUERY, NTMP_AM_ENTRY_ID);
err = netc_xmit_ntmp_cmd(user, &cbd);
if (err) {
dev_err(user->dev, "Failed to query RSST entry, err: %pe\n",
ERR_PTR(err));
goto end;
}
group = (u8 *)req;
group += NTMP_ENTRY_ID_SIZE + RSST_STSE_DATA_SIZE(count);
for (i = 0; i < count; i++)
table[i] = group[i];
end:
ntmp_free_data_mem(&data);
return err;
}
EXPORT_SYMBOL_GPL(ntmp_rsst_query_entry);
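
/*
 * Example (illustrative only): a hypothetical ethtool .set_rxfh path handing
 * an RSS indirection table to hardware. The RSST must be written as a whole,
 * so all RSST_ENTRY_NUM (64) entries are passed in one call; 'ntmp' and
 * 'num_rx_rings' are assumptions about the caller's context.
 *
 *	u32 indir[RSST_ENTRY_NUM];
 *	int i, err;
 *
 *	for (i = 0; i < RSST_ENTRY_NUM; i++)
 *		indir[i] = i % num_rx_rings;	// spread flows across rings
 *	err = ntmp_rsst_update_entry(ntmp, indir, RSST_ENTRY_NUM);
 */
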
MODULE_DESCRIPTION("NXP NETC Library");
MODULE_LICENSE("Dual BSD/GPL");