/* * "Optimize" a list of dependencies as spit out by gcc -MD * for the kernel build * =========================================================================== * * Author Kai Germaschewski * Copyright 2002 by Kai Germaschewski * * This software may be used and distributed according to the terms * of the GNU General Public License, incorporated herein by reference. * * * Introduction: * * gcc produces a very nice and correct list of dependencies which * tells make when to remake a file. * * To use this list as-is however has the drawback that virtually * every file in the kernel includes autoconf.h. * * If the user re-runs make *config, autoconf.h will be * regenerated. make notices that and will rebuild every file which * includes autoconf.h, i.e. basically all files. This is extremely * annoying if the user just changed CONFIG_HIS_DRIVER from n to m. * * So we play the same trick that "mkdep" played before. We replace * the dependency on autoconf.h by a dependency on every config * option which is mentioned in any of the listed prerequisites. * * kconfig populates a tree in include/config/ with an empty file * for each config symbol and when the configuration is updated * the files representing changed config options are touched * which then let make pick up the changes and the files that use * the config symbols are rebuilt. * * So if the user changes his CONFIG_HIS_DRIVER option, only the objects * which depend on "include/config/his/driver.h" will be rebuilt, * so most likely only his driver ;-) * * The idea above dates, by the way, back to Michael E Chastain, AFAIK. * * So to get dependencies right, there are two issues: * o if any of the files the compiler read changed, we need to rebuild * o if the command line given to the compile the file changed, we * better rebuild as well. * * The former is handled by using the -MD output, the later by saving * the command line used to compile the old object and comparing it * to the one we would now use. * * Again, also this idea is pretty old and has been discussed on * kbuild-devel a long time ago. I don't have a sensibly working * internet connection right now, so I rather don't mention names * without double checking. * * This code here has been based partially based on mkdep.c, which * says the following about its history: * * Copyright abandoned, Michael Chastain, . * This is a C version of syncdep.pl by Werner Almesberger. * * * It is invoked as * * fixdep * * and will read the dependency file * * The transformed dependency snipped is written to stdout. * * It first generates a line * * cmd_ = * * and then basically copies the ..d file to stdout, in the * process filtering out the dependency on autoconf.h and adding * dependencies on include/config/my/option.h for every * CONFIG_MY_OPTION encountered in any of the prerequisites. * * It will also filter out all the dependencies on *.ver. We need * to make sure that the generated version checksum are globally up * to date before even starting the recursive build, so it's too late * at this point anyway. * * We don't even try to really parse the header files, but * merely grep, i.e. if CONFIG_FOO is mentioned in a comment, it will * be picked up as well. It's not a problem with respect to * correctness, since that can only give too many dependencies, thus * we cannot miss a rebuild. Since people tend to not mention totally * unrelated CONFIG_ options all over the place, it's not an * efficiency problem either. 
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <fcntl.h>
#include <string.h>
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <ctype.h>

static void usage(void)
{
	fprintf(stderr, "Usage: fixdep <depfile> <target> <cmdline>\n");
	exit(1);
}

/*
 * In the intended usage of this program, the stdout is redirected to .*.cmd
 * files. The return value of printf() and putchar() must be checked to catch
 * any error, e.g. "No space left on device".
 */
static void xprintf(const char *format, ...)
{
	va_list ap;
	int ret;

	va_start(ap, format);
	ret = vprintf(format, ap);
	if (ret < 0) {
		perror("fixdep");
		exit(1);
	}
	va_end(ap);
}

static void xputchar(int c)
{
	int ret;

	ret = putchar(c);
	if (ret == EOF) {
		perror("fixdep");
		exit(1);
	}
}

/*
 * Print out a dependency path from a symbol name
 */
static void print_dep(const char *m, int slen, const char *dir)
{
	int c, prev_c = '/', i;

	xprintf(" $(wildcard %s/", dir);
	for (i = 0; i < slen; i++) {
		c = m[i];
		if (c == '_')
			c = '/';
		else
			c = tolower(c);
		if (c != '/' || prev_c != '/')
			xputchar(c);
		prev_c = c;
	}
	xprintf(".h) \\\n");
}

struct item {
	struct item	*next;
	unsigned int	len;
	unsigned int	hash;
	char		name[0];
};

#define HASHSZ 256
static struct item *hashtab[HASHSZ];

static unsigned int strhash(const char *str, unsigned int sz)
{
	/* fnv32 hash */
	unsigned int i, hash = 2166136261U;

	for (i = 0; i < sz; i++)
		hash = (hash ^ str[i]) * 0x01000193;
	return hash;
}

/*
 * Lookup a value in the configuration string.
 */
static int is_defined_config(const char *name, int len, unsigned int hash)
{
	struct item *aux;

	for (aux = hashtab[hash % HASHSZ]; aux; aux = aux->next) {
		if (aux->hash == hash && aux->len == len &&
		    memcmp(aux->name, name, len) == 0)
			return 1;
	}
	return 0;
}

/*
 * Add a new value to the configuration string.
 */
static void define_config(const char *name, int len, unsigned int hash)
{
	struct item *aux = malloc(sizeof(*aux) + len);

	if (!aux) {
		perror("fixdep:malloc");
		exit(1);
	}
	memcpy(aux->name, name, len);
	aux->len = len;
	aux->hash = hash;
	aux->next = hashtab[hash % HASHSZ];
	hashtab[hash % HASHSZ] = aux;
}
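/*
 * Illustration (hypothetical symbol): use_config("HIS_DRIVER", 10)
 * hashes the name, records it in hashtab[] so it is emitted only once,
 * and has print_dep() lowercase it and turn '_' into '/', printing
 *
 *  $(wildcard include/config/his/driver.h) \
 *
 * Consecutive '/' are collapsed by the prev_c check, so e.g.
 * "FOO__BAR" also maps to include/config/foo/bar.h.
 */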
/*
 * Record the use of a CONFIG_* word.
 */
static void use_config(const char *m, int slen)
{
	unsigned int hash = strhash(m, slen);

	if (is_defined_config(m, slen, hash))
		return;

	define_config(m, slen, hash);
	print_dep(m, slen, "include/config");
}

/* test if s ends in sub */
static int str_ends_with(const char *s, int slen, const char *sub)
{
	int sublen = strlen(sub);

	if (sublen > slen)
		return 0;

	return !memcmp(s + slen - sublen, sub, sublen);
}

static void parse_config_file(const char *p)
{
	const char *q, *r;
	const char *start = p;

	while ((p = strstr(p, "CONFIG_"))) {
		if (p > start && (isalnum(p[-1]) || p[-1] == '_')) {
			p += 7;
			continue;
		}
		p += 7;
		q = p;
		while (*q && (isalnum(*q) || *q == '_'))
			q++;
		if (str_ends_with(p, q - p, "_MODULE"))
			r = q - 7;
		else
			r = q;
		if (r > p)
			use_config(p, r - p);
		p = q;
	}
}

static void *read_file(const char *filename)
{
	struct stat st;
	int fd;
	char *buf;

	fd = open(filename, O_RDONLY);
	if (fd < 0) {
		fprintf(stderr, "fixdep: error opening file: ");
		perror(filename);
		exit(2);
	}
	if (fstat(fd, &st) < 0) {
		fprintf(stderr, "fixdep: error fstat'ing file: ");
		perror(filename);
		exit(2);
	}
	buf = malloc(st.st_size + 1);
	if (!buf) {
		perror("fixdep: malloc");
		exit(2);
	}
	if (read(fd, buf, st.st_size) != st.st_size) {
		perror("fixdep: read");
		exit(2);
	}
	buf[st.st_size] = '\0';
	close(fd);

	return buf;
}

/* Ignore certain dependencies */
static int is_ignored_file(const char *s, int len)
{
	return str_ends_with(s, len, "include/generated/autoconf.h") ||
	       str_ends_with(s, len, "include/generated/autoksyms.h") ||
	       str_ends_with(s, len, ".ver");
}
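/*
 * A .d file (or a concatenation of several) is just a list of tokens
 * separated by spaces, backslashes and newlines, e.g. (hypothetical):
 *
 *   drivers/his/driver.o: drivers/his/driver.c \
 *     include/linux/module.h
 *   some.intermediate.tmp:
 *
 * Tokens ending in ':' are target names and are never written out;
 * every other token is treated as a prerequisite to list and scan
 * for CONFIG_ symbols.
 */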
/*
 * Important: The below generated source_foo.o and deps_foo.o variable
 * assignments are parsed not only by make, but also by the rather simple
 * parser in scripts/mod/sumversion.c.
 */
static void parse_dep_file(char *m, const char *target)
{
	char *p;
	int is_last, is_target;
	int saw_any_target = 0;
	int is_first_dep = 0;
	void *buf;

	while (1) {
		/* Skip any "white space" */
		while (*m == ' ' || *m == '\\' || *m == '\n')
			m++;

		if (!*m)
			break;

		/* Find next "white space" */
		p = m;
		while (*p && *p != ' ' && *p != '\\' && *p != '\n')
			p++;
		is_last = (*p == '\0');

		/* Is the token we found a target name? */
		is_target = (*(p-1) == ':');
		/* Don't write any target names into the dependency file */
		if (is_target) {
			/* The /next/ file is the first dependency */
			is_first_dep = 1;
		} else if (!is_ignored_file(m, p - m)) {
			*p = '\0';

			/*
			 * Do not list the source file as dependency, so that
			 * kbuild is not confused if a .c file is rewritten
			 * into .S or vice versa. Storing it in source_* is
			 * needed for modpost to compute srcversions.
			 */
			if (is_first_dep) {
				/*
				 * If processing the concatenation of multiple
				 * dependency files, only process the first
				 * target name, which will be the original
				 * source name, and ignore any other target
				 * names, which will be intermediate temporary
				 * files.
				 */
				if (!saw_any_target) {
					saw_any_target = 1;
					xprintf("source_%s := %s\n\n",
						target, m);
					xprintf("deps_%s := \\\n", target);
				}
				is_first_dep = 0;
			} else {
				xprintf("  %s \\\n", m);
			}

			buf = read_file(m);
			parse_config_file(buf);
			free(buf);
		}

		if (is_last)
			break;

		/*
		 * Start searching for next token immediately after the first
		 * "whitespace" character that follows this token.
		 */
		m = p + 1;
	}

	if (!saw_any_target) {
		fprintf(stderr, "fixdep: parse error; no targets found\n");
		exit(1);
	}

	xprintf("\n%s: $(deps_%s)\n\n", target, target);
	xprintf("$(deps_%s):\n", target);
}

int main(int argc, char *argv[])
{
	const char *depfile, *target, *cmdline;
	void *buf;

	if (argc != 4)
		usage();

	depfile = argv[1];
	target = argv[2];
	cmdline = argv[3];

	xprintf("cmd_%s := %s\n\n", target, cmdline);

	buf = read_file(depfile);
	parse_dep_file(buf, target);
	free(buf);

	return 0;
}
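/*
 * Example invocation (hypothetical paths; kbuild redirects stdout to
 * the per-object .cmd file in a similar way):
 *
 *   fixdep drivers/his/.driver.o.d drivers/his/driver.o \
 *       "gcc -c -o drivers/his/driver.o drivers/his/driver.c" \
 *       > drivers/his/.driver.o.cmd
 */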
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/led.c208
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/led.h35
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/lib.c1258
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c1628
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/main.c2183
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/power.c377
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/power.h26
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/rs.c (renamed from drivers/net/wireless/iwlwifi/iwl-agn-rs.c)1809
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/rs.h (renamed from drivers/net/wireless/iwlwifi/iwl-agn-rs.h)222
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/rx.c1021
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/rxon.c1547
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/scan.c1062
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/sta.c1425
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/tt.c657
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/tt.h109
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/tx.c1392
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/ucode.c416
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/acpi.c1083
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/acpi.h271
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/alive.h175
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/binding.h124
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/cmdhdr.h156
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/coex.h154
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/commands.h662
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/config.h112
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/context.h50
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/d3.h997
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h701
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h584
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/debug.h556
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/dhc.h226
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/filter.h39
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/led.h17
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/location.h1943
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h993
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/mac.h793
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h693
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/offload.h88
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/paging.h29
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h177
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/phy.h237
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/power.h790
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/rfi.h69
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/rs.h892
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/rx.h1330
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/scan.h1316
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/sf.h82
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/sta.h497
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/stats.h1090
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/system.h43
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/tdls.h161
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h519
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/tx.h979
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/txq.h127
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.c3480
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.h344
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/debugfs.c453
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/debugfs.h17
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dhc-utils.h75
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dump.c508
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/error-dump.h576
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/file.h1084
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/img.c95
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/img.h285
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/init.c164
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/notif-wait.c135
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/notif-wait.h97
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/paging.c296
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/pnvm.c481
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/pnvm.h30
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/regulatory.c743
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/regulatory.h267
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/rs.c161
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/runtime.h256
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/smem.c114
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/uefi.c845
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/uefi.h393
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-agn-hw.h59
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-config.h752
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-csr.h660
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c1404
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h78
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-debug.c87
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-debug.h221
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-devtrace-data.h59
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-devtrace-io.h194
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h137
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-devtrace-msg.h71
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-devtrace-ucode.h61
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-devtrace.c36
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h115
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.c2174
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.h115
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-fh.h735
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-io.c439
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-io.h107
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-modparams.h128
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c2121
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h189
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-utils.c119
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-utils.h71
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h300
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-phy-db.c438
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-phy-db.h23
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-prph.h542
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-scd.h84
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.c824
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.h1265
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-utils.c195
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-utils.h58
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mei/Makefile8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mei/internal.h20
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mei/iwl-mei.h542
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mei/main.c2216
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mei/net.c412
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mei/sap.h784
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mei/trace-data.h82
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mei/trace.c15
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mei/trace.h76
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/Makefile12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/agg.c677
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/agg.h127
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/ap.c363
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/ap.h45
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/coex.c40
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/coex.h15
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/constants.h81
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/d3.c2065
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/d3.h51
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/debugfs.c1113
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/debugfs.h244
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/ftm-initiator.c451
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/ftm-initiator.h29
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/fw.c548
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/hcmd.h56
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/iface.c703
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/iface.h250
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/key.c408
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/key.h46
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/led.c100
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/led.h29
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/link.c909
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/link.h131
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/low_latency.c336
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/low_latency.h68
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mac80211.c2702
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mac80211.h13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mcc.c285
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mcc.h17
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mld.c776
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mld.h615
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mlo.c1199
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mlo.h171
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/notif.c719
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/notif.h35
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/phy.c198
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/phy.h60
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/power.c391
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/power.h33
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/ptp.c321
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/ptp.h45
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/regulatory.c383
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/regulatory.h23
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/roc.c261
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/roc.h20
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/rx.c2264
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/rx.h73
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/scan.c2179
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/scan.h173
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/session-protect.c222
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/session-protect.h102
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/sta.c1319
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/sta.h273
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/stats.c521
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/stats.h22
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tests/Makefile5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tests/agg.c663
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tests/hcmd.c62
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tests/link-selection.c339
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tests/link.c110
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tests/module.c11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tests/rx.c353
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tests/utils.c503
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tests/utils.h140
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/thermal.c467
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/thermal.h36
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/time_sync.c240
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/time_sync.h26
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tlc.c739
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tlc.h23
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tx.c1394
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tx.h77
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/Makefile19
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/binding.c173
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/coex.c714
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/constants.h124
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/d3.c3303
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c911
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c2200
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs.h44
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c1454
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c430
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h41
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw.c1637
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/led.c119
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/link.c319
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c1916
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c6489
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mld-key.c408
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mld-mac.c335
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c1283
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c1226
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h2902
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/nvm.c642
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/offloading.c214
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c2143
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c395
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/power.c991
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ptp.c326
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/quota.c258
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rfi.c157
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c735
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.c4180
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.h455
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rx.c1240
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c2631
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/scan.c3647
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sf.c288
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c4318
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.h662
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tdls.c664
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/testmode.h (renamed from drivers/net/wireless/iwlwifi/iwl-6000-hw.h)59
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tests/Makefile3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tests/hcmd.c38
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tests/module.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/time-event.c1502
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/time-event.h229
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/time-sync.c173
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/time-sync.h30
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tt.c864
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c2312
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/utils.c1409
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/vendor-cmd.c153
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-v2.c618
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c264
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c1345
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/internal.h1172
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/rx.c2488
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans-gen2.c654
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans.c4367
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/tx-gen2.c1434
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/tx.c2689
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/iwl-context-info-v2.h344
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/iwl-context-info.h197
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/utils.c104
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/utils.h40
-rw-r--r--drivers/net/wireless/intel/iwlwifi/tests/Makefile7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/tests/devinfo.c316
-rw-r--r--drivers/net/wireless/intel/iwlwifi/tests/module.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/tests/nvm_parse.c72
-rw-r--r--drivers/net/wireless/intel/iwlwifi/tests/utils.c109
-rw-r--r--drivers/net/wireless/intersil/Kconfig17
-rw-r--r--drivers/net/wireless/intersil/Makefile2
-rw-r--r--drivers/net/wireless/intersil/p54/Kconfig (renamed from drivers/net/wireless/p54/Kconfig)42
-rw-r--r--drivers/net/wireless/intersil/p54/Makefile (renamed from drivers/net/wireless/p54/Makefile)4
-rw-r--r--drivers/net/wireless/intersil/p54/eeprom.c981
-rw-r--r--drivers/net/wireless/intersil/p54/eeprom.h242
-rw-r--r--drivers/net/wireless/intersil/p54/fwio.c761
-rw-r--r--drivers/net/wireless/intersil/p54/led.c158
-rw-r--r--drivers/net/wireless/intersil/p54/lmac.h559
-rw-r--r--drivers/net/wireless/intersil/p54/main.c867
-rw-r--r--drivers/net/wireless/intersil/p54/p54.h (renamed from drivers/net/wireless/p54/p54.h)195
-rw-r--r--drivers/net/wireless/intersil/p54/p54pci.c (renamed from drivers/net/wireless/p54/p54pci.c)356
-rw-r--r--drivers/net/wireless/intersil/p54/p54pci.h (renamed from drivers/net/wireless/p54/p54pci.h)19
-rw-r--r--drivers/net/wireless/intersil/p54/p54spi.c (renamed from drivers/net/wireless/p54/p54spi.c)199
-rw-r--r--drivers/net/wireless/intersil/p54/p54spi.h (renamed from drivers/net/wireless/p54/p54spi.h)17
-rw-r--r--drivers/net/wireless/intersil/p54/p54spi_eeprom.h (renamed from drivers/net/wireless/p54/p54spi_eeprom.h)24
-rw-r--r--drivers/net/wireless/intersil/p54/p54usb.c (renamed from drivers/net/wireless/p54/p54usb.c)330
-rw-r--r--drivers/net/wireless/intersil/p54/p54usb.h (renamed from drivers/net/wireless/p54/p54usb.h)27
-rw-r--r--drivers/net/wireless/intersil/p54/txrx.c947
-rw-r--r--drivers/net/wireless/ipw2x00/ieee80211.h1087
-rw-r--r--drivers/net/wireless/ipw2x00/libipw_module.c296
-rw-r--r--drivers/net/wireless/iwlwifi/Makefile21
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-1000.c73
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-fh.h188
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-hw.h311
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-led.c435
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-led.h46
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-rs.c1034
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945.c3011
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945.h314
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-4965-hw.h819
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-4965.c2391
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000-hw.h117
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000.c1734
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-6000.c159
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.c3012
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-calib.h84
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.c2785
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.h592
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-csr.h301
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debug.h216
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debugfs.c659
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-dev.h1180
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom.c733
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom.h407
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fh.h499
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-hcmd.c295
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-helpers.h148
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-io.h410
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-led.c443
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-led.h84
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-power.c278
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-power.h68
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-rfkill.c144
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-rfkill.h48
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-rx.c1139
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-scan.c840
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-spectrum.c198
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-spectrum.h92
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-sta.c1185
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-sta.h70
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-tx.c1491
-rw-r--r--drivers/net/wireless/iwlwifi/iwl3945-base.c4558
-rw-r--r--drivers/net/wireless/iwmc3200wifi/Kconfig23
-rw-r--r--drivers/net/wireless/iwmc3200wifi/Makefile5
-rw-r--r--drivers/net/wireless/iwmc3200wifi/bus.h57
-rw-r--r--drivers/net/wireless/iwmc3200wifi/cfg80211.c409
-rw-r--r--drivers/net/wireless/iwmc3200wifi/cfg80211.h31
-rw-r--r--drivers/net/wireless/iwmc3200wifi/commands.c920
-rw-r--r--drivers/net/wireless/iwmc3200wifi/commands.h419
-rw-r--r--drivers/net/wireless/iwmc3200wifi/debug.h124
-rw-r--r--drivers/net/wireless/iwmc3200wifi/debugfs.c453
-rw-r--r--drivers/net/wireless/iwmc3200wifi/eeprom.c187
-rw-r--r--drivers/net/wireless/iwmc3200wifi/eeprom.h114
-rw-r--r--drivers/net/wireless/iwmc3200wifi/fw.c388
-rw-r--r--drivers/net/wireless/iwmc3200wifi/fw.h100
-rw-r--r--drivers/net/wireless/iwmc3200wifi/hal.c464
-rw-r--r--drivers/net/wireless/iwmc3200wifi/hal.h236
-rw-r--r--drivers/net/wireless/iwmc3200wifi/iwm.h350
-rw-r--r--drivers/net/wireless/iwmc3200wifi/lmac.h457
-rw-r--r--drivers/net/wireless/iwmc3200wifi/main.c680
-rw-r--r--drivers/net/wireless/iwmc3200wifi/netdev.c172
-rw-r--r--drivers/net/wireless/iwmc3200wifi/rfkill.c88
-rw-r--r--drivers/net/wireless/iwmc3200wifi/rx.c1431
-rw-r--r--drivers/net/wireless/iwmc3200wifi/rx.h60
-rw-r--r--drivers/net/wireless/iwmc3200wifi/sdio.c516
-rw-r--r--drivers/net/wireless/iwmc3200wifi/sdio.h67
-rw-r--r--drivers/net/wireless/iwmc3200wifi/tx.c492
-rw-r--r--drivers/net/wireless/iwmc3200wifi/umac.h744
-rw-r--r--drivers/net/wireless/iwmc3200wifi/wext.c723
-rw-r--r--drivers/net/wireless/libertas/11d.c698
-rw-r--r--drivers/net/wireless/libertas/11d.h104
-rw-r--r--drivers/net/wireless/libertas/assoc.c1834
-rw-r--r--drivers/net/wireless/libertas/assoc.h29
-rw-r--r--drivers/net/wireless/libertas/cmd.c2124
-rw-r--r--drivers/net/wireless/libertas/cmd.h87
-rw-r--r--drivers/net/wireless/libertas/cmdresp.c599
-rw-r--r--drivers/net/wireless/libertas/decl.h77
-rw-r--r--drivers/net/wireless/libertas/dev.h412
-rw-r--r--drivers/net/wireless/libertas/ethtool.c196
-rw-r--r--drivers/net/wireless/libertas/host.h305
-rw-r--r--drivers/net/wireless/libertas/hostcmd.h795
-rw-r--r--drivers/net/wireless/libertas/if_cs.c1020
-rw-r--r--drivers/net/wireless/libertas/main.c1733
-rw-r--r--drivers/net/wireless/libertas/persistcfg.c453
-rw-r--r--drivers/net/wireless/libertas/scan.c1209
-rw-r--r--drivers/net/wireless/libertas/scan.h33
-rw-r--r--drivers/net/wireless/libertas/types.h286
-rw-r--r--drivers/net/wireless/libertas/wext.c2266
-rw-r--r--drivers/net/wireless/libertas/wext.h10
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c1188
-rw-r--r--drivers/net/wireless/marvell/Kconfig28
-rw-r--r--drivers/net/wireless/marvell/Makefile7
-rw-r--r--drivers/net/wireless/marvell/libertas/Kconfig38
-rw-r--r--drivers/net/wireless/marvell/libertas/LICENSE (renamed from drivers/net/wireless/libertas/LICENSE)0
-rw-r--r--drivers/net/wireless/marvell/libertas/Makefile (renamed from drivers/net/wireless/libertas/Makefile)14
-rw-r--r--drivers/net/wireless/marvell/libertas/README (renamed from drivers/net/wireless/libertas/README)36
-rw-r--r--drivers/net/wireless/marvell/libertas/cfg.c2220
-rw-r--r--drivers/net/wireless/marvell/libertas/cfg.h22
-rw-r--r--drivers/net/wireless/marvell/libertas/cmd.c1471
-rw-r--r--drivers/net/wireless/marvell/libertas/cmd.h127
-rw-r--r--drivers/net/wireless/marvell/libertas/cmdresp.c352
-rw-r--r--drivers/net/wireless/marvell/libertas/debugfs.c (renamed from drivers/net/wireless/libertas/debugfs.c)440
-rw-r--r--drivers/net/wireless/marvell/libertas/debugfs.h (renamed from drivers/net/wireless/libertas/debugfs.h)1
-rw-r--r--drivers/net/wireless/marvell/libertas/decl.h78
-rw-r--r--drivers/net/wireless/marvell/libertas/defs.h (renamed from drivers/net/wireless/libertas/defs.h)164
-rw-r--r--drivers/net/wireless/marvell/libertas/dev.h208
-rw-r--r--drivers/net/wireless/marvell/libertas/ethtool.c115
-rw-r--r--drivers/net/wireless/marvell/libertas/firmware.c228
-rw-r--r--drivers/net/wireless/marvell/libertas/host.h983
-rw-r--r--drivers/net/wireless/marvell/libertas/if_sdio.c (renamed from drivers/net/wireless/libertas/if_sdio.c)975
-rw-r--r--drivers/net/wireless/marvell/libertas/if_sdio.h (renamed from drivers/net/wireless/libertas/if_sdio.h)13
-rw-r--r--drivers/net/wireless/marvell/libertas/if_spi.c (renamed from drivers/net/wireless/libertas/if_spi.c)872
-rw-r--r--drivers/net/wireless/marvell/libertas/if_spi.h (renamed from drivers/net/wireless/libertas/if_spi.h)82
-rw-r--r--drivers/net/wireless/marvell/libertas/if_usb.c (renamed from drivers/net/wireless/libertas/if_usb.c)472
-rw-r--r--drivers/net/wireless/marvell/libertas/if_usb.h (renamed from drivers/net/wireless/libertas/if_usb.h)18
-rw-r--r--drivers/net/wireless/marvell/libertas/main.c1073
-rw-r--r--drivers/net/wireless/marvell/libertas/mesh.c1161
-rw-r--r--drivers/net/wireless/marvell/libertas/mesh.h76
-rw-r--r--drivers/net/wireless/marvell/libertas/radiotap.h (renamed from drivers/net/wireless/libertas/radiotap.h)9
-rw-r--r--drivers/net/wireless/marvell/libertas/rx.c (renamed from drivers/net/wireless/libertas/rx.c)216
-rw-r--r--drivers/net/wireless/marvell/libertas/tx.c (renamed from drivers/net/wireless/libertas/tx.c)78
-rw-r--r--drivers/net/wireless/marvell/libertas/types.h270
-rw-r--r--drivers/net/wireless/marvell/libertas_tf/Kconfig19
-rw-r--r--drivers/net/wireless/marvell/libertas_tf/Makefile (renamed from drivers/net/wireless/libertas_tf/Makefile)1
-rw-r--r--drivers/net/wireless/marvell/libertas_tf/cmd.c (renamed from drivers/net/wireless/libertas_tf/cmd.c)246
-rw-r--r--drivers/net/wireless/marvell/libertas_tf/deb_defs.h104
-rw-r--r--drivers/net/wireless/marvell/libertas_tf/if_usb.c (renamed from drivers/net/wireless/libertas_tf/if_usb.c)387
-rw-r--r--drivers/net/wireless/marvell/libertas_tf/if_usb.h (renamed from drivers/net/wireless/libertas_tf/if_usb.h)8
-rw-r--r--drivers/net/wireless/marvell/libertas_tf/libertas_tf.h (renamed from drivers/net/wireless/libertas_tf/libertas_tf.h)73
-rw-r--r--drivers/net/wireless/marvell/libertas_tf/main.c (renamed from drivers/net/wireless/libertas_tf/main.c)328
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11ac.c370
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11ac.h33
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11h.c294
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n.c912
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n.h177
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n_aggr.c294
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n_aggr.h21
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c983
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n_rxreorder.h74
-rw-r--r--drivers/net/wireless/marvell/mwifiex/Kconfig46
-rw-r--r--drivers/net/wireless/marvell/mwifiex/Makefile47
-rw-r--r--drivers/net/wireless/marvell/mwifiex/README274
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfg80211.c4923
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfg80211.h17
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfp.c526
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cmdevt.c1691
-rw-r--r--drivers/net/wireless/marvell/mwifiex/debugfs.c1006
-rw-r--r--drivers/net/wireless/marvell/mwifiex/decl.h324
-rw-r--r--drivers/net/wireless/marvell/mwifiex/ethtool.c58
-rw-r--r--drivers/net/wireless/marvell/mwifiex/fw.h2466
-rw-r--r--drivers/net/wireless/marvell/mwifiex/ie.c503
-rw-r--r--drivers/net/wireless/marvell/mwifiex/init.c718
-rw-r--r--drivers/net/wireless/marvell/mwifiex/ioctl.h481
-rw-r--r--drivers/net/wireless/marvell/mwifiex/join.c1601
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.c1877
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.h1700
-rw-r--r--drivers/net/wireless/marvell/mwifiex/pcie.c3283
-rw-r--r--drivers/net/wireless/marvell/mwifiex/pcie.h290
-rw-r--r--drivers/net/wireless/marvell/mwifiex/pcie_quirks.c147
-rw-r--r--drivers/net/wireless/marvell/mwifiex/pcie_quirks.h9
-rw-r--r--drivers/net/wireless/marvell/mwifiex/scan.c2966
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sdio.c3214
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sdio.h379
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_cmd.c2535
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c1443
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_event.c1098
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_ioctl.c1551
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_rx.c271
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_tx.c222
-rw-r--r--drivers/net/wireless/marvell/mwifiex/tdls.c1561
-rw-r--r--drivers/net/wireless/marvell/mwifiex/txrx.c374
-rw-r--r--drivers/net/wireless/marvell/mwifiex/uap_cmd.c1065
-rw-r--r--drivers/net/wireless/marvell/mwifiex/uap_event.c327
-rw-r--r--drivers/net/wireless/marvell/mwifiex/uap_txrx.c530
-rw-r--r--drivers/net/wireless/marvell/mwifiex/usb.c1613
-rw-r--r--drivers/net/wireless/marvell/mwifiex/usb.h128
-rw-r--r--drivers/net/wireless/marvell/mwifiex/util.c843
-rw-r--r--drivers/net/wireless/marvell/mwifiex/util.h89
-rw-r--r--drivers/net/wireless/marvell/mwifiex/wmm.c1532
-rw-r--r--drivers/net/wireless/marvell/mwifiex/wmm.h99
-rw-r--r--drivers/net/wireless/marvell/mwl8k.c6449
-rw-r--r--drivers/net/wireless/mediatek/Kconfig16
-rw-r--r--drivers/net/wireless/mediatek/Makefile3
-rw-r--r--drivers/net/wireless/mediatek/mt76/Kconfig51
-rw-r--r--drivers/net/wireless/mediatek/mt76/Makefile48
-rw-r--r--drivers/net/wireless/mediatek/mt76/agg-rx.c303
-rw-r--r--drivers/net/wireless/mediatek/mt76/channel.c416
-rw-r--r--drivers/net/wireless/mediatek/mt76/debugfs.c126
-rw-r--r--drivers/net/wireless/mediatek/mt76/dma.c1192
-rw-r--r--drivers/net/wireless/mediatek/mt76/dma.h214
-rw-r--r--drivers/net/wireless/mediatek/mt76/eeprom.c467
-rw-r--r--drivers/net/wireless/mediatek/mt76/mac80211.c2123
-rw-r--r--drivers/net/wireless/mediatek/mt76/mcu.c161
-rw-r--r--drivers/net/wireless/mediatek/mt76/mmio.c114
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76.h2110
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/Kconfig12
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/Makefile7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/beacon.c225
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/core.c67
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/debugfs.c118
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/dma.c260
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/eeprom.c187
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/eeprom.h91
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/init.c571
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mac.c1885
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mac.h242
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/main.c755
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mcu.c432
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mcu.h103
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h262
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/pci.c83
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/regs.h780
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/soc.c79
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/Kconfig56
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/Makefile20
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c609
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/dma.c316
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c356
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/eeprom.h116
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/init.c650
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mac.c2360
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mac.h325
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/main.c1372
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mcu.c2566
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mcu.h243
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mmio.c275
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h557
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mt7615_trace.h56
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/pci.c206
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c128
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c295
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/regs.h621
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/sdio.c258
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/sdio_mcu.c180
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/soc.c70
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/testmode.c376
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/trace.c12
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/usb.c286
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c100
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c353
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac.h461
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac2_mac.h365
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.c268
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h400
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c1210
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c3266
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h2070
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/Kconfig29
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/Makefile11
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c350
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h41
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/init.c266
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h85
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/initvals_init.h159
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/initvals_phy.h633
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/main.c93
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/mcu.h46
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h63
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/pci.c327
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/pci_mcu.c133
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/phy.c1215
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/phy.h90
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/usb.c359
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/usb_mcu.c175
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02.h284
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c215
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c157
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c892
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_dfs.h132
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_dma.h65
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c147
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.h185
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mac.c1238
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mac.h208
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c171
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mcu.h100
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c565
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_phy.c204
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_phy.h49
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_regs.h708
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_trace.c12
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_trace.h87
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c183
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_usb.h25
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c286
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c297
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_util.c699
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/Kconfig29
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/Makefile15
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c512
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.h83
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/init.c204
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/mac.c46
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/mac.h24
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/mcu.c108
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/mcu.h68
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h82
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2u.h43
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci.c187
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c320
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c157
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_mcu.c198
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_phy.c311
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/phy.c349
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb.c157
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c261
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c174
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c118
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb_mcu.c255
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb_phy.c201
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/Kconfig25
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/Makefile10
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/coredump.c411
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/coredump.h136
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c1487
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/dma.c655
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c409
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h203
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/init.c1304
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mac.c2402
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mac.h75
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/main.c1818
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mcu.c4126
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mcu.h524
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mmio.c1004
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h621
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/pci.c242
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/regs.h1229
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/soc.c1312
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/testmode.c790
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/testmode.h105
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/Kconfig37
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/Makefile12
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c296
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/init.c350
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mac.c868
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/main.c1590
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mcu.c1492
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mcu.h120
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h338
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/pci.c585
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c129
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/pci_mcu.c63
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/regs.h81
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/sdio.c333
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c199
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/sdio_mcu.c175
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/testmode.c196
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/usb.c348
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/Kconfig30
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/Makefile10
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/debugfs.c319
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/init.c281
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/mac.c1497
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/mac.h23
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/main.c2229
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/mcu.c3903
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/mcu.h714
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h380
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/pci.c639
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/pci_mac.c151
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/pci_mcu.c53
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/regd.c265
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/regd.h19
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/regs.h92
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/testmode.c201
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/usb.c346
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x.h537
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_acpi_sar.c486
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_acpi_sar.h123
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_core.c986
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_debugfs.c168
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_dma.c372
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_mac.c384
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_regs.h487
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_trace.c14
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_trace.h51
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_usb.c317
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/Kconfig21
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/Makefile9
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/coredump.c268
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/coredump.h97
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c1056
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/dma.c1005
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/eeprom.c405
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/eeprom.h79
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/init.c1745
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mac.c3321
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mac.h45
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/main.c2340
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mcu.c4889
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mcu.h974
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mmio.c892
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h902
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/npu.c352
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/pci.c269
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/regs.h791
-rw-r--r--drivers/net/wireless/mediatek/mt76/npu.c501
-rw-r--r--drivers/net/wireless/mediatek/mt76/pci.c70
-rw-r--r--drivers/net/wireless/mediatek/mt76/scan.c174
-rw-r--r--drivers/net/wireless/mediatek/mt76/sdio.c681
-rw-r--r--drivers/net/wireless/mediatek/mt76/sdio.h140
-rw-r--r--drivers/net/wireless/mediatek/mt76/sdio_txrx.c390
-rw-r--r--drivers/net/wireless/mediatek/mt76/testmode.c676
-rw-r--r--drivers/net/wireless/mediatek/mt76/testmode.h200
-rw-r--r--drivers/net/wireless/mediatek/mt76/trace.c15
-rw-r--r--drivers/net/wireless/mediatek/mt76/trace.h111
-rw-r--r--drivers/net/wireless/mediatek/mt76/tx.c929
-rw-r--r--drivers/net/wireless/mediatek/mt76/usb.c1139
-rw-r--r--drivers/net/wireless/mediatek/mt76/usb_trace.c12
-rw-r--r--drivers/net/wireless/mediatek/mt76/usb_trace.h86
-rw-r--r--drivers/net/wireless/mediatek/mt76/util.c138
-rw-r--r--drivers/net/wireless/mediatek/mt76/util.h115
-rw-r--r--drivers/net/wireless/mediatek/mt76/wed.c209
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/Kconfig7
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/Makefile8
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/core.c70
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/debugfs.c138
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/dma.c551
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/dma.h117
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/eeprom.c390
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/eeprom.h143
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/init.c631
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/initvals.h157
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/initvals_phy.h283
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/mac.c593
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/mac.h171
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/main.c432
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/mcu.c535
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/mcu.h86
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/mt7601u.h393
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/phy.c1258
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/regs.h627
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/trace.c13
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/trace.h392
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/tx.c316
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/usb.c382
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/usb.h71
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/util.c34
-rw-r--r--drivers/net/wireless/microchip/Kconfig15
-rw-r--r--drivers/net/wireless/microchip/Makefile2
-rw-r--r--drivers/net/wireless/microchip/wilc1000/Kconfig48
-rw-r--r--drivers/net/wireless/microchip/wilc1000/Makefile11
-rw-r--r--drivers/net/wireless/microchip/wilc1000/cfg80211.c1892
-rw-r--r--drivers/net/wireless/microchip/wilc1000/cfg80211.h26
-rw-r--r--drivers/net/wireless/microchip/wilc1000/fw.h143
-rw-r--r--drivers/net/wireless/microchip/wilc1000/hif.c2021
-rw-r--r--drivers/net/wireless/microchip/wilc1000/hif.h220
-rw-r--r--drivers/net/wireless/microchip/wilc1000/mon.c258
-rw-r--r--drivers/net/wireless/microchip/wilc1000/netdev.c1025
-rw-r--r--drivers/net/wireless/microchip/wilc1000/netdev.h305
-rw-r--r--drivers/net/wireless/microchip/wilc1000/sdio.c1076
-rw-r--r--drivers/net/wireless/microchip/wilc1000/spi.c1374
-rw-r--r--drivers/net/wireless/microchip/wilc1000/wlan.c1810
-rw-r--r--drivers/net/wireless/microchip/wilc1000/wlan.h475
-rw-r--r--drivers/net/wireless/microchip/wilc1000/wlan_cfg.c433
-rw-r--r--drivers/net/wireless/microchip/wilc1000/wlan_cfg.h55
-rw-r--r--drivers/net/wireless/microchip/wilc1000/wlan_if.h814
-rw-r--r--drivers/net/wireless/mwl8k.c3778
-rw-r--r--drivers/net/wireless/netwave_cs.c1387
-rw-r--r--drivers/net/wireless/orinoco/Kconfig120
-rw-r--r--drivers/net/wireless/orinoco/Makefile13
-rw-r--r--drivers/net/wireless/orinoco/airport.c291
-rw-r--r--drivers/net/wireless/orinoco/fw.c390
-rw-r--r--drivers/net/wireless/orinoco/fw.h21
-rw-r--r--drivers/net/wireless/orinoco/hermes.c536
-rw-r--r--drivers/net/wireless/orinoco/hermes.h499
-rw-r--r--drivers/net/wireless/orinoco/hermes_dld.c706
-rw-r--r--drivers/net/wireless/orinoco/hermes_dld.h52
-rw-r--r--drivers/net/wireless/orinoco/hermes_rid.h165
-rw-r--r--drivers/net/wireless/orinoco/hw.c584
-rw-r--r--drivers/net/wireless/orinoco/hw.h47
-rw-r--r--drivers/net/wireless/orinoco/main.c2667
-rw-r--r--drivers/net/wireless/orinoco/main.h63
-rw-r--r--drivers/net/wireless/orinoco/mic.c79
-rw-r--r--drivers/net/wireless/orinoco/mic.h22
-rw-r--r--drivers/net/wireless/orinoco/orinoco.h218
-rw-r--r--drivers/net/wireless/orinoco/orinoco_cs.c541
-rw-r--r--drivers/net/wireless/orinoco/orinoco_nortel.c323
-rw-r--r--drivers/net/wireless/orinoco/orinoco_pci.c265
-rw-r--r--drivers/net/wireless/orinoco/orinoco_pci.h109
-rw-r--r--drivers/net/wireless/orinoco/orinoco_plx.c370
-rw-r--r--drivers/net/wireless/orinoco/orinoco_tmd.c248
-rw-r--r--drivers/net/wireless/orinoco/scan.c233
-rw-r--r--drivers/net/wireless/orinoco/scan.h29
-rw-r--r--drivers/net/wireless/orinoco/spectrum_cs.c529
-rw-r--r--drivers/net/wireless/orinoco/wext.c2325
-rw-r--r--drivers/net/wireless/orinoco/wext.h13
-rw-r--r--drivers/net/wireless/p54/net2280.h452
-rw-r--r--drivers/net/wireless/p54/p54common.c2687
-rw-r--r--drivers/net/wireless/p54/p54common.h644
-rw-r--r--drivers/net/wireless/prism54/Makefile8
-rw-r--r--drivers/net/wireless/prism54/isl_38xx.c256
-rw-r--r--drivers/net/wireless/prism54/isl_38xx.h171
-rw-r--r--drivers/net/wireless/prism54/isl_ioctl.c3247
-rw-r--r--drivers/net/wireless/prism54/isl_ioctl.h50
-rw-r--r--drivers/net/wireless/prism54/isl_oid.h505
-rw-r--r--drivers/net/wireless/prism54/islpci_dev.c957
-rw-r--r--drivers/net/wireless/prism54/islpci_dev.h216
-rw-r--r--drivers/net/wireless/prism54/islpci_eth.c514
-rw-r--r--drivers/net/wireless/prism54/islpci_eth.h72
-rw-r--r--drivers/net/wireless/prism54/islpci_hotplug.c343
-rw-r--r--drivers/net/wireless/prism54/islpci_mgt.c510
-rw-r--r--drivers/net/wireless/prism54/islpci_mgt.h138
-rw-r--r--drivers/net/wireless/prism54/oid_mgt.c904
-rw-r--r--drivers/net/wireless/prism54/oid_mgt.h59
-rw-r--r--drivers/net/wireless/prism54/prismcompat.h43
-rw-r--r--drivers/net/wireless/purelifi/Kconfig17
-rw-r--r--drivers/net/wireless/purelifi/Makefile2
-rw-r--r--drivers/net/wireless/purelifi/plfxlc/Kconfig14
-rw-r--r--drivers/net/wireless/purelifi/plfxlc/Makefile3
-rw-r--r--drivers/net/wireless/purelifi/plfxlc/chip.c98
-rw-r--r--drivers/net/wireless/purelifi/plfxlc/chip.h70
-rw-r--r--drivers/net/wireless/purelifi/plfxlc/firmware.c276
-rw-r--r--drivers/net/wireless/purelifi/plfxlc/intf.h52
-rw-r--r--drivers/net/wireless/purelifi/plfxlc/mac.c759
-rw-r--r--drivers/net/wireless/purelifi/plfxlc/mac.h184
-rw-r--r--drivers/net/wireless/purelifi/plfxlc/usb.c896
-rw-r--r--drivers/net/wireless/purelifi/plfxlc/usb.h198
-rw-r--r--drivers/net/wireless/quantenna/Kconfig17
-rw-r--r--drivers/net/wireless/quantenna/Makefile7
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/Kconfig21
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/Makefile30
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/bus.h158
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/cfg80211.c1291
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/cfg80211.h18
-rw-r--r-- drivers/net/wireless/quantenna/qtnfmac/commands.c | 2770
-rw-r--r-- drivers/net/wireless/quantenna/qtnfmac/commands.h | 82
-rw-r--r-- drivers/net/wireless/quantenna/qtnfmac/core.c | 933
-rw-r--r-- drivers/net/wireless/quantenna/qtnfmac/core.h | 159
-rw-r--r-- drivers/net/wireless/quantenna/qtnfmac/debug.c | 23
-rw-r--r-- drivers/net/wireless/quantenna/qtnfmac/debug.h | 37
-rw-r--r-- drivers/net/wireless/quantenna/qtnfmac/event.c | 797
-rw-r--r-- drivers/net/wireless/quantenna/qtnfmac/event.h | 14
-rw-r--r-- drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c | 493
-rw-r--r-- drivers/net/wireless/quantenna/qtnfmac/pcie/pcie_priv.h | 90
-rw-r--r-- drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c | 1199
-rw-r--r-- drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie_ipc.h | 82
-rw-r--r-- drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie_regs.h | 121
-rw-r--r-- drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c | 1236
-rw-r--r-- drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie_ipc.h | 94
-rw-r--r-- drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie_regs.h | 45
-rw-r--r-- drivers/net/wireless/quantenna/qtnfmac/qlink.h | 1891
-rw-r--r-- drivers/net/wireless/quantenna/qtnfmac/qlink_util.c | 303
-rw-r--r-- drivers/net/wireless/quantenna/qtnfmac/qlink_util.h | 73
-rw-r--r-- drivers/net/wireless/quantenna/qtnfmac/qtn_hw_ids.h | 35
-rw-r--r-- drivers/net/wireless/quantenna/qtnfmac/shm_ipc.c | 162
-rw-r--r-- drivers/net/wireless/quantenna/qtnfmac/shm_ipc.h | 65
-rw-r--r-- drivers/net/wireless/quantenna/qtnfmac/shm_ipc_defs.h | 33
-rw-r--r-- drivers/net/wireless/quantenna/qtnfmac/switchdev.h | 24
-rw-r--r-- drivers/net/wireless/quantenna/qtnfmac/trans.c | 213
-rw-r--r-- drivers/net/wireless/quantenna/qtnfmac/trans.h | 44
-rw-r--r-- drivers/net/wireless/quantenna/qtnfmac/util.c | 123
-rw-r--r-- drivers/net/wireless/quantenna/qtnfmac/util.h | 34
-rw-r--r-- drivers/net/wireless/ralink/Kconfig | 17
-rw-r--r-- drivers/net/wireless/ralink/Makefile | 2
-rw-r--r-- drivers/net/wireless/ralink/rt2x00/Kconfig | 265
-rw-r--r-- drivers/net/wireless/ralink/rt2x00/Makefile (renamed from drivers/net/wireless/rt2x00/Makefile) | 8
-rw-r--r-- drivers/net/wireless/ralink/rt2x00/rt2400pci.c (renamed from drivers/net/wireless/rt2x00/rt2400pci.c) | 1015
-rw-r--r-- drivers/net/wireless/ralink/rt2x00/rt2400pci.h (renamed from drivers/net/wireless/rt2x00/rt2400pci.h) | 56
-rw-r--r-- drivers/net/wireless/ralink/rt2x00/rt2500pci.c (renamed from drivers/net/wireless/rt2x00/rt2500pci.c) | 1089
-rw-r--r-- drivers/net/wireless/ralink/rt2x00/rt2500pci.h (renamed from drivers/net/wireless/rt2x00/rt2500pci.h) | 46
-rw-r--r-- drivers/net/wireless/ralink/rt2x00/rt2500usb.c (renamed from drivers/net/wireless/rt2x00/rt2500usb.c) | 967
-rw-r--r-- drivers/net/wireless/ralink/rt2x00/rt2500usb.h (renamed from drivers/net/wireless/rt2x00/rt2500usb.h) | 51
-rw-r--r-- drivers/net/wireless/ralink/rt2x00/rt2800.h | 3201
-rw-r--r-- drivers/net/wireless/ralink/rt2x00/rt2800lib.c | 12347
-rw-r--r-- drivers/net/wireless/ralink/rt2x00/rt2800lib.h | 276
-rw-r--r-- drivers/net/wireless/ralink/rt2x00/rt2800mmio.c | 859
-rw-r--r-- drivers/net/wireless/ralink/rt2x00/rt2800mmio.h | 155
-rw-r--r-- drivers/net/wireless/ralink/rt2x00/rt2800pci.c | 468
-rw-r--r-- drivers/net/wireless/ralink/rt2x00/rt2800pci.h | 31
-rw-r--r-- drivers/net/wireless/ralink/rt2x00/rt2800soc.c | 366
-rw-r--r-- drivers/net/wireless/ralink/rt2x00/rt2800usb.c | 1276
-rw-r--r-- drivers/net/wireless/ralink/rt2x00/rt2800usb.h | 99
-rw-r--r-- drivers/net/wireless/ralink/rt2x00/rt2x00.h | 1509
-rw-r--r-- drivers/net/wireless/ralink/rt2x00/rt2x00config.c | 280
-rw-r--r-- drivers/net/wireless/ralink/rt2x00/rt2x00crypto.c (renamed from drivers/net/wireless/rt2x00/rt2x00crypto.c) | 64
-rw-r--r-- drivers/net/wireless/ralink/rt2x00/rt2x00debug.c (renamed from drivers/net/wireless/rt2x00/rt2x00debug.c) | 379
-rw-r--r-- drivers/net/wireless/ralink/rt2x00/rt2x00debug.h (renamed from drivers/net/wireless/rt2x00/rt2x00debug.h) | 22
-rw-r--r-- drivers/net/wireless/ralink/rt2x00/rt2x00dev.c | 1642
-rw-r--r-- drivers/net/wireless/ralink/rt2x00/rt2x00dump.h (renamed from drivers/net/wireless/rt2x00/rt2x00dump.h) | 33
-rw-r--r-- drivers/net/wireless/ralink/rt2x00/rt2x00firmware.c (renamed from drivers/net/wireless/rt2x00/rt2x00firmware.c) | 51
-rw-r--r-- drivers/net/wireless/ralink/rt2x00/rt2x00leds.c (renamed from drivers/net/wireless/rt2x00/rt2x00leds.c) | 35
-rw-r--r-- drivers/net/wireless/ralink/rt2x00/rt2x00leds.h | 33
-rw-r--r-- drivers/net/wireless/ralink/rt2x00/rt2x00lib.h (renamed from drivers/net/wireless/rt2x00/rt2x00lib.h) | 216
-rw-r--r-- drivers/net/wireless/ralink/rt2x00/rt2x00link.c | 428
-rw-r--r-- drivers/net/wireless/ralink/rt2x00/rt2x00mac.c | 840
-rw-r--r-- drivers/net/wireless/ralink/rt2x00/rt2x00mmio.c | 201
-rw-r--r-- drivers/net/wireless/ralink/rt2x00/rt2x00mmio.h | 103
-rw-r--r-- drivers/net/wireless/ralink/rt2x00/rt2x00pci.c | 197
-rw-r--r-- drivers/net/wireless/ralink/rt2x00/rt2x00pci.h | 27
-rw-r--r-- drivers/net/wireless/ralink/rt2x00/rt2x00queue.c | 1287
-rw-r--r-- drivers/net/wireless/ralink/rt2x00/rt2x00queue.h (renamed from drivers/net/wireless/rt2x00/rt2x00queue.h) | 256
-rw-r--r-- drivers/net/wireless/ralink/rt2x00/rt2x00reg.h (renamed from drivers/net/wireless/rt2x00/rt2x00reg.h) | 38
-rw-r--r-- drivers/net/wireless/ralink/rt2x00/rt2x00usb.c | 910
-rw-r--r-- drivers/net/wireless/ralink/rt2x00/rt2x00usb.h (renamed from drivers/net/wireless/rt2x00/rt2x00usb.h) | 179
-rw-r--r-- drivers/net/wireless/ralink/rt2x00/rt61pci.c (renamed from drivers/net/wireless/rt2x00/rt61pci.c) | 1551
-rw-r--r-- drivers/net/wireless/ralink/rt2x00/rt61pci.h (renamed from drivers/net/wireless/rt2x00/rt61pci.h) | 147
-rw-r--r-- drivers/net/wireless/ralink/rt2x00/rt73usb.c (renamed from drivers/net/wireless/rt2x00/rt73usb.c) | 1025
-rw-r--r-- drivers/net/wireless/ralink/rt2x00/rt73usb.h (renamed from drivers/net/wireless/rt2x00/rt73usb.h) | 100
-rw-r--r-- drivers/net/wireless/ray_cs.c | 2983
-rw-r--r-- drivers/net/wireless/ray_cs.h | 75
-rw-r--r-- drivers/net/wireless/rayctl.h | 732
-rw-r--r-- drivers/net/wireless/realtek/Kconfig | 21
-rw-r--r-- drivers/net/wireless/realtek/Makefile | 12
-rw-r--r-- drivers/net/wireless/realtek/rtl818x/Kconfig | 89
-rw-r--r-- drivers/net/wireless/realtek/rtl818x/Makefile | 3
-rw-r--r-- drivers/net/wireless/realtek/rtl818x/rtl8180/Makefile | 6
-rw-r--r-- drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c | 1991
-rw-r--r-- drivers/net/wireless/realtek/rtl818x/rtl8180/grf5101.c (renamed from drivers/net/wireless/rtl818x/rtl8180_grf5101.c) | 25
-rw-r--r-- drivers/net/wireless/realtek/rtl818x/rtl8180/grf5101.h (renamed from drivers/net/wireless/rtl818x/rtl8180_grf5101.h) | 7
-rw-r--r-- drivers/net/wireless/realtek/rtl818x/rtl8180/max2820.c (renamed from drivers/net/wireless/rtl818x/rtl8180_max2820.c) | 31
-rw-r--r-- drivers/net/wireless/realtek/rtl818x/rtl8180/max2820.h (renamed from drivers/net/wireless/rtl818x/rtl8180_max2820.h) | 7
-rw-r--r-- drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8180.h (renamed from drivers/net/wireless/rtl818x/rtl8180.h) | 98
-rw-r--r-- drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c (renamed from drivers/net/wireless/rtl818x/rtl8180_rtl8225.c) | 43
-rw-r--r-- drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.h (renamed from drivers/net/wireless/rtl818x/rtl8180_rtl8225.h) | 1
-rw-r--r-- drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225se.c | 430
-rw-r--r-- drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225se.h | 58
-rw-r--r-- drivers/net/wireless/realtek/rtl818x/rtl8180/sa2400.c (renamed from drivers/net/wireless/rtl818x/rtl8180_sa2400.c) | 41
-rw-r--r-- drivers/net/wireless/realtek/rtl818x/rtl8180/sa2400.h (renamed from drivers/net/wireless/rtl818x/rtl8180_sa2400.h) | 7
-rw-r--r-- drivers/net/wireless/realtek/rtl818x/rtl8187/Makefile | 6
-rw-r--r-- drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c (renamed from drivers/net/wireless/rtl818x/rtl8187_dev.c) | 585
-rw-r--r-- drivers/net/wireless/realtek/rtl818x/rtl8187/leds.c (renamed from drivers/net/wireless/rtl818x/rtl8187_leds.c) | 90
-rw-r--r-- drivers/net/wireless/realtek/rtl818x/rtl8187/leds.h (renamed from drivers/net/wireless/rtl818x/rtl8187_leds.h) | 9
-rw-r--r-- drivers/net/wireless/realtek/rtl818x/rtl8187/rfkill.c | 61
-rw-r--r-- drivers/net/wireless/realtek/rtl818x/rtl8187/rfkill.h | 9
-rw-r--r-- drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8187.h (renamed from drivers/net/wireless/rtl818x/rtl8187.h) | 149
-rw-r--r-- drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c (renamed from drivers/net/wireless/rtl818x/rtl8187_rtl8225.c) | 149
-rw-r--r-- drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.h (renamed from drivers/net/wireless/rtl818x/rtl8187_rtl8225.h) | 9
-rw-r--r-- drivers/net/wireless/realtek/rtl818x/rtl818x.h | 402
-rw-r--r-- drivers/net/wireless/realtek/rtl8xxxu/8188e.c | 1885
-rw-r--r-- drivers/net/wireless/realtek/rtl8xxxu/8188f.c | 1765
-rw-r--r-- drivers/net/wireless/realtek/rtl8xxxu/8192c.c | 741
-rw-r--r-- drivers/net/wireless/realtek/rtl8xxxu/8192e.c | 1767
-rw-r--r-- drivers/net/wireless/realtek/rtl8xxxu/8192f.c | 2091
-rw-r--r-- drivers/net/wireless/realtek/rtl8xxxu/8710b.c | 1875
-rw-r--r-- drivers/net/wireless/realtek/rtl8xxxu/8723a.c | 648
-rw-r--r-- drivers/net/wireless/realtek/rtl8xxxu/8723b.c | 1767
-rw-r--r-- drivers/net/wireless/realtek/rtl8xxxu/Kconfig | 37
-rw-r--r-- drivers/net/wireless/realtek/rtl8xxxu/Makefile | 6
-rw-r--r-- drivers/net/wireless/realtek/rtl8xxxu/core.c | 8239
-rw-r--r-- drivers/net/wireless/realtek/rtl8xxxu/regs.h | 1382
-rw-r--r-- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h | 2174
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/Kconfig | 171
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/Makefile | 35
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/base.c | 2732
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/base.h | 129
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/btcoexist/Makefile | 10
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/btcoexist/halbt_precomp.h | 64
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c | 3196
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.h | 163
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c | 3437
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.h | 203
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c | 4240
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.h | 183
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c | 2967
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.h | 171
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c | 4205
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.h | 211
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8822bwifionly.c | 44
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8822bwifionly.h | 14
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c | 1893
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h | 824
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c | 534
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.h | 54
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/cam.c | 321
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/cam.h | 28
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/core.c | 1958
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/core.h | 66
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/debug.c | 514
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/debug.h | 209
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/efuse.c | 1373
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/efuse.h | 98
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/pci.c | 2363
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/pci.h | 282
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/ps.c | 983
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/ps.h | 28
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/pwrseqcmd.h | 72
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rc.c | 318
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rc.h | 26
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/regd.c | 434
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/regd.h | 41
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8188ee/Makefile | 14
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8188ee/def.h | 183
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c | 1784
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.h | 260
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c | 721
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.h | 233
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c | 2515
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.h | 41
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8188ee/led.c | 111
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8188ee/led.h | 11
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c | 2293
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.h | 211
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8188ee/pwrseq.c | 83
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8188ee/pwrseq.h | 289
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8188ee/reg.h | 2249
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8188ee/rf.c | 486
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8188ee/rf.h | 20
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c | 390
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8188ee/table.c | 615
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8188ee/table.h | 19
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c | 832
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.h | 801
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192c/Makefile | 8
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c | 1738
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.h | 121
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c | 770
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.h | 75
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192c/main.c | 13
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c | 1615
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.h | 227
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192ce/Makefile | 12
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192ce/def.h | 113
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192ce/dm.c | 84
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192ce/dm.h | 60
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c | 2324
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.h | 53
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192ce/led.c | 106
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192ce/led.h | 11
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c | 517
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.h | 116
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192ce/reg.h | 2030
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192ce/rf.c | 482
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192ce/rf.h | 16
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c | 370
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192ce/table.c | 1195
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192ce/table.h | 30
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c | 686
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.h | 531
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192cu/Makefile | 13
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192cu/def.h | 20
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192cu/dm.c | 90
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192cu/dm.h | 9
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c | 2171
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.h | 83
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192cu/led.c | 80
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192cu/led.h | 11
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c | 747
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.h | 132
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c | 509
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.h | 24
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192cu/reg.h | 4
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c | 442
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.h | 23
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c | 385
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192cu/table.c | 1862
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192cu/table.h | 45
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c | 643
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.h | 397
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192d/Makefile | 11
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192d/def.h | 175
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192d/dm_common.c | 1061
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192d/dm_common.h | 79
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192d/fw_common.c | 370
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192d/fw_common.h | 49
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192d/hw_common.c | 1225
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192d/hw_common.h | 24
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192d/main.c | 9
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192d/phy_common.c | 856
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192d/phy_common.h | 111
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192d/reg.h | 1393
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192d/rf_common.c | 359
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192d/rf_common.h | 13
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192d/trx_common.c | 516
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192d/trx_common.h | 405
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192de/Makefile | 13
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.c | 184
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.h | 10
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c | 320
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.h | 10
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c | 1087
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.h | 29
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.c | 114
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.h | 11
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c | 2726
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.h | 89
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.c | 227
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.h | 11
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c | 397
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.h | 11
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192de/table.c | 1663
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192de/table.h | 30
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c | 314
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h | 136
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192du/Makefile | 13
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192du/dm.c | 120
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192du/dm.h | 10
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192du/fw.c | 63
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192du/fw.h | 9
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192du/hw.c | 1212
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192du/hw.h | 22
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192du/led.c | 10
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192du/led.h | 9
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192du/phy.c | 3118
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192du/phy.h | 31
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192du/rf.c | 240
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192du/rf.h | 11
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192du/sw.c | 394
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192du/table.c | 1675
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192du/table.h | 29
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192du/trx.c | 372
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192du/trx.h | 60
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192ee/Makefile | 15
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192ee/def.h | 71
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192ee/dm.c | 1086
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192ee/dm.h | 229
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c | 863
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.h | 159
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c | 2609
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.h | 41
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192ee/led.c | 105
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192ee/led.h | 11
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c | 3164
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.h | 130
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192ee/pwrseq.c | 90
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192ee/pwrseq.h | 318
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192ee/reg.h | 2211
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192ee/rf.c | 129
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192ee/rf.h | 13
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c | 378
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192ee/table.c | 858
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192ee/table.h | 21
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c | 1045
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.h | 744
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192se/Makefile | 14
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192se/def.h | 431
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192se/dm.c | 717
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192se/dm.h | 67
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c | 616
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.h | 350
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c | 2512
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.h | 53
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192se/led.c | 106
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192se/led.h | 11
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c | 1610
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.h | 77
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192se/reg.h | 1143
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.c | 507
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.h | 18
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c | 418
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192se/table.c | 607
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192se/table.h | 31
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c | 638
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.h | 24
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8723ae/Makefile | 17
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8723ae/btc.h | 15
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8723ae/def.h | 160
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c | 840
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.h | 110
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c | 581
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.h | 41
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_bt_coexist.c | 515
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_bt_coexist.h | 133
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.c | 1756
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.h | 138
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c | 2393
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.h | 46
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8723ae/led.c | 114
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8723ae/led.h | 11
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c | 1665
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.h | 180
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8723ae/pwrseq.c | 90
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8723ae/pwrseq.h | 318
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8723ae/reg.h | 2098
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8723ae/rf.c | 491
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8723ae/rf.h | 17
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c | 386
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8723ae/table.c | 704
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8723ae/table.h | 20
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c | 687
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.h | 534
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8723be/Makefile | 15
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8723be/def.h | 62
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8723be/dm.c | 1266
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8723be/dm.h | 248
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c | 685
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.h | 123
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c | 2724
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.h | 41
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8723be/led.c | 112
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8723be/led.h | 12
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c | 2656
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.h | 113
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8723be/pwrseq.c | 84
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8723be/pwrseq.h | 401
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8723be/reg.h | 2273
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8723be/rf.c | 489
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8723be/rf.h | 20
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c | 394
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8723be/table.c | 597
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8723be/table.h | 19
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c | 743
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.h | 646
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8723com/Makefile | 8
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8723com/dm_common.c | 43
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8723com/dm_common.h | 11
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c | 217
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.h | 70
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8723com/main.c | 11
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8723com/phy_common.c | 411
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8723com/phy_common.h | 66
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8821ae/Makefile | 15
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8821ae/def.h | 269
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c | 2795
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.h | 293
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c | 1901
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.h | 293
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c | 4038
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.h | 48
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8821ae/led.c | 191
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8821ae/led.h | 14
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c | 4787
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.h | 236
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8821ae/pwrseq.c | 160
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8821ae/pwrseq.h | 716
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8821ae/reg.h | 2444
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8821ae/rf.c | 440
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8821ae/rf.h | 20
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c | 446
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c | 4033
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.h | 34
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c | 990
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.h | 652
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/stats.c | 247
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/stats.h | 20
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/usb.c | 1079
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/usb.h | 142
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/wifi.h | 3038
-rw-r--r-- drivers/net/wireless/realtek/rtw88/Kconfig | 271
-rw-r--r-- drivers/net/wireless/realtek/rtw88/Makefile | 113
-rw-r--r-- drivers/net/wireless/realtek/rtw88/bf.c | 416
-rw-r--r-- drivers/net/wireless/realtek/rtw88/bf.h | 124
-rw-r--r-- drivers/net/wireless/realtek/rtw88/coex.c | 4188
-rw-r--r-- drivers/net/wireless/realtek/rtw88/coex.h | 433
-rw-r--r-- drivers/net/wireless/realtek/rtw88/debug.c | 1375
-rw-r--r-- drivers/net/wireless/realtek/rtw88/debug.h | 76
-rw-r--r-- drivers/net/wireless/realtek/rtw88/efuse.c | 187
-rw-r--r-- drivers/net/wireless/realtek/rtw88/efuse.h | 29
-rw-r--r-- drivers/net/wireless/realtek/rtw88/fw.c | 2468
-rw-r--r-- drivers/net/wireless/realtek/rtw88/fw.h | 900
-rw-r--r-- drivers/net/wireless/realtek/rtw88/hci.h | 289
-rw-r--r-- drivers/net/wireless/realtek/rtw88/led.c | 74
-rw-r--r-- drivers/net/wireless/realtek/rtw88/led.h | 25
-rw-r--r-- drivers/net/wireless/realtek/rtw88/mac.c | 1421
-rw-r--r-- drivers/net/wireless/realtek/rtw88/mac.h | 51
-rw-r--r-- drivers/net/wireless/realtek/rtw88/mac80211.c | 991
-rw-r--r-- drivers/net/wireless/realtek/rtw88/main.c | 2488
-rw-r--r-- drivers/net/wireless/realtek/rtw88/main.h | 2280
-rw-r--r-- drivers/net/wireless/realtek/rtw88/pci.c | 1887
-rw-r--r-- drivers/net/wireless/realtek/rtw88/pci.h | 280
-rw-r--r-- drivers/net/wireless/realtek/rtw88/phy.c | 2679
-rw-r--r-- drivers/net/wireless/realtek/rtw88/phy.h | 202
-rw-r--r-- drivers/net/wireless/realtek/rtw88/ps.c | 343
-rw-r--r-- drivers/net/wireless/realtek/rtw88/ps.h | 28
-rw-r--r-- drivers/net/wireless/realtek/rtw88/reg.h | 1004
-rw-r--r-- drivers/net/wireless/realtek/rtw88/regd.c | 541
-rw-r--r-- drivers/net/wireless/realtek/rtw88/regd.h | 73
-rw-r--r-- drivers/net/wireless/realtek/rtw88/rtw8703b.c | 2003
-rw-r--r-- drivers/net/wireless/realtek/rtw88/rtw8703b.h | 102
-rw-r--r-- drivers/net/wireless/realtek/rtw88/rtw8703b_tables.c | 902
-rw-r--r-- drivers/net/wireless/realtek/rtw88/rtw8703b_tables.h | 14
-rw-r--r-- drivers/net/wireless/realtek/rtw88/rtw8723cs.c | 34
-rw-r--r-- drivers/net/wireless/realtek/rtw88/rtw8723d.c | 2207
-rw-r--r-- drivers/net/wireless/realtek/rtw88/rtw8723d.h | 39
-rw-r--r-- drivers/net/wireless/realtek/rtw88/rtw8723d_table.c | 1196
-rw-r--r-- drivers/net/wireless/realtek/rtw88/rtw8723d_table.h | 15
-rw-r--r-- drivers/net/wireless/realtek/rtw88/rtw8723de.c | 32
-rw-r--r-- drivers/net/wireless/realtek/rtw88/rtw8723ds.c | 41
-rw-r--r-- drivers/net/wireless/realtek/rtw88/rtw8723du.c | 36
-rw-r--r-- drivers/net/wireless/realtek/rtw88/rtw8723x.c | 788
-rw-r--r-- drivers/net/wireless/realtek/rtw88/rtw8723x.h | 524
-rw-r--r-- drivers/net/wireless/realtek/rtw88/rtw8812a.c | 1125
-rw-r--r-- drivers/net/wireless/realtek/rtw88/rtw8812a.h | 10
-rw-r--r-- drivers/net/wireless/realtek/rtw88/rtw8812a_table.c | 2812
-rw-r--r-- drivers/net/wireless/realtek/rtw88/rtw8812a_table.h | 26
-rw-r--r-- drivers/net/wireless/realtek/rtw88/rtw8812au.c | 94
-rw-r--r-- drivers/net/wireless/realtek/rtw88/rtw8814a.c | 2270
-rw-r--r-- drivers/net/wireless/realtek/rtw88/rtw8814a.h | 62
-rw-r--r-- drivers/net/wireless/realtek/rtw88/rtw8814a_table.c | 23930
-rw-r--r-- drivers/net/wireless/realtek/rtw88/rtw8814a_table.h | 40
-rw-r--r-- drivers/net/wireless/realtek/rtw88/rtw8814ae.c | 31
-rw-r--r-- drivers/net/wireless/realtek/rtw88/rtw8814au.c | 54
-rw-r--r-- drivers/net/wireless/realtek/rtw88/rtw8821a.c | 1226
-rw-r--r-- drivers/net/wireless/realtek/rtw88/rtw8821a.h | 10
-rw-r--r-- drivers/net/wireless/realtek/rtw88/rtw8821a_table.c | 2350
-rw-r--r-- drivers/net/wireless/realtek/rtw88/rtw8821a_table.h | 21
-rw-r--r-- drivers/net/wireless/realtek/rtw88/rtw8821au.c | 78
-rw-r--r-- drivers/net/wireless/realtek/rtw88/rtw8821c.c | 2064
-rw-r--r-- drivers/net/wireless/realtek/rtw88/rtw8821c.h | 286
-rw-r--r-- drivers/net/wireless/realtek/rtw88/rtw8821c_table.c | 7504
-rw-r--r-- drivers/net/wireless/realtek/rtw88/rtw8821c_table.h | 16
-rw-r--r-- drivers/net/wireless/realtek/rtw88/rtw8821ce.c | 36
-rw-r--r-- drivers/net/wireless/realtek/rtw88/rtw8821cs.c | 36
-rw-r--r-- drivers/net/wireless/realtek/rtw88/rtw8821cu.c | 60
-rw-r--r-- drivers/net/wireless/realtek/rtw88/rtw8822b.c | 2612
-rw-r--r-- drivers/net/wireless/realtek/rtw88/rtw8822b.h | 189
-rw-r--r-- drivers/net/wireless/realtek/rtw88/rtw8822b_table.c | 22204
-rw-r--r-- drivers/net/wireless/realtek/rtw88/rtw8822b_table.h | 20
-rw-r--r-- drivers/net/wireless/realtek/rtw88/rtw8822be.c | 32
-rw-r--r-- drivers/net/wireless/realtek/rtw88/rtw8822bs.c | 36
-rw-r--r-- drivers/net/wireless/realtek/rtw88/rtw8822bu.c | 104
-rw-r--r-- drivers/net/wireless/realtek/rtw88/rtw8822c.c | 5445
-rw-r--r-- drivers/net/wireless/realtek/rtw88/rtw8822c.h | 429
-rw-r--r-- drivers/net/wireless/realtek/rtw88/rtw8822c_table.c | 46105
-rw-r--r-- drivers/net/wireless/realtek/rtw88/rtw8822c_table.h | 21
-rw-r--r-- drivers/net/wireless/realtek/rtw88/rtw8822ce.c | 36
-rw-r--r-- drivers/net/wireless/realtek/rtw88/rtw8822cs.c | 36
-rw-r--r-- drivers/net/wireless/realtek/rtw88/rtw8822cu.c | 46
-rw-r--r-- drivers/net/wireless/realtek/rtw88/rtw88xxa.c | 1989
-rw-r--r-- drivers/net/wireless/realtek/rtw88/rtw88xxa.h | 175
-rw-r--r-- drivers/net/wireless/realtek/rtw88/rx.c | 314
-rw-r--r-- drivers/net/wireless/realtek/rtw88/rx.h | 65
-rw-r--r-- drivers/net/wireless/realtek/rtw88/sar.c | 114
-rw-r--r-- drivers/net/wireless/realtek/rtw88/sar.h | 22
-rw-r--r-- drivers/net/wireless/realtek/rtw88/sdio.c | 1437
-rw-r--r-- drivers/net/wireless/realtek/rtw88/sdio.h | 178
-rw-r--r-- drivers/net/wireless/realtek/rtw88/sec.c | 145
-rw-r--r-- drivers/net/wireless/realtek/rtw88/sec.h | 40
-rw-r--r-- drivers/net/wireless/realtek/rtw88/tx.c | 752
-rw-r--r-- drivers/net/wireless/realtek/rtw88/tx.h | 143
-rw-r--r-- drivers/net/wireless/realtek/rtw88/usb.c | 1381
-rw-r--r-- drivers/net/wireless/realtek/rtw88/usb.h | 103
-rw-r--r-- drivers/net/wireless/realtek/rtw88/util.c | 208
-rw-r--r-- drivers/net/wireless/realtek/rtw88/util.h | 43
-rw-r--r-- drivers/net/wireless/realtek/rtw88/wow.c | 913
-rw-r--r-- drivers/net/wireless/realtek/rtw88/wow.h | 58
-rw-r--r-- drivers/net/wireless/realtek/rtw89/Kconfig | 181
-rw-r--r-- drivers/net/wireless/realtek/rtw89/Makefile | 98
-rw-r--r-- drivers/net/wireless/realtek/rtw89/acpi.c | 1281
-rw-r--r-- drivers/net/wireless/realtek/rtw89/acpi.h | 298
-rw-r--r-- drivers/net/wireless/realtek/rtw89/cam.c | 1142
-rw-r--r-- drivers/net/wireless/realtek/rtw89/cam.h | 352
-rw-r--r-- drivers/net/wireless/realtek/rtw89/chan.c | 3448
-rw-r--r-- drivers/net/wireless/realtek/rtw89/chan.h | 223
-rw-r--r-- drivers/net/wireless/realtek/rtw89/coex.c | 11906
-rw-r--r-- drivers/net/wireless/realtek/rtw89/coex.h | 387
-rw-r--r-- drivers/net/wireless/realtek/rtw89/core.c | 6648
-rw-r--r-- drivers/net/wireless/realtek/rtw89/core.h | 7686
-rw-r--r-- drivers/net/wireless/realtek/rtw89/debug.c | 4869
-rw-r--r-- drivers/net/wireless/realtek/rtw89/debug.h | 100
-rw-r--r-- drivers/net/wireless/realtek/rtw89/efuse.c | 506
-rw-r--r-- drivers/net/wireless/realtek/rtw89/efuse.h | 30
-rw-r--r-- drivers/net/wireless/realtek/rtw89/efuse_be.c | 514
-rw-r--r-- drivers/net/wireless/realtek/rtw89/fw.c | 10212
-rw-r--r-- drivers/net/wireless/realtek/rtw89/fw.h | 5333
-rw-r--r-- drivers/net/wireless/realtek/rtw89/mac.c | 7240
-rw-r--r-- drivers/net/wireless/realtek/rtw89/mac.h | 1722
-rw-r--r-- drivers/net/wireless/realtek/rtw89/mac80211.c | 2006
-rw-r--r-- drivers/net/wireless/realtek/rtw89/mac_be.c | 2657
-rw-r--r-- drivers/net/wireless/realtek/rtw89/pci.c | 4842
-rw-r--r-- drivers/net/wireless/realtek/rtw89/pci.h | 1930
-rw-r--r-- drivers/net/wireless/realtek/rtw89/pci_be.c | 708
-rw-r--r-- drivers/net/wireless/realtek/rtw89/phy.c | 8089
-rw-r--r-- drivers/net/wireless/realtek/rtw89/phy.h | 1064
-rw-r--r-- drivers/net/wireless/realtek/rtw89/phy_be.c | 1021
-rw-r--r-- drivers/net/wireless/realtek/rtw89/ps.c | 591
-rw-r--r-- drivers/net/wireless/realtek/rtw89/ps.h | 52
-rw-r--r-- drivers/net/wireless/realtek/rtw89/reg.h | 9522
-rw-r--r-- drivers/net/wireless/realtek/rtw89/regd.c | 1185
-rw-r--r-- drivers/net/wireless/realtek/rtw89/rtw8851b.c | 2721
-rw-r--r-- drivers/net/wireless/realtek/rtw89/rtw8851b.h | 76
-rw-r--r-- drivers/net/wireless/realtek/rtw89/rtw8851b_rfk.c | 3784
-rw-r--r-- drivers/net/wireless/realtek/rtw89/rtw8851b_rfk.h | 34
-rw-r--r-- drivers/net/wireless/realtek/rtw89/rtw8851b_rfk_table.c | 499
-rw-r--r-- drivers/net/wireless/realtek/rtw89/rtw8851b_rfk_table.h | 38
-rw-r--r-- drivers/net/wireless/realtek/rtw89/rtw8851b_table.c | 14916
-rw-r--r-- drivers/net/wireless/realtek/rtw89/rtw8851b_table.h | 18
-rw-r--r-- drivers/net/wireless/realtek/rtw89/rtw8851be.c | 102
-rw-r--r-- drivers/net/wireless/realtek/rtw89/rtw8851bu.c | 66
-rw-r--r-- drivers/net/wireless/realtek/rtw89/rtw8852a.c | 2406
-rw-r--r-- drivers/net/wireless/realtek/rtw89/rtw8852a.h | 110
-rw-r--r-- drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.c | 3866
-rw-r--r-- drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.h | 28
-rw-r--r-- drivers/net/wireless/realtek/rtw89/rtw8852a_rfk_table.c | 1607
-rw-r--r-- drivers/net/wireless/realtek/rtw89/rtw8852a_rfk_table.h | 86
-rw-r--r-- drivers/net/wireless/realtek/rtw89/rtw8852a_table.c | 51062
-rw-r--r-- drivers/net/wireless/realtek/rtw89/rtw8852a_table.h | 18
-rw-r--r-- drivers/net/wireless/realtek/rtw89/rtw8852ae.c | 104
-rw-r--r-- drivers/net/wireless/realtek/rtw89/rtw8852au.c | 79
-rw-r--r-- drivers/net/wireless/realtek/rtw89/rtw8852b.c | 1033
-rw-r--r-- drivers/net/wireless/realtek/rtw89/rtw8852b.h | 15
-rw-r--r-- drivers/net/wireless/realtek/rtw89/rtw8852b_common.c | 2106
-rw-r--r-- drivers/net/wireless/realtek/rtw89/rtw8852b_common.h | 400
-rw-r--r-- drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.c | 4212
-rw-r--r-- drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.h | 34
-rw-r--r-- drivers/net/wireless/realtek/rtw89/rtw8852b_rfk_table.c | 794
-rw-r--r-- drivers/net/wireless/realtek/rtw89/rtw8852b_rfk_table.h | 62
-rw-r--r-- drivers/net/wireless/realtek/rtw89/rtw8852b_table.c | 22927
-rw-r--r-- drivers/net/wireless/realtek/rtw89/rtw8852b_table.h | 18
-rw-r--r-- drivers/net/wireless/realtek/rtw89/rtw8852be.c | 106
-rw-r--r-- drivers/net/wireless/realtek/rtw89/rtw8852bt.c | 890
-rw-r--r-- drivers/net/wireless/realtek/rtw89/rtw8852bt.h | 15
-rw-r--r-- drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.c | 4279
-rw-r--r-- drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.h | 34
-rw-r--r-- drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk_table.c | 490
-rw-r--r-- drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk_table.h | 38
-rw-r--r-- drivers/net/wireless/realtek/rtw89/rtw8852bte.c | 108
-rw-r--r-- drivers/net/wireless/realtek/rtw89/rtw8852bu.c | 81
-rw-r--r-- drivers/net/wireless/realtek/rtw89/rtw8852c.c | 3251
-rw-r--r-- drivers/net/wireless/realtek/rtw89/rtw8852c.h | 103
-rw-r--r-- drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c | 4491
-rw-r--r-- drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.h | 37
-rw-r--r-- drivers/net/wireless/realtek/rtw89/rtw8852c_rfk_table.c | 781
-rw-r--r-- drivers/net/wireless/realtek/rtw89/rtw8852c_rfk_table.h | 67
-rw-r--r-- drivers/net/wireless/realtek/rtw89/rtw8852c_table.c | 57159
-rw-r--r-- drivers/net/wireless/realtek/rtw89/rtw8852c_table.h | 19
-rw-r--r-- drivers/net/wireless/realtek/rtw89/rtw8852ce.c | 131
-rw-r--r-- drivers/net/wireless/realtek/rtw89/rtw8852cu.c | 69
-rw-r--r-- drivers/net/wireless/realtek/rtw89/rtw8922a.c | 3009
-rw-r--r-- drivers/net/wireless/realtek/rtw89/rtw8922a.h | 74
-rw-r--r-- drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.c | 353
-rw-r--r-- drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.h | 18
-rw-r--r-- drivers/net/wireless/realtek/rtw89/rtw8922ae.c | 119
-rw-r--r-- drivers/net/wireless/realtek/rtw89/sar.c | 907
-rw-r--r-- drivers/net/wireless/realtek/rtw89/sar.h | 44
-rw-r--r-- drivers/net/wireless/realtek/rtw89/ser.c | 846
-rw-r--r-- drivers/net/wireless/realtek/rtw89/ser.h | 15
-rw-r--r-- drivers/net/wireless/realtek/rtw89/txrx.h | 760
-rw-r--r-- drivers/net/wireless/realtek/rtw89/usb.c | 1071
-rw-r--r-- drivers/net/wireless/realtek/rtw89/usb.h | 77
-rw-r--r-- drivers/net/wireless/realtek/rtw89/util.c | 162
-rw-r--r-- drivers/net/wireless/realtek/rtw89/util.h | 82
-rw-r--r-- drivers/net/wireless/realtek/rtw89/wow.c | 1820
-rw-r--r-- drivers/net/wireless/realtek/rtw89/wow.h | 147
-rw-r--r-- drivers/net/wireless/rndis_wlan.c | 2710
-rw-r--r-- drivers/net/wireless/rsi/Kconfig | 57
-rw-r--r-- drivers/net/wireless/rsi/Makefile | 15
-rw-r--r-- drivers/net/wireless/rsi/rsi_91x_coex.c | 178
-rw-r--r-- drivers/net/wireless/rsi/rsi_91x_core.c | 501
-rw-r--r-- drivers/net/wireless/rsi/rsi_91x_debugfs.c | 328
-rw-r--r-- drivers/net/wireless/rsi/rsi_91x_hal.c | 1168
-rw-r--r-- drivers/net/wireless/rsi/rsi_91x_mac80211.c | 2116
-rw-r--r-- drivers/net/wireless/rsi/rsi_91x_main.c | 460
-rw-r--r-- drivers/net/wireless/rsi/rsi_91x_mgmt.c | 2262
-rw-r--r-- drivers/net/wireless/rsi/rsi_91x_ps.c | 145
-rw-r--r-- drivers/net/wireless/rsi/rsi_91x_sdio.c | 1529
-rw-r--r-- drivers/net/wireless/rsi/rsi_91x_sdio_ops.c | 415
-rw-r--r-- drivers/net/wireless/rsi/rsi_91x_usb.c | 937
-rw-r--r-- drivers/net/wireless/rsi/rsi_91x_usb_ops.c | 61
-rw-r--r-- drivers/net/wireless/rsi/rsi_boot_params.h | 193
-rw-r--r-- drivers/net/wireless/rsi/rsi_coex.h | 37
-rw-r--r-- drivers/net/wireless/rsi/rsi_common.h | 92
-rw-r--r-- drivers/net/wireless/rsi/rsi_debugfs.h | 47
-rw-r--r-- drivers/net/wireless/rsi/rsi_hal.h | 230
-rw-r--r-- drivers/net/wireless/rsi/rsi_main.h | 414
-rw-r--r-- drivers/net/wireless/rsi/rsi_mgmt.h | 758
-rw-r--r-- drivers/net/wireless/rsi/rsi_ps.h | 63
-rw-r--r-- drivers/net/wireless/rsi/rsi_sdio.h | 138
-rw-r--r-- drivers/net/wireless/rsi/rsi_usb.h | 85
-rw-r--r-- drivers/net/wireless/rt2x00/Kconfig | 144
-rw-r--r-- drivers/net/wireless/rt2x00/rt2800usb.c | 3066
-rw-r--r-- drivers/net/wireless/rt2x00/rt2800usb.h | 1945
-rw-r--r-- drivers/net/wireless/rt2x00/rt2x00.h | 1001
-rw-r--r-- drivers/net/wireless/rt2x00/rt2x00config.c | 210
-rw-r--r-- drivers/net/wireless/rt2x00/rt2x00dev.c | 995
-rw-r--r-- drivers/net/wireless/rt2x00/rt2x00ht.c | 69
-rw-r--r-- drivers/net/wireless/rt2x00/rt2x00leds.h | 50
-rw-r--r-- drivers/net/wireless/rt2x00/rt2x00link.c | 471
-rw-r--r-- drivers/net/wireless/rt2x00/rt2x00mac.c | 689
-rw-r--r-- drivers/net/wireless/rt2x00/rt2x00pci.c | 409
-rw-r--r-- drivers/net/wireless/rt2x00/rt2x00pci.h | 144
-rw-r--r-- drivers/net/wireless/rt2x00/rt2x00queue.c | 844
-rw-r--r-- drivers/net/wireless/rt2x00/rt2x00rfkill.c | 127
-rw-r--r-- drivers/net/wireless/rt2x00/rt2x00usb.c | 732
-rw-r--r-- drivers/net/wireless/rtl818x/Makefile | 7
-rw-r--r-- drivers/net/wireless/rtl818x/rtl8180_dev.c | 1089
-rw-r--r-- drivers/net/wireless/rtl818x/rtl818x.h | 232
-rw-r--r-- drivers/net/wireless/silabs/Kconfig | 18
-rw-r--r-- drivers/net/wireless/silabs/Makefile | 3
-rw-r--r-- drivers/net/wireless/silabs/wfx/Kconfig | 13
-rw-r--r-- drivers/net/wireless/silabs/wfx/Makefile | 25
-rw-r--r-- drivers/net/wireless/silabs/wfx/bh.c | 324
-rw-r--r-- drivers/net/wireless/silabs/wfx/bh.h | 34
-rw-r--r-- drivers/net/wireless/silabs/wfx/bus.h | 37
-rw-r--r-- drivers/net/wireless/silabs/wfx/bus_sdio.c | 326
-rw-r--r-- drivers/net/wireless/silabs/wfx/bus_spi.c | 321
-rw-r--r-- drivers/net/wireless/silabs/wfx/data_rx.c | 93
-rw-r--r-- drivers/net/wireless/silabs/wfx/data_rx.h | 17
-rw-r--r-- drivers/net/wireless/silabs/wfx/data_tx.c | 594
-rw-r--r-- drivers/net/wireless/silabs/wfx/data_tx.h | 53
-rw-r--r-- drivers/net/wireless/silabs/wfx/debug.c | 331
-rw-r--r-- drivers/net/wireless/silabs/wfx/debug.h | 19
-rw-r--r-- drivers/net/wireless/silabs/wfx/fwio.c | 388
-rw-r--r-- drivers/net/wireless/silabs/wfx/fwio.h | 15
-rw-r--r-- drivers/net/wireless/silabs/wfx/hif_api_cmd.h | 553
-rw-r--r-- drivers/net/wireless/silabs/wfx/hif_api_general.h | 252
-rw-r--r-- drivers/net/wireless/silabs/wfx/hif_api_mib.h | 346
-rw-r--r-- drivers/net/wireless/silabs/wfx/hif_rx.c | 391
-rw-r--r-- drivers/net/wireless/silabs/wfx/hif_rx.h | 17
-rw-r--r-- drivers/net/wireless/silabs/wfx/hif_tx.c | 537
-rw-r--r-- drivers/net/wireless/silabs/wfx/hif_tx.h | 62
-rw-r--r-- drivers/net/wireless/silabs/wfx/hif_tx_mib.c | 307
-rw-r--r-- drivers/net/wireless/silabs/wfx/hif_tx_mib.h | 48
-rw-r--r-- drivers/net/wireless/silabs/wfx/hwio.c | 332
-rw-r--r-- drivers/net/wireless/silabs/wfx/hwio.h | 78
-rw-r--r-- drivers/net/wireless/silabs/wfx/key.c | 227
-rw-r--r-- drivers/net/wireless/silabs/wfx/key.h | 19
-rw-r--r-- drivers/net/wireless/silabs/wfx/main.c | 525
-rw-r--r-- drivers/net/wireless/silabs/wfx/main.h | 41
-rw-r--r-- drivers/net/wireless/silabs/wfx/queue.c | 322
-rw-r--r-- drivers/net/wireless/silabs/wfx/queue.h | 45
-rw-r--r-- drivers/net/wireless/silabs/wfx/scan.c | 209
-rw-r--r-- drivers/net/wireless/silabs/wfx/scan.h | 28
-rw-r--r-- drivers/net/wireless/silabs/wfx/sta.c | 841
-rw-r--r-- drivers/net/wireless/silabs/wfx/sta.h | 73
-rw-r--r-- drivers/net/wireless/silabs/wfx/traces.h | 496
-rw-r--r-- drivers/net/wireless/silabs/wfx/wfx.h | 169
-rw-r--r-- drivers/net/wireless/st/Kconfig | 17
-rw-r--r-- drivers/net/wireless/st/Makefile | 2
-rw-r--r-- drivers/net/wireless/st/cw1200/Kconfig | 31
-rw-r--r-- drivers/net/wireless/st/cw1200/Makefile | 22
-rw-r--r-- drivers/net/wireless/st/cw1200/bh.c | 608
-rw-r--r-- drivers/net/wireless/st/cw1200/bh.h | 25
-rw-r--r-- drivers/net/wireless/st/cw1200/cw1200.h | 320
-rw-r--r-- drivers/net/wireless/st/cw1200/cw1200_sdio.c | 425
-rw-r--r-- drivers/net/wireless/st/cw1200/cw1200_spi.c | 471
-rw-r--r-- drivers/net/wireless/st/cw1200/debug.c | 389
-rw-r--r-- drivers/net/wireless/st/cw1200/debug.h | 90
-rw-r--r-- drivers/net/wireless/st/cw1200/fwio.c | 523
-rw-r--r-- drivers/net/wireless/st/cw1200/fwio.h | 36
-rw-r--r-- drivers/net/wireless/st/cw1200/hwbus.h | 30
-rw-r--r-- drivers/net/wireless/st/cw1200/hwio.c | 309
-rw-r--r-- drivers/net/wireless/st/cw1200/hwio.h | 244
-rw-r--r-- drivers/net/wireless/st/cw1200/main.c | 609
-rw-r--r-- drivers/net/wireless/st/cw1200/pm.c | 363
-rw-r--r-- drivers/net/wireless/st/cw1200/pm.h | 45
-rw-r--r-- drivers/net/wireless/st/cw1200/queue.c | 552
-rw-r--r-- drivers/net/wireless/st/cw1200/queue.h | 112
-rw-r--r-- drivers/net/wireless/st/cw1200/scan.c | 462
-rw-r--r-- drivers/net/wireless/st/cw1200/scan.h | 53
-rw-r--r-- drivers/net/wireless/st/cw1200/sta.c | 2390
-rw-r--r-- drivers/net/wireless/st/cw1200/sta.h | 121
-rw-r--r-- drivers/net/wireless/st/cw1200/txrx.c | 1465
-rw-r--r-- drivers/net/wireless/st/cw1200/txrx.h | 103
-rw-r--r-- drivers/net/wireless/st/cw1200/wsm.c | 1817
-rw-r--r-- drivers/net/wireless/st/cw1200/wsm.h | 1855
-rw-r--r-- drivers/net/wireless/strip.c | 2802
-rw-r--r-- drivers/net/wireless/ti/Kconfig | 21
-rw-r--r-- drivers/net/wireless/ti/Makefile | 5
-rw-r--r-- drivers/net/wireless/ti/wl1251/Kconfig | 34
-rw-r--r-- drivers/net/wireless/ti/wl1251/Makefile | 9
-rw-r--r-- drivers/net/wireless/ti/wl1251/acx.c | 1019
-rw-r--r-- drivers/net/wireless/ti/wl1251/acx.h (renamed from drivers/net/wireless/wl12xx/acx.h) | 669
-rw-r--r-- drivers/net/wireless/ti/wl1251/boot.c | 541
-rw-r--r-- drivers/net/wireless/ti/wl1251/boot.h | 25
-rw-r--r-- drivers/net/wireless/ti/wl1251/cmd.c | 427
-rw-r--r-- drivers/net/wireless/ti/wl1251/cmd.h | 402
-rw-r--r-- drivers/net/wireless/ti/wl1251/debugfs.c (renamed from drivers/net/wireless/wl12xx/debugfs.c) | 164
-rw-r--r-- drivers/net/wireless/ti/wl1251/debugfs.h | 17
-rw-r--r-- drivers/net/wireless/ti/wl1251/event.c | 232
-rw-r--r-- drivers/net/wireless/ti/wl1251/event.h (renamed from drivers/net/wireless/wl12xx/event.h) | 42
-rw-r--r-- drivers/net/wireless/ti/wl1251/init.c | 412
-rw-r--r-- drivers/net/wireless/ti/wl1251/init.h | 72
-rw-r--r-- drivers/net/wireless/ti/wl1251/io.c | 188
-rw-r--r-- drivers/net/wireless/ti/wl1251/io.h | 69
-rw-r--r-- drivers/net/wireless/ti/wl1251/main.c | 1680
-rw-r--r-- drivers/net/wireless/ti/wl1251/ps.c | 170
-rw-r--r-- drivers/net/wireless/ti/wl1251/ps.h | 21
-rw-r--r-- drivers/net/wireless/ti/wl1251/reg.h (renamed from drivers/net/wireless/wl12xx/reg.h) | 144
-rw-r--r-- drivers/net/wireless/ti/wl1251/rx.c | 221
-rw-r--r-- drivers/net/wireless/ti/wl1251/rx.h (renamed from drivers/net/wireless/wl12xx/rx.h) | 44
-rw-r--r-- drivers/net/wireless/ti/wl1251/sdio.c | 330
-rw-r--r-- drivers/net/wireless/ti/wl1251/spi.c | 348
-rw-r--r-- drivers/net/wireless/ti/wl1251/spi.h | 45
-rw-r--r-- drivers/net/wireless/ti/wl1251/tx.c (renamed from drivers/net/wireless/wl12xx/tx.c) | 332
-rw-r--r-- drivers/net/wireless/ti/wl1251/tx.h (renamed from drivers/net/wireless/wl12xx/tx.h) | 72
-rw-r--r-- drivers/net/wireless/ti/wl1251/wl1251.h (renamed from drivers/net/wireless/wl12xx/wl12xx.h) | 209
-rw-r--r-- drivers/net/wireless/ti/wl1251/wl12xx_80211.h (renamed from drivers/net/wireless/wl12xx/wl12xx_80211.h) | 37
-rw-r--r-- drivers/net/wireless/ti/wl12xx/Kconfig | 10
-rw-r--r-- drivers/net/wireless/ti/wl12xx/Makefile | 4
-rw-r--r-- drivers/net/wireless/ti/wl12xx/acx.c | 39
-rw-r--r-- drivers/net/wireless/ti/wl12xx/acx.h | 259
-rw-r--r-- drivers/net/wireless/ti/wl12xx/cmd.c | 309
-rw-r--r-- drivers/net/wireless/ti/wl12xx/cmd.h | 118
-rw-r--r-- drivers/net/wireless/ti/wl12xx/conf.h | 269
-rw-r--r-- drivers/net/wireless/ti/wl12xx/debugfs.c | 211
-rw-r--r-- drivers/net/wireless/ti/wl12xx/debugfs.h | 14
-rw-r--r-- drivers/net/wireless/ti/wl12xx/event.c | 102
-rw-r--r-- drivers/net/wireless/ti/wl12xx/event.h | 97
-rw-r--r-- drivers/net/wireless/ti/wl12xx/main.c | 1966
-rw-r--r-- drivers/net/wireless/ti/wl12xx/reg.h | 542
-rw-r--r-- drivers/net/wireless/ti/wl12xx/scan.c | 498
-rw-r--r-- drivers/net/wireless/ti/wl12xx/scan.h | 126
-rw-r--r-- drivers/net/wireless/ti/wl12xx/wl12xx.h | 149
-rw-r--r-- drivers/net/wireless/ti/wl18xx/Kconfig | 8
-rw-r--r-- drivers/net/wireless/ti/wl18xx/Makefile | 4
-rw-r--r-- drivers/net/wireless/ti/wl18xx/acx.c | 326
-rw-r--r-- drivers/net/wireless/ti/wl18xx/acx.h | 405
-rw-r--r-- drivers/net/wireless/ti/wl18xx/cmd.c | 242
-rw-r--r-- drivers/net/wireless/ti/wl18xx/cmd.h | 82
-rw-r--r-- drivers/net/wireless/ti/wl18xx/conf.h | 218
-rw-r--r-- drivers/net/wireless/ti/wl18xx/debugfs.c | 560
-rw-r--r-- drivers/net/wireless/ti/wl18xx/debugfs.h | 14
-rw-r--r-- drivers/net/wireless/ti/wl18xx/event.c | 236
-rw-r--r-- drivers/net/wireless/ti/wl18xx/event.h | 110
-rw-r--r-- drivers/net/wireless/ti/wl18xx/io.c | 61
-rw-r--r-- drivers/net/wireless/ti/wl18xx/io.h | 14
-rw-r--r-- drivers/net/wireless/ti/wl18xx/main.c | 2159
-rw-r--r-- drivers/net/wireless/ti/wl18xx/reg.h | 235
-rw-r--r-- drivers/net/wireless/ti/wl18xx/scan.c | 337
-rw-r--r-- drivers/net/wireless/ti/wl18xx/scan.h | 113
-rw-r--r-- drivers/net/wireless/ti/wl18xx/tx.c | 181
-rw-r--r-- drivers/net/wireless/ti/wl18xx/tx.h | 32
-rw-r--r-- drivers/net/wireless/ti/wl18xx/wl18xx.h | 246
-rw-r--r-- drivers/net/wireless/ti/wlcore/Kconfig | 36
-rw-r--r-- drivers/net/wireless/ti/wlcore/Makefile | 11
-rw-r--r-- drivers/net/wireless/ti/wlcore/acx.c | 1839
-rw-r--r-- drivers/net/wireless/ti/wlcore/acx.h | 1122
-rw-r--r-- drivers/net/wireless/ti/wlcore/boot.c | 525
-rw-r--r-- drivers/net/wireless/ti/wlcore/boot.h | 41
-rw-r--r-- drivers/net/wireless/ti/wlcore/cmd.c | 2076
-rw-r--r-- drivers/net/wireless/ti/wlcore/cmd.h | 709
-rw-r--r-- drivers/net/wireless/ti/wlcore/conf.h | 1156
-rw-r--r-- drivers/net/wireless/ti/wlcore/debug.h | 98
-rw-r--r-- drivers/net/wireless/ti/wlcore/debugfs.c | 1337
-rw-r--r-- drivers/net/wireless/ti/wlcore/debugfs.h | 103
-rw-r--r-- drivers/net/wireless/ti/wlcore/event.c | 389
-rw-r--r-- drivers/net/wireless/ti/wlcore/event.h | 82
-rw-r--r-- drivers/net/wireless/ti/wlcore/hw_ops.h | 318
-rw-r--r-- drivers/net/wireless/ti/wlcore/ini.h | 218
-rw-r--r-- drivers/net/wireless/ti/wlcore/init.c | 751
-rw-r--r-- drivers/net/wireless/ti/wlcore/init.h | 26
-rw-r--r-- drivers/net/wireless/ti/wlcore/io.c | 198
-rw-r--r-- drivers/net/wireless/ti/wlcore/io.h | 229
-rw-r--r-- drivers/net/wireless/ti/wlcore/main.c | 6849
-rw-r--r-- drivers/net/wireless/ti/wlcore/ps.c | 172
-rw-r--r-- drivers/net/wireless/ti/wlcore/ps.h | 24
-rw-r--r-- drivers/net/wireless/ti/wlcore/rx.c | 342
-rw-r--r-- drivers/net/wireless/ti/wlcore/rx.h | 141
-rw-r--r-- drivers/net/wireless/ti/wlcore/scan.c | 477
-rw-r--r-- drivers/net/wireless/ti/wlcore/scan.h | 164
-rw-r--r-- drivers/net/wireless/ti/wlcore/sdio.c | 447
-rw-r--r-- drivers/net/wireless/ti/wlcore/spi.c | 569
-rw-r--r-- drivers/net/wireless/ti/wlcore/sysfs.c | 170
-rw-r--r-- drivers/net/wireless/ti/wlcore/sysfs.h | 14
-rw-r--r-- drivers/net/wireless/ti/wlcore/testmode.c | 384
-rw-r--r-- drivers/net/wireless/ti/wlcore/testmode.h | 18
-rw-r--r-- drivers/net/wireless/ti/wlcore/tx.c | 1311
-rw-r--r-- drivers/net/wireless/ti/wlcore/tx.h | 273
-rw-r--r-- drivers/net/wireless/ti/wlcore/vendor_cmd.c | 198
-rw-r--r-- drivers/net/wireless/ti/wlcore/vendor_cmd.h | 42
-rw-r--r-- drivers/net/wireless/ti/wlcore/wl12xx_80211.h | 137
-rw-r--r-- drivers/net/wireless/ti/wlcore/wlcore.h | 640
-rw-r--r-- drivers/net/wireless/ti/wlcore/wlcore_i.h | 572
-rw-r--r-- drivers/net/wireless/virtual/Kconfig | 20
-rw-r--r-- drivers/net/wireless/virtual/Makefile | 3
-rw-r--r-- drivers/net/wireless/virtual/mac80211_hwsim.c | 7235
-rw-r--r-- drivers/net/wireless/virtual/mac80211_hwsim.h | 346
-rw-r--r-- drivers/net/wireless/virtual/virt_wifi.c | 715
-rw-r--r-- drivers/net/wireless/wavelan.c | 4386
-rw-r--r-- drivers/net/wireless/wavelan.h | 370
-rw-r--r-- drivers/net/wireless/wavelan.p.h | 696
-rw-r--r-- drivers/net/wireless/wavelan_cs.c | 4649
-rw-r--r-- drivers/net/wireless/wavelan_cs.h | 386
-rw-r--r-- drivers/net/wireless/wavelan_cs.p.h | 766
-rw-r--r-- drivers/net/wireless/wl12xx/Kconfig | 11
-rw-r--r-- drivers/net/wireless/wl12xx/Makefile | 4
-rw-r--r-- drivers/net/wireless/wl12xx/acx.c | 689
-rw-r--r-- drivers/net/wireless/wl12xx/boot.c | 295
-rw-r--r-- drivers/net/wireless/wl12xx/boot.h | 40
-rw-r--r-- drivers/net/wireless/wl12xx/cmd.c | 353
-rw-r--r-- drivers/net/wireless/wl12xx/cmd.h | 265
-rw-r--r-- drivers/net/wireless/wl12xx/debugfs.h | 33
-rw-r--r-- drivers/net/wireless/wl12xx/event.c | 127
-rw-r--r-- drivers/net/wireless/wl12xx/init.c | 200
-rw-r--r-- drivers/net/wireless/wl12xx/init.h | 40
-rw-r--r-- drivers/net/wireless/wl12xx/main.c | 1358
-rw-r--r-- drivers/net/wireless/wl12xx/ps.c | 151
-rw-r--r-- drivers/net/wireless/wl12xx/ps.h | 36
-rw-r--r-- drivers/net/wireless/wl12xx/rx.c | 208
-rw-r--r-- drivers/net/wireless/wl12xx/spi.c | 358
-rw-r--r-- drivers/net/wireless/wl12xx/spi.h | 109
-rw-r--r-- drivers/net/wireless/wl12xx/wl1251.c | 709
-rw-r--r-- drivers/net/wireless/wl12xx/wl1251.h | 165
-rw-r--r-- drivers/net/wireless/wl3501.h | 616
-rw-r--r-- drivers/net/wireless/wl3501_cs.c | 2129
-rw-r--r-- drivers/net/wireless/zd1201.c | 1921
-rw-r--r-- drivers/net/wireless/zd1201.h | 147
-rw-r--r-- drivers/net/wireless/zydas/Kconfig | 17
-rw-r--r-- drivers/net/wireless/zydas/Makefile | 2
-rw-r--r-- drivers/net/wireless/zydas/zd1211rw/Kconfig (renamed from drivers/net/wireless/zd1211rw/Kconfig) | 11
-rw-r--r-- drivers/net/wireless/zydas/zd1211rw/Makefile (renamed from drivers/net/wireless/zd1211rw/Makefile) | 5
-rw-r--r-- drivers/net/wireless/zydas/zd1211rw/zd_chip.c (renamed from drivers/net/wireless/zd1211rw/zd_chip.c) | 622
-rw-r--r-- drivers/net/wireless/zydas/zd1211rw/zd_chip.h (renamed from drivers/net/wireless/zd1211rw/zd_chip.h) | 584
-rw-r--r-- drivers/net/wireless/zydas/zd1211rw/zd_def.h (renamed from drivers/net/wireless/zd1211rw/zd_def.h) | 25
-rw-r--r-- drivers/net/wireless/zydas/zd1211rw/zd_mac.c (renamed from drivers/net/wireless/zd1211rw/zd_mac.c) | 902
-rw-r--r-- drivers/net/wireless/zydas/zd1211rw/zd_mac.h (renamed from drivers/net/wireless/zd1211rw/zd_mac.h) | 78
-rw-r--r-- drivers/net/wireless/zydas/zd1211rw/zd_rf.c (renamed from drivers/net/wireless/zd1211rw/zd_rf.c) | 15
-rw-r--r-- drivers/net/wireless/zydas/zd1211rw/zd_rf.h (renamed from drivers/net/wireless/zd1211rw/zd_rf.h) | 20
-rw-r--r-- drivers/net/wireless/zydas/zd1211rw/zd_rf_al2230.c (renamed from drivers/net/wireless/zd1211rw/zd_rf_al2230.c) | 213
-rw-r--r-- drivers/net/wireless/zydas/zd1211rw/zd_rf_al7230b.c (renamed from drivers/net/wireless/zd1211rw/zd_rf_al7230b.c) | 255
-rw-r--r-- drivers/net/wireless/zydas/zd1211rw/zd_rf_rf2959.c (renamed from drivers/net/wireless/zd1211rw/zd_rf_rf2959.c) | 97
-rw-r--r-- drivers/net/wireless/zydas/zd1211rw/zd_rf_uw2453.c (renamed from drivers/net/wireless/zd1211rw/zd_rf_uw2453.c) | 104
-rw-r--r-- drivers/net/wireless/zydas/zd1211rw/zd_usb.c (renamed from drivers/net/wireless/zd1211rw/zd_usb.c) | 974
-rw-r--r-- drivers/net/wireless/zydas/zd1211rw/zd_usb.h (renamed from drivers/net/wireless/zd1211rw/zd_usb.h) | 84
2516 files changed, 1813377 insertions(+), 229028 deletions(-)
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index a67d29290ba0..c6599594dc99 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -1,506 +1,42 @@
+# SPDX-License-Identifier: GPL-2.0-only
 #
 # Wireless LAN device configuration
 #
-menu "Wireless LAN"
+menuconfig WLAN
+ bool "Wireless LAN"
  depends on !S390
-
-config WLAN_PRE80211
- bool "Wireless LAN (pre-802.11)"
- depends on NETDEVICES
- ---help---
- Say Y if you have any pre-802.11 wireless LAN hardware.
-
- This option does not affect the kernel build, it only
- lets you choose drivers.
-
-config STRIP
- tristate "STRIP (Metricom starmode radio IP)"
- depends on INET && WLAN_PRE80211
- select WIRELESS_EXT
- ---help---
- Say Y if you have a Metricom radio and intend to use Starmode Radio
- IP. STRIP is a radio protocol developed for the MosquitoNet project
- (on the WWW at <http://mosquitonet.stanford.edu/>) to send Internet
- traffic using Metricom radios. Metricom radios are small, battery
- powered, 100kbit/sec packet radio transceivers, about the size and
- weight of a cellular telephone. (You may also have heard them called
- "Metricom modems" but we avoid the term "modem" because it misleads
- many people into thinking that you can plug a Metricom modem into a
- phone line and use it as a modem.)
-
- You can use STRIP on any Linux machine with a serial port, although
- it is obviously most useful for people with laptop computers. If you
- think you might get a Metricom radio in the future, there is no harm
- in saying Y to STRIP now, except that it makes the kernel a bit
- bigger.
-
- To compile this as a module, choose M here: the module will be
- called strip.
-
-config ARLAN
- tristate "Aironet Arlan 655 & IC2200 DS support"
- depends on ISA && !64BIT && WLAN_PRE80211
- select WIRELESS_EXT
- ---help---
- Aironet makes Arlan, a class of wireless LAN adapters. These use the
- www.Telxon.com chip, which is also used on several similar cards.
- This driver is tested on the 655 and IC2200 series cards. Look at
- <http://www.ylenurme.ee/~elmer/655/> for the latest information.
-
- The driver is built as two modules, arlan and arlan-proc. The latter
- is the /proc interface and is not needed most of time.
-
- On some computers the card ends up in non-valid state after some
- time. Use a ping-reset script to clear it.
-
-config WAVELAN
- tristate "AT&T/Lucent old WaveLAN & DEC RoamAbout DS ISA support"
- depends on ISA && WLAN_PRE80211
- select WIRELESS_EXT
- ---help---
- The Lucent WaveLAN (formerly NCR and AT&T; or DEC RoamAbout DS) is
- a Radio LAN (wireless Ethernet-like Local Area Network) using the
- radio frequencies 900 MHz and 2.4 GHz.
-
- If you want to use an ISA WaveLAN card under Linux, say Y and read
- the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>. Some more specific
- information is contained in
- <file:Documentation/networking/wavelan.txt> and in the source code
- <file:drivers/net/wireless/wavelan.p.h>.
-
- You will also need the wireless tools package available from
- <http://www.hpl.hp.com/personal/Jean_Tourrilhes/Linux/Tools.html>.
- Please read the man pages contained therein.
-
- To compile this driver as a module, choose M here: the module will be
- called wavelan.
-
-config PCMCIA_WAVELAN
- tristate "AT&T/Lucent old WaveLAN Pcmcia wireless support"
- depends on PCMCIA && WLAN_PRE80211
- select WIRELESS_EXT
- help
- Say Y here if you intend to attach an AT&T/Lucent Wavelan PCMCIA
- (PC-card) wireless Ethernet networking card to your computer. This
- driver is for the non-IEEE-802.11 Wavelan cards.
-
- To compile this driver as a module, choose M here: the module will be
- called wavelan_cs. If unsure, say N.
-
-config PCMCIA_NETWAVE
- tristate "Xircom Netwave AirSurfer Pcmcia wireless support"
- depends on PCMCIA && WLAN_PRE80211
- select WIRELESS_EXT
- help
- Say Y here if you intend to attach this type of PCMCIA (PC-card)
- wireless Ethernet networking card to your computer.
-
- To compile this driver as a module, choose M here: the module will be
- called netwave_cs. If unsure, say N.
-
-
-config WLAN_80211
- bool "Wireless LAN (IEEE 802.11)"
- depends on NETDEVICES
- ---help---
- Say Y if you have any 802.11 wireless LAN hardware.
-
- This option does not affect the kernel build, it only
- lets you choose drivers.
-
-config PCMCIA_RAYCS
- tristate "Aviator/Raytheon 2.4GHz wireless support"
- depends on PCMCIA && WLAN_80211
- select WIRELESS_EXT
- ---help---
- Say Y here if you intend to attach an Aviator/Raytheon PCMCIA
- (PC-card) wireless Ethernet networking card to your computer.
- Please read the file <file:Documentation/networking/ray_cs.txt> for
- details.
-
- To compile this driver as a module, choose M here: the module will be
- called ray_cs. If unsure, say N.
-
-config LIBERTAS
- tristate "Marvell 8xxx Libertas WLAN driver support"
- depends on WLAN_80211
- select WIRELESS_EXT
- select LIB80211
- select FW_LOADER
- ---help---
- A library for Marvell Libertas 8xxx devices.
-
-config LIBERTAS_USB
- tristate "Marvell Libertas 8388 USB 802.11b/g cards"
- depends on LIBERTAS && USB
- ---help---
- A driver for Marvell Libertas 8388 USB devices.
-
-config LIBERTAS_CS
- tristate "Marvell Libertas 8385 CompactFlash 802.11b/g cards"
- depends on LIBERTAS && PCMCIA
- select FW_LOADER
- ---help---
- A driver for Marvell Libertas 8385 CompactFlash devices.
-
-config LIBERTAS_SDIO
- tristate "Marvell Libertas 8385/8686/8688 SDIO 802.11b/g cards"
- depends on LIBERTAS && MMC
- ---help---
- A driver for Marvell Libertas 8385/8686/8688 SDIO devices.
-
-config LIBERTAS_SPI
- tristate "Marvell Libertas 8686 SPI 802.11b/g cards"
- depends on LIBERTAS && SPI && GENERIC_GPIO
- ---help---
- A driver for Marvell Libertas 8686 SPI devices.
-
-config LIBERTAS_DEBUG
- bool "Enable full debugging output in the Libertas module."
- depends on LIBERTAS
- ---help---
- Debugging support.
-
-config LIBERTAS_THINFIRM
- tristate "Marvell 8xxx Libertas WLAN driver support with thin firmware"
- depends on WLAN_80211 && MAC80211
- select FW_LOADER
- ---help---
- A library for Marvell Libertas 8xxx devices using thinfirm.
-
-config LIBERTAS_THINFIRM_USB
- tristate "Marvell Libertas 8388 USB 802.11b/g cards with thin firmware"
- depends on LIBERTAS_THINFIRM && USB
- ---help---
- A driver for Marvell Libertas 8388 USB devices using thinfirm.
-
-config AIRO
- tristate "Cisco/Aironet 34X/35X/4500/4800 ISA and PCI cards"
- depends on ISA_DMA_API && WLAN_80211 && (PCI || BROKEN)
- select WIRELESS_EXT
- select CRYPTO
- ---help---
- This is the standard Linux driver to support Cisco/Aironet ISA and
- PCI 802.11 wireless cards.
- It supports the new 802.11b cards from Cisco (Cisco 34X, Cisco 35X
- - with or without encryption) as well as cards before the Cisco
- acquisition (Aironet 4500, Aironet 4800, Aironet 4800B).
-
- This driver supports both the standard Linux Wireless Extensions
- and Cisco proprietary API, so both the Linux Wireless Tools and the
- Cisco Linux utilities can be used to configure the card.
-
- The driver can be compiled as a module and will be named "airo".
-
-config ATMEL
- tristate "Atmel at76c50x chipset 802.11b support"
- depends on (PCI || PCMCIA) && WLAN_80211
- select WIRELESS_EXT
- select FW_LOADER
- select CRC32
- ---help---
- A driver for 802.11b wireless cards based on the Atmel fast-vnet
- chips. This driver supports standard Linux wireless extensions.
-
- Many cards based on this chipset do not have flash memory
- and need their firmware loaded at start-up. If yours is
- one of these, you will need to provide a firmware image
- to be loaded into the card by the driver. The Atmel
- firmware package can be downloaded from
- <http://www.thekelleys.org.uk/atmel>
-
-config PCI_ATMEL
- tristate "Atmel at76c506 PCI cards"
- depends on ATMEL && PCI
- ---help---
- Enable support for PCI and mini-PCI cards containing the
- Atmel at76c506 chip.
-
-config PCMCIA_ATMEL
- tristate "Atmel at76c502/at76c504 PCMCIA cards"
- depends on ATMEL && PCMCIA
- select WIRELESS_EXT
- select FW_LOADER
- select CRC32
- ---help---
- Enable support for PCMCIA cards containing the
- Atmel at76c502 and at76c504 chips.
-
-config AT76C50X_USB
- tristate "Atmel at76c503/at76c505/at76c505a USB cards"
- depends on MAC80211 && WLAN_80211 && USB
- select FW_LOADER
- ---help---
- Enable support for USB Wireless devices using Atmel at76c503,
- at76c505 or at76c505a chips.
-
-config AIRO_CS
- tristate "Cisco/Aironet 34X/35X/4500/4800 PCMCIA cards"
- depends on PCMCIA && (BROKEN || !M32R) && WLAN_80211
- select WIRELESS_EXT
- select CRYPTO
- select CRYPTO_AES
- ---help---
- This is the standard Linux driver to support Cisco/Aironet PCMCIA
- 802.11 wireless cards. This driver is the same as the Aironet
- driver part of the Linux Pcmcia package.
- It supports the new 802.11b cards from Cisco (Cisco 34X, Cisco 35X
- - with or without encryption) as well as cards before the Cisco
- acquisition (Aironet 4500, Aironet 4800, Aironet 4800B). It also
- supports OEMs of Cisco such as the DELL TrueMobile 4800 and Xircom
- 802.11b cards.
-
- This driver supports both the standard Linux Wireless Extensions
- and Cisco proprietary API, so both the Linux Wireless Tools and the
- Cisco Linux utilities can be used to configure the card.
-
-config PCMCIA_WL3501
- tristate "Planet WL3501 PCMCIA cards"
- depends on EXPERIMENTAL && PCMCIA && WLAN_80211
- select WIRELESS_EXT
- ---help---
- A driver for WL3501 PCMCIA 802.11 wireless cards made by Planet.
- It has basic support for Linux wireless extensions and initial
- micro support for ethtool.
-
-config PRISM54
- tristate "Intersil Prism GT/Duette/Indigo PCI/Cardbus"
- depends on PCI && EXPERIMENTAL && WLAN_80211
- select WIRELESS_EXT
- select FW_LOADER
- ---help---
- Enable PCI and Cardbus support for the following chipset based cards:
-
- ISL3880 - Prism GT 802.11 b/g
- ISL3877 - Prism Indigo 802.11 a
- ISL3890 - Prism Duette 802.11 a/b/g
-
- For a complete list of supported cards visit <http://prism54.org>.
- Here is the latest confirmed list of supported cards:
-
- 3com OfficeConnect 11g Cardbus Card aka 3CRWE154G72 (version 1)
- Allnet ALL0271 PCI Card
- Compex WL54G Cardbus Card
- Corega CG-WLCB54GT Cardbus Card
- D-Link Air Plus Xtreme G A1 Cardbus Card aka DWL-g650
- I-O Data WN-G54/CB Cardbus Card
- Kobishi XG-300 aka Z-Com Cardbus Card
- Netgear WG511 Cardbus Card
- Ovislink WL-5400PCI PCI Card
- Peabird WLG-PCI PCI Card
- Sitecom WL-100i Cardbus Card
- Sitecom WL-110i PCI Card
- SMC2802W - EZ Connect g 2.4GHz 54 Mbps Wireless PCI Card
- SMC2835W - EZ Connect g 2.4GHz 54 Mbps Wireless Cardbus Card
- SMC2835W-V2 - EZ Connect g 2.4GHz 54 Mbps Wireless Cardbus Card
- Z-Com XG-900 PCI Card
- Zyxel G-100 Cardbus Card
-
- If you enable this you will need a firmware file as well.
- You will need to copy this to /usr/lib/hotplug/firmware/isl3890.
- You can get this non-GPL'd firmware file from the Prism54 project page:
- <http://prism54.org>
- You will also need the /etc/hotplug/firmware.agent script from
- a current hotplug package.
-
- Note: You need a motherboard with DMA support to use any of these cards.
-
- If you want to compile the driver as a module ( = code which can be
- inserted in and removed from the running kernel whenever you want),
- say M here and read <file:Documentation/kbuild/modules.txt>.
- The module will be called prism54.ko.
-
-config USB_ZD1201
- tristate "USB ZD1201 based Wireless device support"
- depends on USB && WLAN_80211
- select WIRELESS_EXT
- select FW_LOADER
- ---help---
- Say Y if you want to use wireless LAN adapters based on the ZyDAS
- ZD1201 chip.
-
- This driver makes the adapter appear as a normal Ethernet interface,
- typically on wlan0.
-
- The zd1201 device requires external firmware to be loaded.
- This can be found at http://linux-lc100020.sourceforge.net/
-
- To compile this driver as a module, choose M here: the
- module will be called zd1201.
-
-config USB_NET_RNDIS_WLAN
- tristate "Wireless RNDIS USB support"
- depends on USB && WLAN_80211 && EXPERIMENTAL
- select USB_USBNET
- select USB_NET_CDCETHER
- select USB_NET_RNDIS_HOST
- select WIRELESS_EXT
- select CFG80211
- ---help---
- This is a driver for wireless RNDIS devices.
- These are USB based adapters found in devices such as:
-
- Buffalo WLI-U2-KG125S
- U.S. Robotics USR5421
- Belkin F5D7051
- Linksys WUSB54GSv2
- Linksys WUSB54GSC
- Asus WL169gE
- Eminent EM4045
- BT Voyager 1055
- Linksys WUSB54GSv1
- U.S. Robotics USR5420
- BUFFALO WLI-USB-G54
-
- All of these devices are based on the Broadcom 4320 chip, which is the
- only wireless RNDIS chip known to date.
-
- If you choose to build a module, it'll be called rndis_wlan.
-
-config RTL8180
- tristate "Realtek 8180/8185 PCI support"
- depends on MAC80211 && PCI && WLAN_80211 && EXPERIMENTAL
- select EEPROM_93CX6
- ---help---
- This is a driver for RTL8180 and RTL8185 based cards.
- These are PCI based chips found in cards such as:
-
- (RTL8185 802.11g)
- A-Link WL54PC
-
- (RTL8180 802.11b)
- Belkin F5D6020 v3
- Dlink DWL-610
- Dlink DWL-510
- Netgear MA521
- Level-One WPC-0101
- Acer Aspire 1357 LMi
- VCTnet PC-11B1
- Ovislink AirLive WL-1120PCM
- Mentor WL-PCI
- Linksys WPC11 v4
- TrendNET TEW-288PI
- D-Link DWL-520 Rev D
- Repotec RP-WP7126
- TP-Link TL-WN250/251
- Zonet ZEW1000
- Longshine LCS-8031-R
- HomeLine HLW-PCC200
- GigaFast WF721-AEX
- Planet WL-3553
- Encore ENLWI-PCI1-NT
- TrendNET TEW-266PC
- Gigabyte GN-WLMR101
- Siemens-fujitsu Amilo D1840W
- Edimax EW-7126
- PheeNet WL-11PCIR
- Tonze PC-2100T
- Planet WL-8303
- Dlink DWL-650 v M1
- Edimax EW-7106
- Q-Tec 770WC
- Topcom Skyr@cer 4011b
- Roper FreeLan 802.11b (edition 2004)
- Wistron Neweb Corp CB-200B
- Pentagram HorNET
- QTec 775WC
- TwinMOS Booming B Series
- Micronet SP906BB
- Sweex LC700010
- Surecom EP-9428
- Safecom SWLCR-1100
-
- Thanks to Realtek for their support!
-
-config RTL8187
- tristate "Realtek 8187 and 8187B USB support"
- depends on MAC80211 && USB && WLAN_80211
- select EEPROM_93CX6
- ---help---
- This is a driver for RTL8187 and RTL8187B based cards.
- These are USB based chips found in devices such as:
-
- Netgear WG111v2
- Level 1 WNC-0301USB
- Micronet SP907GK V5
- Encore ENUWI-G2
- Trendnet TEW-424UB
- ASUS P5B Deluxe
- Toshiba Satellite Pro series of laptops
- Asus Wireless Link
-
- Thanks to Realtek for their support!
-
-# If possible, automatically enable LEDs for RTL8187.
-
-config RTL8187_LEDS
- bool
- depends on RTL8187 && MAC80211_LEDS && (LEDS_CLASS = y || LEDS_CLASS = RTL8187)
+menuconfig WLAN
+ bool "Wireless LAN"
+ depends on NET
+ select WIRELESS
default y
+ help
+ This section contains all the pre 802.11 and 802.11 wireless
+ device drivers. For a complete list of drivers and documentation
+ on them refer to the wireless wiki:
-config ADM8211
- tristate "ADMtek ADM8211 support"
- depends on MAC80211 && PCI && WLAN_80211 && EXPERIMENTAL
- select CRC32
- select EEPROM_93CX6
- ---help---
- This driver is for ADM8211A, ADM8211B, and ADM8211C based cards.
- These are PCI/mini-PCI/Cardbus 802.11b chips found in cards such as:
-
- Xterasys Cardbus XN-2411b
- Blitz NetWave Point PC
- TrendNet 221pc
- Belkin F5D6001
- SMC 2635W
- Linksys WPC11 v1
- Fiberline FL-WL-200X
- 3com Office Connect (3CRSHPW796)
- Corega WLPCIB-11
- SMC 2602W V2 EU
- D-Link DWL-520 Revision C
-
- However, some of these cards have been replaced with other chips
- like the RTL8180L (Xterasys Cardbus XN-2411b, Belkin F5D6001) or
- the Ralink RT2400 (SMC2635W) without a model number change.
-
- Thanks to Infineon-ADMtek for their support of this driver.
-
-config MAC80211_HWSIM
- tristate "Simulated radio testing tool for mac80211"
- depends on MAC80211 && WLAN_80211
- ---help---
- This driver is a developer testing tool that can be used to test
- IEEE 802.11 networking stack (mac80211) functionality. This is not
- needed for normal wireless LAN usage and is only for testing. See
- Documentation/networking/mac80211_hwsim for more information on how
- to use this tool.
-
- To compile this driver as a module, choose M here: the module will be
- called mac80211_hwsim. If unsure, say N.
-
-config MWL8K
- tristate "Marvell 88W8xxx PCI/PCIe Wireless support"
- depends on MAC80211 && PCI && WLAN_80211 && EXPERIMENTAL
- ---help---
- This driver supports Marvell TOPDOG 802.11 wireless cards.
+ https://wireless.wiki.kernel.org/en/users/Drivers
- To compile this driver as a module, choose M here: the module
- will be called mwl8k. If unsure, say N.
+if WLAN
-source "drivers/net/wireless/p54/Kconfig"
+source "drivers/net/wireless/admtek/Kconfig"
source "drivers/net/wireless/ath/Kconfig"
-source "drivers/net/wireless/ipw2x00/Kconfig"
-source "drivers/net/wireless/iwlwifi/Kconfig"
-source "drivers/net/wireless/hostap/Kconfig"
-source "drivers/net/wireless/b43/Kconfig"
-source "drivers/net/wireless/b43legacy/Kconfig"
-source "drivers/net/wireless/zd1211rw/Kconfig"
-source "drivers/net/wireless/rt2x00/Kconfig"
-source "drivers/net/wireless/orinoco/Kconfig"
-source "drivers/net/wireless/wl12xx/Kconfig"
-source "drivers/net/wireless/iwmc3200wifi/Kconfig"
-
-endmenu
+source "drivers/net/wireless/atmel/Kconfig"
+source "drivers/net/wireless/broadcom/Kconfig"
+source "drivers/net/wireless/intel/Kconfig"
+source "drivers/net/wireless/intersil/Kconfig"
+source "drivers/net/wireless/marvell/Kconfig"
+source "drivers/net/wireless/mediatek/Kconfig"
+source "drivers/net/wireless/microchip/Kconfig"
+source "drivers/net/wireless/purelifi/Kconfig"
+source "drivers/net/wireless/ralink/Kconfig"
+source "drivers/net/wireless/realtek/Kconfig"
+source "drivers/net/wireless/rsi/Kconfig"
+source "drivers/net/wireless/silabs/Kconfig"
+source "drivers/net/wireless/st/Kconfig"
+source "drivers/net/wireless/ti/Kconfig"
+source "drivers/net/wireless/zydas/Kconfig"
+source "drivers/net/wireless/quantenna/Kconfig"
+
+source "drivers/net/wireless/virtual/Kconfig"
+
+endif # WLAN
diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile
index 7a4647e78fd3..e1c4141c6004 100644
--- a/drivers/net/wireless/Makefile
+++ b/drivers/net/wireless/Makefile
@@ -1,64 +1,25 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the Linux Wireless network device drivers.
#
-obj-$(CONFIG_IPW2100) += ipw2x00/
-obj-$(CONFIG_IPW2200) += ipw2x00/
-
-obj-$(CONFIG_STRIP) += strip.o
-obj-$(CONFIG_ARLAN) += arlan.o
-
-arlan-objs := arlan-main.o arlan-proc.o
-
-# Obsolete cards
-obj-$(CONFIG_WAVELAN) += wavelan.o
-obj-$(CONFIG_PCMCIA_NETWAVE) += netwave_cs.o
-obj-$(CONFIG_PCMCIA_WAVELAN) += wavelan_cs.o
-
-obj-$(CONFIG_HERMES) += orinoco/
-
-obj-$(CONFIG_AIRO) += airo.o
-obj-$(CONFIG_AIRO_CS) += airo_cs.o airo.o
-
-obj-$(CONFIG_ATMEL) += atmel.o
-obj-$(CONFIG_PCI_ATMEL) += atmel_pci.o
-obj-$(CONFIG_PCMCIA_ATMEL) += atmel_cs.o
-
-obj-$(CONFIG_AT76C50X_USB) += at76c50x-usb.o
-
-obj-$(CONFIG_PRISM54) += prism54/
-
-obj-$(CONFIG_HOSTAP) += hostap/
-obj-$(CONFIG_B43) += b43/
-obj-$(CONFIG_B43LEGACY) += b43legacy/
-obj-$(CONFIG_ZD1211RW) += zd1211rw/
-obj-$(CONFIG_RTL8180) += rtl818x/
-obj-$(CONFIG_RTL8187) += rtl818x/
-
-# 16-bit wireless PCMCIA client drivers
-obj-$(CONFIG_PCMCIA_RAYCS) += ray_cs.o
-obj-$(CONFIG_PCMCIA_WL3501) += wl3501_cs.o
-
-obj-$(CONFIG_USB_NET_RNDIS_WLAN) += rndis_wlan.o
-
-obj-$(CONFIG_USB_ZD1201) += zd1201.o
-obj-$(CONFIG_LIBERTAS) += libertas/
-
-obj-$(CONFIG_LIBERTAS_THINFIRM) += libertas_tf/
-
-obj-$(CONFIG_ADM8211) += adm8211.o
-
-obj-$(CONFIG_MWL8K) += mwl8k.o
-
-obj-$(CONFIG_IWLWIFI) += iwlwifi/
-obj-$(CONFIG_RT2X00) += rt2x00/
-
-obj-$(CONFIG_P54_COMMON) += p54/
-
-obj-$(CONFIG_ATH_COMMON) += ath/
-
-obj-$(CONFIG_MAC80211_HWSIM) += mac80211_hwsim.o
-
-obj-$(CONFIG_WL12XX) += wl12xx/
-
-obj-$(CONFIG_IWM) += iwmc3200wifi/
+obj-$(CONFIG_WLAN_VENDOR_ADMTEK) += admtek/
+obj-$(CONFIG_WLAN_VENDOR_ATH) += ath/
+obj-$(CONFIG_WLAN_VENDOR_ATMEL) += atmel/
+obj-$(CONFIG_WLAN_VENDOR_BROADCOM) += broadcom/
+obj-$(CONFIG_WLAN_VENDOR_INTEL) += intel/
+obj-$(CONFIG_WLAN_VENDOR_INTERSIL) += intersil/
+obj-$(CONFIG_WLAN_VENDOR_MARVELL) += marvell/
+obj-$(CONFIG_WLAN_VENDOR_MEDIATEK) += mediatek/
+obj-$(CONFIG_WLAN_VENDOR_MICROCHIP) += microchip/
+obj-$(CONFIG_WLAN_VENDOR_PURELIFI) += purelifi/
+obj-$(CONFIG_WLAN_VENDOR_QUANTENNA) += quantenna/
+obj-$(CONFIG_WLAN_VENDOR_RALINK) += ralink/
+obj-$(CONFIG_WLAN_VENDOR_REALTEK) += realtek/
+obj-$(CONFIG_WLAN_VENDOR_RSI) += rsi/
+obj-$(CONFIG_WLAN_VENDOR_SILABS) += silabs/
+obj-$(CONFIG_WLAN_VENDOR_ST) += st/
+obj-$(CONFIG_WLAN_VENDOR_TI) += ti/
+obj-$(CONFIG_WLAN_VENDOR_ZYDAS) += zydas/
+
+obj-$(CONFIG_WLAN) += virtual/
diff --git a/drivers/net/wireless/admtek/Kconfig b/drivers/net/wireless/admtek/Kconfig
new file mode 100644
index 000000000000..678979a1d987
--- /dev/null
+++ b/drivers/net/wireless/admtek/Kconfig
@@ -0,0 +1,42 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config WLAN_VENDOR_ADMTEK
+ bool "ADMtek devices"
+ default y
+ help
+ If you have a wireless card belonging to this class, say Y.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all the
+ questions about these cards. If you say Y, you will be asked for
+ your specific card in the following questions.
+
+if WLAN_VENDOR_ADMTEK
+
+config ADM8211
+ tristate "ADMtek ADM8211 support"
+ depends on MAC80211 && PCI
+ select CRC32
+ select EEPROM_93CX6
+ help
+ This driver is for ADM8211A, ADM8211B, and ADM8211C based cards.
+ These are PCI/mini-PCI/Cardbus 802.11b chips found in cards such as:
+
+ Xterasys Cardbus XN-2411b
+ Blitz NetWave Point PC
+ TrendNet 221pc
+ Belkin F5D6001
+ SMC 2635W
+ Linksys WPC11 v1
+ Fiberline FL-WL-200X
+ 3com Office Connect (3CRSHPW796)
+ Corega WLPCIB-11
+ SMC 2602W V2 EU
+ D-Link DWL-520 Revision C
+
+ However, some of these cards have been replaced with other chips
+ like the RTL8180L (Xterasys Cardbus XN-2411b, Belkin F5D6001) or
+ the Ralink RT2400 (SMC2635W) without a model number change.
+
+ Thanks to Infineon-ADMtek for their support of this driver.
+
+endif # WLAN_VENDOR_ADMTEK
diff --git a/drivers/net/wireless/admtek/Makefile b/drivers/net/wireless/admtek/Makefile
new file mode 100644
index 000000000000..709c2bca53ed
--- /dev/null
+++ b/drivers/net/wireless/admtek/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_ADM8211) += adm8211.o
diff --git a/drivers/net/wireless/adm8211.c b/drivers/net/wireless/admtek/adm8211.c
index 2b9e379994a1..e94a6b180314 100644
--- a/drivers/net/wireless/adm8211.c
+++ b/drivers/net/wireless/admtek/adm8211.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Linux device driver for ADMtek ADM8211 (IEEE 802.11b MAC/BBP)
@@ -8,21 +9,18 @@
* and used with permission.
*
* Much thanks to Infineon-ADMtek for their support of this driver.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation. See README and COPYING for
- * more details.
*/
-#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/if.h>
#include <linux/skbuff.h>
+#include <linux/slab.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/crc32.h>
#include <linux/eeprom_93cx6.h>
+#include <linux/module.h>
#include <net/mac80211.h>
#include "adm8211.h"
@@ -30,7 +28,6 @@
MODULE_AUTHOR("Michael Wu <flamingice@sourmilk.net>");
MODULE_AUTHOR("Jouni Malinen <j@w1.fi>");
MODULE_DESCRIPTION("Driver for IEEE 802.11b wireless cards based on ADMtek ADM8211");
-MODULE_SUPPORTED_DEVICE("ADM8211");
MODULE_LICENSE("GPL");
static unsigned int tx_ring_size __read_mostly = 16;
@@ -39,7 +36,7 @@ static unsigned int rx_ring_size __read_mostly = 16;
module_param(tx_ring_size, uint, 0);
module_param(rx_ring_size, uint, 0);
-static struct pci_device_id adm8211_pci_id_table[] __devinitdata = {
+static const struct pci_device_id adm8211_pci_id_table[] = {
/* ADMtek ADM8211 */
{ PCI_DEVICE(0x10B7, 0x6000) }, /* 3Com 3CRSHPW796 */
{ PCI_DEVICE(0x1200, 0x8201) }, /* ? */
@@ -302,18 +299,6 @@ static int adm8211_get_stats(struct ieee80211_hw *dev,
return 0;
}
-static int adm8211_get_tx_stats(struct ieee80211_hw *dev,
- struct ieee80211_tx_queue_stats *stats)
-{
- struct adm8211_priv *priv = dev->priv;
-
- stats[0].len = priv->cur_tx - priv->dirty_tx;
- stats[0].limit = priv->tx_ring_size - 2;
- stats[0].count = priv->dirty_tx;
-
- return 0;
-}
-
static void adm8211_interrupt_tci(struct ieee80211_hw *dev)
{
struct adm8211_priv *priv = dev->priv;
@@ -338,8 +323,8 @@ static void adm8211_interrupt_tci(struct ieee80211_hw *dev)
/* TODO: check TDES0_STATUS_TUF and TDES0_STATUS_TRO */
- pci_unmap_single(priv->pdev, info->mapping,
- info->skb->len, PCI_DMA_TODEVICE);
+ dma_unmap_single(&priv->pdev->dev, info->mapping,
+ info->skb->len, DMA_TO_DEVICE);
ieee80211_tx_info_clear_status(txi);
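
Aside: the hunks here convert the driver from the legacy pci_map_single()/pci_unmap_single() wrappers to the generic DMA API. A minimal sketch of the map-and-check pattern the new code follows; the helper name and parameters are illustrative, not part of this driver:

#include <linux/dma-mapping.h>

/* Hypothetical helper; the later unmap must mirror the map exactly:
 * dma_unmap_single(dev, map, len, DMA_TO_DEVICE). */
static int example_map_tx(struct device *dev, void *buf, size_t len,
			  dma_addr_t *map)
{
	*map = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *map))
		return -ENOMEM;	/* mappings can fail; always check */
	return 0;
}
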
@@ -384,8 +369,8 @@ static void adm8211_interrupt_rci(struct ieee80211_hw *dev)
pktlen = status & RDES0_STATUS_FL;
if (pktlen > RX_PKT_SIZE) {
if (net_ratelimit())
- printk(KERN_DEBUG "%s: frame too long (%d)\n",
- wiphy_name(dev->wiphy), pktlen);
+ wiphy_debug(dev->wiphy, "frame too long (%d)\n",
+ pktlen);
pktlen = RX_PKT_SIZE;
}
@@ -396,33 +381,39 @@ static void adm8211_interrupt_rci(struct ieee80211_hw *dev)
} else if (pktlen < RX_COPY_BREAK) {
skb = dev_alloc_skb(pktlen);
if (skb) {
- pci_dma_sync_single_for_cpu(
- priv->pdev,
- priv->rx_buffers[entry].mapping,
- pktlen, PCI_DMA_FROMDEVICE);
- memcpy(skb_put(skb, pktlen),
- skb_tail_pointer(priv->rx_buffers[entry].skb),
- pktlen);
- pci_dma_sync_single_for_device(
- priv->pdev,
- priv->rx_buffers[entry].mapping,
- RX_PKT_SIZE, PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_cpu(&priv->pdev->dev,
+ priv->rx_buffers[entry].mapping,
+ pktlen,
+ DMA_FROM_DEVICE);
+ skb_put_data(skb,
+ skb_tail_pointer(priv->rx_buffers[entry].skb),
+ pktlen);
+ dma_sync_single_for_device(&priv->pdev->dev,
+ priv->rx_buffers[entry].mapping,
+ RX_PKT_SIZE,
+ DMA_FROM_DEVICE);
}
} else {
newskb = dev_alloc_skb(RX_PKT_SIZE);
if (newskb) {
skb = priv->rx_buffers[entry].skb;
skb_put(skb, pktlen);
- pci_unmap_single(
- priv->pdev,
- priv->rx_buffers[entry].mapping,
- RX_PKT_SIZE, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&priv->pdev->dev,
+ priv->rx_buffers[entry].mapping,
+ RX_PKT_SIZE, DMA_FROM_DEVICE);
priv->rx_buffers[entry].skb = newskb;
priv->rx_buffers[entry].mapping =
- pci_map_single(priv->pdev,
+ dma_map_single(&priv->pdev->dev,
skb_tail_pointer(newskb),
RX_PKT_SIZE,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&priv->pdev->dev,
+ priv->rx_buffers[entry].mapping)) {
+ priv->rx_buffers[entry].skb = NULL;
+ dev_kfree_skb(newskb);
+ skb = NULL;
+ /* TODO: update rx dropped stats */
+ }
} else {
skb = NULL;
/* TODO: update rx dropped stats */
@@ -450,9 +441,10 @@ static void adm8211_interrupt_rci(struct ieee80211_hw *dev)
rx_status.rate_idx = rate;
rx_status.freq = adm8211_channels[priv->channel - 1].center_freq;
- rx_status.band = IEEE80211_BAND_2GHZ;
+ rx_status.band = NL80211_BAND_2GHZ;
- ieee80211_rx_irqsafe(dev, skb, &rx_status);
+ memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
+ ieee80211_rx_irqsafe(dev, skb);
}
entry = (++priv->cur_rx) % priv->rx_ring_size;
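
A side note: the receive path above is the classic copy-break technique. Here is a hedged sketch of the idea, with EXAMPLE_COPY_BREAK and the helper standing in for the driver's RX_COPY_BREAK and ring state:

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

#define EXAMPLE_COPY_BREAK 128	/* plays the role of RX_COPY_BREAK */

/* Hypothetical helper: small frames are copied out of the still-mapped
 * DMA buffer; large frames surrender the buffer (the caller must then
 * map a replacement, as adm8211_interrupt_rci() does). */
static struct sk_buff *example_rx(struct device *dev, struct sk_buff *rx_buf,
				  dma_addr_t map, unsigned int pktlen,
				  unsigned int buf_size)
{
	struct sk_buff *skb;

	if (pktlen < EXAMPLE_COPY_BREAK) {
		skb = dev_alloc_skb(pktlen);
		if (!skb)
			return NULL;
		dma_sync_single_for_cpu(dev, map, pktlen, DMA_FROM_DEVICE);
		skb_put_data(skb, skb_tail_pointer(rx_buf), pktlen);
		dma_sync_single_for_device(dev, map, buf_size, DMA_FROM_DEVICE);
		return skb;
	}

	dma_unmap_single(dev, map, buf_size, DMA_FROM_DEVICE);
	skb_put(rx_buf, pktlen);
	return rx_buf;
}
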
@@ -464,10 +456,10 @@ static void adm8211_interrupt_rci(struct ieee80211_hw *dev)
static irqreturn_t adm8211_interrupt(int irq, void *dev_id)
{
-#define ADM8211_INT(x) \
-do { \
- if (unlikely(stsr & ADM8211_STSR_ ## x)) \
- printk(KERN_DEBUG "%s: " #x "\n", wiphy_name(dev->wiphy)); \
+#define ADM8211_INT(x) \
+do { \
+ if (unlikely(stsr & ADM8211_STSR_ ## x)) \
+ wiphy_debug(dev->wiphy, "%s\n", #x); \
} while (0)
struct ieee80211_hw *dev = dev_id;
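
Note that the ADM8211_INT() macro keeps its do { ... } while (0) wrapper through the reindent for a reason: it turns a multi-statement macro into a single statement that composes safely with if/else. A minimal illustration (log_a()/log_b() are made-up stand-ins):

void log_a(int x);
void log_b(int x);

#define GOOD_LOG(x) do { log_a(x); log_b(x); } while (0)

void example(int cond)
{
	if (cond)
		GOOD_LOG(1);	/* one statement; the trailing ';' is safe */
	else
		GOOD_LOG(2);	/* a bare { } block here would orphan 'else' */
}
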
@@ -580,9 +572,9 @@ static int adm8211_write_bbp(struct ieee80211_hw *dev, u8 addr, u8 data)
}
if (timeout == 0) {
- printk(KERN_DEBUG "%s: adm8211_write_bbp(%d,%d) failed"
- " prewrite (reg=0x%08x)\n",
- wiphy_name(dev->wiphy), addr, data, reg);
+ wiphy_debug(dev->wiphy,
+ "adm8211_write_bbp(%d,%d) failed prewrite (reg=0x%08x)\n",
+ addr, data, reg);
return -ETIMEDOUT;
}
@@ -615,9 +607,9 @@ static int adm8211_write_bbp(struct ieee80211_hw *dev, u8 addr, u8 data)
if (timeout == 0) {
ADM8211_CSR_WRITE(BBPCTL, ADM8211_CSR_READ(BBPCTL) &
~ADM8211_BBPCTL_WR);
- printk(KERN_DEBUG "%s: adm8211_write_bbp(%d,%d) failed"
- " postwrite (reg=0x%08x)\n",
- wiphy_name(dev->wiphy), addr, data, reg);
+ wiphy_debug(dev->wiphy,
+ "adm8211_write_bbp(%d,%d) failed postwrite (reg=0x%08x)\n",
+ addr, data, reg);
return -ETIMEDOUT;
}
@@ -685,8 +677,8 @@ static int adm8211_rf_set_channel(struct ieee80211_hw *dev, unsigned int chan)
break;
default:
- printk(KERN_DEBUG "%s: unsupported transceiver type %d\n",
- wiphy_name(dev->wiphy), priv->transceiver_type);
+ wiphy_debug(dev->wiphy, "unsupported transceiver type %d\n",
+ priv->transceiver_type);
break;
}
@@ -742,8 +734,8 @@ static int adm8211_rf_set_channel(struct ieee80211_hw *dev, unsigned int chan)
/* Nothing to do for ADMtek BBP */
} else if (priv->bbp_type != ADM8211_TYPE_ADMTEK)
- printk(KERN_DEBUG "%s: unsupported BBP type %d\n",
- wiphy_name(dev->wiphy), priv->bbp_type);
+ wiphy_debug(dev->wiphy, "unsupported BBP type %d\n",
+ priv->bbp_type);
ADM8211_RESTORE();
@@ -1037,13 +1029,12 @@ static int adm8211_hw_init_bbp(struct ieee80211_hw *dev)
break;
default:
- printk(KERN_DEBUG "%s: unsupported transceiver %d\n",
- wiphy_name(dev->wiphy), priv->transceiver_type);
+ wiphy_debug(dev->wiphy, "unsupported transceiver %d\n",
+ priv->transceiver_type);
break;
}
} else
- printk(KERN_DEBUG "%s: unsupported BBP %d\n",
- wiphy_name(dev->wiphy), priv->bbp_type);
+ wiphy_debug(dev->wiphy, "unsupported BBP %d\n", priv->bbp_type);
ADM8211_CSR_WRITE(SYNRF, 0);
@@ -1108,14 +1099,18 @@ static void adm8211_hw_init(struct ieee80211_hw *dev)
pci_read_config_byte(priv->pdev, PCI_CACHE_LINE_SIZE, &cline);
switch (cline) {
- case 0x8: reg |= (0x1 << 14);
- break;
- case 0x16: reg |= (0x2 << 14);
- break;
- case 0x32: reg |= (0x3 << 14);
- break;
- default: reg |= (0x0 << 14);
- break;
+ case 0x8:
+ reg |= (0x1 << 14);
+ break;
+ case 0x10:
+ reg |= (0x2 << 14);
+ break;
+ case 0x20:
+ reg |= (0x3 << 14);
+ break;
+ default:
+ reg |= (0x0 << 14);
+ break;
}
}
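
Aside: besides reindenting, this hunk fixes a base-confusion bug. The old cases compared the cache line size against 0x16 and 0x32, hex literals that are 22 and 50 in decimal, so values of 16 and 32 could never match. A self-contained check of the literals (plain C, outside the kernel):

#include <assert.h>

int main(void)
{
	assert(0x16 == 22 && 0x32 == 50);	/* the old, wrong cases */
	assert(0x10 == 16 && 0x20 == 32);	/* what the fix uses */
	return 0;
}
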
@@ -1259,7 +1254,8 @@ static int adm8211_hw_reset(struct ieee80211_hw *dev)
return 0;
}
-static u64 adm8211_get_tsft(struct ieee80211_hw *dev)
+static u64 adm8211_get_tsft(struct ieee80211_hw *dev,
+ struct ieee80211_vif *vif)
{
struct adm8211_priv *priv = dev->priv;
u32 tsftl;
@@ -1297,11 +1293,12 @@ static void adm8211_set_bssid(struct ieee80211_hw *dev, const u8 *bssid)
ADM8211_CSR_WRITE(ABDA1, reg);
}
-static int adm8211_config(struct ieee80211_hw *dev, u32 changed)
+static int adm8211_config(struct ieee80211_hw *dev, int radio_idx, u32 changed)
{
struct adm8211_priv *priv = dev->priv;
struct ieee80211_conf *conf = &dev->conf;
- int channel = ieee80211_frequency_to_channel(conf->channel->center_freq);
+ int channel =
+ ieee80211_frequency_to_channel(conf->chandef.chan->center_freq);
if (channel != priv->channel) {
priv->channel = channel;
@@ -1314,54 +1311,60 @@ static int adm8211_config(struct ieee80211_hw *dev, u32 changed)
static void adm8211_bss_info_changed(struct ieee80211_hw *dev,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *conf,
- u32 changes)
+ u64 changes)
{
struct adm8211_priv *priv = dev->priv;
if (!(changes & BSS_CHANGED_BSSID))
return;
- if (memcmp(conf->bssid, priv->bssid, ETH_ALEN)) {
+ if (!ether_addr_equal(conf->bssid, priv->bssid)) {
adm8211_set_bssid(dev, conf->bssid);
memcpy(priv->bssid, conf->bssid, ETH_ALEN);
}
}
+static u64 adm8211_prepare_multicast(struct ieee80211_hw *hw,
+ struct netdev_hw_addr_list *mc_list)
+{
+ unsigned int bit_nr;
+ u32 mc_filter[2];
+ struct netdev_hw_addr *ha;
+
+ mc_filter[1] = mc_filter[0] = 0;
+
+ netdev_hw_addr_list_for_each(ha, mc_list) {
+ bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
+
+ bit_nr &= 0x3F;
+ mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
+ }
+
+ return mc_filter[0] | ((u64)(mc_filter[1]) << 32);
+}
+
static void adm8211_configure_filter(struct ieee80211_hw *dev,
unsigned int changed_flags,
unsigned int *total_flags,
- int mc_count, struct dev_mc_list *mclist)
+ u64 multicast)
{
static const u8 bcast[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
struct adm8211_priv *priv = dev->priv;
- unsigned int bit_nr, new_flags;
+ unsigned int new_flags;
u32 mc_filter[2];
- int i;
+
+ mc_filter[0] = multicast;
+ mc_filter[1] = multicast >> 32;
new_flags = 0;
- if (*total_flags & FIF_PROMISC_IN_BSS) {
- new_flags |= FIF_PROMISC_IN_BSS;
- priv->nar |= ADM8211_NAR_PR;
- priv->nar &= ~ADM8211_NAR_MM;
- mc_filter[1] = mc_filter[0] = ~0;
- } else if ((*total_flags & FIF_ALLMULTI) || (mc_count > 32)) {
+ if (*total_flags & FIF_ALLMULTI || multicast == ~(0ULL)) {
new_flags |= FIF_ALLMULTI;
priv->nar &= ~ADM8211_NAR_PR;
priv->nar |= ADM8211_NAR_MM;
mc_filter[1] = mc_filter[0] = ~0;
} else {
priv->nar &= ~(ADM8211_NAR_MM | ADM8211_NAR_PR);
- mc_filter[1] = mc_filter[0] = 0;
- for (i = 0; i < mc_count; i++) {
- if (!mclist)
- break;
- bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
-
- bit_nr &= 0x3F;
- mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
- mclist = mclist->next;
- }
}
ADM8211_IDLE_RX();
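
As an aside, adm8211_prepare_multicast() folds the multicast list into a 64-bit hash that configure_filter() later splits across the two 32-bit hardware registers. A user-space sketch of the per-address computation, assuming the classic ether_crc() bit order (LSB-first bytes, left-shifting CRC-32) used by many net drivers:

#include <stdint.h>

static uint32_t example_ether_crc(int len, const unsigned char *p)
{
	uint32_t crc = 0xffffffff;

	while (--len >= 0) {
		unsigned char byte = *p++;
		int bit;

		for (bit = 0; bit < 8; bit++, byte >>= 1) {
			uint32_t feed = (crc >> 31) ^ (byte & 1);

			crc <<= 1;
			if (feed)
				crc ^= 0x04c11db7;
		}
	}
	return crc;
}

/* One address lights one of 64 filter bits; the driver ORs this over
 * the whole list before packing the two 32-bit halves. */
static uint64_t example_mc_bit(const unsigned char addr[6])
{
	unsigned int bit_nr = (example_ether_crc(6, addr) >> 26) & 0x3f;
	uint32_t mc_filter[2] = { 0, 0 };

	mc_filter[bit_nr >> 5] |= 1u << (bit_nr & 31);
	return mc_filter[0] | ((uint64_t)mc_filter[1] << 32);
}
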
@@ -1371,9 +1374,9 @@ static void adm8211_configure_filter(struct ieee80211_hw *dev,
ADM8211_CSR_READ(NAR);
if (priv->nar & ADM8211_NAR_PR)
- dev->flags |= IEEE80211_HW_RX_INCLUDES_FCS;
+ ieee80211_hw_set(dev, RX_INCLUDES_FCS);
else
- dev->flags &= ~IEEE80211_HW_RX_INCLUDES_FCS;
+ __clear_bit(IEEE80211_HW_RX_INCLUDES_FCS, dev->flags);
if (*total_flags & FIF_BCN_PRBRESP_PROMISC)
adm8211_set_bssid(dev, bcast);
@@ -1386,15 +1389,15 @@ static void adm8211_configure_filter(struct ieee80211_hw *dev,
}
static int adm8211_add_interface(struct ieee80211_hw *dev,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct adm8211_priv *priv = dev->priv;
if (priv->mode != NL80211_IFTYPE_MONITOR)
return -EOPNOTSUPP;
- switch (conf->type) {
+ switch (vif->type) {
case NL80211_IFTYPE_STATION:
- priv->mode = conf->type;
+ priv->mode = vif->type;
break;
default:
return -EOPNOTSUPP;
@@ -1402,8 +1405,8 @@ static int adm8211_add_interface(struct ieee80211_hw *dev,
ADM8211_IDLE();
- ADM8211_CSR_WRITE(PAR0, le32_to_cpu(*(__le32 *)conf->mac_addr));
- ADM8211_CSR_WRITE(PAR1, le16_to_cpu(*(__le16 *)(conf->mac_addr + 4)));
+ ADM8211_CSR_WRITE(PAR0, le32_to_cpu(*(__le32 *)vif->addr));
+ ADM8211_CSR_WRITE(PAR1, le16_to_cpu(*(__le16 *)(vif->addr + 4)));
adm8211_update_mode(dev);
@@ -1413,7 +1416,7 @@ static int adm8211_add_interface(struct ieee80211_hw *dev,
}
static void adm8211_remove_interface(struct ieee80211_hw *dev,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct adm8211_priv *priv = dev->priv;
priv->mode = NL80211_IFTYPE_MONITOR;
@@ -1444,10 +1447,16 @@ static int adm8211_init_rings(struct ieee80211_hw *dev)
rx_info->skb = dev_alloc_skb(RX_PKT_SIZE);
if (rx_info->skb == NULL)
break;
- rx_info->mapping = pci_map_single(priv->pdev,
+ rx_info->mapping = dma_map_single(&priv->pdev->dev,
skb_tail_pointer(rx_info->skb),
RX_PKT_SIZE,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&priv->pdev->dev, rx_info->mapping)) {
+ dev_kfree_skb(rx_info->skb);
+ rx_info->skb = NULL;
+ break;
+ }
+
desc->buffer1 = cpu_to_le32(rx_info->mapping);
desc->status = cpu_to_le32(RDES0_STATUS_OWN | RDES0_STATUS_SQL);
}
@@ -1479,10 +1488,9 @@ static void adm8211_free_rings(struct ieee80211_hw *dev)
if (!priv->rx_buffers[i].skb)
continue;
- pci_unmap_single(
- priv->pdev,
- priv->rx_buffers[i].mapping,
- RX_PKT_SIZE, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&priv->pdev->dev,
+ priv->rx_buffers[i].mapping, RX_PKT_SIZE,
+ DMA_FROM_DEVICE);
dev_kfree_skb(priv->rx_buffers[i].skb);
}
@@ -1491,10 +1499,9 @@ static void adm8211_free_rings(struct ieee80211_hw *dev)
if (!priv->tx_buffers[i].skb)
continue;
- pci_unmap_single(priv->pdev,
+ dma_unmap_single(&priv->pdev->dev,
priv->tx_buffers[i].mapping,
- priv->tx_buffers[i].skb->len,
- PCI_DMA_TODEVICE);
+ priv->tx_buffers[i].skb->len, DMA_TO_DEVICE);
dev_kfree_skb(priv->tx_buffers[i].skb);
}
@@ -1508,15 +1515,13 @@ static int adm8211_start(struct ieee80211_hw *dev)
/* Power up MAC and RF chips */
retval = adm8211_hw_reset(dev);
if (retval) {
- printk(KERN_ERR "%s: hardware reset failed\n",
- wiphy_name(dev->wiphy));
+ wiphy_err(dev->wiphy, "hardware reset failed\n");
goto fail;
}
retval = adm8211_init_rings(dev);
if (retval) {
- printk(KERN_ERR "%s: failed to initialize rings\n",
- wiphy_name(dev->wiphy));
+ wiphy_err(dev->wiphy, "failed to initialize rings\n");
goto fail;
}
@@ -1524,11 +1529,10 @@ static int adm8211_start(struct ieee80211_hw *dev)
adm8211_hw_init(dev);
adm8211_rf_set_channel(dev, priv->channel);
- retval = request_irq(priv->pdev->irq, &adm8211_interrupt,
+ retval = request_irq(priv->pdev->irq, adm8211_interrupt,
IRQF_SHARED, "adm8211", dev);
if (retval) {
- printk(KERN_ERR "%s: failed to register IRQ handler\n",
- wiphy_name(dev->wiphy));
+ wiphy_err(dev->wiphy, "failed to register IRQ handler\n");
goto fail;
}
@@ -1546,7 +1550,7 @@ fail:
return retval;
}
-static void adm8211_stop(struct ieee80211_hw *dev)
+static void adm8211_stop(struct ieee80211_hw *dev, bool suspend)
{
struct adm8211_priv *priv = dev->priv;
@@ -1614,7 +1618,7 @@ static void adm8211_calc_durations(int *dur, int *plcp, size_t payload_len, int
}
/* Transmit skb w/adm8211_tx_hdr (802.11 header created by hardware) */
-static void adm8211_tx_raw(struct ieee80211_hw *dev, struct sk_buff *skb,
+static int adm8211_tx_raw(struct ieee80211_hw *dev, struct sk_buff *skb,
u16 plcp_signal,
size_t hdrlen)
{
@@ -1624,8 +1628,10 @@ static void adm8211_tx_raw(struct ieee80211_hw *dev, struct sk_buff *skb,
unsigned int entry;
u32 flag;
- mapping = pci_map_single(priv->pdev, skb->data, skb->len,
- PCI_DMA_TODEVICE);
+ mapping = dma_map_single(&priv->pdev->dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&priv->pdev->dev, mapping))
+ return -ENOMEM;
spin_lock_irqsave(&priv->lock, flags);
@@ -1658,10 +1664,14 @@ static void adm8211_tx_raw(struct ieee80211_hw *dev, struct sk_buff *skb,
/* Trigger transmit poll */
ADM8211_CSR_WRITE(TDR, 0);
+
+ return 0;
}
/* Put adm8211_tx_hdr on skb and transmit */
-static int adm8211_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
+static void adm8211_tx(struct ieee80211_hw *dev,
+ struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
{
struct adm8211_tx_hdr *txhdr;
size_t payload_len, hdrlen;
@@ -1682,7 +1692,7 @@ static int adm8211_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
skb_pull(skb, hdrlen);
payload_len = skb->len;
- txhdr = (struct adm8211_tx_hdr *) skb_push(skb, sizeof(*txhdr));
+ txhdr = skb_push(skb, sizeof(*txhdr));
memset(txhdr, 0, sizeof(*txhdr));
memcpy(txhdr->da, ieee80211_get_DA(hdr), ETH_ALEN);
txhdr->signal = plcp_signal;
@@ -1709,9 +1719,10 @@ static int adm8211_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
txhdr->retry_limit = info->control.rates[0].count;
- adm8211_tx_raw(dev, skb, plcp_signal, hdrlen);
-
- return NETDEV_TX_OK;
+ if (adm8211_tx_raw(dev, skb, plcp_signal, hdrlen)) {
+ /* Drop packet */
+ ieee80211_free_txskb(dev, skb);
+ }
}
static int adm8211_alloc_rings(struct ieee80211_hw *dev)
@@ -1730,8 +1741,8 @@ static int adm8211_alloc_rings(struct ieee80211_hw *dev)
/* Allocate TX/RX descriptors */
ring_size = sizeof(struct adm8211_desc) * priv->rx_ring_size +
sizeof(struct adm8211_desc) * priv->tx_ring_size;
- priv->rx_ring = pci_alloc_consistent(priv->pdev, ring_size,
- &priv->rx_ring_dma);
+ priv->rx_ring = dma_alloc_coherent(&priv->pdev->dev, ring_size,
+ &priv->rx_ring_dma, GFP_KERNEL);
if (!priv->rx_ring) {
kfree(priv->rx_buffers);
@@ -1740,8 +1751,7 @@ static int adm8211_alloc_rings(struct ieee80211_hw *dev)
return -ENOMEM;
}
- priv->tx_ring = (struct adm8211_desc *)(priv->rx_ring +
- priv->rx_ring_size);
+ priv->tx_ring = priv->rx_ring + priv->rx_ring_size;
priv->tx_ring_dma = priv->rx_ring_dma +
sizeof(struct adm8211_desc) * priv->rx_ring_size;
@@ -1749,26 +1759,31 @@ static int adm8211_alloc_rings(struct ieee80211_hw *dev)
}
static const struct ieee80211_ops adm8211_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = adm8211_tx,
+ .wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = adm8211_start,
.stop = adm8211_stop,
.add_interface = adm8211_add_interface,
.remove_interface = adm8211_remove_interface,
.config = adm8211_config,
.bss_info_changed = adm8211_bss_info_changed,
+ .prepare_multicast = adm8211_prepare_multicast,
.configure_filter = adm8211_configure_filter,
.get_stats = adm8211_get_stats,
- .get_tx_stats = adm8211_get_tx_stats,
.get_tsf = adm8211_get_tsft
};
-static int __devinit adm8211_probe(struct pci_dev *pdev,
+static int adm8211_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
struct ieee80211_hw *dev;
struct adm8211_priv *priv;
- unsigned long mem_addr, mem_len;
- unsigned int io_addr, io_len;
+ unsigned long mem_len;
+ unsigned int io_len;
int err;
u32 reg;
u8 perm_addr[ETH_ALEN];
@@ -1780,13 +1795,12 @@ static int __devinit adm8211_probe(struct pci_dev *pdev,
return err;
}
- io_addr = pci_resource_start(pdev, 0);
io_len = pci_resource_len(pdev, 0);
- mem_addr = pci_resource_start(pdev, 1);
mem_len = pci_resource_len(pdev, 1);
if (io_len < 256 || mem_len < 1024) {
printk(KERN_ERR "%s (adm8211): Too short PCI resources\n",
pci_name(pdev));
+ err = -ENOMEM;
goto err_disable_pdev;
}
@@ -1796,6 +1810,7 @@ static int __devinit adm8211_probe(struct pci_dev *pdev,
if (reg != ADM8211_SIG1 && reg != ADM8211_SIG2) {
printk(KERN_ERR "%s (adm8211): Invalid signature (0x%x)\n",
pci_name(pdev), reg);
+ err = -EINVAL;
goto err_disable_pdev;
}
@@ -1806,8 +1821,8 @@ static int __devinit adm8211_probe(struct pci_dev *pdev,
return err; /* someone else grabbed it? don't disable it */
}
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) ||
- pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (err) {
printk(KERN_ERR "%s (adm8211): No suitable DMA available\n",
pci_name(pdev));
goto err_free_reg;
@@ -1838,13 +1853,15 @@ static int __devinit adm8211_probe(struct pci_dev *pdev,
if (!priv->map) {
printk(KERN_ERR "%s (adm8211): Cannot map device memory\n",
pci_name(pdev));
+ err = -ENOMEM;
goto err_free_dev;
}
priv->rx_ring_size = rx_ring_size;
priv->tx_ring_size = tx_ring_size;
- if (adm8211_alloc_rings(dev)) {
+ err = adm8211_alloc_rings(dev);
+ if (err) {
printk(KERN_ERR "%s (adm8211): Cannot allocate TX/RX ring\n",
pci_name(pdev));
goto err_iounmap;
@@ -1857,16 +1874,15 @@ static int __devinit adm8211_probe(struct pci_dev *pdev,
if (!is_valid_ether_addr(perm_addr)) {
printk(KERN_WARNING "%s (adm8211): Invalid hwaddr in EEPROM!\n",
pci_name(pdev));
- random_ether_addr(perm_addr);
+ eth_random_addr(perm_addr);
}
SET_IEEE80211_PERM_ADDR(dev, perm_addr);
dev->extra_tx_headroom = sizeof(struct adm8211_tx_hdr);
- /* dev->flags = IEEE80211_HW_RX_INCLUDES_FCS in promisc mode */
- dev->flags = IEEE80211_HW_SIGNAL_UNSPEC;
+ /* dev->flags = RX_INCLUDES_FCS in promisc mode */
+ ieee80211_hw_set(dev, SIGNAL_UNSPEC);
dev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
- dev->channel_change_time = 1000;
dev->max_signal = 100; /* FIXME: find better value */
dev->queues = 1; /* ADM8211C supports more, maybe ADM8211B too */
@@ -1896,33 +1912,36 @@ static int __devinit adm8211_probe(struct pci_dev *pdev,
priv->channel = 1;
- dev->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band;
+ dev->wiphy->bands[NL80211_BAND_2GHZ] = &priv->band;
+
+ wiphy_ext_feature_set(dev->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
err = ieee80211_register_hw(dev);
if (err) {
printk(KERN_ERR "%s (adm8211): Cannot register device\n",
pci_name(pdev));
- goto err_free_desc;
+ goto err_free_eeprom;
}
- printk(KERN_INFO "%s: hwaddr %pM, Rev 0x%02x\n",
- wiphy_name(dev->wiphy), dev->wiphy->perm_addr,
- pdev->revision);
+ wiphy_info(dev->wiphy, "hwaddr %pM, Rev 0x%02x\n",
+ dev->wiphy->perm_addr, pdev->revision);
return 0;
+ err_free_eeprom:
+ kfree(priv->eeprom);
+
err_free_desc:
- pci_free_consistent(pdev,
- sizeof(struct adm8211_desc) * priv->rx_ring_size +
- sizeof(struct adm8211_desc) * priv->tx_ring_size,
- priv->rx_ring, priv->rx_ring_dma);
+ dma_free_coherent(&pdev->dev,
+ sizeof(struct adm8211_desc) * priv->rx_ring_size +
+ sizeof(struct adm8211_desc) * priv->tx_ring_size,
+ priv->rx_ring, priv->rx_ring_dma);
kfree(priv->rx_buffers);
err_iounmap:
pci_iounmap(pdev, priv->map);
err_free_dev:
- pci_set_drvdata(pdev, NULL);
ieee80211_free_hw(dev);
err_free_reg:
@@ -1934,7 +1953,7 @@ static int __devinit adm8211_probe(struct pci_dev *pdev,
}
-static void __devexit adm8211_remove(struct pci_dev *pdev)
+static void adm8211_remove(struct pci_dev *pdev)
{
struct ieee80211_hw *dev = pci_get_drvdata(pdev);
struct adm8211_priv *priv;
@@ -1946,10 +1965,10 @@ static void __devexit adm8211_remove(struct pci_dev *pdev)
priv = dev->priv;
- pci_free_consistent(pdev,
- sizeof(struct adm8211_desc) * priv->rx_ring_size +
- sizeof(struct adm8211_desc) * priv->tx_ring_size,
- priv->rx_ring, priv->rx_ring_dma);
+ dma_free_coherent(&pdev->dev,
+ sizeof(struct adm8211_desc) * priv->rx_ring_size +
+ sizeof(struct adm8211_desc) * priv->tx_ring_size,
+ priv->rx_ring, priv->rx_ring_dma);
kfree(priv->rx_buffers);
kfree(priv->eeprom);
@@ -1960,67 +1979,20 @@ static void __devexit adm8211_remove(struct pci_dev *pdev)
}
-#ifdef CONFIG_PM
-static int adm8211_suspend(struct pci_dev *pdev, pm_message_t state)
-{
- struct ieee80211_hw *dev = pci_get_drvdata(pdev);
- struct adm8211_priv *priv = dev->priv;
-
- if (priv->mode != NL80211_IFTYPE_UNSPECIFIED) {
- ieee80211_stop_queues(dev);
- adm8211_stop(dev);
- }
-
- pci_save_state(pdev);
- pci_set_power_state(pdev, pci_choose_state(pdev, state));
- return 0;
-}
-
-static int adm8211_resume(struct pci_dev *pdev)
-{
- struct ieee80211_hw *dev = pci_get_drvdata(pdev);
- struct adm8211_priv *priv = dev->priv;
-
- pci_set_power_state(pdev, PCI_D0);
- pci_restore_state(pdev);
-
- if (priv->mode != NL80211_IFTYPE_UNSPECIFIED) {
- adm8211_start(dev);
- ieee80211_wake_queues(dev);
- }
-
- return 0;
-}
-#endif /* CONFIG_PM */
-
+#define adm8211_suspend NULL
+#define adm8211_resume NULL
MODULE_DEVICE_TABLE(pci, adm8211_pci_id_table);
+static SIMPLE_DEV_PM_OPS(adm8211_pm_ops, adm8211_suspend, adm8211_resume);
+
/* TODO: implement enable_wake */
static struct pci_driver adm8211_driver = {
.name = "adm8211",
.id_table = adm8211_pci_id_table,
.probe = adm8211_probe,
- .remove = __devexit_p(adm8211_remove),
-#ifdef CONFIG_PM
- .suspend = adm8211_suspend,
- .resume = adm8211_resume,
-#endif /* CONFIG_PM */
+ .remove = adm8211_remove,
+ .driver.pm = &adm8211_pm_ops,
};
-
-
-static int __init adm8211_init(void)
-{
- return pci_register_driver(&adm8211_driver);
-}
-
-
-static void __exit adm8211_exit(void)
-{
- pci_unregister_driver(&adm8211_driver);
-}
-
-
-module_init(adm8211_init);
-module_exit(adm8211_exit);
+module_pci_driver(adm8211_driver);
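
Aside: module_pci_driver() generates, via the generic module_driver() helper, roughly the init/exit boilerplate this hunk deletes. A sketch of the expansion:

static int __init adm8211_driver_init(void)
{
	return pci_register_driver(&adm8211_driver);
}
module_init(adm8211_driver_init);

static void __exit adm8211_driver_exit(void)
{
	pci_unregister_driver(&adm8211_driver);
}
module_exit(adm8211_driver_exit);
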
diff --git a/drivers/net/wireless/adm8211.h b/drivers/net/wireless/admtek/adm8211.h
index 4f6ab1322189..095625ecb8ff 100644
--- a/drivers/net/wireless/adm8211.h
+++ b/drivers/net/wireless/admtek/adm8211.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ADM8211_H
#define ADM8211_H
@@ -80,7 +81,7 @@ struct adm8211_csr {
__le32 FEMR; /* 0x104 */
__le32 FPSR; /* 0x108 */
__le32 FFER; /* 0x10C */
-} __attribute__ ((packed));
+} __packed;
/* CSR0 - PAR (PCI Address Register) */
#define ADM8211_PAR_MWIE (1 << 24)
@@ -266,7 +267,7 @@ do { \
#define ADM8211_SYNCTL_CS1 (1 << 28)
#define ADM8211_SYNCTL_CAL (1 << 27)
#define ADM8211_SYNCTL_SELCAL (1 << 26)
-#define ADM8211_SYNCTL_RFtype ((1 << 24) || (1 << 23) || (1 << 22))
+#define ADM8211_SYNCTL_RFtype ((1 << 24) | (1 << 23) | (1 << 22))
#define ADM8211_SYNCTL_RFMD (1 << 22)
#define ADM8211_SYNCTL_GENERAL (0x7 << 22)
/* SYNCTL 21:0 Data (Si4126: 18-bit data, 4-bit address) */
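
Note that the ADM8211_SYNCTL_RFtype change above is a real bug fix, not a cleanup: || is logical OR, so the old expression collapsed to 1 instead of building a bit mask. A self-contained demonstration in plain C:

#include <assert.h>

int main(void)
{
	/* Logical OR of nonzero operands is just 1 ... */
	assert(((1 << 24) || (1 << 23) || (1 << 22)) == 1);
	/* ... bitwise OR builds the intended three-bit mask. */
	assert(((1 << 24) | (1 << 23) | (1 << 22)) == 0x01c00000);
	return 0;
}
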
@@ -484,7 +485,7 @@ struct adm8211_tx_hdr {
u8 entry_control; // huh??
u16 reserved_1;
u32 reserved_2;
-} __attribute__ ((packed));
+} __packed;
#define RX_COPY_BREAK 128
@@ -530,8 +531,8 @@ struct adm8211_eeprom {
u8 lpf_cutoff[14]; /* 0x62 */
u8 lnags_threshold[14]; /* 0x70 */
__le16 checksum; /* 0x7E */
- u8 cis_data[0]; /* 0x80, 384 bytes */
-} __attribute__ ((packed));
+ u8 cis_data[]; /* 0x80, 384 bytes */
+} __packed;
struct adm8211_priv {
struct pci_dev *pdev;
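
Aside: the cis_data[0] to cis_data[] change replaces the old GNU zero-length-array idiom with a C99 flexible array member. sizeof the struct excludes the trailing array, so a single allocation carries the header plus the variable-length data. A hedged sketch with made-up names:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct example_eeprom {
	uint16_t checksum;
	uint8_t  cis_data[];	/* flexible array member, C99 */
};

static struct example_eeprom *example_alloc(size_t cis_len)
{
	struct example_eeprom *e = malloc(sizeof(*e) + cis_len);

	if (e)
		memset(e->cis_data, 0, cis_len);	/* trailing bytes */
	return e;
}
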
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
deleted file mode 100644
index d73475739127..000000000000
--- a/drivers/net/wireless/airo.c
+++ /dev/null
@@ -1,8264 +0,0 @@
-/*======================================================================
-
- Aironet driver for 4500 and 4800 series cards
-
- This code is released under both the GPL version 2 and BSD licenses.
- Either license may be used. The respective licenses are found at
- the end of this file.
-
- This code was developed by Benjamin Reed <breed@users.sourceforge.net>
- including portions of which come from the Aironet PC4500
- Developer's Reference Manual and used with permission. Copyright
- (C) 1999 Benjamin Reed. All Rights Reserved. Permission to use
- code in the Developer's manual was granted for this driver by
- Aironet. Major code contributions were received from Javier Achirica
- <achirica@users.sourceforge.net> and Jean Tourrilhes <jt@hpl.hp.com>.
- Code was also integrated from the Cisco Aironet driver for Linux.
- Support for MPI350 cards was added by Fabrice Bellet
- <fabrice@bellet.info>.
-
-======================================================================*/
-
-#include <linux/err.h>
-#include <linux/init.h>
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/proc_fs.h>
-
-#include <linux/sched.h>
-#include <linux/ptrace.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/timer.h>
-#include <linux/interrupt.h>
-#include <linux/in.h>
-#include <linux/bitops.h>
-#include <linux/scatterlist.h>
-#include <linux/crypto.h>
-#include <asm/io.h>
-#include <asm/system.h>
-#include <asm/unaligned.h>
-
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/if_arp.h>
-#include <linux/ioport.h>
-#include <linux/pci.h>
-#include <asm/uaccess.h>
-#include <linux/kthread.h>
-#include <linux/freezer.h>
-
-#include <linux/ieee80211.h>
-
-#include "airo.h"
-
-#define DRV_NAME "airo"
-
-#ifdef CONFIG_PCI
-static struct pci_device_id card_ids[] = {
- { 0x14b9, 1, PCI_ANY_ID, PCI_ANY_ID, },
- { 0x14b9, 0x4500, PCI_ANY_ID, PCI_ANY_ID },
- { 0x14b9, 0x4800, PCI_ANY_ID, PCI_ANY_ID, },
- { 0x14b9, 0x0340, PCI_ANY_ID, PCI_ANY_ID, },
- { 0x14b9, 0x0350, PCI_ANY_ID, PCI_ANY_ID, },
- { 0x14b9, 0x5000, PCI_ANY_ID, PCI_ANY_ID, },
- { 0x14b9, 0xa504, PCI_ANY_ID, PCI_ANY_ID, },
- { 0, }
-};
-MODULE_DEVICE_TABLE(pci, card_ids);
-
-static int airo_pci_probe(struct pci_dev *, const struct pci_device_id *);
-static void airo_pci_remove(struct pci_dev *);
-static int airo_pci_suspend(struct pci_dev *pdev, pm_message_t state);
-static int airo_pci_resume(struct pci_dev *pdev);
-
-static struct pci_driver airo_driver = {
- .name = DRV_NAME,
- .id_table = card_ids,
- .probe = airo_pci_probe,
- .remove = __devexit_p(airo_pci_remove),
- .suspend = airo_pci_suspend,
- .resume = airo_pci_resume,
-};
-#endif /* CONFIG_PCI */
-
-/* Include Wireless Extension definition and check version - Jean II */
-#include <linux/wireless.h>
-#define WIRELESS_SPY /* enable iwspy support */
-#include <net/iw_handler.h> /* New driver API */
-
-#define CISCO_EXT /* enable Cisco extensions */
-#ifdef CISCO_EXT
-#include <linux/delay.h>
-#endif
-
-/* Hack to do some power saving */
-#define POWER_ON_DOWN
-
-/* As you can see this list is HUGE!
- I really don't know what a lot of these counts are about, but they
- are all here for completeness. If the IGNLABEL macro is put
- in front of the label, that statistic will not be included in the list
- of statistics in the /proc filesystem */
-
-#define IGNLABEL(comment) NULL
-static char *statsLabels[] = {
- "RxOverrun",
- IGNLABEL("RxPlcpCrcErr"),
- IGNLABEL("RxPlcpFormatErr"),
- IGNLABEL("RxPlcpLengthErr"),
- "RxMacCrcErr",
- "RxMacCrcOk",
- "RxWepErr",
- "RxWepOk",
- "RetryLong",
- "RetryShort",
- "MaxRetries",
- "NoAck",
- "NoCts",
- "RxAck",
- "RxCts",
- "TxAck",
- "TxRts",
- "TxCts",
- "TxMc",
- "TxBc",
- "TxUcFrags",
- "TxUcPackets",
- "TxBeacon",
- "RxBeacon",
- "TxSinColl",
- "TxMulColl",
- "DefersNo",
- "DefersProt",
- "DefersEngy",
- "DupFram",
- "RxFragDisc",
- "TxAged",
- "RxAged",
- "LostSync-MaxRetry",
- "LostSync-MissedBeacons",
- "LostSync-ArlExceeded",
- "LostSync-Deauth",
- "LostSync-Disassoced",
- "LostSync-TsfTiming",
- "HostTxMc",
- "HostTxBc",
- "HostTxUc",
- "HostTxFail",
- "HostRxMc",
- "HostRxBc",
- "HostRxUc",
- "HostRxDiscard",
- IGNLABEL("HmacTxMc"),
- IGNLABEL("HmacTxBc"),
- IGNLABEL("HmacTxUc"),
- IGNLABEL("HmacTxFail"),
- IGNLABEL("HmacRxMc"),
- IGNLABEL("HmacRxBc"),
- IGNLABEL("HmacRxUc"),
- IGNLABEL("HmacRxDiscard"),
- IGNLABEL("HmacRxAccepted"),
- "SsidMismatch",
- "ApMismatch",
- "RatesMismatch",
- "AuthReject",
- "AuthTimeout",
- "AssocReject",
- "AssocTimeout",
- IGNLABEL("ReasonOutsideTable"),
- IGNLABEL("ReasonStatus1"),
- IGNLABEL("ReasonStatus2"),
- IGNLABEL("ReasonStatus3"),
- IGNLABEL("ReasonStatus4"),
- IGNLABEL("ReasonStatus5"),
- IGNLABEL("ReasonStatus6"),
- IGNLABEL("ReasonStatus7"),
- IGNLABEL("ReasonStatus8"),
- IGNLABEL("ReasonStatus9"),
- IGNLABEL("ReasonStatus10"),
- IGNLABEL("ReasonStatus11"),
- IGNLABEL("ReasonStatus12"),
- IGNLABEL("ReasonStatus13"),
- IGNLABEL("ReasonStatus14"),
- IGNLABEL("ReasonStatus15"),
- IGNLABEL("ReasonStatus16"),
- IGNLABEL("ReasonStatus17"),
- IGNLABEL("ReasonStatus18"),
- IGNLABEL("ReasonStatus19"),
- "RxMan",
- "TxMan",
- "RxRefresh",
- "TxRefresh",
- "RxPoll",
- "TxPoll",
- "HostRetries",
- "LostSync-HostReq",
- "HostTxBytes",
- "HostRxBytes",
- "ElapsedUsec",
- "ElapsedSec",
- "LostSyncBetterAP",
- "PrivacyMismatch",
- "Jammed",
- "DiscRxNotWepped",
- "PhyEleMismatch",
- (char*)-1 };
-#ifndef RUN_AT
-#define RUN_AT(x) (jiffies+(x))
-#endif
-
-
-/* These variables are for insmod, since it seems that the rates
- can only be set in setup_card. Rates should be a comma separated
- (no spaces) list of rates (up to 8). */
-
-static int rates[8];
-static int basic_rate;
-static char *ssids[3];
-
-static int io[4];
-static int irq[4];
-
-static
-int maxencrypt /* = 0 */; /* The highest rate that the card can encrypt at.
- 0 means no limit. For old cards this was 4 */
-
-static int auto_wep /* = 0 */; /* If set, it tries to figure out the wep mode */
-static int aux_bap /* = 0 */; /* Checks to see if the aux ports are needed to read
- the bap, needed on some older cards and buses. */
-static int adhoc;
-
-static int probe = 1;
-
-static int proc_uid /* = 0 */;
-
-static int proc_gid /* = 0 */;
-
-static int airo_perm = 0555;
-
-static int proc_perm = 0644;
-
-MODULE_AUTHOR("Benjamin Reed");
-MODULE_DESCRIPTION("Support for Cisco/Aironet 802.11 wireless ethernet \
-cards. Direct support for ISA/PCI/MPI cards and support \
-for PCMCIA when used with airo_cs.");
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_SUPPORTED_DEVICE("Aironet 4500, 4800 and Cisco 340/350");
-module_param_array(io, int, NULL, 0);
-module_param_array(irq, int, NULL, 0);
-module_param(basic_rate, int, 0);
-module_param_array(rates, int, NULL, 0);
-module_param_array(ssids, charp, NULL, 0);
-module_param(auto_wep, int, 0);
-MODULE_PARM_DESC(auto_wep, "If non-zero, the driver will keep looping through \
-the authentication options until an association is made. The value of \
-auto_wep is the number of wep keys to check. A value of 2 will try using \
-the key at index 0 and index 1.");
-module_param(aux_bap, int, 0);
-MODULE_PARM_DESC(aux_bap, "If non-zero, the driver will switch into a mode \
-that seems to work better for older cards with some older buses. Before \
-switching it checks that the switch is needed.");
-module_param(maxencrypt, int, 0);
-MODULE_PARM_DESC(maxencrypt, "The maximum speed that the card can do \
-encryption. Units are in 512kbs. Zero (default) means there is no limit. \
-Older cards used to be limited to 2mbs (4).");
-module_param(adhoc, int, 0);
-MODULE_PARM_DESC(adhoc, "If non-zero, the card will start in adhoc mode.");
-module_param(probe, int, 0);
-MODULE_PARM_DESC(probe, "If zero, the driver won't start the card.");
-
-module_param(proc_uid, int, 0);
-MODULE_PARM_DESC(proc_uid, "The uid that the /proc files will belong to.");
-module_param(proc_gid, int, 0);
-MODULE_PARM_DESC(proc_gid, "The gid that the /proc files will belong to.");
-module_param(airo_perm, int, 0);
-MODULE_PARM_DESC(airo_perm, "The permission bits of /proc/[driver/]aironet.");
-module_param(proc_perm, int, 0);
-MODULE_PARM_DESC(proc_perm, "The permission bits of the files in /proc");
-
-/* This is a kind of sloppy hack to get this information to OUT4500 and
- IN4500. I would be extremely interested in the situation where this
- doesn't work though!!! */
-static int do8bitIO /* = 0 */;
-
-/* Return codes */
-#define SUCCESS 0
-#define ERROR -1
-#define NO_PACKET -2
-
-/* Commands */
-#define NOP2 0x0000
-#define MAC_ENABLE 0x0001
-#define MAC_DISABLE 0x0002
-#define CMD_LOSE_SYNC 0x0003 /* Not sure what this does... */
-#define CMD_SOFTRESET 0x0004
-#define HOSTSLEEP 0x0005
-#define CMD_MAGIC_PKT 0x0006
-#define CMD_SETWAKEMASK 0x0007
-#define CMD_READCFG 0x0008
-#define CMD_SETMODE 0x0009
-#define CMD_ALLOCATETX 0x000a
-#define CMD_TRANSMIT 0x000b
-#define CMD_DEALLOCATETX 0x000c
-#define NOP 0x0010
-#define CMD_WORKAROUND 0x0011
-#define CMD_ALLOCATEAUX 0x0020
-#define CMD_ACCESS 0x0021
-#define CMD_PCIBAP 0x0022
-#define CMD_PCIAUX 0x0023
-#define CMD_ALLOCBUF 0x0028
-#define CMD_GETTLV 0x0029
-#define CMD_PUTTLV 0x002a
-#define CMD_DELTLV 0x002b
-#define CMD_FINDNEXTTLV 0x002c
-#define CMD_PSPNODES 0x0030
-#define CMD_SETCW 0x0031
-#define CMD_SETPCF 0x0032
-#define CMD_SETPHYREG 0x003e
-#define CMD_TXTEST 0x003f
-#define MAC_ENABLETX 0x0101
-#define CMD_LISTBSS 0x0103
-#define CMD_SAVECFG 0x0108
-#define CMD_ENABLEAUX 0x0111
-#define CMD_WRITERID 0x0121
-#define CMD_USEPSPNODES 0x0130
-#define MAC_ENABLERX 0x0201
-
-/* Command errors */
-#define ERROR_QUALIF 0x00
-#define ERROR_ILLCMD 0x01
-#define ERROR_ILLFMT 0x02
-#define ERROR_INVFID 0x03
-#define ERROR_INVRID 0x04
-#define ERROR_LARGE 0x05
-#define ERROR_NDISABL 0x06
-#define ERROR_ALLOCBSY 0x07
-#define ERROR_NORD 0x0B
-#define ERROR_NOWR 0x0C
-#define ERROR_INVFIDTX 0x0D
-#define ERROR_TESTACT 0x0E
-#define ERROR_TAGNFND 0x12
-#define ERROR_DECODE 0x20
-#define ERROR_DESCUNAV 0x21
-#define ERROR_BADLEN 0x22
-#define ERROR_MODE 0x80
-#define ERROR_HOP 0x81
-#define ERROR_BINTER 0x82
-#define ERROR_RXMODE 0x83
-#define ERROR_MACADDR 0x84
-#define ERROR_RATES 0x85
-#define ERROR_ORDER 0x86
-#define ERROR_SCAN 0x87
-#define ERROR_AUTH 0x88
-#define ERROR_PSMODE 0x89
-#define ERROR_RTYPE 0x8A
-#define ERROR_DIVER 0x8B
-#define ERROR_SSID 0x8C
-#define ERROR_APLIST 0x8D
-#define ERROR_AUTOWAKE 0x8E
-#define ERROR_LEAP 0x8F
-
-/* Registers */
-#define COMMAND 0x00
-#define PARAM0 0x02
-#define PARAM1 0x04
-#define PARAM2 0x06
-#define STATUS 0x08
-#define RESP0 0x0a
-#define RESP1 0x0c
-#define RESP2 0x0e
-#define LINKSTAT 0x10
-#define SELECT0 0x18
-#define OFFSET0 0x1c
-#define RXFID 0x20
-#define TXALLOCFID 0x22
-#define TXCOMPLFID 0x24
-#define DATA0 0x36
-#define EVSTAT 0x30
-#define EVINTEN 0x32
-#define EVACK 0x34
-#define SWS0 0x28
-#define SWS1 0x2a
-#define SWS2 0x2c
-#define SWS3 0x2e
-#define AUXPAGE 0x3A
-#define AUXOFF 0x3C
-#define AUXDATA 0x3E
-
-#define FID_TX 1
-#define FID_RX 2
-/* Offset into aux memory for descriptors */
-#define AUX_OFFSET 0x800
-/* Size of allocated packets */
-#define PKTSIZE 1840
-#define RIDSIZE 2048
-/* Size of the transmit queue */
-#define MAXTXQ 64
-
-/* BAP selectors */
-#define BAP0 0 /* Used for receiving packets */
-#define BAP1 2 /* Used for xmiting packets and working with RIDS */
-
-/* Flags */
-#define COMMAND_BUSY 0x8000
-
-#define BAP_BUSY 0x8000
-#define BAP_ERR 0x4000
-#define BAP_DONE 0x2000
-
-#define PROMISC 0xffff
-#define NOPROMISC 0x0000
-
-#define EV_CMD 0x10
-#define EV_CLEARCOMMANDBUSY 0x4000
-#define EV_RX 0x01
-#define EV_TX 0x02
-#define EV_TXEXC 0x04
-#define EV_ALLOC 0x08
-#define EV_LINK 0x80
-#define EV_AWAKE 0x100
-#define EV_TXCPY 0x400
-#define EV_UNKNOWN 0x800
-#define EV_MIC 0x1000 /* Message Integrity Check Interrupt */
-#define EV_AWAKEN 0x2000
-#define STATUS_INTS (EV_AWAKE|EV_LINK|EV_TXEXC|EV_TX|EV_TXCPY|EV_RX|EV_MIC)
-
-#ifdef CHECK_UNKNOWN_INTS
-#define IGNORE_INTS ( EV_CMD | EV_UNKNOWN)
-#else
-#define IGNORE_INTS (~STATUS_INTS)
-#endif
-
-/* RID TYPES */
-#define RID_RW 0x20
-
-/* The RIDs */
-#define RID_CAPABILITIES 0xFF00
-#define RID_APINFO 0xFF01
-#define RID_RADIOINFO 0xFF02
-#define RID_UNKNOWN3 0xFF03
-#define RID_RSSI 0xFF04
-#define RID_CONFIG 0xFF10
-#define RID_SSID 0xFF11
-#define RID_APLIST 0xFF12
-#define RID_DRVNAME 0xFF13
-#define RID_ETHERENCAP 0xFF14
-#define RID_WEP_TEMP 0xFF15
-#define RID_WEP_PERM 0xFF16
-#define RID_MODULATION 0xFF17
-#define RID_OPTIONS 0xFF18
-#define RID_ACTUALCONFIG 0xFF20 /*readonly*/
-#define RID_FACTORYCONFIG 0xFF21
-#define RID_UNKNOWN22 0xFF22
-#define RID_LEAPUSERNAME 0xFF23
-#define RID_LEAPPASSWORD 0xFF24
-#define RID_STATUS 0xFF50
-#define RID_BEACON_HST 0xFF51
-#define RID_BUSY_HST 0xFF52
-#define RID_RETRIES_HST 0xFF53
-#define RID_UNKNOWN54 0xFF54
-#define RID_UNKNOWN55 0xFF55
-#define RID_UNKNOWN56 0xFF56
-#define RID_MIC 0xFF57
-#define RID_STATS16 0xFF60
-#define RID_STATS16DELTA 0xFF61
-#define RID_STATS16DELTACLEAR 0xFF62
-#define RID_STATS 0xFF68
-#define RID_STATSDELTA 0xFF69
-#define RID_STATSDELTACLEAR 0xFF6A
-#define RID_ECHOTEST_RID 0xFF70
-#define RID_ECHOTEST_RESULTS 0xFF71
-#define RID_BSSLISTFIRST 0xFF72
-#define RID_BSSLISTNEXT 0xFF73
-#define RID_WPA_BSSLISTFIRST 0xFF74
-#define RID_WPA_BSSLISTNEXT 0xFF75
-
-typedef struct {
- u16 cmd;
- u16 parm0;
- u16 parm1;
- u16 parm2;
-} Cmd;
-
-typedef struct {
- u16 status;
- u16 rsp0;
- u16 rsp1;
- u16 rsp2;
-} Resp;
-
-/*
- * RIDs and endianness: the RIDs are always kept in CPU endian, since
- * that is what all the patches from the big-endian folks end up doing,
- * so all RID access should go through the read/writeXXXRid routines.
- */
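-/*
- * Illustrative access pattern (a sketch, not part of the original flow):
- * the __le16 struct fields are converted at the point of use, e.g. after
- * readStatusRid() succeeds:
- *
- *	StatusRid statr;
- *	if (readStatusRid(ai, &statr, 1) == SUCCESS)
- *		printk(KERN_INFO "beacon %u\n", le16_to_cpu(statr.beaconPeriod));
- */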
-
-/* This structure came from an email sent to me by an engineer at
- Aironet for inclusion into this driver */
-typedef struct WepKeyRid WepKeyRid;
-struct WepKeyRid {
- __le16 len;
- __le16 kindex;
- u8 mac[ETH_ALEN];
- __le16 klen;
- u8 key[16];
-} __attribute__ ((packed));
-
-/* These structures are from Aironet's PC4500 Developer's Manual */
-typedef struct Ssid Ssid;
-struct Ssid {
- __le16 len;
- u8 ssid[32];
-} __attribute__ ((packed));
-
-typedef struct SsidRid SsidRid;
-struct SsidRid {
- __le16 len;
- Ssid ssids[3];
-} __attribute__ ((packed));
-
-typedef struct ModulationRid ModulationRid;
-struct ModulationRid {
- __le16 len;
- __le16 modulation;
-#define MOD_DEFAULT cpu_to_le16(0)
-#define MOD_CCK cpu_to_le16(1)
-#define MOD_MOK cpu_to_le16(2)
-} __attribute__ ((packed));
-
-typedef struct ConfigRid ConfigRid;
-struct ConfigRid {
- __le16 len; /* sizeof(ConfigRid) */
- __le16 opmode; /* operating mode */
-#define MODE_STA_IBSS cpu_to_le16(0)
-#define MODE_STA_ESS cpu_to_le16(1)
-#define MODE_AP cpu_to_le16(2)
-#define MODE_AP_RPTR cpu_to_le16(3)
-#define MODE_CFG_MASK cpu_to_le16(0xff)
-#define MODE_ETHERNET_HOST cpu_to_le16(0<<8) /* rx payloads converted */
-#define MODE_LLC_HOST cpu_to_le16(1<<8) /* rx payloads left as is */
-#define MODE_AIRONET_EXTEND cpu_to_le16(1<<9) /* enable Aironet extensions */
-#define MODE_AP_INTERFACE cpu_to_le16(1<<10) /* enable ap interface extensions */
-#define MODE_ANTENNA_ALIGN cpu_to_le16(1<<11) /* enable antenna alignment */
-#define MODE_ETHER_LLC cpu_to_le16(1<<12) /* enable ethernet LLC */
-#define MODE_LEAF_NODE cpu_to_le16(1<<13) /* enable leaf node bridge */
-#define MODE_CF_POLLABLE cpu_to_le16(1<<14) /* enable CF pollable */
-#define MODE_MIC cpu_to_le16(1<<15) /* enable MIC */
- __le16 rmode; /* receive mode */
-#define RXMODE_BC_MC_ADDR cpu_to_le16(0)
-#define RXMODE_BC_ADDR cpu_to_le16(1) /* ignore multicasts */
-#define RXMODE_ADDR cpu_to_le16(2) /* ignore multicast and broadcast */
-#define RXMODE_RFMON cpu_to_le16(3) /* wireless monitor mode */
-#define RXMODE_RFMON_ANYBSS cpu_to_le16(4)
-#define RXMODE_LANMON cpu_to_le16(5) /* lan style monitor -- data packets only */
-#define RXMODE_MASK cpu_to_le16(255)
-#define RXMODE_DISABLE_802_3_HEADER cpu_to_le16(1<<8) /* disables 802.3 header on rx */
-#define RXMODE_FULL_MASK (RXMODE_MASK | RXMODE_DISABLE_802_3_HEADER)
-#define RXMODE_NORMALIZED_RSSI cpu_to_le16(1<<9) /* return normalized RSSI */
- __le16 fragThresh;
- __le16 rtsThres;
- u8 macAddr[ETH_ALEN];
- u8 rates[8];
- __le16 shortRetryLimit;
- __le16 longRetryLimit;
- __le16 txLifetime; /* in kusec */
- __le16 rxLifetime; /* in kusec */
- __le16 stationary;
- __le16 ordering;
- __le16 u16deviceType; /* for overriding device type */
- __le16 cfpRate;
- __le16 cfpDuration;
- __le16 _reserved1[3];
- /*---------- Scanning/Associating ----------*/
- __le16 scanMode;
-#define SCANMODE_ACTIVE cpu_to_le16(0)
-#define SCANMODE_PASSIVE cpu_to_le16(1)
-#define SCANMODE_AIROSCAN cpu_to_le16(2)
- __le16 probeDelay; /* in kusec */
- __le16 probeEnergyTimeout; /* in kusec */
- __le16 probeResponseTimeout;
- __le16 beaconListenTimeout;
- __le16 joinNetTimeout;
- __le16 authTimeout;
- __le16 authType;
-#define AUTH_OPEN cpu_to_le16(0x1)
-#define AUTH_ENCRYPT cpu_to_le16(0x101)
-#define AUTH_SHAREDKEY cpu_to_le16(0x102)
-#define AUTH_ALLOW_UNENCRYPTED cpu_to_le16(0x200)
- __le16 associationTimeout;
- __le16 specifiedApTimeout;
- __le16 offlineScanInterval;
- __le16 offlineScanDuration;
- __le16 linkLossDelay;
- __le16 maxBeaconLostTime;
- __le16 refreshInterval;
-#define DISABLE_REFRESH cpu_to_le16(0xFFFF)
- __le16 _reserved1a[1];
- /*---------- Power save operation ----------*/
- __le16 powerSaveMode;
-#define POWERSAVE_CAM cpu_to_le16(0)
-#define POWERSAVE_PSP cpu_to_le16(1)
-#define POWERSAVE_PSPCAM cpu_to_le16(2)
- __le16 sleepForDtims;
- __le16 listenInterval;
- __le16 fastListenInterval;
- __le16 listenDecay;
- __le16 fastListenDelay;
- __le16 _reserved2[2];
- /*---------- Ap/Ibss config items ----------*/
- __le16 beaconPeriod;
- __le16 atimDuration;
- __le16 hopPeriod;
- __le16 channelSet;
- __le16 channel;
- __le16 dtimPeriod;
- __le16 bridgeDistance;
- __le16 radioID;
- /*---------- Radio configuration ----------*/
- __le16 radioType;
-#define RADIOTYPE_DEFAULT cpu_to_le16(0)
-#define RADIOTYPE_802_11 cpu_to_le16(1)
-#define RADIOTYPE_LEGACY cpu_to_le16(2)
- u8 rxDiversity;
- u8 txDiversity;
- __le16 txPower;
-#define TXPOWER_DEFAULT 0
- __le16 rssiThreshold;
-#define RSSI_DEFAULT 0
- __le16 modulation;
-#define PREAMBLE_AUTO cpu_to_le16(0)
-#define PREAMBLE_LONG cpu_to_le16(1)
-#define PREAMBLE_SHORT cpu_to_le16(2)
- __le16 preamble;
- __le16 homeProduct;
- __le16 radioSpecific;
- /*---------- Aironet Extensions ----------*/
- u8 nodeName[16];
- __le16 arlThreshold;
- __le16 arlDecay;
- __le16 arlDelay;
- __le16 _reserved4[1];
- /*---------- Aironet Extensions ----------*/
- u8 magicAction;
-#define MAGIC_ACTION_STSCHG 1
-#define MAGIC_ACTION_RESUME 2
-#define MAGIC_IGNORE_MCAST (1<<8)
-#define MAGIC_IGNORE_BCAST (1<<9)
-#define MAGIC_SWITCH_TO_PSP (0<<10)
-#define MAGIC_STAY_IN_CAM (1<<10)
- u8 magicControl;
- __le16 autoWake;
-} __attribute__ ((packed));
-
-typedef struct StatusRid StatusRid;
-struct StatusRid {
- __le16 len;
- u8 mac[ETH_ALEN];
- __le16 mode;
- __le16 errorCode;
- __le16 sigQuality;
- __le16 SSIDlen;
- char SSID[32];
- char apName[16];
- u8 bssid[4][ETH_ALEN];
- __le16 beaconPeriod;
- __le16 dimPeriod;
- __le16 atimDuration;
- __le16 hopPeriod;
- __le16 channelSet;
- __le16 channel;
- __le16 hopsToBackbone;
- __le16 apTotalLoad;
- __le16 generatedLoad;
- __le16 accumulatedArl;
- __le16 signalQuality;
- __le16 currentXmitRate;
- __le16 apDevExtensions;
- __le16 normalizedSignalStrength;
- __le16 shortPreamble;
- u8 apIP[4];
- u8 noisePercent; /* Noise percent in last second */
- u8 noisedBm; /* Noise dBm in last second */
- u8 noiseAvePercent; /* Noise percent in last minute */
- u8 noiseAvedBm; /* Noise dBm in last minute */
- u8 noiseMaxPercent; /* Highest noise percent in last minute */
- u8 noiseMaxdBm; /* Highest noise dbm in last minute */
- __le16 load;
- u8 carrier[4];
- __le16 assocStatus;
-#define STAT_NOPACKETS 0
-#define STAT_NOCARRIERSET 10
-#define STAT_GOTCARRIERSET 11
-#define STAT_WRONGSSID 20
-#define STAT_BADCHANNEL 25
-#define STAT_BADBITRATES 30
-#define STAT_BADPRIVACY 35
-#define STAT_APFOUND 40
-#define STAT_APREJECTED 50
-#define STAT_AUTHENTICATING 60
-#define STAT_DEAUTHENTICATED 61
-#define STAT_AUTHTIMEOUT 62
-#define STAT_ASSOCIATING 70
-#define STAT_DEASSOCIATED 71
-#define STAT_ASSOCTIMEOUT 72
-#define STAT_NOTAIROAP 73
-#define STAT_ASSOCIATED 80
-#define STAT_LEAPING 90
-#define STAT_LEAPFAILED 91
-#define STAT_LEAPTIMEDOUT 92
-#define STAT_LEAPCOMPLETE 93
-} __attribute__ ((packed));
-
-typedef struct StatsRid StatsRid;
-struct StatsRid {
- __le16 len;
- __le16 spacer;
- __le32 vals[100];
-} __attribute__ ((packed));
-
-typedef struct APListRid APListRid;
-struct APListRid {
- __le16 len;
- u8 ap[4][ETH_ALEN];
-} __attribute__ ((packed));
-
-typedef struct CapabilityRid CapabilityRid;
-struct CapabilityRid {
- __le16 len;
- char oui[3];
- char zero;
- __le16 prodNum;
- char manName[32];
- char prodName[16];
- char prodVer[8];
- char factoryAddr[ETH_ALEN];
- char aironetAddr[ETH_ALEN];
- __le16 radioType;
- __le16 country;
- char callid[ETH_ALEN];
- char supportedRates[8];
- char rxDiversity;
- char txDiversity;
- __le16 txPowerLevels[8];
- __le16 hardVer;
- __le16 hardCap;
- __le16 tempRange;
- __le16 softVer;
- __le16 softSubVer;
- __le16 interfaceVer;
- __le16 softCap;
- __le16 bootBlockVer;
- __le16 requiredHard;
- __le16 extSoftCap;
-} __attribute__ ((packed));
-
-/* Only present on firmware >= 5.30.17 */
-typedef struct BSSListRidExtra BSSListRidExtra;
-struct BSSListRidExtra {
- __le16 unknown[4];
- u8 fixed[12]; /* WLAN management frame */
- u8 iep[624];
-} __attribute__ ((packed));
-
-typedef struct BSSListRid BSSListRid;
-struct BSSListRid {
- __le16 len;
- __le16 index; /* First is 0 and 0xffff means end of list */
-#define RADIO_FH 1 /* Frequency hopping radio type */
-#define RADIO_DS 2 /* Direct sequence radio type */
-#define RADIO_TMA 4 /* Proprietary radio used in old cards (2500) */
- __le16 radioType;
- u8 bssid[ETH_ALEN]; /* Mac address of the BSS */
- u8 zero;
- u8 ssidLen;
- u8 ssid[32];
- __le16 dBm;
-#define CAP_ESS cpu_to_le16(1<<0)
-#define CAP_IBSS cpu_to_le16(1<<1)
-#define CAP_PRIVACY cpu_to_le16(1<<4)
-#define CAP_SHORTHDR cpu_to_le16(1<<5)
- __le16 cap;
- __le16 beaconInterval;
- u8 rates[8]; /* Same as rates for config rid */
- struct { /* For frequency hopping only */
- __le16 dwell;
- u8 hopSet;
- u8 hopPattern;
- u8 hopIndex;
- u8 fill;
- } fh;
- __le16 dsChannel;
- __le16 atimWindow;
-
- /* Only present on firmware >= 5.30.17 */
- BSSListRidExtra extra;
-} __attribute__ ((packed));
-
-typedef struct {
- BSSListRid bss;
- struct list_head list;
-} BSSListElement;
-
-typedef struct tdsRssiEntry tdsRssiEntry;
-struct tdsRssiEntry {
- u8 rssipct;
- u8 rssidBm;
-} __attribute__ ((packed));
-
-typedef struct tdsRssiRid tdsRssiRid;
-struct tdsRssiRid {
- u16 len;
- tdsRssiEntry x[256];
-} __attribute__ ((packed));
-
-typedef struct MICRid MICRid;
-struct MICRid {
- __le16 len;
- __le16 state;
- __le16 multicastValid;
- u8 multicast[16];
- __le16 unicastValid;
- u8 unicast[16];
-} __attribute__ ((packed));
-
-typedef struct MICBuffer MICBuffer;
-struct MICBuffer {
- __be16 typelen;
-
- union {
- u8 snap[8];
- struct {
- u8 dsap;
- u8 ssap;
- u8 control;
- u8 orgcode[3];
- u8 fieldtype[2];
- } llc;
- } u;
- __be32 mic;
- __be32 seq;
-} __attribute__ ((packed));
-
-typedef struct {
- u8 da[ETH_ALEN];
- u8 sa[ETH_ALEN];
-} etherHead;
-
-#define TXCTL_TXOK (1<<1) /* report if tx is ok */
-#define TXCTL_TXEX (1<<2) /* report if tx fails */
-#define TXCTL_802_3 (0<<3) /* 802.3 packet */
-#define TXCTL_802_11 (1<<3) /* 802.11 mac packet */
-#define TXCTL_ETHERNET (0<<4) /* payload has ethertype */
-#define TXCTL_LLC (1<<4) /* payload is llc */
-#define TXCTL_RELEASE (0<<5) /* release after completion */
-#define TXCTL_NORELEASE (1<<5) /* on completion returns to host */
-
-#define BUSY_FID 0x10000
-
-#ifdef CISCO_EXT
-#define AIROMAGIC 0xa55a
-/* Warning : SIOCDEVPRIVATE may disappear during 2.5.X - Jean II */
-#ifdef SIOCIWFIRSTPRIV
-#ifdef SIOCDEVPRIVATE
-#define AIROOLDIOCTL SIOCDEVPRIVATE
-#define AIROOLDIDIFC AIROOLDIOCTL + 1
-#endif /* SIOCDEVPRIVATE */
-#else /* SIOCIWFIRSTPRIV */
-#define SIOCIWFIRSTPRIV SIOCDEVPRIVATE
-#endif /* SIOCIWFIRSTPRIV */
-/* This may be wrong. When using the new SIOCIWFIRSTPRIV range, we probably
- * should use only "GET" ioctls (last bit set to 1). "SET" ioctls are root
- * only and don't return the modified struct ifreq to the application which
- * is usually a problem. - Jean II */
-#define AIROIOCTL SIOCIWFIRSTPRIV
-#define AIROIDIFC AIROIOCTL + 1
-
-/* Ioctl constants to be used in airo_ioctl.command */
-
-#define AIROGCAP 0 // Capability rid
-#define AIROGCFG 1 // USED A LOT
-#define AIROGSLIST 2 // System ID list
-#define AIROGVLIST 3 // List of specified AP's
-#define AIROGDRVNAM 4 // NOTUSED
-#define AIROGEHTENC 5 // NOTUSED
-#define AIROGWEPKTMP 6
-#define AIROGWEPKNV 7
-#define AIROGSTAT 8
-#define AIROGSTATSC32 9
-#define AIROGSTATSD32 10
-#define AIROGMICRID 11
-#define AIROGMICSTATS 12
-#define AIROGFLAGS 13
-#define AIROGID 14
-#define AIRORRID 15
-#define AIRORSWVERSION 17
-
-/* Leave gap of 40 commands after AIROGSTATSD32 for future */
-
-#define AIROPCAP AIROGSTATSD32 + 40
-#define AIROPVLIST AIROPCAP + 1
-#define AIROPSLIST AIROPVLIST + 1
-#define AIROPCFG AIROPSLIST + 1
-#define AIROPSIDS AIROPCFG + 1
-#define AIROPAPLIST AIROPSIDS + 1
-#define AIROPMACON AIROPAPLIST + 1 /* Enable mac */
-#define AIROPMACOFF AIROPMACON + 1 /* Disable mac */
-#define AIROPSTCLR AIROPMACOFF + 1
-#define AIROPWEPKEY AIROPSTCLR + 1
-#define AIROPWEPKEYNV AIROPWEPKEY + 1
-#define AIROPLEAPPWD AIROPWEPKEYNV + 1
-#define AIROPLEAPUSR AIROPLEAPPWD + 1
-
-/* Flash codes */
-
-#define AIROFLSHRST AIROPWEPKEYNV + 40
-#define AIROFLSHGCHR AIROFLSHRST + 1
-#define AIROFLSHSTFL AIROFLSHGCHR + 1
-#define AIROFLSHPCHR AIROFLSHSTFL + 1
-#define AIROFLPUTBUF AIROFLSHPCHR + 1
-#define AIRORESTART AIROFLPUTBUF + 1
-
-#define FLASHSIZE 32768
-#define AUXMEMSIZE (256 * 1024)
-
-typedef struct aironet_ioctl {
- unsigned short command; // What to do
- unsigned short len; // Len of data
- unsigned short ridnum; // rid number
- unsigned char __user *data; // d-data
-} aironet_ioctl;
-
-static char swversion[] = "2.1";
-#endif /* CISCO_EXT */
-
-#define NUM_MODULES 2
-#define MIC_MSGLEN_MAX 2400
-#define EMMH32_MSGLEN_MAX MIC_MSGLEN_MAX
-#define AIRO_DEF_MTU 2312
-
-typedef struct {
- u32 size; // size
- u8 enabled; // MIC enabled or not
- u32 rxSuccess; // successful packets received
- u32 rxIncorrectMIC; // pkts dropped due to incorrect MIC comparison
- u32 rxNotMICed; // pkts dropped due to not being MIC'd
- u32 rxMICPlummed; // pkts dropped due to not having a MIC plummed
- u32 rxWrongSequence; // pkts dropped due to sequence number violation
- u32 reserve[32];
-} mic_statistics;
-
-typedef struct {
- u32 coeff[((EMMH32_MSGLEN_MAX)+3)>>2];
- u64 accum; // accumulated mic, reduced to u32 in final()
- int position; // current position (byte offset) in message
- union {
- u8 d8[4];
- __be32 d32;
- } part; // saves partial message word across update() calls
-} emmh32_context;
-
-typedef struct {
- emmh32_context seed; // Context - the seed
- u32 rx; // Received sequence number
- u32 tx; // Tx sequence number
- u32 window; // Start of window
- u8 valid; // Flag to say if context is valid or not
- u8 key[16];
-} miccntx;
-
-typedef struct {
- miccntx mCtx; // Multicast context
- miccntx uCtx; // Unicast context
-} mic_module;
-
-typedef struct {
- unsigned int rid: 16;
- unsigned int len: 15;
- unsigned int valid: 1;
- dma_addr_t host_addr;
-} Rid;
-
-typedef struct {
- unsigned int offset: 15;
- unsigned int eoc: 1;
- unsigned int len: 15;
- unsigned int valid: 1;
- dma_addr_t host_addr;
-} TxFid;
-
-struct rx_hdr {
- __le16 status, len;
- u8 rssi[2];
- u8 rate;
- u8 freq;
- __le16 tmp[4];
-} __attribute__ ((packed));
-
-typedef struct {
- unsigned int ctl: 15;
- unsigned int rdy: 1;
- unsigned int len: 15;
- unsigned int valid: 1;
- dma_addr_t host_addr;
-} RxFid;
-
-/*
- * Host receive descriptor
- */
-typedef struct {
- unsigned char __iomem *card_ram_off; /* offset into card memory of the
- desc */
- RxFid rx_desc; /* card receive descriptor */
- char *virtual_host_addr; /* virtual address of host receive
- buffer */
- int pending;
-} HostRxDesc;
-
-/*
- * Host transmit descriptor
- */
-typedef struct {
- unsigned char __iomem *card_ram_off; /* offset into card memory of the
- desc */
- TxFid tx_desc; /* card transmit descriptor */
- char *virtual_host_addr; /* virtual address of host transmit
- buffer */
- int pending;
-} HostTxDesc;
-
-/*
- * Host RID descriptor
- */
-typedef struct {
- unsigned char __iomem *card_ram_off; /* offset into card memory of the
- descriptor */
- Rid rid_desc; /* card RID descriptor */
- char *virtual_host_addr; /* virtual address of host RID
- buffer */
-} HostRidDesc;
-
-typedef struct {
- u16 sw0;
- u16 sw1;
- u16 status;
- u16 len;
-#define HOST_SET (1 << 0)
-#define HOST_INT_TX (1 << 1) /* Interrupt on successful TX */
-#define HOST_INT_TXERR (1 << 2) /* Interrupt on unsuccessful TX */
-#define HOST_LCC_PAYLOAD (1 << 4) /* LLC payload, 0 = Ethertype */
-#define HOST_DONT_RLSE (1 << 5) /* Don't release buffer when done */
-#define HOST_DONT_RETRY (1 << 6) /* Don't retry transmit */
-#define HOST_CLR_AID (1 << 7) /* clear AID failure */
-#define HOST_RTS (1 << 9) /* Force RTS use */
-#define HOST_SHORT (1 << 10) /* Do short preamble */
- u16 ctl;
- u16 aid;
- u16 retries;
- u16 fill;
-} TxCtlHdr;
-
-typedef struct {
- u16 ctl;
- u16 duration;
- char addr1[6];
- char addr2[6];
- char addr3[6];
- u16 seq;
- char addr4[6];
-} WifiHdr;
-
-
-typedef struct {
- TxCtlHdr ctlhdr;
- u16 fill1;
- u16 fill2;
- WifiHdr wifihdr;
- u16 gaplen;
- u16 status;
-} WifiCtlHdr;
-
-static WifiCtlHdr wifictlhdr8023 = {
- .ctlhdr = {
- .ctl = HOST_DONT_RLSE,
- }
-};
-
-// A few details needed for WEP (Wired Equivalent Privacy)
-#define MAX_KEY_SIZE 13 // 104-bit key ("128-bit" WEP counts the 24-bit IV)
-#define MIN_KEY_SIZE 5 // 40 bits RC4 - WEP
-typedef struct wep_key_t {
- u16 len;
- u8 key[16]; /* 40-bit and 104-bit keys */
-} wep_key_t;
-
-/* List of Wireless Handlers (new API) */
-static const struct iw_handler_def airo_handler_def;
-
-static const char version[] = "airo.c 0.6 (Ben Reed & Javier Achirica)";
-
-struct airo_info;
-
-static int get_dec_u16( char *buffer, int *start, int limit );
-static void OUT4500( struct airo_info *, u16 register, u16 value );
-static unsigned short IN4500( struct airo_info *, u16 register );
-static u16 setup_card(struct airo_info*, u8 *mac, int lock);
-static int enable_MAC(struct airo_info *ai, int lock);
-static void disable_MAC(struct airo_info *ai, int lock);
-static void enable_interrupts(struct airo_info*);
-static void disable_interrupts(struct airo_info*);
-static u16 issuecommand(struct airo_info*, Cmd *pCmd, Resp *pRsp);
-static int bap_setup(struct airo_info*, u16 rid, u16 offset, int whichbap);
-static int aux_bap_read(struct airo_info*, __le16 *pu16Dst, int bytelen,
- int whichbap);
-static int fast_bap_read(struct airo_info*, __le16 *pu16Dst, int bytelen,
- int whichbap);
-static int bap_write(struct airo_info*, const __le16 *pu16Src, int bytelen,
- int whichbap);
-static int PC4500_accessrid(struct airo_info*, u16 rid, u16 accmd);
-static int PC4500_readrid(struct airo_info*, u16 rid, void *pBuf, int len, int lock);
-static int PC4500_writerid(struct airo_info*, u16 rid, const void
- *pBuf, int len, int lock);
-static int do_writerid( struct airo_info*, u16 rid, const void *rid_data,
- int len, int dummy );
-static u16 transmit_allocate(struct airo_info*, int lenPayload, int raw);
-static int transmit_802_3_packet(struct airo_info*, int len, char *pPacket);
-static int transmit_802_11_packet(struct airo_info*, int len, char *pPacket);
-
-static int mpi_send_packet (struct net_device *dev);
-static void mpi_unmap_card(struct pci_dev *pci);
-static void mpi_receive_802_3(struct airo_info *ai);
-static void mpi_receive_802_11(struct airo_info *ai);
-static int waitbusy (struct airo_info *ai);
-
-static irqreturn_t airo_interrupt( int irq, void* dev_id);
-static int airo_thread(void *data);
-static void timer_func( struct net_device *dev );
-static int airo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
-static struct iw_statistics *airo_get_wireless_stats (struct net_device *dev);
-static void airo_read_wireless_stats (struct airo_info *local);
-#ifdef CISCO_EXT
-static int readrids(struct net_device *dev, aironet_ioctl *comp);
-static int writerids(struct net_device *dev, aironet_ioctl *comp);
-static int flashcard(struct net_device *dev, aironet_ioctl *comp);
-#endif /* CISCO_EXT */
-static void micinit(struct airo_info *ai);
-static int micsetup(struct airo_info *ai);
-static int encapsulate(struct airo_info *ai, etherHead *pPacket, MICBuffer *buffer, int len);
-static int decapsulate(struct airo_info *ai, MICBuffer *mic, etherHead *pPacket, u16 payLen);
-
-static u8 airo_rssi_to_dbm (tdsRssiEntry *rssi_rid, u8 rssi);
-static u8 airo_dbm_to_pct (tdsRssiEntry *rssi_rid, u8 dbm);
-
-static void airo_networks_free(struct airo_info *ai);
-
-struct airo_info {
- struct net_device *dev;
- struct list_head dev_list;
- /* Note, we can have MAX_FIDS outstanding. FIDs are 16-bits, so we
- use the high 16 bits (the saved packet length) to mark whether
- one is in use. */
-#define MAX_FIDS 6
-#define MPI_MAX_FIDS 1
- u32 fids[MAX_FIDS];
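- /* Layout of each fids[] entry, as used by airo_start_xmit() and
- * airo_end_xmit():
- * bits 31..16 - saved packet length; non-zero marks the FID in use
- * bits 15..0 - the card-assigned FID
- */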
- ConfigRid config;
- char keyindex; // Used with auto wep
- char defindex; // Used with auto wep
- struct proc_dir_entry *proc_entry;
- spinlock_t aux_lock;
-#define FLAG_RADIO_OFF 0 /* User disabling of MAC */
-#define FLAG_RADIO_DOWN 1 /* ifup/ifdown disabling of MAC */
-#define FLAG_RADIO_MASK 0x03
-#define FLAG_ENABLED 2
-#define FLAG_ADHOC 3 /* Needed by MIC */
-#define FLAG_MIC_CAPABLE 4
-#define FLAG_UPDATE_MULTI 5
-#define FLAG_UPDATE_UNI 6
-#define FLAG_802_11 7
-#define FLAG_PROMISC 8 /* IFF_PROMISC 0x100 - include/linux/if.h */
-#define FLAG_PENDING_XMIT 9
-#define FLAG_PENDING_XMIT11 10
-#define FLAG_MPI 11
-#define FLAG_REGISTERED 12
-#define FLAG_COMMIT 13
-#define FLAG_RESET 14
-#define FLAG_FLASHING 15
-#define FLAG_WPA_CAPABLE 16
- unsigned long flags;
-#define JOB_DIE 0
-#define JOB_XMIT 1
-#define JOB_XMIT11 2
-#define JOB_STATS 3
-#define JOB_PROMISC 4
-#define JOB_MIC 5
-#define JOB_EVENT 6
-#define JOB_AUTOWEP 7
-#define JOB_WSTATS 8
-#define JOB_SCAN_RESULTS 9
- unsigned long jobs;
- int (*bap_read)(struct airo_info*, __le16 *pu16Dst, int bytelen,
- int whichbap);
- unsigned short *flash;
- tdsRssiEntry *rssi;
- struct task_struct *list_bss_task;
- struct task_struct *airo_thread_task;
- struct semaphore sem;
- wait_queue_head_t thr_wait;
- unsigned long expires;
- struct {
- struct sk_buff *skb;
- int fid;
- } xmit, xmit11;
- struct net_device *wifidev;
- struct iw_statistics wstats; // wireless stats
- unsigned long scan_timeout; /* Time scan should be read */
- struct iw_spy_data spy_data;
- struct iw_public_data wireless_data;
- /* MIC stuff */
- struct crypto_cipher *tfm;
- mic_module mod[2];
- mic_statistics micstats;
- HostRxDesc rxfids[MPI_MAX_FIDS]; // rx/tx/config MPI350 descriptors
- HostTxDesc txfids[MPI_MAX_FIDS];
- HostRidDesc config_desc;
- unsigned long ridbus; // phys addr of config_desc
- struct sk_buff_head txq;// tx queue used by mpi350 code
- struct pci_dev *pci;
- unsigned char __iomem *pcimem;
- unsigned char __iomem *pciaux;
- unsigned char *shared;
- dma_addr_t shared_dma;
- pm_message_t power;
- SsidRid *SSID;
- APListRid *APList;
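- /* Shared DMA area: one PKTSIZE buffer per RX FID and per TX FID,
- * plus RIDSIZE for RID access; laid out in mpi_map_card(). */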
-#define PCI_SHARED_LEN (2 * MPI_MAX_FIDS * PKTSIZE + RIDSIZE)
- char proc_name[IFNAMSIZ];
-
- int wep_capable;
- int max_wep_idx;
-
- /* WPA-related stuff */
- unsigned int bssListFirst;
- unsigned int bssListNext;
- unsigned int bssListRidLen;
-
- struct list_head network_list;
- struct list_head network_free_list;
- BSSListElement *networks;
-};
-
-static inline int bap_read(struct airo_info *ai, __le16 *pu16Dst, int bytelen,
- int whichbap)
-{
- return ai->bap_read(ai, pu16Dst, bytelen, whichbap);
-}
-
-static int setup_proc_entry( struct net_device *dev,
- struct airo_info *apriv );
-static int takedown_proc_entry( struct net_device *dev,
- struct airo_info *apriv );
-
-static int cmdreset(struct airo_info *ai);
-static int setflashmode (struct airo_info *ai);
-static int flashgchar(struct airo_info *ai,int matchbyte,int dwelltime);
-static int flashputbuf(struct airo_info *ai);
-static int flashrestart(struct airo_info *ai,struct net_device *dev);
-
-#define airo_print(type, name, fmt, args...) \
- printk(type DRV_NAME "(%s): " fmt "\n", name, ##args)
-
-#define airo_print_info(name, fmt, args...) \
- airo_print(KERN_INFO, name, fmt, ##args)
-
-#define airo_print_dbg(name, fmt, args...) \
- airo_print(KERN_DEBUG, name, fmt, ##args)
-
-#define airo_print_warn(name, fmt, args...) \
- airo_print(KERN_WARNING, name, fmt, ##args)
-
-#define airo_print_err(name, fmt, args...) \
- airo_print(KERN_ERR, name, fmt, ##args)
-
-#define AIRO_FLASH(dev) (((struct airo_info *)dev->ml_priv)->flash)
-
-/***********************************************************************
- * MIC ROUTINES *
- ***********************************************************************
- */
-
-static int RxSeqValid (struct airo_info *ai,miccntx *context,int mcast,u32 micSeq);
-static void MoveWindow(miccntx *context, u32 micSeq);
-static void emmh32_setseed(emmh32_context *context, u8 *pkey, int keylen,
- struct crypto_cipher *tfm);
-static void emmh32_init(emmh32_context *context);
-static void emmh32_update(emmh32_context *context, u8 *pOctets, int len);
-static void emmh32_final(emmh32_context *context, u8 digest[4]);
-static int flashpchar(struct airo_info *ai,int byte,int dwelltime);
-
-static void age_mic_context(miccntx *cur, miccntx *old, u8 *key, int key_len,
- struct crypto_cipher *tfm)
-{
- /* If the current MIC context is valid and its key is the same as
- * the MIC register, there's nothing to do.
- */
- if (cur->valid && (memcmp(cur->key, key, key_len) == 0))
- return;
-
- /* Age current mic Context */
- memcpy(old, cur, sizeof(*cur));
-
- /* Initialize new context */
- memcpy(cur->key, key, key_len);
- cur->window = 33; /* Window always points to the middle */
- cur->rx = 0; /* Rx Sequence numbers */
- cur->tx = 0; /* Tx sequence numbers */
- cur->valid = 1; /* Key is now valid */
-
- /* Give key to mic seed */
- emmh32_setseed(&cur->seed, key, key_len, tfm);
-}
-
-/* micinit - Initialize mic seed */
-
-static void micinit(struct airo_info *ai)
-{
- MICRid mic_rid;
-
- clear_bit(JOB_MIC, &ai->jobs);
- PC4500_readrid(ai, RID_MIC, &mic_rid, sizeof(mic_rid), 0);
- up(&ai->sem);
-
- ai->micstats.enabled = (le16_to_cpu(mic_rid.state) & 0x00FF) ? 1 : 0;
- if (!ai->micstats.enabled) {
- /* So next time we have a valid key and mic is enabled, we will
- * update the sequence number if the key is the same as before.
- */
- ai->mod[0].uCtx.valid = 0;
- ai->mod[0].mCtx.valid = 0;
- return;
- }
-
- if (mic_rid.multicastValid) {
- age_mic_context(&ai->mod[0].mCtx, &ai->mod[1].mCtx,
- mic_rid.multicast, sizeof(mic_rid.multicast),
- ai->tfm);
- }
-
- if (mic_rid.unicastValid) {
- age_mic_context(&ai->mod[0].uCtx, &ai->mod[1].uCtx,
- mic_rid.unicast, sizeof(mic_rid.unicast),
- ai->tfm);
- }
-}
-
-/* micsetup - Get ready for business */
-
-static int micsetup(struct airo_info *ai) {
- int i;
-
- if (ai->tfm == NULL)
- ai->tfm = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC);
-
- if (IS_ERR(ai->tfm)) {
- airo_print_err(ai->dev->name, "failed to load transform for AES");
- ai->tfm = NULL;
- return ERROR;
- }
-
- for (i=0; i < NUM_MODULES; i++) {
- memset(&ai->mod[i].mCtx,0,sizeof(miccntx));
- memset(&ai->mod[i].uCtx,0,sizeof(miccntx));
- }
- return SUCCESS;
-}
-
-static char micsnap[] = {0xAA,0xAA,0x03,0x00,0x40,0x96,0x00,0x02};
-
-/*===========================================================================
- * Description: MIC a packet
- *
- * Inputs: etherHead * pointer to an 802.3 frame
- *
- * Returns: SUCCESS if the packet was successfully MIC'd,
- * otherwise ERROR. The caller accounts for the extra
- * MICBuffer bytes in the transmit length.
- *
- * Caveats: It is assumed that the frame buffer will already
- * be big enough to hold the largest MIC message possible.
- * (No memory allocation is done here).
- *
- * Author: sbraneky (10/15/01)
- * Merciless hacks by rwilcher (1/14/02)
- */
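-/*
- * Sketch of the resulting frame (cf. struct MICBuffer above):
- *
- * | DA | SA | typelen | SNAP(8) | MIC(4) | SEQ(4) | payload |
- *
- * The digest covers DA/SA, typelen+SNAP, SEQ and the payload, i.e.
- * everything except the MIC field itself.
- */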
-
-static int encapsulate(struct airo_info *ai ,etherHead *frame, MICBuffer *mic, int payLen)
-{
- miccntx *context;
-
- // Determine correct context
- // If not adhoc, always use unicast key
-
- if (test_bit(FLAG_ADHOC, &ai->flags) && (frame->da[0] & 0x1))
- context = &ai->mod[0].mCtx;
- else
- context = &ai->mod[0].uCtx;
-
- if (!context->valid)
- return ERROR;
-
- mic->typelen = htons(payLen + 16); //Length of Mic'd packet
-
- memcpy(&mic->u.snap, micsnap, sizeof(micsnap)); // Add Snap
-
- // Add Tx sequence
- mic->seq = htonl(context->tx);
- context->tx += 2;
-
- emmh32_init(&context->seed); // Mic the packet
- emmh32_update(&context->seed,frame->da,ETH_ALEN * 2); // DA,SA
- emmh32_update(&context->seed,(u8*)&mic->typelen,10); // Type/Length and Snap
- emmh32_update(&context->seed,(u8*)&mic->seq,sizeof(mic->seq)); //SEQ
- emmh32_update(&context->seed,frame->da + ETH_ALEN * 2,payLen); //payload
- emmh32_final(&context->seed, (u8*)&mic->mic);
-
- /* New type/length: zero it so the NIC knows this could be an
- * oversized (MIC'd) packet */
- mic->typelen = 0;
- return SUCCESS;
-}
-
-typedef enum {
- NONE,
- NOMIC,
- NOMICPLUMMED,
- SEQUENCE,
- INCORRECTMIC,
-} mic_error;
-
-/*===========================================================================
- * Description: Decapsulates a MIC'd packet, i.e. strips the MIC fields
- * and leaves the plain 802.3 packet, if the packet is valid.
- *
- * Inputs: etherHead pointer to the 802.3 packet
- *
- * Returns: SUCCESS if the packet is valid and should be passed up,
- * otherwise ERROR (the packet should be dropped).
- *
- * Author: sbraneky (10/15/01)
- * Merciless hacks by rwilcher (1/14/02)
- *---------------------------------------------------------------------------
- */
-
-static int decapsulate(struct airo_info *ai, MICBuffer *mic, etherHead *eth, u16 payLen)
-{
- int i;
- u32 micSEQ;
- miccntx *context;
- u8 digest[4];
- mic_error micError = NONE;
-
- // Check if the packet is a Mic'd packet
-
- if (!ai->micstats.enabled) {
- //No Mic set or Mic OFF but we received a MIC'd packet.
- if (memcmp ((u8*)eth + 14, micsnap, sizeof(micsnap)) == 0) {
- ai->micstats.rxMICPlummed++;
- return ERROR;
- }
- return SUCCESS;
- }
-
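- /* 0x888E is the 802.1X (EAPOL) ethertype; key-handshake frames are
- * passed up without a MIC check */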
- if (ntohs(mic->typelen) == 0x888E)
- return SUCCESS;
-
- if (memcmp (mic->u.snap, micsnap, sizeof(micsnap)) != 0) {
- // Mic enabled but packet isn't Mic'd
- ai->micstats.rxMICPlummed++;
- return ERROR;
- }
-
- micSEQ = ntohl(mic->seq); //store SEQ as CPU order
-
- //At this point we have a MIC'd packet and MIC is enabled.
- //Now do the MIC error checking.
-
- //Receive seq must be odd
- if ( (micSEQ & 1) == 0 ) {
- ai->micstats.rxWrongSequence++;
- return ERROR;
- }
-
- for (i = 0; i < NUM_MODULES; i++) {
- int mcast = eth->da[0] & 1;
- //Determine proper context
- context = mcast ? &ai->mod[i].mCtx : &ai->mod[i].uCtx;
-
- //Make sure context is valid
- if (!context->valid) {
- if (i == 0)
- micError = NOMICPLUMMED;
- continue;
- }
- //DeMic it
-
- if (!mic->typelen)
- mic->typelen = htons(payLen + sizeof(MICBuffer) - 2);
-
- emmh32_init(&context->seed);
- emmh32_update(&context->seed, eth->da, ETH_ALEN*2);
- emmh32_update(&context->seed, (u8 *)&mic->typelen, sizeof(mic->typelen)+sizeof(mic->u.snap));
- emmh32_update(&context->seed, (u8 *)&mic->seq,sizeof(mic->seq));
- emmh32_update(&context->seed, eth->da + ETH_ALEN*2,payLen);
- //Calculate MIC
- emmh32_final(&context->seed, digest);
-
- if (memcmp(digest, &mic->mic, 4)) { //Make sure the mics match
- //Invalid Mic
- if (i == 0)
- micError = INCORRECTMIC;
- continue;
- }
-
- //Check Sequence number if mics pass
- if (RxSeqValid(ai, context, mcast, micSEQ) == SUCCESS) {
- ai->micstats.rxSuccess++;
- return SUCCESS;
- }
- if (i == 0)
- micError = SEQUENCE;
- }
-
- // Update statistics
- switch (micError) {
- case NOMICPLUMMED: ai->micstats.rxMICPlummed++; break;
- case SEQUENCE: ai->micstats.rxWrongSequence++; break;
- case INCORRECTMIC: ai->micstats.rxIncorrectMIC++; break;
- case NONE: break;
- case NOMIC: break;
- }
- return ERROR;
-}
-
-/*===========================================================================
- * Description: Checks the Rx Seq number to make sure it is valid
- * and hasn't already been received
- *
- * Inputs: miccntx - mic context to check seq against
- * micSeq - the MIC seq number
- *
- * Returns: SUCCESS if valid, otherwise ERROR.
- *
- * Author: sbraneky (10/15/01)
- * Merciless hacks by rwilcher (1/14/02)
- *---------------------------------------------------------------------------
- */
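-/*
- * Worked example, derived from the code below: with context->window = 101,
- * seq = micSeq - 68, so the odd sequence numbers 69..131 map to bits 0..31
- * of context->rx; anything below 69 is too old (ERROR), and anything above
- * 132 slides the window forward and is accepted.
- */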
-
-static int RxSeqValid (struct airo_info *ai,miccntx *context,int mcast,u32 micSeq)
-{
- u32 seq,index;
-
- //Allow for the AP having been rebooted - if so, adopt the incoming
- //sequence number as the new window start - it might go backwards
-
- if (mcast) {
- if (test_bit(FLAG_UPDATE_MULTI, &ai->flags)) {
- clear_bit (FLAG_UPDATE_MULTI, &ai->flags);
- context->window = (micSeq > 33) ? micSeq : 33;
- context->rx = 0; // Reset rx
- }
- } else if (test_bit(FLAG_UPDATE_UNI, &ai->flags)) {
- clear_bit (FLAG_UPDATE_UNI, &ai->flags);
- context->window = (micSeq > 33) ? micSeq : 33; // Move window
- context->rx = 0; // Reset rx
- }
-
- //Make sequence number relative to START of window
- seq = micSeq - (context->window - 33);
-
- //Too old of a SEQ number to check.
- if ((s32)seq < 0)
- return ERROR;
-
- if ( seq > 64 ) {
- //Window is infinite forward
- MoveWindow(context,micSeq);
- return SUCCESS;
- }
-
- // We are in the window. Now check the context rx bit to see if it was already sent
- seq >>= 1; //divide by 2 because we only have odd numbers
- index = 1 << seq; //Get an index number
-
- if (!(context->rx & index)) {
- //micSEQ falls inside the window.
- //Add sequence number to the list of received numbers.
- context->rx |= index;
-
- MoveWindow(context,micSeq);
-
- return SUCCESS;
- }
- return ERROR;
-}
-
-static void MoveWindow(miccntx *context, u32 micSeq)
-{
- u32 shift;
-
- //Move window if seq greater than the middle of the window
- if (micSeq > context->window) {
- shift = (micSeq - context->window) >> 1;
-
- //Shift out old
- if (shift < 32)
- context->rx >>= shift;
- else
- context->rx = 0;
-
- context->window = micSeq; //Move window
- }
-}
-
-/*==============================================*/
-/*========== EMMH ROUTINES ====================*/
-/*==============================================*/
-
-/* mic accumulate */
-#define MIC_ACCUM(val) \
- context->accum += (u64)(val) * context->coeff[coeff_position++];
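-/*
- * Note: MIC_ACCUM() expects a local "coeff_position" in scope and already
- * ends in a semicolon. The 64-bit accumulator is only reduced to 32 bits,
- * modulo 2^32 + 15, in emmh32_final() below.
- */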
-
-static unsigned char aes_counter[16];
-
-/* expand the key to fill the MMH coefficient array */
-static void emmh32_setseed(emmh32_context *context, u8 *pkey, int keylen,
- struct crypto_cipher *tfm)
-{
- /* take the keying material, expand if necessary, truncate at 16-bytes */
- /* run through AES counter mode to generate context->coeff[] */
-
- int i,j;
- u32 counter;
- u8 *cipher, plain[16];
-
- crypto_cipher_setkey(tfm, pkey, 16);
- counter = 0;
- for (i = 0; i < ARRAY_SIZE(context->coeff); ) {
- aes_counter[15] = (u8)(counter >> 0);
- aes_counter[14] = (u8)(counter >> 8);
- aes_counter[13] = (u8)(counter >> 16);
- aes_counter[12] = (u8)(counter >> 24);
- counter++;
- memcpy (plain, aes_counter, 16);
- crypto_cipher_encrypt_one(tfm, plain, plain);
- cipher = plain;
- for (j = 0; (j < 16) && (i < ARRAY_SIZE(context->coeff)); ) {
- context->coeff[i++] = ntohl(*(__be32 *)&cipher[j]);
- j += 4;
- }
- }
-}
-
-/* prepare for calculation of a new mic */
-static void emmh32_init(emmh32_context *context)
-{
- /* prepare for new mic calculation */
- context->accum = 0;
- context->position = 0;
-}
-
-/* add some bytes to the mic calculation */
-static void emmh32_update(emmh32_context *context, u8 *pOctets, int len)
-{
- int coeff_position, byte_position;
-
- if (len == 0) return;
-
- coeff_position = context->position >> 2;
-
- /* deal with partial 32-bit word left over from last update */
- byte_position = context->position & 3;
- if (byte_position) {
- /* have a partial word in part to deal with */
- do {
- if (len == 0) return;
- context->part.d8[byte_position++] = *pOctets++;
- context->position++;
- len--;
- } while (byte_position < 4);
- MIC_ACCUM(ntohl(context->part.d32));
- }
-
- /* deal with full 32-bit words */
- while (len >= 4) {
- MIC_ACCUM(ntohl(*(__be32 *)pOctets));
- context->position += 4;
- pOctets += 4;
- len -= 4;
- }
-
- /* deal with partial 32-bit word that will be left over from this update */
- byte_position = 0;
- while (len > 0) {
- context->part.d8[byte_position++] = *pOctets++;
- context->position++;
- len--;
- }
-}
-
-/* mask used to zero empty bytes for final partial word */
-static u32 mask32[4] = { 0x00000000L, 0xFF000000L, 0xFFFF0000L, 0xFFFFFF00L };
-
-/* calculate the mic */
-static void emmh32_final(emmh32_context *context, u8 digest[4])
-{
- int coeff_position, byte_position;
- u32 val;
-
- u64 sum, utmp;
- s64 stmp;
-
- coeff_position = context->position >> 2;
-
- /* deal with partial 32-bit word left over from last update */
- byte_position = context->position & 3;
- if (byte_position) {
- /* have a partial word in part to deal with */
- val = ntohl(context->part.d32);
- MIC_ACCUM(val & mask32[byte_position]); /* zero empty bytes */
- }
-
- /* reduce the accumulated u64 to a 32-bit MIC */
- sum = context->accum;
- stmp = (sum & 0xffffffffLL) - ((sum >> 32) * 15);
- utmp = (stmp & 0xffffffffLL) - ((stmp >> 32) * 15);
- sum = utmp & 0xffffffffLL;
- if (utmp > 0x10000000fLL)
- sum -= 15;
-
- val = (u32)sum;
- digest[0] = (val>>24) & 0xFF;
- digest[1] = (val>>16) & 0xFF;
- digest[2] = (val>>8) & 0xFF;
- digest[3] = val & 0xFF;
-}
-
-static int readBSSListRid(struct airo_info *ai, int first,
- BSSListRid *list)
-{
- Cmd cmd;
- Resp rsp;
-
- if (first == 1) {
- if (ai->flags & FLAG_RADIO_MASK) return -ENETDOWN;
- memset(&cmd, 0, sizeof(cmd));
- cmd.cmd=CMD_LISTBSS;
- if (down_interruptible(&ai->sem))
- return -ERESTARTSYS;
- ai->list_bss_task = current;
- issuecommand(ai, &cmd, &rsp);
- up(&ai->sem);
- /* Let the command take effect */
- schedule_timeout_uninterruptible(3 * HZ);
- ai->list_bss_task = NULL;
- }
- return PC4500_readrid(ai, first ? ai->bssListFirst : ai->bssListNext,
- list, ai->bssListRidLen, 1);
-}
-
-static int readWepKeyRid(struct airo_info *ai, WepKeyRid *wkr, int temp, int lock)
-{
- return PC4500_readrid(ai, temp ? RID_WEP_TEMP : RID_WEP_PERM,
- wkr, sizeof(*wkr), lock);
-}
-
-static int writeWepKeyRid(struct airo_info *ai, WepKeyRid *wkr, int perm, int lock)
-{
- int rc;
- rc = PC4500_writerid(ai, RID_WEP_TEMP, wkr, sizeof(*wkr), lock);
- if (rc!=SUCCESS)
- airo_print_err(ai->dev->name, "WEP_TEMP set %x", rc);
- if (perm) {
- rc = PC4500_writerid(ai, RID_WEP_PERM, wkr, sizeof(*wkr), lock);
- if (rc!=SUCCESS)
- airo_print_err(ai->dev->name, "WEP_PERM set %x", rc);
- }
- return rc;
-}
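-/* Note: the key is always written to RID_WEP_TEMP; when "perm" is set it
- * is additionally committed to RID_WEP_PERM (the persistent copy). */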
-
-static int readSsidRid(struct airo_info*ai, SsidRid *ssidr)
-{
- return PC4500_readrid(ai, RID_SSID, ssidr, sizeof(*ssidr), 1);
-}
-
-static int writeSsidRid(struct airo_info*ai, SsidRid *pssidr, int lock)
-{
- return PC4500_writerid(ai, RID_SSID, pssidr, sizeof(*pssidr), lock);
-}
-
-static int readConfigRid(struct airo_info *ai, int lock)
-{
- int rc;
- ConfigRid cfg;
-
- if (ai->config.len)
- return SUCCESS;
-
- rc = PC4500_readrid(ai, RID_ACTUALCONFIG, &cfg, sizeof(cfg), lock);
- if (rc != SUCCESS)
- return rc;
-
- ai->config = cfg;
- return SUCCESS;
-}
-
-static inline void checkThrottle(struct airo_info *ai)
-{
- int i;
-/* Old hardware had a limit on encryption speed */
- if (ai->config.authType != AUTH_OPEN && maxencrypt) {
- for(i=0; i<8; i++) {
- if (ai->config.rates[i] > maxencrypt) {
- ai->config.rates[i] = 0;
- }
- }
- }
-}
-
-static int writeConfigRid(struct airo_info *ai, int lock)
-{
- ConfigRid cfgr;
-
- if (!test_bit (FLAG_COMMIT, &ai->flags))
- return SUCCESS;
-
- clear_bit (FLAG_COMMIT, &ai->flags);
- clear_bit (FLAG_RESET, &ai->flags);
- checkThrottle(ai);
- cfgr = ai->config;
-
- if ((cfgr.opmode & MODE_CFG_MASK) == MODE_STA_IBSS)
- set_bit(FLAG_ADHOC, &ai->flags);
- else
- clear_bit(FLAG_ADHOC, &ai->flags);
-
- return PC4500_writerid( ai, RID_CONFIG, &cfgr, sizeof(cfgr), lock);
-}
-
-static int readStatusRid(struct airo_info *ai, StatusRid *statr, int lock)
-{
- return PC4500_readrid(ai, RID_STATUS, statr, sizeof(*statr), lock);
-}
-
-static int readAPListRid(struct airo_info *ai, APListRid *aplr)
-{
- return PC4500_readrid(ai, RID_APLIST, aplr, sizeof(*aplr), 1);
-}
-
-static int writeAPListRid(struct airo_info *ai, APListRid *aplr, int lock)
-{
- return PC4500_writerid(ai, RID_APLIST, aplr, sizeof(*aplr), lock);
-}
-
-static int readCapabilityRid(struct airo_info *ai, CapabilityRid *capr, int lock)
-{
- return PC4500_readrid(ai, RID_CAPABILITIES, capr, sizeof(*capr), lock);
-}
-
-static int readStatsRid(struct airo_info*ai, StatsRid *sr, int rid, int lock)
-{
- return PC4500_readrid(ai, rid, sr, sizeof(*sr), lock);
-}
-
-static void try_auto_wep(struct airo_info *ai)
-{
- if (auto_wep && !(ai->flags & FLAG_RADIO_DOWN)) {
- ai->expires = RUN_AT(3*HZ);
- wake_up_interruptible(&ai->thr_wait);
- }
-}
-
-static int airo_open(struct net_device *dev) {
- struct airo_info *ai = dev->ml_priv;
- int rc = 0;
-
- if (test_bit(FLAG_FLASHING, &ai->flags))
- return -EIO;
-
- /* Make sure the card is configured.
- * Wireless Extensions may postpone config changes until the card
- * is open (to pipeline changes and speed-up card setup). If
- * those changes are not yet committed, do it now - Jean II */
- if (test_bit(FLAG_COMMIT, &ai->flags)) {
- disable_MAC(ai, 1);
- writeConfigRid(ai, 1);
- }
-
- if (ai->wifidev != dev) {
- clear_bit(JOB_DIE, &ai->jobs);
- ai->airo_thread_task = kthread_run(airo_thread, dev, dev->name);
- if (IS_ERR(ai->airo_thread_task))
- return (int)PTR_ERR(ai->airo_thread_task);
-
- rc = request_irq(dev->irq, airo_interrupt, IRQF_SHARED,
- dev->name, dev);
- if (rc) {
- airo_print_err(dev->name,
- "register interrupt %d failed, rc %d",
- dev->irq, rc);
- set_bit(JOB_DIE, &ai->jobs);
- kthread_stop(ai->airo_thread_task);
- return rc;
- }
-
- /* Power on the MAC controller (which may have been disabled) */
- clear_bit(FLAG_RADIO_DOWN, &ai->flags);
- enable_interrupts(ai);
-
- try_auto_wep(ai);
- }
- enable_MAC(ai, 1);
-
- netif_start_queue(dev);
- return 0;
-}
-
-static int mpi_start_xmit(struct sk_buff *skb, struct net_device *dev) {
- int npacks, pending;
- unsigned long flags;
- struct airo_info *ai = dev->ml_priv;
-
- if (!skb) {
- airo_print_err(dev->name, "%s: skb == NULL!",__func__);
- return 0;
- }
- npacks = skb_queue_len (&ai->txq);
-
- if (npacks >= MAXTXQ - 1) {
- netif_stop_queue (dev);
- if (npacks > MAXTXQ) {
- dev->stats.tx_fifo_errors++;
- return 1;
- }
- skb_queue_tail (&ai->txq, skb);
- return 0;
- }
-
- spin_lock_irqsave(&ai->aux_lock, flags);
- skb_queue_tail (&ai->txq, skb);
- pending = test_bit(FLAG_PENDING_XMIT, &ai->flags);
- spin_unlock_irqrestore(&ai->aux_lock,flags);
- netif_wake_queue (dev);
-
- if (pending == 0) {
- set_bit(FLAG_PENDING_XMIT, &ai->flags);
- mpi_send_packet (dev);
- }
- return 0;
-}
-
-/*
- * mpi_send_packet
- *
- * Attempt to transmit a packet. Can be called from interrupt
- * or transmit context. Returns the number of packets we tried to send.
- */
-
-static int mpi_send_packet (struct net_device *dev)
-{
- struct sk_buff *skb;
- unsigned char *buffer;
- s16 len;
- __le16 *payloadLen;
- struct airo_info *ai = dev->ml_priv;
- u8 *sendbuf;
-
- /* get a packet to send */
-
- if ((skb = skb_dequeue(&ai->txq)) == NULL) {
- airo_print_err(dev->name,
- "%s: Dequeue'd zero in send_packet()",
- __func__);
- return 0;
- }
-
- /* check min length*/
- len = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
- buffer = skb->data;
-
- ai->txfids[0].tx_desc.offset = 0;
- ai->txfids[0].tx_desc.valid = 1;
- ai->txfids[0].tx_desc.eoc = 1;
- ai->txfids[0].tx_desc.len =len+sizeof(WifiHdr);
-
-/*
- * Magic: the card's firmware needs a length count (2 bytes) in the host
- * buffer right after TXFID_HDR. The TXFID_HDR contains the status short,
- * so payloadlen is immediately after it.
- * ------------------------------------------------
- * |TXFIDHDR+STATUS|PAYLOADLEN|802.3HDR|PACKETDATA|
- * ------------------------------------------------
- */
-
- memcpy((char *)ai->txfids[0].virtual_host_addr,
- (char *)&wifictlhdr8023, sizeof(wifictlhdr8023));
-
- payloadLen = (__le16 *)(ai->txfids[0].virtual_host_addr +
- sizeof(wifictlhdr8023));
- sendbuf = ai->txfids[0].virtual_host_addr +
- sizeof(wifictlhdr8023) + 2 ;
-
- /*
- * Firmware automatically puts 802 header on so
- * we don't need to account for it in the length
- */
- if (test_bit(FLAG_MIC_CAPABLE, &ai->flags) && ai->micstats.enabled &&
- (ntohs(((__be16 *)buffer)[6]) != 0x888E)) {
- MICBuffer pMic;
-
- if (encapsulate(ai, (etherHead *)buffer, &pMic, len - sizeof(etherHead)) != SUCCESS)
- return ERROR;
-
- *payloadLen = cpu_to_le16(len-sizeof(etherHead)+sizeof(pMic));
- ai->txfids[0].tx_desc.len += sizeof(pMic);
- /* copy data into airo dma buffer */
- memcpy (sendbuf, buffer, sizeof(etherHead));
- buffer += sizeof(etherHead);
- sendbuf += sizeof(etherHead);
- memcpy (sendbuf, &pMic, sizeof(pMic));
- sendbuf += sizeof(pMic);
- memcpy (sendbuf, buffer, len - sizeof(etherHead));
- } else {
- *payloadLen = cpu_to_le16(len - sizeof(etherHead));
-
- dev->trans_start = jiffies;
-
- /* copy data into airo dma buffer */
- memcpy(sendbuf, buffer, len);
- }
-
- memcpy_toio(ai->txfids[0].card_ram_off,
- &ai->txfids[0].tx_desc, sizeof(TxFid));
-
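- /* 8 == EV_ALLOC; presumably acks the allocate event for this FID */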
- OUT4500(ai, EVACK, 8);
-
- dev_kfree_skb_any(skb);
- return 1;
-}
-
-static void get_tx_error(struct airo_info *ai, s32 fid)
-{
- __le16 status;
-
- if (fid < 0)
- status = ((WifiCtlHdr *)ai->txfids[0].virtual_host_addr)->ctlhdr.status;
- else {
- if (bap_setup(ai, ai->fids[fid] & 0xffff, 4, BAP0) != SUCCESS)
- return;
- bap_read(ai, &status, 2, BAP0);
- }
- if (le16_to_cpu(status) & 2) /* Too many retries */
- ai->dev->stats.tx_aborted_errors++;
- if (le16_to_cpu(status) & 4) /* Transmit lifetime exceeded */
- ai->dev->stats.tx_heartbeat_errors++;
- if (le16_to_cpu(status) & 8) /* Aid fail */
- { }
- if (le16_to_cpu(status) & 0x10) /* MAC disabled */
- ai->dev->stats.tx_carrier_errors++;
- if (le16_to_cpu(status) & 0x20) /* Association lost */
- { }
- /* We produce a TXDROP event only for retry or lifetime
- * exceeded, because that's the only status that really means
- * that this particular node went away.
- * Other errors mean that *we* screwed up. - Jean II */
- if ((le16_to_cpu(status) & 2) ||
- (le16_to_cpu(status) & 4)) {
- union iwreq_data wrqu;
- char junk[0x18];
-
- /* Faster to skip over useless data than to do
- * another bap_setup(). We are at offset 0x6 and
- * need to go to 0x18 and read 6 bytes - Jean II */
- bap_read(ai, (__le16 *) junk, 0x18, BAP0);
-
- /* Copy 802.11 dest address.
- * We use the 802.11 header because the frame may
- * not be 802.3 or may be mangled...
- * In Ad-Hoc mode, it will be the node address.
- * In managed mode, it will most likely be the AP address.
- * User space will figure out how to convert it to
- * whatever it needs (IP address or else).
- * - Jean II */
- memcpy(wrqu.addr.sa_data, junk + 0x12, ETH_ALEN);
- wrqu.addr.sa_family = ARPHRD_ETHER;
-
- /* Send event to user space */
- wireless_send_event(ai->dev, IWEVTXDROP, &wrqu, NULL);
- }
-}
-
-static void airo_end_xmit(struct net_device *dev) {
- u16 status;
- int i;
- struct airo_info *priv = dev->ml_priv;
- struct sk_buff *skb = priv->xmit.skb;
- int fid = priv->xmit.fid;
- u32 *fids = priv->fids;
-
- clear_bit(JOB_XMIT, &priv->jobs);
- clear_bit(FLAG_PENDING_XMIT, &priv->flags);
- status = transmit_802_3_packet (priv, fids[fid], skb->data);
- up(&priv->sem);
-
- i = 0;
- if ( status == SUCCESS ) {
- dev->trans_start = jiffies;
- for (; i < MAX_FIDS / 2 && (priv->fids[i] & 0xffff0000); i++);
- } else {
- priv->fids[fid] &= 0xffff;
- dev->stats.tx_window_errors++;
- }
- if (i < MAX_FIDS / 2)
- netif_wake_queue(dev);
- dev_kfree_skb(skb);
-}
-
-static int airo_start_xmit(struct sk_buff *skb, struct net_device *dev) {
- s16 len;
- int i, j;
- struct airo_info *priv = dev->ml_priv;
- u32 *fids = priv->fids;
-
- if ( skb == NULL ) {
- airo_print_err(dev->name, "%s: skb == NULL!", __func__);
- return 0;
- }
-
- /* Find a vacant FID */
- for( i = 0; i < MAX_FIDS / 2 && (fids[i] & 0xffff0000); i++ );
- for( j = i + 1; j < MAX_FIDS / 2 && (fids[j] & 0xffff0000); j++ );
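- /* i is the first free FID; j probes for a second free slot so the
- * queue can be stopped when this transmit will consume the last one */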
-
- if ( j >= MAX_FIDS / 2 ) {
- netif_stop_queue(dev);
-
- if (i == MAX_FIDS / 2) {
- dev->stats.tx_fifo_errors++;
- return 1;
- }
- }
- /* check min length*/
- len = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
- /* Mark fid as used & save length for later */
- fids[i] |= (len << 16);
- priv->xmit.skb = skb;
- priv->xmit.fid = i;
- if (down_trylock(&priv->sem) != 0) {
- set_bit(FLAG_PENDING_XMIT, &priv->flags);
- netif_stop_queue(dev);
- set_bit(JOB_XMIT, &priv->jobs);
- wake_up_interruptible(&priv->thr_wait);
- } else
- airo_end_xmit(dev);
- return 0;
-}
-
-static void airo_end_xmit11(struct net_device *dev) {
- u16 status;
- int i;
- struct airo_info *priv = dev->ml_priv;
- struct sk_buff *skb = priv->xmit11.skb;
- int fid = priv->xmit11.fid;
- u32 *fids = priv->fids;
-
- clear_bit(JOB_XMIT11, &priv->jobs);
- clear_bit(FLAG_PENDING_XMIT11, &priv->flags);
- status = transmit_802_11_packet (priv, fids[fid], skb->data);
- up(&priv->sem);
-
- i = MAX_FIDS / 2;
- if ( status == SUCCESS ) {
- dev->trans_start = jiffies;
- for (; i < MAX_FIDS && (priv->fids[i] & 0xffff0000); i++);
- } else {
- priv->fids[fid] &= 0xffff;
- dev->stats.tx_window_errors++;
- }
- if (i < MAX_FIDS)
- netif_wake_queue(dev);
- dev_kfree_skb(skb);
-}
-
-static int airo_start_xmit11(struct sk_buff *skb, struct net_device *dev) {
- s16 len;
- int i, j;
- struct airo_info *priv = dev->ml_priv;
- u32 *fids = priv->fids;
-
- if (test_bit(FLAG_MPI, &priv->flags)) {
- /* Not implemented yet for MPI350 */
- netif_stop_queue(dev);
- return -ENETDOWN;
- }
-
- if ( skb == NULL ) {
- airo_print_err(dev->name, "%s: skb == NULL!", __func__);
- return 0;
- }
-
- /* Find a vacant FID */
- for( i = MAX_FIDS / 2; i < MAX_FIDS && (fids[i] & 0xffff0000); i++ );
- for( j = i + 1; j < MAX_FIDS && (fids[j] & 0xffff0000); j++ );
-
- if ( j >= MAX_FIDS ) {
- netif_stop_queue(dev);
-
- if (i == MAX_FIDS) {
- dev->stats.tx_fifo_errors++;
- return 1;
- }
- }
- /* check min length*/
- len = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
- /* Mark fid as used & save length for later */
- fids[i] |= (len << 16);
- priv->xmit11.skb = skb;
- priv->xmit11.fid = i;
- if (down_trylock(&priv->sem) != 0) {
- set_bit(FLAG_PENDING_XMIT11, &priv->flags);
- netif_stop_queue(dev);
- set_bit(JOB_XMIT11, &priv->jobs);
- wake_up_interruptible(&priv->thr_wait);
- } else
- airo_end_xmit11(dev);
- return 0;
-}
-
-static void airo_read_stats(struct net_device *dev)
-{
- struct airo_info *ai = dev->ml_priv;
- StatsRid stats_rid;
- __le32 *vals = stats_rid.vals;
-
- clear_bit(JOB_STATS, &ai->jobs);
- if (ai->power.event) {
- up(&ai->sem);
- return;
- }
- readStatsRid(ai, &stats_rid, RID_STATS, 0);
- up(&ai->sem);
-
- dev->stats.rx_packets = le32_to_cpu(vals[43]) + le32_to_cpu(vals[44]) +
- le32_to_cpu(vals[45]);
- dev->stats.tx_packets = le32_to_cpu(vals[39]) + le32_to_cpu(vals[40]) +
- le32_to_cpu(vals[41]);
- dev->stats.rx_bytes = le32_to_cpu(vals[92]);
- dev->stats.tx_bytes = le32_to_cpu(vals[91]);
- dev->stats.rx_errors = le32_to_cpu(vals[0]) + le32_to_cpu(vals[2]) +
- le32_to_cpu(vals[3]) + le32_to_cpu(vals[4]);
- dev->stats.tx_errors = le32_to_cpu(vals[42]) +
- dev->stats.tx_fifo_errors;
- dev->stats.multicast = le32_to_cpu(vals[43]);
- dev->stats.collisions = le32_to_cpu(vals[89]);
-
- /* detailed rx_errors: */
- dev->stats.rx_length_errors = le32_to_cpu(vals[3]);
- dev->stats.rx_crc_errors = le32_to_cpu(vals[4]);
- dev->stats.rx_frame_errors = le32_to_cpu(vals[2]);
- dev->stats.rx_fifo_errors = le32_to_cpu(vals[0]);
-}
-
-static struct net_device_stats *airo_get_stats(struct net_device *dev)
-{
- struct airo_info *local = dev->ml_priv;
-
- if (!test_bit(JOB_STATS, &local->jobs)) {
- /* Get stats out of the card if available */
- if (down_trylock(&local->sem) != 0) {
- set_bit(JOB_STATS, &local->jobs);
- wake_up_interruptible(&local->thr_wait);
- } else
- airo_read_stats(dev);
- }
-
- return &dev->stats;
-}
-
-static void airo_set_promisc(struct airo_info *ai) {
- Cmd cmd;
- Resp rsp;
-
- memset(&cmd, 0, sizeof(cmd));
- cmd.cmd=CMD_SETMODE;
- clear_bit(JOB_PROMISC, &ai->jobs);
- cmd.parm0=(ai->flags&IFF_PROMISC) ? PROMISC : NOPROMISC;
- issuecommand(ai, &cmd, &rsp);
- up(&ai->sem);
-}
-
-static void airo_set_multicast_list(struct net_device *dev) {
- struct airo_info *ai = dev->ml_priv;
-
- if ((dev->flags ^ ai->flags) & IFF_PROMISC) {
- change_bit(FLAG_PROMISC, &ai->flags);
- if (down_trylock(&ai->sem) != 0) {
- set_bit(JOB_PROMISC, &ai->jobs);
- wake_up_interruptible(&ai->thr_wait);
- } else
- airo_set_promisc(ai);
- }
-
- if ((dev->flags&IFF_ALLMULTI)||dev->mc_count>0) {
- /* Turn on multicast. (Should be already setup...) */
- }
-}
-
-static int airo_set_mac_address(struct net_device *dev, void *p)
-{
- struct airo_info *ai = dev->ml_priv;
- struct sockaddr *addr = p;
-
- readConfigRid(ai, 1);
- memcpy (ai->config.macAddr, addr->sa_data, dev->addr_len);
- set_bit (FLAG_COMMIT, &ai->flags);
- disable_MAC(ai, 1);
- writeConfigRid (ai, 1);
- enable_MAC(ai, 1);
- memcpy (ai->dev->dev_addr, addr->sa_data, dev->addr_len);
- if (ai->wifidev)
- memcpy (ai->wifidev->dev_addr, addr->sa_data, dev->addr_len);
- return 0;
-}
-
-static int airo_change_mtu(struct net_device *dev, int new_mtu)
-{
- if ((new_mtu < 68) || (new_mtu > 2400))
- return -EINVAL;
- dev->mtu = new_mtu;
- return 0;
-}
-
-static LIST_HEAD(airo_devices);
-
-static void add_airo_dev(struct airo_info *ai)
-{
- /* Upper layers already keep track of PCI devices,
- * so we only need to remember our non-PCI cards. */
- if (!ai->pci)
- list_add_tail(&ai->dev_list, &airo_devices);
-}
-
-static void del_airo_dev(struct airo_info *ai)
-{
- if (!ai->pci)
- list_del(&ai->dev_list);
-}
-
-static int airo_close(struct net_device *dev) {
- struct airo_info *ai = dev->ml_priv;
-
- netif_stop_queue(dev);
-
- if (ai->wifidev != dev) {
-#ifdef POWER_ON_DOWN
- /* Shut power to the card. The idea is that the user can save
- * power when he doesn't need the card with "ifconfig down".
- * That's the method that is most friendly towards the network
- * stack (i.e. the network stack won't try to broadcast
- * anything on the interface and routes are gone). Jean II */
- set_bit(FLAG_RADIO_DOWN, &ai->flags);
- disable_MAC(ai, 1);
-#endif
- disable_interrupts( ai );
-
- free_irq(dev->irq, dev);
-
- set_bit(JOB_DIE, &ai->jobs);
- kthread_stop(ai->airo_thread_task);
- }
- return 0;
-}
-
-void stop_airo_card( struct net_device *dev, int freeres )
-{
- struct airo_info *ai = dev->ml_priv;
-
- set_bit(FLAG_RADIO_DOWN, &ai->flags);
- disable_MAC(ai, 1);
- disable_interrupts(ai);
- takedown_proc_entry( dev, ai );
- if (test_bit(FLAG_REGISTERED, &ai->flags)) {
- unregister_netdev( dev );
- if (ai->wifidev) {
- unregister_netdev(ai->wifidev);
- free_netdev(ai->wifidev);
- ai->wifidev = NULL;
- }
- clear_bit(FLAG_REGISTERED, &ai->flags);
- }
- /*
- * Clean out tx queue
- */
- if (test_bit(FLAG_MPI, &ai->flags) && !skb_queue_empty(&ai->txq)) {
- struct sk_buff *skb = NULL;
- for (;(skb = skb_dequeue(&ai->txq));)
- dev_kfree_skb(skb);
- }
-
- airo_networks_free (ai);
-
- kfree(ai->flash);
- kfree(ai->rssi);
- kfree(ai->APList);
- kfree(ai->SSID);
- if (freeres) {
- /* PCMCIA frees this stuff, so only for PCI and ISA */
- release_region( dev->base_addr, 64 );
- if (test_bit(FLAG_MPI, &ai->flags)) {
- if (ai->pci)
- mpi_unmap_card(ai->pci);
- if (ai->pcimem)
- iounmap(ai->pcimem);
- if (ai->pciaux)
- iounmap(ai->pciaux);
- pci_free_consistent(ai->pci, PCI_SHARED_LEN,
- ai->shared, ai->shared_dma);
- }
- }
- crypto_free_cipher(ai->tfm);
- del_airo_dev(ai);
- free_netdev( dev );
-}
-
-EXPORT_SYMBOL(stop_airo_card);
-
-static int wll_header_parse(const struct sk_buff *skb, unsigned char *haddr)
-{
- memcpy(haddr, skb_mac_header(skb) + 10, ETH_ALEN);
- return ETH_ALEN;
-}
-
-static void mpi_unmap_card(struct pci_dev *pci)
-{
- unsigned long mem_start = pci_resource_start(pci, 1);
- unsigned long mem_len = pci_resource_len(pci, 1);
- unsigned long aux_start = pci_resource_start(pci, 2);
- unsigned long aux_len = AUXMEMSIZE;
-
- release_mem_region(aux_start, aux_len);
- release_mem_region(mem_start, mem_len);
-}
-
-/*************************************************************
- * This routine assumes that descriptors have been set up.
- * Run at insmod time or after reset, when the descriptors
- * have been initialized. Returns 0 if all is well, nonzero
- * otherwise. Does not allocate memory but sets up the card
- * using previously allocated descriptors.
- */
-static int mpi_init_descriptors (struct airo_info *ai)
-{
- Cmd cmd;
- Resp rsp;
- int i;
- int rc = SUCCESS;
-
- /* Alloc card RX descriptors */
- netif_stop_queue(ai->dev);
-
- memset(&rsp,0,sizeof(rsp));
- memset(&cmd,0,sizeof(cmd));
-
- cmd.cmd = CMD_ALLOCATEAUX;
- cmd.parm0 = FID_RX;
- cmd.parm1 = (ai->rxfids[0].card_ram_off - ai->pciaux);
- cmd.parm2 = MPI_MAX_FIDS;
- rc=issuecommand(ai, &cmd, &rsp);
- if (rc != SUCCESS) {
- airo_print_err(ai->dev->name, "Couldn't allocate RX FID");
- return rc;
- }
-
- for (i=0; i<MPI_MAX_FIDS; i++) {
- memcpy_toio(ai->rxfids[i].card_ram_off,
- &ai->rxfids[i].rx_desc, sizeof(RxFid));
- }
-
- /* Alloc card TX descriptors */
-
- memset(&rsp,0,sizeof(rsp));
- memset(&cmd,0,sizeof(cmd));
-
- cmd.cmd = CMD_ALLOCATEAUX;
- cmd.parm0 = FID_TX;
- cmd.parm1 = (ai->txfids[0].card_ram_off - ai->pciaux);
- cmd.parm2 = MPI_MAX_FIDS;
-
- for (i=0; i<MPI_MAX_FIDS; i++) {
- ai->txfids[i].tx_desc.valid = 1;
- memcpy_toio(ai->txfids[i].card_ram_off,
- &ai->txfids[i].tx_desc, sizeof(TxFid));
- }
- ai->txfids[i-1].tx_desc.eoc = 1; /* Last descriptor has EOC set */
-
- rc=issuecommand(ai, &cmd, &rsp);
- if (rc != SUCCESS) {
- airo_print_err(ai->dev->name, "Couldn't allocate TX FID");
- return rc;
- }
-
- /* Alloc card Rid descriptor */
- memset(&rsp,0,sizeof(rsp));
- memset(&cmd,0,sizeof(cmd));
-
- cmd.cmd = CMD_ALLOCATEAUX;
- cmd.parm0 = RID_RW;
- cmd.parm1 = (ai->config_desc.card_ram_off - ai->pciaux);
- cmd.parm2 = 1; /* Magic number... */
- rc=issuecommand(ai, &cmd, &rsp);
- if (rc != SUCCESS) {
- airo_print_err(ai->dev->name, "Couldn't allocate RID");
- return rc;
- }
-
- memcpy_toio(ai->config_desc.card_ram_off,
- &ai->config_desc.rid_desc, sizeof(Rid));
-
- return rc;
-}
-
-/*
- * We are setting up three things here:
- * 1) Map AUX memory for descriptors: Rid, TxFid, or RxFid.
- * 2) Map PCI memory for issuing commands.
- * 3) Allocate memory (shared) to send and receive ethernet frames.
- * (A sketch of the resulting layout follows this function.)
- */
-static int mpi_map_card(struct airo_info *ai, struct pci_dev *pci)
-{
- unsigned long mem_start, mem_len, aux_start, aux_len;
- int rc = -1;
- int i;
- dma_addr_t busaddroff;
- unsigned char *vpackoff;
- unsigned char __iomem *pciaddroff;
-
- mem_start = pci_resource_start(pci, 1);
- mem_len = pci_resource_len(pci, 1);
- aux_start = pci_resource_start(pci, 2);
- aux_len = AUXMEMSIZE;
-
- if (!request_mem_region(mem_start, mem_len, DRV_NAME)) {
- airo_print_err("", "Couldn't get region %x[%x]",
- (int)mem_start, (int)mem_len);
- goto out;
- }
- if (!request_mem_region(aux_start, aux_len, DRV_NAME)) {
- airo_print_err("", "Couldn't get region %x[%x]",
- (int)aux_start, (int)aux_len);
- goto free_region1;
- }
-
- ai->pcimem = ioremap(mem_start, mem_len);
- if (!ai->pcimem) {
- airo_print_err("", "Couldn't map region %x[%x]",
- (int)mem_start, (int)mem_len);
- goto free_region2;
- }
- ai->pciaux = ioremap(aux_start, aux_len);
- if (!ai->pciaux) {
- airo_print_err("", "Couldn't map region %x[%x]",
- (int)aux_start, (int)aux_len);
- goto free_memmap;
- }
-
- /* Reserve PKTSIZE for each fid and 2K for the Rids */
- ai->shared = pci_alloc_consistent(pci, PCI_SHARED_LEN, &ai->shared_dma);
- if (!ai->shared) {
- airo_print_err("", "Couldn't alloc_consistent %d",
- PCI_SHARED_LEN);
- goto free_auxmap;
- }
-
- /*
- * Setup descriptor RX, TX, CONFIG
- */
- busaddroff = ai->shared_dma;
- pciaddroff = ai->pciaux + AUX_OFFSET;
- vpackoff = ai->shared;
-
- /* RX descriptor setup */
- for(i = 0; i < MPI_MAX_FIDS; i++) {
- ai->rxfids[i].pending = 0;
- ai->rxfids[i].card_ram_off = pciaddroff;
- ai->rxfids[i].virtual_host_addr = vpackoff;
- ai->rxfids[i].rx_desc.host_addr = busaddroff;
- ai->rxfids[i].rx_desc.valid = 1;
- ai->rxfids[i].rx_desc.len = PKTSIZE;
- ai->rxfids[i].rx_desc.rdy = 0;
-
- pciaddroff += sizeof(RxFid);
- busaddroff += PKTSIZE;
- vpackoff += PKTSIZE;
- }
-
- /* TX descriptor setup */
- for(i = 0; i < MPI_MAX_FIDS; i++) {
- ai->txfids[i].card_ram_off = pciaddroff;
- ai->txfids[i].virtual_host_addr = vpackoff;
- ai->txfids[i].tx_desc.valid = 1;
- ai->txfids[i].tx_desc.host_addr = busaddroff;
- memcpy(ai->txfids[i].virtual_host_addr,
- &wifictlhdr8023, sizeof(wifictlhdr8023));
-
- pciaddroff += sizeof(TxFid);
- busaddroff += PKTSIZE;
- vpackoff += PKTSIZE;
- }
- ai->txfids[i-1].tx_desc.eoc = 1; /* Last descriptor has EOC set */
-
- /* Rid descriptor setup */
- ai->config_desc.card_ram_off = pciaddroff;
- ai->config_desc.virtual_host_addr = vpackoff;
- ai->config_desc.rid_desc.host_addr = busaddroff;
- ai->ridbus = busaddroff;
- ai->config_desc.rid_desc.rid = 0;
- ai->config_desc.rid_desc.len = RIDSIZE;
- ai->config_desc.rid_desc.valid = 1;
- pciaddroff += sizeof(Rid);
- busaddroff += RIDSIZE;
- vpackoff += RIDSIZE;
-
- /* Tell card about descriptors */
- if (mpi_init_descriptors (ai) != SUCCESS)
- goto free_shared;
-
- return 0;
- free_shared:
- pci_free_consistent(pci, PCI_SHARED_LEN, ai->shared, ai->shared_dma);
- free_auxmap:
- iounmap(ai->pciaux);
- free_memmap:
- iounmap(ai->pcimem);
- free_region2:
- release_mem_region(aux_start, aux_len);
- free_region1:
- release_mem_region(mem_start, mem_len);
- out:
- return rc;
-}
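-
-/*
- * Illustrative sketch (not code from the driver): the shared-buffer
- * layout that mpi_map_card() carves out, in the order of the three
- * setup loops above. The symbolic sizes are the ones the code uses;
- * the closing inequality is an assumption about how PCI_SHARED_LEN
- * is defined, not something checked here.
- *
- *   shared / shared_dma:
- *     [ RX packets 0 .. MPI_MAX_FIDS-1 ]  MPI_MAX_FIDS * PKTSIZE bytes
- *     [ TX packets 0 .. MPI_MAX_FIDS-1 ]  MPI_MAX_FIDS * PKTSIZE bytes
- *     [ RID buffer ]                      RIDSIZE bytes
- *
- * so the carving only fits if
- *   PCI_SHARED_LEN >= 2 * MPI_MAX_FIDS * PKTSIZE + RIDSIZE
- */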
-
-static const struct header_ops airo_header_ops = {
- .parse = wll_header_parse,
-};
-
-static const struct net_device_ops airo11_netdev_ops = {
- .ndo_open = airo_open,
- .ndo_stop = airo_close,
- .ndo_start_xmit = airo_start_xmit11,
- .ndo_get_stats = airo_get_stats,
- .ndo_set_mac_address = airo_set_mac_address,
- .ndo_do_ioctl = airo_ioctl,
- .ndo_change_mtu = airo_change_mtu,
-};
-
-static void wifi_setup(struct net_device *dev)
-{
- dev->netdev_ops = &airo11_netdev_ops;
- dev->header_ops = &airo_header_ops;
- dev->wireless_handlers = &airo_handler_def;
-
- dev->type = ARPHRD_IEEE80211;
- dev->hard_header_len = ETH_HLEN;
- dev->mtu = AIRO_DEF_MTU;
- dev->addr_len = ETH_ALEN;
- dev->tx_queue_len = 100;
-
- memset(dev->broadcast,0xFF, ETH_ALEN);
-
- dev->flags = IFF_BROADCAST|IFF_MULTICAST;
-}
-
-static struct net_device *init_wifidev(struct airo_info *ai,
- struct net_device *ethdev)
-{
- int err;
- struct net_device *dev = alloc_netdev(0, "wifi%d", wifi_setup);
- if (!dev)
- return NULL;
- dev->ml_priv = ethdev->ml_priv;
- dev->irq = ethdev->irq;
- dev->base_addr = ethdev->base_addr;
- dev->wireless_data = ethdev->wireless_data;
- SET_NETDEV_DEV(dev, ethdev->dev.parent);
- memcpy(dev->dev_addr, ethdev->dev_addr, dev->addr_len);
- err = register_netdev(dev);
- if (err<0) {
- free_netdev(dev);
- return NULL;
- }
- return dev;
-}
-
-static int reset_card( struct net_device *dev , int lock) {
- struct airo_info *ai = dev->ml_priv;
-
- if (lock && down_interruptible(&ai->sem))
- return -1;
- waitbusy (ai);
- OUT4500(ai,COMMAND,CMD_SOFTRESET);
- msleep(200);
- waitbusy (ai);
- msleep(200);
- if (lock)
- up(&ai->sem);
- return 0;
-}
-
-#define AIRO_MAX_NETWORK_COUNT 64
-static int airo_networks_allocate(struct airo_info *ai)
-{
- if (ai->networks)
- return 0;
-
- ai->networks =
- kzalloc(AIRO_MAX_NETWORK_COUNT * sizeof(BSSListElement),
- GFP_KERNEL);
- if (!ai->networks) {
- airo_print_warn("", "Out of memory allocating beacons");
- return -ENOMEM;
- }
-
- return 0;
-}
-
-static void airo_networks_free(struct airo_info *ai)
-{
- kfree(ai->networks);
- ai->networks = NULL;
-}
-
-static void airo_networks_initialize(struct airo_info *ai)
-{
- int i;
-
- INIT_LIST_HEAD(&ai->network_free_list);
- INIT_LIST_HEAD(&ai->network_list);
- for (i = 0; i < AIRO_MAX_NETWORK_COUNT; i++)
- list_add_tail(&ai->networks[i].list,
- &ai->network_free_list);
-}
-
-static const struct net_device_ops airo_netdev_ops = {
- .ndo_open = airo_open,
- .ndo_stop = airo_close,
- .ndo_start_xmit = airo_start_xmit,
- .ndo_get_stats = airo_get_stats,
- .ndo_set_multicast_list = airo_set_multicast_list,
- .ndo_set_mac_address = airo_set_mac_address,
- .ndo_do_ioctl = airo_ioctl,
- .ndo_change_mtu = airo_change_mtu,
- .ndo_validate_addr = eth_validate_addr,
-};
-
-static const struct net_device_ops mpi_netdev_ops = {
- .ndo_open = airo_open,
- .ndo_stop = airo_close,
- .ndo_start_xmit = mpi_start_xmit,
- .ndo_get_stats = airo_get_stats,
- .ndo_set_multicast_list = airo_set_multicast_list,
- .ndo_set_mac_address = airo_set_mac_address,
- .ndo_do_ioctl = airo_ioctl,
- .ndo_change_mtu = airo_change_mtu,
- .ndo_validate_addr = eth_validate_addr,
-};
-
-
-static struct net_device *_init_airo_card( unsigned short irq, int port,
- int is_pcmcia, struct pci_dev *pci,
- struct device *dmdev )
-{
- struct net_device *dev;
- struct airo_info *ai;
- int i, rc;
- CapabilityRid cap_rid;
-
- /* Create the network device object. */
- dev = alloc_netdev(sizeof(*ai), "", ether_setup);
- if (!dev) {
- airo_print_err("", "Couldn't alloc_etherdev");
- return NULL;
- }
-
- ai = dev->ml_priv = netdev_priv(dev);
- ai->wifidev = NULL;
- ai->flags = 1 << FLAG_RADIO_DOWN;
- ai->jobs = 0;
- ai->dev = dev;
- if (pci && (pci->device == 0x5000 || pci->device == 0xa504)) {
- airo_print_dbg("", "Found an MPI350 card");
- set_bit(FLAG_MPI, &ai->flags);
- }
- spin_lock_init(&ai->aux_lock);
- sema_init(&ai->sem, 1);
- ai->config.len = 0;
- ai->pci = pci;
- init_waitqueue_head (&ai->thr_wait);
- ai->tfm = NULL;
- add_airo_dev(ai);
-
- if (airo_networks_allocate (ai))
- goto err_out_free;
- airo_networks_initialize (ai);
-
- skb_queue_head_init (&ai->txq);
-
- /* The Airo-specific entries in the device structure. */
- if (test_bit(FLAG_MPI,&ai->flags))
- dev->netdev_ops = &mpi_netdev_ops;
- else
- dev->netdev_ops = &airo_netdev_ops;
- dev->wireless_handlers = &airo_handler_def;
- ai->wireless_data.spy_data = &ai->spy_data;
- dev->wireless_data = &ai->wireless_data;
- dev->irq = irq;
- dev->base_addr = port;
-
- SET_NETDEV_DEV(dev, dmdev);
-
- reset_card (dev, 1);
- msleep(400);
-
- if (!is_pcmcia) {
- if (!request_region(dev->base_addr, 64, DRV_NAME)) {
- rc = -EBUSY;
- airo_print_err(dev->name, "Couldn't request region");
- goto err_out_nets;
- }
- }
-
- if (test_bit(FLAG_MPI,&ai->flags)) {
- if (mpi_map_card(ai, pci)) {
- airo_print_err("", "Could not map memory");
- goto err_out_res;
- }
- }
-
- if (probe) {
- if (setup_card(ai, dev->dev_addr, 1) != SUCCESS) {
- airo_print_err(dev->name, "MAC could not be enabled" );
- rc = -EIO;
- goto err_out_map;
- }
- } else if (!test_bit(FLAG_MPI,&ai->flags)) {
- ai->bap_read = fast_bap_read;
- set_bit(FLAG_FLASHING, &ai->flags);
- }
-
- strcpy(dev->name, "eth%d");
- rc = register_netdev(dev);
- if (rc) {
- airo_print_err(dev->name, "Couldn't register_netdev");
- goto err_out_map;
- }
- ai->wifidev = init_wifidev(ai, dev);
- if (!ai->wifidev)
- goto err_out_reg;
-
- rc = readCapabilityRid(ai, &cap_rid, 1);
- if (rc != SUCCESS) {
- rc = -EIO;
- goto err_out_wifi;
- }
- /* WEP capability discovery */
- ai->wep_capable = (cap_rid.softCap & cpu_to_le16(0x02)) ? 1 : 0;
- ai->max_wep_idx = (cap_rid.softCap & cpu_to_le16(0x80)) ? 3 : 0;
-
- airo_print_info(dev->name, "Firmware version %x.%x.%02x",
- ((le16_to_cpu(cap_rid.softVer) >> 8) & 0xF),
- (le16_to_cpu(cap_rid.softVer) & 0xFF),
- le16_to_cpu(cap_rid.softSubVer));
-
- /* Test for WPA support */
- /* Only firmware versions 5.30.17 or better can do WPA */
- if (le16_to_cpu(cap_rid.softVer) > 0x530
- || (le16_to_cpu(cap_rid.softVer) == 0x530
- && le16_to_cpu(cap_rid.softSubVer) >= 17)) {
- airo_print_info(ai->dev->name, "WPA supported.");
-
- set_bit(FLAG_WPA_CAPABLE, &ai->flags);
- ai->bssListFirst = RID_WPA_BSSLISTFIRST;
- ai->bssListNext = RID_WPA_BSSLISTNEXT;
- ai->bssListRidLen = sizeof(BSSListRid);
- } else {
- airo_print_info(ai->dev->name, "WPA unsupported with firmware "
- "versions older than 5.30.17.");
-
- ai->bssListFirst = RID_BSSLISTFIRST;
- ai->bssListNext = RID_BSSLISTNEXT;
- ai->bssListRidLen = sizeof(BSSListRid) - sizeof(BSSListRidExtra);
- }
-
- set_bit(FLAG_REGISTERED,&ai->flags);
- airo_print_info(dev->name, "MAC enabled %pM", dev->dev_addr);
-
- /* Allocate the transmit buffers */
- if (probe && !test_bit(FLAG_MPI,&ai->flags))
- for( i = 0; i < MAX_FIDS; i++ )
- ai->fids[i] = transmit_allocate(ai,AIRO_DEF_MTU,i>=MAX_FIDS/2);
-
- if (setup_proc_entry(dev, dev->ml_priv) < 0)
- goto err_out_wifi;
-
- return dev;
-
-err_out_wifi:
- unregister_netdev(ai->wifidev);
- free_netdev(ai->wifidev);
-err_out_reg:
- unregister_netdev(dev);
-err_out_map:
- if (test_bit(FLAG_MPI,&ai->flags) && pci) {
- pci_free_consistent(pci, PCI_SHARED_LEN, ai->shared, ai->shared_dma);
- iounmap(ai->pciaux);
- iounmap(ai->pcimem);
- mpi_unmap_card(ai->pci);
- }
-err_out_res:
- if (!is_pcmcia)
- release_region( dev->base_addr, 64 );
-err_out_nets:
- airo_networks_free(ai);
- del_airo_dev(ai);
-err_out_free:
- free_netdev(dev);
- return NULL;
-}
-
-struct net_device *init_airo_card( unsigned short irq, int port, int is_pcmcia,
- struct device *dmdev)
-{
- return _init_airo_card ( irq, port, is_pcmcia, NULL, dmdev);
-}
-
-EXPORT_SYMBOL(init_airo_card);
-
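-/*
- * Poll until the command-busy bit clears: up to ~100 ms in 10 us steps,
- * poking EVACK every 200 us to clear a stuck busy indication. Returns
- * non-zero if the bit cleared in time.
- */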
-static int waitbusy (struct airo_info *ai) {
- int delay = 0;
- while ((IN4500(ai, COMMAND) & COMMAND_BUSY) && (delay < 10000)) {
- udelay (10);
- if ((++delay % 20) == 0)
- OUT4500(ai, EVACK, EV_CLEARCOMMANDBUSY);
- }
- return delay < 10000;
-}
-
-int reset_airo_card( struct net_device *dev )
-{
- int i;
- struct airo_info *ai = dev->ml_priv;
-
- if (reset_card (dev, 1))
- return -1;
-
- if ( setup_card(ai, dev->dev_addr, 1 ) != SUCCESS ) {
- airo_print_err(dev->name, "MAC could not be enabled");
- return -1;
- }
- airo_print_info(dev->name, "MAC enabled %pM", dev->dev_addr);
- /* Allocate the transmit buffers if needed */
- if (!test_bit(FLAG_MPI,&ai->flags))
- for( i = 0; i < MAX_FIDS; i++ )
- ai->fids[i] = transmit_allocate (ai,AIRO_DEF_MTU,i>=MAX_FIDS/2);
-
- enable_interrupts( ai );
- netif_wake_queue(dev);
- return 0;
-}
-
-EXPORT_SYMBOL(reset_airo_card);
-
-static void airo_send_event(struct net_device *dev) {
- struct airo_info *ai = dev->ml_priv;
- union iwreq_data wrqu;
- StatusRid status_rid;
-
- clear_bit(JOB_EVENT, &ai->jobs);
- PC4500_readrid(ai, RID_STATUS, &status_rid, sizeof(status_rid), 0);
- up(&ai->sem);
- wrqu.data.length = 0;
- wrqu.data.flags = 0;
- memcpy(wrqu.ap_addr.sa_data, status_rid.bssid[0], ETH_ALEN);
- wrqu.ap_addr.sa_family = ARPHRD_ETHER;
-
- /* Send event to user space */
- wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL);
-}
-
-static void airo_process_scan_results (struct airo_info *ai) {
- union iwreq_data wrqu;
- BSSListRid bss;
- int rc;
- BSSListElement * loop_net;
- BSSListElement * tmp_net;
-
- /* Blow away current list of scan results */
- list_for_each_entry_safe (loop_net, tmp_net, &ai->network_list, list) {
- list_move_tail (&loop_net->list, &ai->network_free_list);
- /* Don't blow away ->list, just BSS data */
- memset (loop_net, 0, sizeof (loop_net->bss));
- }
-
- /* Try to read the first entry of the scan result */
- rc = PC4500_readrid(ai, ai->bssListFirst, &bss, ai->bssListRidLen, 0);
- if((rc) || (bss.index == cpu_to_le16(0xffff))) {
- /* No scan results */
- goto out;
- }
-
- /* Read and parse all entries */
- tmp_net = NULL;
- while((!rc) && (bss.index != cpu_to_le16(0xffff))) {
- /* Grab a network off the free list */
- if (!list_empty(&ai->network_free_list)) {
- tmp_net = list_entry(ai->network_free_list.next,
- BSSListElement, list);
- list_del(ai->network_free_list.next);
- }
-
- if (tmp_net != NULL) {
- memcpy(tmp_net, &bss, sizeof(tmp_net->bss));
- list_add_tail(&tmp_net->list, &ai->network_list);
- tmp_net = NULL;
- }
-
- /* Read next entry */
- rc = PC4500_readrid(ai, ai->bssListNext,
- &bss, ai->bssListRidLen, 0);
- }
-
-out:
- ai->scan_timeout = 0;
- clear_bit(JOB_SCAN_RESULTS, &ai->jobs);
- up(&ai->sem);
-
- /* Send an empty event to user space.
- * We don't send the received data on
- * the event because it would require
- * us to do complex transcoding, and
- * we want to minimise the work done in
- * the irq handler. Use a request to
- * extract the data - Jean II */
- wrqu.data.length = 0;
- wrqu.data.flags = 0;
- wireless_send_event(ai->dev, SIOCGIWSCAN, &wrqu, NULL);
-}
-
-static int airo_thread(void *data) {
- struct net_device *dev = data;
- struct airo_info *ai = dev->ml_priv;
- int locked;
-
- set_freezable();
- while(1) {
- /* make swsusp happy with our thread */
- try_to_freeze();
-
- if (test_bit(JOB_DIE, &ai->jobs))
- break;
-
- if (ai->jobs) {
- locked = down_interruptible(&ai->sem);
- } else {
- wait_queue_t wait;
-
- init_waitqueue_entry(&wait, current);
- add_wait_queue(&ai->thr_wait, &wait);
- for (;;) {
- set_current_state(TASK_INTERRUPTIBLE);
- if (ai->jobs)
- break;
- if (ai->expires || ai->scan_timeout) {
- if (ai->scan_timeout &&
- time_after_eq(jiffies,ai->scan_timeout)){
- set_bit(JOB_SCAN_RESULTS, &ai->jobs);
- break;
- } else if (ai->expires &&
- time_after_eq(jiffies,ai->expires)){
- set_bit(JOB_AUTOWEP, &ai->jobs);
- break;
- }
- if (!kthread_should_stop() &&
- !freezing(current)) {
- unsigned long wake_at;
- if (!ai->expires || !ai->scan_timeout) {
- wake_at = max(ai->expires,
- ai->scan_timeout);
- } else {
- wake_at = min(ai->expires,
- ai->scan_timeout);
- }
- schedule_timeout(wake_at - jiffies);
- continue;
- }
- } else if (!kthread_should_stop() &&
- !freezing(current)) {
- schedule();
- continue;
- }
- break;
- }
- current->state = TASK_RUNNING;
- remove_wait_queue(&ai->thr_wait, &wait);
- locked = 1;
- }
-
- if (locked)
- continue;
-
- if (test_bit(JOB_DIE, &ai->jobs)) {
- up(&ai->sem);
- break;
- }
-
- if (ai->power.event || test_bit(FLAG_FLASHING, &ai->flags)) {
- up(&ai->sem);
- continue;
- }
-
- if (test_bit(JOB_XMIT, &ai->jobs))
- airo_end_xmit(dev);
- else if (test_bit(JOB_XMIT11, &ai->jobs))
- airo_end_xmit11(dev);
- else if (test_bit(JOB_STATS, &ai->jobs))
- airo_read_stats(dev);
- else if (test_bit(JOB_WSTATS, &ai->jobs))
- airo_read_wireless_stats(ai);
- else if (test_bit(JOB_PROMISC, &ai->jobs))
- airo_set_promisc(ai);
- else if (test_bit(JOB_MIC, &ai->jobs))
- micinit(ai);
- else if (test_bit(JOB_EVENT, &ai->jobs))
- airo_send_event(dev);
- else if (test_bit(JOB_AUTOWEP, &ai->jobs))
- timer_func(dev);
- else if (test_bit(JOB_SCAN_RESULTS, &ai->jobs))
- airo_process_scan_results(ai);
- else /* Shouldn't get here, but we make sure to unlock */
- up(&ai->sem);
- }
-
- return 0;
-}
-
-static int header_len(__le16 ctl)
-{
- u16 fc = le16_to_cpu(ctl);
- switch (fc & 0xc) {
- case 4:
- if ((fc & 0xe0) == 0xc0)
- return 10; /* one-address control packet */
- return 16; /* two-address control packet */
- case 8:
- if ((fc & 0x300) == 0x300)
- return 30; /* WDS packet */
- }
- return 24;
-}
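-
-/*
- * Illustrative only (kept out of the build): spot checks of header_len()
- * using frame-control values assembled from the 802.11 bit layout. The
- * example values are assumptions worked out from the decoding above,
- * not taken from the hardware or the driver.
- */
-#if 0
-static void header_len_example(void)
-{
- BUG_ON(header_len(cpu_to_le16(0x00D4)) != 10); /* ACK: one address */
- BUG_ON(header_len(cpu_to_le16(0x00B4)) != 16); /* RTS: two addresses */
- BUG_ON(header_len(cpu_to_le16(0x0308)) != 30); /* data, ToDS|FromDS: WDS */
- BUG_ON(header_len(cpu_to_le16(0x0008)) != 24); /* plain data frame */
-}
-#endif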
-
-static void airo_handle_cisco_mic(struct airo_info *ai)
-{
- if (test_bit(FLAG_MIC_CAPABLE, &ai->flags)) {
- set_bit(JOB_MIC, &ai->jobs);
- wake_up_interruptible(&ai->thr_wait);
- }
-}
-
-/* Airo Status codes */
-#define STAT_NOBEACON 0x8000 /* Loss of sync - missed beacons */
-#define STAT_MAXRETRIES 0x8001 /* Loss of sync - max retries */
-#define STAT_MAXARL 0x8002 /* Loss of sync - average retry level exceeded*/
-#define STAT_FORCELOSS 0x8003 /* Loss of sync - host request */
-#define STAT_TSFSYNC 0x8004 /* Loss of sync - TSF synchronization */
-#define STAT_DEAUTH 0x8100 /* low byte is 802.11 reason code */
-#define STAT_DISASSOC 0x8200 /* low byte is 802.11 reason code */
-#define STAT_ASSOC_FAIL 0x8400 /* low byte is 802.11 reason code */
-#define STAT_AUTH_FAIL 0x0300 /* low byte is 802.11 reason code */
-#define STAT_ASSOC 0x0400 /* Associated */
-#define STAT_REASSOC 0x0600 /* Reassociated? Only on firmware >= 5.30.17 */
-
-static void airo_print_status(const char *devname, u16 status)
-{
- u8 reason = status & 0xFF;
-
- switch (status) {
- case STAT_NOBEACON:
- airo_print_dbg(devname, "link lost (missed beacons)");
- break;
- case STAT_MAXRETRIES:
- case STAT_MAXARL:
- airo_print_dbg(devname, "link lost (max retries)");
- break;
- case STAT_FORCELOSS:
- airo_print_dbg(devname, "link lost (local choice)");
- break;
- case STAT_TSFSYNC:
- airo_print_dbg(devname, "link lost (TSF sync lost)");
- break;
- case STAT_DEAUTH:
- airo_print_dbg(devname, "deauthenticated (reason: %d)", reason);
- break;
- case STAT_DISASSOC:
- airo_print_dbg(devname, "disassociated (reason: %d)", reason);
- break;
- case STAT_ASSOC_FAIL:
- airo_print_dbg(devname, "association failed (reason: %d)",
- reason);
- break;
- case STAT_AUTH_FAIL:
- airo_print_dbg(devname, "authentication failed (reason: %d)",
- reason);
- break;
- default:
- break;
- }
-}
-
-static void airo_handle_link(struct airo_info *ai)
-{
- union iwreq_data wrqu;
- int scan_forceloss = 0;
- u16 status;
-
- /* Get new status and acknowledge the link change */
- status = le16_to_cpu(IN4500(ai, LINKSTAT));
- OUT4500(ai, EVACK, EV_LINK);
-
- if ((status == STAT_FORCELOSS) && (ai->scan_timeout > 0))
- scan_forceloss = 1;
-
- airo_print_status(ai->dev->name, status);
-
- if ((status == STAT_ASSOC) || (status == STAT_REASSOC)) {
- if (auto_wep)
- ai->expires = 0;
- if (ai->list_bss_task)
- wake_up_process(ai->list_bss_task);
- set_bit(FLAG_UPDATE_UNI, &ai->flags);
- set_bit(FLAG_UPDATE_MULTI, &ai->flags);
-
- if (down_trylock(&ai->sem) != 0) {
- set_bit(JOB_EVENT, &ai->jobs);
- wake_up_interruptible(&ai->thr_wait);
- } else
- airo_send_event(ai->dev);
- } else if (!scan_forceloss) {
- if (auto_wep && !ai->expires) {
- ai->expires = RUN_AT(3*HZ);
- wake_up_interruptible(&ai->thr_wait);
- }
-
- /* Send event to user space */
- memset(wrqu.ap_addr.sa_data, '\0', ETH_ALEN);
- wrqu.ap_addr.sa_family = ARPHRD_ETHER;
- wireless_send_event(ai->dev, SIOCGIWAP, &wrqu, NULL);
- }
-}
-
-static void airo_handle_rx(struct airo_info *ai)
-{
- struct sk_buff *skb = NULL;
- __le16 fc, v, *buffer, tmpbuf[4];
- u16 len, hdrlen = 0, gap, fid;
- struct rx_hdr hdr;
- int success = 0;
-
- if (test_bit(FLAG_MPI, &ai->flags)) {
- if (test_bit(FLAG_802_11, &ai->flags))
- mpi_receive_802_11(ai);
- else
- mpi_receive_802_3(ai);
- OUT4500(ai, EVACK, EV_RX);
- return;
- }
-
- fid = IN4500(ai, RXFID);
-
- /* Get the packet length */
- if (test_bit(FLAG_802_11, &ai->flags)) {
- bap_setup (ai, fid, 4, BAP0);
- bap_read (ai, (__le16*)&hdr, sizeof(hdr), BAP0);
- /* Bad CRC. Ignore packet */
- if (le16_to_cpu(hdr.status) & 2)
- hdr.len = 0;
- if (ai->wifidev == NULL)
- hdr.len = 0;
- } else {
- bap_setup(ai, fid, 0x36, BAP0);
- bap_read(ai, &hdr.len, 2, BAP0);
- }
- len = le16_to_cpu(hdr.len);
-
- if (len > AIRO_DEF_MTU) {
- airo_print_err(ai->dev->name, "Bad size %d", len);
- goto done;
- }
- if (len == 0)
- goto done;
-
- if (test_bit(FLAG_802_11, &ai->flags)) {
- bap_read(ai, &fc, sizeof (fc), BAP0);
- hdrlen = header_len(fc);
- } else
- hdrlen = ETH_ALEN * 2;
-
- skb = dev_alloc_skb(len + hdrlen + 2 + 2);
- if (!skb) {
- ai->dev->stats.rx_dropped++;
- goto done;
- }
-
- skb_reserve(skb, 2); /* This way the IP header is aligned */
- buffer = (__le16 *) skb_put(skb, len + hdrlen);
- if (test_bit(FLAG_802_11, &ai->flags)) {
- buffer[0] = fc;
- bap_read(ai, buffer + 1, hdrlen - 2, BAP0);
- if (hdrlen == 24)
- bap_read(ai, tmpbuf, 6, BAP0);
-
- bap_read(ai, &v, sizeof(v), BAP0);
- gap = le16_to_cpu(v);
- if (gap) {
- if (gap <= 8) {
- bap_read(ai, tmpbuf, gap, BAP0);
- } else {
- airo_print_err(ai->dev->name, "gaplen too "
- "big. Problems will follow...");
- }
- }
- bap_read(ai, buffer + hdrlen/2, len, BAP0);
- } else {
- MICBuffer micbuf;
-
- bap_read(ai, buffer, ETH_ALEN * 2, BAP0);
- if (ai->micstats.enabled) {
- bap_read(ai, (__le16 *) &micbuf, sizeof (micbuf), BAP0);
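- /* A value > 1500 (0x05DC) is an EtherType, so no MIC
- header is present and we rewind; a length field
- means the frame carries a MIC header. */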
- if (ntohs(micbuf.typelen) > 0x05DC)
- bap_setup(ai, fid, 0x44, BAP0);
- else {
- if (len <= sizeof (micbuf)) {
- dev_kfree_skb_irq(skb);
- goto done;
- }
-
- len -= sizeof(micbuf);
- skb_trim(skb, len + hdrlen);
- }
- }
-
- bap_read(ai, buffer + ETH_ALEN, len, BAP0);
- if (decapsulate(ai, &micbuf, (etherHead*) buffer, len))
- dev_kfree_skb_irq (skb);
- else
- success = 1;
- }
-
-#ifdef WIRELESS_SPY
- if (success && (ai->spy_data.spy_number > 0)) {
- char *sa;
- struct iw_quality wstats;
-
- /* Prepare spy data : addr + qual */
- if (!test_bit(FLAG_802_11, &ai->flags)) {
- sa = (char *) buffer + 6;
- bap_setup(ai, fid, 8, BAP0);
- bap_read(ai, (__le16 *) hdr.rssi, 2, BAP0);
- } else
- sa = (char *) buffer + 10;
- wstats.qual = hdr.rssi[0];
- if (ai->rssi)
- wstats.level = 0x100 - ai->rssi[hdr.rssi[1]].rssidBm;
- else
- wstats.level = (hdr.rssi[1] + 321) / 2;
- wstats.noise = ai->wstats.qual.noise;
- wstats.updated = IW_QUAL_LEVEL_UPDATED
- | IW_QUAL_QUAL_UPDATED
- | IW_QUAL_DBM;
- /* Update spy records */
- wireless_spy_update(ai->dev, sa, &wstats);
- }
-#endif /* WIRELESS_SPY */
-
-done:
- OUT4500(ai, EVACK, EV_RX);
-
- if (success) {
- if (test_bit(FLAG_802_11, &ai->flags)) {
- skb_reset_mac_header(skb);
- skb->pkt_type = PACKET_OTHERHOST;
- skb->dev = ai->wifidev;
- skb->protocol = htons(ETH_P_802_2);
- } else
- skb->protocol = eth_type_trans(skb, ai->dev);
- skb->ip_summed = CHECKSUM_NONE;
-
- netif_rx(skb);
- }
-}
-
-static void airo_handle_tx(struct airo_info *ai, u16 status)
-{
- int i, len = 0, index = -1;
- u16 fid;
-
- if (test_bit(FLAG_MPI, &ai->flags)) {
- unsigned long flags;
-
- if (status & EV_TXEXC)
- get_tx_error(ai, -1);
-
- spin_lock_irqsave(&ai->aux_lock, flags);
- if (!skb_queue_empty(&ai->txq)) {
- spin_unlock_irqrestore(&ai->aux_lock,flags);
- mpi_send_packet(ai->dev);
- } else {
- clear_bit(FLAG_PENDING_XMIT, &ai->flags);
- spin_unlock_irqrestore(&ai->aux_lock,flags);
- netif_wake_queue(ai->dev);
- }
- OUT4500(ai, EVACK, status & (EV_TX | EV_TXCPY | EV_TXEXC));
- return;
- }
-
- fid = IN4500(ai, TXCOMPLFID);
-
- for(i = 0; i < MAX_FIDS; i++) {
- if ((ai->fids[i] & 0xffff) == fid) {
- len = ai->fids[i] >> 16;
- index = i;
- }
- }
-
- if (index != -1) {
- if (status & EV_TXEXC)
- get_tx_error(ai, index);
-
- OUT4500(ai, EVACK, status & (EV_TX | EV_TXEXC));
-
- /* Set up to be used again */
- ai->fids[index] &= 0xffff;
- if (index < MAX_FIDS / 2) {
- if (!test_bit(FLAG_PENDING_XMIT, &ai->flags))
- netif_wake_queue(ai->dev);
- } else {
- if (!test_bit(FLAG_PENDING_XMIT11, &ai->flags))
- netif_wake_queue(ai->wifidev);
- }
- } else {
- OUT4500(ai, EVACK, status & (EV_TX | EV_TXCPY | EV_TXEXC));
- airo_print_err(ai->dev->name, "Unallocated FID was used to xmit");
- }
-}
-
-static irqreturn_t airo_interrupt(int irq, void *dev_id)
-{
- struct net_device *dev = dev_id;
- u16 status, savedInterrupts = 0;
- struct airo_info *ai = dev->ml_priv;
- int handled = 0;
-
- if (!netif_device_present(dev))
- return IRQ_NONE;
-
- for (;;) {
- status = IN4500(ai, EVSTAT);
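- /* 0xffff reads back when the card is gone (e.g. ejected) */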
- if (!(status & STATUS_INTS) || (status == 0xffff))
- break;
-
- handled = 1;
-
- if (status & EV_AWAKE) {
- OUT4500(ai, EVACK, EV_AWAKE);
- OUT4500(ai, EVACK, EV_AWAKE);
- }
-
- if (!savedInterrupts) {
- savedInterrupts = IN4500(ai, EVINTEN);
- OUT4500(ai, EVINTEN, 0);
- }
-
- if (status & EV_MIC) {
- OUT4500(ai, EVACK, EV_MIC);
- airo_handle_cisco_mic(ai);
- }
-
- if (status & EV_LINK) {
- /* Link status changed */
- airo_handle_link(ai);
- }
-
- /* Check to see if there is something to receive */
- if (status & EV_RX)
- airo_handle_rx(ai);
-
- /* Check to see if a packet has been transmitted */
- if (status & (EV_TX | EV_TXCPY | EV_TXEXC))
- airo_handle_tx(ai, status);
-
- if ( status & ~STATUS_INTS & ~IGNORE_INTS ) {
- airo_print_warn(ai->dev->name, "Got weird status %x",
- status & ~STATUS_INTS & ~IGNORE_INTS );
- }
- }
-
- if (savedInterrupts)
- OUT4500(ai, EVINTEN, savedInterrupts);
-
- return IRQ_RETVAL(handled);
-}
-
-/*
- * Routines to talk to the card
- */
-
-/*
- * This was originally written for the 4500, hence the name.
- * NOTE: If used with 8-bit mode and SMP, bad things will happen!
- * Why would someone do 8-bit IO on an SMP machine?!?
- */
-static void OUT4500( struct airo_info *ai, u16 reg, u16 val ) {
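- /* MPI350 cards expose the 4500-style registers at doubled offsets */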
- if (test_bit(FLAG_MPI,&ai->flags))
- reg <<= 1;
- if ( !do8bitIO )
- outw( val, ai->dev->base_addr + reg );
- else {
- outb( val & 0xff, ai->dev->base_addr + reg );
- outb( val >> 8, ai->dev->base_addr + reg + 1 );
- }
-}
-
-static u16 IN4500( struct airo_info *ai, u16 reg ) {
- unsigned short rc;
-
- if (test_bit(FLAG_MPI,&ai->flags))
- reg <<= 1;
- if ( !do8bitIO )
- rc = inw( ai->dev->base_addr + reg );
- else {
- rc = inb( ai->dev->base_addr + reg );
- rc += ((int)inb( ai->dev->base_addr + reg + 1 )) << 8;
- }
- return rc;
-}
-
-static int enable_MAC(struct airo_info *ai, int lock)
-{
- int rc;
- Cmd cmd;
- Resp rsp;
-
- /* FLAG_RADIO_OFF : Radio disabled via /proc or Wireless Extensions
- * FLAG_RADIO_DOWN : Radio disabled via "ifconfig ethX down"
- * Note : we could try to use !netif_running(dev) in enable_MAC()
- * instead of this flag, but I don't trust it *within* the
- * open/close functions, and testing both flags together is
- * "cheaper" - Jean II */
- if (ai->flags & FLAG_RADIO_MASK) return SUCCESS;
-
- if (lock && down_interruptible(&ai->sem))
- return -ERESTARTSYS;
-
- if (!test_bit(FLAG_ENABLED, &ai->flags)) {
- memset(&cmd, 0, sizeof(cmd));
- cmd.cmd = MAC_ENABLE;
- rc = issuecommand(ai, &cmd, &rsp);
- if (rc == SUCCESS)
- set_bit(FLAG_ENABLED, &ai->flags);
- } else
- rc = SUCCESS;
-
- if (lock)
- up(&ai->sem);
-
- if (rc)
- airo_print_err(ai->dev->name, "Cannot enable MAC");
- else if ((rsp.status & 0xFF00) != 0) {
- airo_print_err(ai->dev->name, "Bad MAC enable reason=%x, "
- "rid=%x, offset=%d", rsp.rsp0, rsp.rsp1, rsp.rsp2);
- rc = ERROR;
- }
- return rc;
-}
-
-static void disable_MAC( struct airo_info *ai, int lock ) {
- Cmd cmd;
- Resp rsp;
-
- if (lock && down_interruptible(&ai->sem))
- return;
-
- if (test_bit(FLAG_ENABLED, &ai->flags)) {
- memset(&cmd, 0, sizeof(cmd));
- cmd.cmd = MAC_DISABLE; // disable in case already enabled
- issuecommand(ai, &cmd, &rsp);
- clear_bit(FLAG_ENABLED, &ai->flags);
- }
- if (lock)
- up(&ai->sem);
-}
-
-static void enable_interrupts( struct airo_info *ai ) {
- /* Enable the interrupts */
- OUT4500( ai, EVINTEN, STATUS_INTS );
-}
-
-static void disable_interrupts( struct airo_info *ai ) {
- OUT4500( ai, EVINTEN, 0 );
-}
-
-static void mpi_receive_802_3(struct airo_info *ai)
-{
- RxFid rxd;
- int len = 0;
- struct sk_buff *skb;
- char *buffer;
- int off = 0;
- MICBuffer micbuf;
-
- memcpy_fromio(&rxd, ai->rxfids[0].card_ram_off, sizeof(rxd));
- /* Make sure we got something */
- if (rxd.rdy && rxd.valid == 0) {
- len = rxd.len + 12;
- if (len < 12 || len > 2048)
- goto badrx;
-
- skb = dev_alloc_skb(len);
- if (!skb) {
- ai->dev->stats.rx_dropped++;
- goto badrx;
- }
- buffer = skb_put(skb,len);
- memcpy(buffer, ai->rxfids[0].virtual_host_addr, ETH_ALEN * 2);
- if (ai->micstats.enabled) {
- memcpy(&micbuf,
- ai->rxfids[0].virtual_host_addr + ETH_ALEN * 2,
- sizeof(micbuf));
- if (ntohs(micbuf.typelen) <= 0x05DC) {
- if (len <= sizeof(micbuf) + ETH_ALEN * 2)
- goto badmic;
-
- off = sizeof(micbuf);
- skb_trim (skb, len - off);
- }
- }
- memcpy(buffer + ETH_ALEN * 2,
- ai->rxfids[0].virtual_host_addr + ETH_ALEN * 2 + off,
- len - ETH_ALEN * 2 - off);
- if (decapsulate (ai, &micbuf, (etherHead*)buffer, len - off - ETH_ALEN * 2)) {
-badmic:
- dev_kfree_skb_irq (skb);
- goto badrx;
- }
-#ifdef WIRELESS_SPY
- if (ai->spy_data.spy_number > 0) {
- char *sa;
- struct iw_quality wstats;
- /* Prepare spy data : addr + qual */
- sa = buffer + ETH_ALEN;
- wstats.qual = 0; /* XXX Where do I get that info from ??? */
- wstats.level = 0;
- wstats.updated = 0;
- /* Update spy records */
- wireless_spy_update(ai->dev, sa, &wstats);
- }
-#endif /* WIRELESS_SPY */
-
- skb->ip_summed = CHECKSUM_NONE;
- skb->protocol = eth_type_trans(skb, ai->dev);
- netif_rx(skb);
- }
-badrx:
- if (rxd.valid == 0) {
- rxd.valid = 1;
- rxd.rdy = 0;
- rxd.len = PKTSIZE;
- memcpy_toio(ai->rxfids[0].card_ram_off, &rxd, sizeof(rxd));
- }
-}
-
-static void mpi_receive_802_11(struct airo_info *ai)
-{
- RxFid rxd;
- struct sk_buff *skb = NULL;
- u16 len, hdrlen = 0;
- __le16 fc;
- struct rx_hdr hdr;
- u16 gap;
- u16 *buffer;
- char *ptr = ai->rxfids[0].virtual_host_addr + 4;
-
- memcpy_fromio(&rxd, ai->rxfids[0].card_ram_off, sizeof(rxd));
- memcpy ((char *)&hdr, ptr, sizeof(hdr));
- ptr += sizeof(hdr);
- /* Bad CRC. Ignore packet */
- if (le16_to_cpu(hdr.status) & 2)
- hdr.len = 0;
- if (ai->wifidev == NULL)
- hdr.len = 0;
- len = le16_to_cpu(hdr.len);
- if (len > AIRO_DEF_MTU) {
- airo_print_err(ai->dev->name, "Bad size %d", len);
- goto badrx;
- }
- if (len == 0)
- goto badrx;
-
- fc = get_unaligned((__le16 *)ptr);
- hdrlen = header_len(fc);
-
- skb = dev_alloc_skb( len + hdrlen + 2 );
- if ( !skb ) {
- ai->dev->stats.rx_dropped++;
- goto badrx;
- }
- buffer = (u16*)skb_put (skb, len + hdrlen);
- memcpy ((char *)buffer, ptr, hdrlen);
- ptr += hdrlen;
- if (hdrlen == 24)
- ptr += 6;
- gap = get_unaligned_le16(ptr);
- ptr += sizeof(__le16);
- if (gap) {
- if (gap <= 8)
- ptr += gap;
- else
- airo_print_err(ai->dev->name,
- "gaplen too big. Problems will follow...");
- }
- memcpy ((char *)buffer + hdrlen, ptr, len);
- ptr += len;
-#ifdef IW_WIRELESS_SPY /* defined in iw_handler.h */
- if (ai->spy_data.spy_number > 0) {
- char *sa;
- struct iw_quality wstats;
- /* Prepare spy data : addr + qual */
- sa = (char*)buffer + 10;
- wstats.qual = hdr.rssi[0];
- if (ai->rssi)
- wstats.level = 0x100 - ai->rssi[hdr.rssi[1]].rssidBm;
- else
- wstats.level = (hdr.rssi[1] + 321) / 2;
- wstats.noise = ai->wstats.qual.noise;
- wstats.updated = IW_QUAL_QUAL_UPDATED
- | IW_QUAL_LEVEL_UPDATED
- | IW_QUAL_DBM;
- /* Update spy records */
- wireless_spy_update(ai->dev, sa, &wstats);
- }
-#endif /* IW_WIRELESS_SPY */
- skb_reset_mac_header(skb);
- skb->pkt_type = PACKET_OTHERHOST;
- skb->dev = ai->wifidev;
- skb->protocol = htons(ETH_P_802_2);
- skb->ip_summed = CHECKSUM_NONE;
- netif_rx( skb );
-
-badrx:
- if (rxd.valid == 0) {
- rxd.valid = 1;
- rxd.rdy = 0;
- rxd.len = PKTSIZE;
- memcpy_toio(ai->rxfids[0].card_ram_off, &rxd, sizeof(rxd));
- }
-}
-
-static u16 setup_card(struct airo_info *ai, u8 *mac, int lock)
-{
- Cmd cmd;
- Resp rsp;
- int status;
- SsidRid mySsid;
- __le16 lastindex;
- WepKeyRid wkr;
- int rc;
-
- memset( &mySsid, 0, sizeof( mySsid ) );
- kfree (ai->flash);
- ai->flash = NULL;
-
- /* The NOP is the first step in getting the card going */
- cmd.cmd = NOP;
- cmd.parm0 = cmd.parm1 = cmd.parm2 = 0;
- if (lock && down_interruptible(&ai->sem))
- return ERROR;
- if ( issuecommand( ai, &cmd, &rsp ) != SUCCESS ) {
- if (lock)
- up(&ai->sem);
- return ERROR;
- }
- disable_MAC( ai, 0);
-
- // Let's figure out if we need to use the AUX port
- if (!test_bit(FLAG_MPI,&ai->flags)) {
- cmd.cmd = CMD_ENABLEAUX;
- if (issuecommand(ai, &cmd, &rsp) != SUCCESS) {
- if (lock)
- up(&ai->sem);
- airo_print_err(ai->dev->name, "Error checking for AUX port");
- return ERROR;
- }
- if (!aux_bap || rsp.status & 0xff00) {
- ai->bap_read = fast_bap_read;
- airo_print_dbg(ai->dev->name, "Doing fast bap_reads");
- } else {
- ai->bap_read = aux_bap_read;
- airo_print_dbg(ai->dev->name, "Doing AUX bap_reads");
- }
- }
- if (lock)
- up(&ai->sem);
- if (ai->config.len == 0) {
- int i;
- tdsRssiRid rssi_rid;
- CapabilityRid cap_rid;
-
- kfree(ai->APList);
- ai->APList = NULL;
- kfree(ai->SSID);
- ai->SSID = NULL;
- // general configuration (read/modify/write)
- status = readConfigRid(ai, lock);
- if ( status != SUCCESS ) return ERROR;
-
- status = readCapabilityRid(ai, &cap_rid, lock);
- if ( status != SUCCESS ) return ERROR;
-
- status = PC4500_readrid(ai,RID_RSSI,&rssi_rid,sizeof(rssi_rid),lock);
- if ( status == SUCCESS ) {
- if (ai->rssi || (ai->rssi = kmalloc(512, GFP_KERNEL)) != NULL)
- memcpy(ai->rssi, (u8*)&rssi_rid + 2, 512); /* Skip RID length member */
- }
- else {
- kfree(ai->rssi);
- ai->rssi = NULL;
- if (cap_rid.softCap & cpu_to_le16(8))
- ai->config.rmode |= RXMODE_NORMALIZED_RSSI;
- else
- airo_print_warn(ai->dev->name, "unknown received signal "
- "level scale");
- }
- ai->config.opmode = adhoc ? MODE_STA_IBSS : MODE_STA_ESS;
- ai->config.authType = AUTH_OPEN;
- ai->config.modulation = MOD_CCK;
-
- if (le16_to_cpu(cap_rid.len) >= sizeof(cap_rid) &&
- (cap_rid.extSoftCap & cpu_to_le16(1)) &&
- micsetup(ai) == SUCCESS) {
- ai->config.opmode |= MODE_MIC;
- set_bit(FLAG_MIC_CAPABLE, &ai->flags);
- }
-
- /* Save off the MAC */
- for( i = 0; i < ETH_ALEN; i++ ) {
- mac[i] = ai->config.macAddr[i];
- }
-
- /* Check to see if there are any insmod configured
- rates to add */
- if ( rates[0] ) {
- memset(ai->config.rates,0,sizeof(ai->config.rates));
- for( i = 0; i < 8 && rates[i]; i++ ) {
- ai->config.rates[i] = rates[i];
- }
- }
- if ( basic_rate > 0 ) {
- for( i = 0; i < 8; i++ ) {
- if ( ai->config.rates[i] == basic_rate ||
- !ai->config.rates ) {
- ai->config.rates[i] = basic_rate | 0x80;
- break;
- }
- }
- }
- set_bit (FLAG_COMMIT, &ai->flags);
- }
-
- /* Setup the SSIDs if present */
- if ( ssids[0] ) {
- int i;
- for( i = 0; i < 3 && ssids[i]; i++ ) {
- size_t len = strlen(ssids[i]);
- if (len > 32)
- len = 32;
- mySsid.ssids[i].len = cpu_to_le16(len);
- memcpy(mySsid.ssids[i].ssid, ssids[i], len);
- }
- mySsid.len = cpu_to_le16(sizeof(mySsid));
- }
-
- status = writeConfigRid(ai, lock);
- if ( status != SUCCESS ) return ERROR;
-
- /* Set up the SSID list */
- if ( ssids[0] ) {
- status = writeSsidRid(ai, &mySsid, lock);
- if ( status != SUCCESS ) return ERROR;
- }
-
- status = enable_MAC(ai, lock);
- if (status != SUCCESS)
- return ERROR;
-
- /* Grab the initial wep key, we gotta save it for auto_wep */
- rc = readWepKeyRid(ai, &wkr, 1, lock);
- if (rc == SUCCESS) do {
- lastindex = wkr.kindex;
- if (wkr.kindex == cpu_to_le16(0xffff)) {
- ai->defindex = wkr.mac[0];
- }
- rc = readWepKeyRid(ai, &wkr, 0, lock);
- } while(lastindex != wkr.kindex);
-
- try_auto_wep(ai);
-
- return SUCCESS;
-}
-
-static u16 issuecommand(struct airo_info *ai, Cmd *pCmd, Resp *pRsp) {
- // I'm really paranoid about letting it run forever!
- int max_tries = 600000;
-
- if (IN4500(ai, EVSTAT) & EV_CMD)
- OUT4500(ai, EVACK, EV_CMD);
-
- OUT4500(ai, PARAM0, pCmd->parm0);
- OUT4500(ai, PARAM1, pCmd->parm1);
- OUT4500(ai, PARAM2, pCmd->parm2);
- OUT4500(ai, COMMAND, pCmd->cmd);
-
- while (max_tries-- && (IN4500(ai, EVSTAT) & EV_CMD) == 0) {
- if ((IN4500(ai, COMMAND)) == pCmd->cmd)
- // PC4500 didn't notice command, try again
- OUT4500(ai, COMMAND, pCmd->cmd);
- if (!in_atomic() && (max_tries & 255) == 0)
- schedule();
- }
-
- if ( max_tries == -1 ) {
- airo_print_err(ai->dev->name,
- "Max tries exceeded when issueing command");
- if (IN4500(ai, COMMAND) & COMMAND_BUSY)
- OUT4500(ai, EVACK, EV_CLEARCOMMANDBUSY);
- return ERROR;
- }
-
- // command completed
- pRsp->status = IN4500(ai, STATUS);
- pRsp->rsp0 = IN4500(ai, RESP0);
- pRsp->rsp1 = IN4500(ai, RESP1);
- pRsp->rsp2 = IN4500(ai, RESP2);
- if ((pRsp->status & 0xff00)!=0 && pCmd->cmd != CMD_SOFTRESET)
- airo_print_err(ai->dev->name,
- "cmd:%x status:%x rsp0:%x rsp1:%x rsp2:%x",
- pCmd->cmd, pRsp->status, pRsp->rsp0, pRsp->rsp1,
- pRsp->rsp2);
-
- // clear stuck command busy if necessary
- if (IN4500(ai, COMMAND) & COMMAND_BUSY) {
- OUT4500(ai, EVACK, EV_CLEARCOMMANDBUSY);
- }
- // acknowledge processing the status/response
- OUT4500(ai, EVACK, EV_CMD);
-
- return SUCCESS;
-}
-
-/* Sets up the bap to start exchange data. whichbap should
- * be one of the BAP0 or BAP1 defines. Locks should be held before
- * calling! */
-static int bap_setup(struct airo_info *ai, u16 rid, u16 offset, int whichbap )
-{
- int timeout = 50;
- int max_tries = 3;
-
- OUT4500(ai, SELECT0+whichbap, rid);
- OUT4500(ai, OFFSET0+whichbap, offset);
- while (1) {
- int status = IN4500(ai, OFFSET0+whichbap);
- if (status & BAP_BUSY) {
- /* This isn't really a timeout, but it's kind of
- close */
- if (timeout--) {
- continue;
- }
- } else if ( status & BAP_ERR ) {
- /* invalid rid or offset */
- airo_print_err(ai->dev->name, "BAP error %x %d",
- status, whichbap );
- return ERROR;
- } else if (status & BAP_DONE) { // success
- return SUCCESS;
- }
- if ( !(max_tries--) ) {
- airo_print_err(ai->dev->name,
- "BAP setup error too many retries\n");
- return ERROR;
- }
- // -- PC4500 missed it, try again
- OUT4500(ai, SELECT0+whichbap, rid);
- OUT4500(ai, OFFSET0+whichbap, offset);
- timeout = 50;
- }
-}
-
-/* should only be called by aux_bap_read. This aux function and the
- following use concepts not documented in the developer's guide. I
- got them from a patch given to me by Aironet */
-static u16 aux_setup(struct airo_info *ai, u16 page,
- u16 offset, u16 *len)
-{
- u16 next;
-
- OUT4500(ai, AUXPAGE, page);
- OUT4500(ai, AUXOFF, 0);
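- /* The first AUX word appears to link to the next page, and the
- low byte of the second to give the chunk length; inferred
- from the Aironet patch, not from documentation. */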
- next = IN4500(ai, AUXDATA);
- *len = IN4500(ai, AUXDATA)&0xff;
- if (offset != 4) OUT4500(ai, AUXOFF, offset);
- return next;
-}
-
-/* requires call to bap_setup() first */
-static int aux_bap_read(struct airo_info *ai, __le16 *pu16Dst,
- int bytelen, int whichbap)
-{
- u16 len;
- u16 page;
- u16 offset;
- u16 next;
- int words;
- int i;
- unsigned long flags;
-
- spin_lock_irqsave(&ai->aux_lock, flags);
- page = IN4500(ai, SWS0+whichbap);
- offset = IN4500(ai, SWS2+whichbap);
- next = aux_setup(ai, page, offset, &len);
- words = (bytelen+1)>>1;
-
- for (i=0; i<words;) {
- int count;
- count = (len>>1) < (words-i) ? (len>>1) : (words-i);
- if ( !do8bitIO )
- insw( ai->dev->base_addr+DATA0+whichbap,
- pu16Dst+i,count );
- else
- insb( ai->dev->base_addr+DATA0+whichbap,
- pu16Dst+i, count << 1 );
- i += count;
- if (i<words) {
- next = aux_setup(ai, next, 4, &len);
- }
- }
- spin_unlock_irqrestore(&ai->aux_lock, flags);
- return SUCCESS;
-}
-
-
-/* requires call to bap_setup() first */
-static int fast_bap_read(struct airo_info *ai, __le16 *pu16Dst,
- int bytelen, int whichbap)
-{
- bytelen = (bytelen + 1) & (~1); // round up to even value
- if ( !do8bitIO )
- insw( ai->dev->base_addr+DATA0+whichbap, pu16Dst, bytelen>>1 );
- else
- insb( ai->dev->base_addr+DATA0+whichbap, pu16Dst, bytelen );
- return SUCCESS;
-}
-
-/* requires call to bap_setup() first */
-static int bap_write(struct airo_info *ai, const __le16 *pu16Src,
- int bytelen, int whichbap)
-{
- bytelen = (bytelen + 1) & (~1); // round up to even value
- if ( !do8bitIO )
- outsw( ai->dev->base_addr+DATA0+whichbap,
- pu16Src, bytelen>>1 );
- else
- outsb( ai->dev->base_addr+DATA0+whichbap, pu16Src, bytelen );
- return SUCCESS;
-}
-
-static int PC4500_accessrid(struct airo_info *ai, u16 rid, u16 accmd)
-{
- Cmd cmd; /* for issuing commands */
- Resp rsp; /* response from commands */
- u16 status;
-
- memset(&cmd, 0, sizeof(cmd));
- cmd.cmd = accmd;
- cmd.parm0 = rid;
- status = issuecommand(ai, &cmd, &rsp);
- if (status != 0) return status;
- if ( (rsp.status & 0x7F00) != 0) {
- return (accmd << 8) + (rsp.rsp0 & 0xFF);
- }
- return 0;
-}
-
-/* Note that we are using BAP1, which is also used by transmit, so
- * we must get a lock. */
-static int PC4500_readrid(struct airo_info *ai, u16 rid, void *pBuf, int len, int lock)
-{
- u16 status;
- int rc = SUCCESS;
-
- if (lock) {
- if (down_interruptible(&ai->sem))
- return ERROR;
- }
- if (test_bit(FLAG_MPI,&ai->flags)) {
- Cmd cmd;
- Resp rsp;
-
- memset(&cmd, 0, sizeof(cmd));
- memset(&rsp, 0, sizeof(rsp));
- ai->config_desc.rid_desc.valid = 1;
- ai->config_desc.rid_desc.len = RIDSIZE;
- ai->config_desc.rid_desc.rid = 0;
- ai->config_desc.rid_desc.host_addr = ai->ridbus;
-
- cmd.cmd = CMD_ACCESS;
- cmd.parm0 = rid;
-
- memcpy_toio(ai->config_desc.card_ram_off,
- &ai->config_desc.rid_desc, sizeof(Rid));
-
- rc = issuecommand(ai, &cmd, &rsp);
-
- if (rsp.status & 0x7f00)
- rc = rsp.rsp0;
- if (!rc)
- memcpy(pBuf, ai->config_desc.virtual_host_addr, len);
- goto done;
- } else {
- if ((status = PC4500_accessrid(ai, rid, CMD_ACCESS))!=SUCCESS) {
- rc = status;
- goto done;
- }
- if (bap_setup(ai, rid, 0, BAP1) != SUCCESS) {
- rc = ERROR;
- goto done;
- }
- // read the rid length field
- bap_read(ai, pBuf, 2, BAP1);
- // length for remaining part of rid
- len = min(len, (int)le16_to_cpu(*(__le16*)pBuf)) - 2;
-
- if ( len <= 2 ) {
- airo_print_err(ai->dev->name,
- "Rid %x has a length of %d which is too short",
- (int)rid, (int)len );
- rc = ERROR;
- goto done;
- }
- // read remainder of the rid
- rc = bap_read(ai, ((__le16*)pBuf)+1, len, BAP1);
- }
-done:
- if (lock)
- up(&ai->sem);
- return rc;
-}
-
-/* Note that we are using BAP1, which is also used by transmit, so
- * make sure this isn't called while a transmit is happening */
-static int PC4500_writerid(struct airo_info *ai, u16 rid,
- const void *pBuf, int len, int lock)
-{
- u16 status;
- int rc = SUCCESS;
-
- *(__le16*)pBuf = cpu_to_le16((u16)len);
-
- if (lock) {
- if (down_interruptible(&ai->sem))
- return ERROR;
- }
- if (test_bit(FLAG_MPI,&ai->flags)) {
- Cmd cmd;
- Resp rsp;
-
- if (test_bit(FLAG_ENABLED, &ai->flags) && (RID_WEP_TEMP != rid))
- airo_print_err(ai->dev->name,
- "%s: MAC should be disabled (rid=%04x)",
- __func__, rid);
- memset(&cmd, 0, sizeof(cmd));
- memset(&rsp, 0, sizeof(rsp));
-
- ai->config_desc.rid_desc.valid = 1;
- ai->config_desc.rid_desc.len = *((u16 *)pBuf);
- ai->config_desc.rid_desc.rid = 0;
-
- cmd.cmd = CMD_WRITERID;
- cmd.parm0 = rid;
-
- memcpy_toio(ai->config_desc.card_ram_off,
- &ai->config_desc.rid_desc, sizeof(Rid));
-
- if (len < 4 || len > 2047) {
- airo_print_err(ai->dev->name, "%s: len=%d", __func__, len);
- rc = -1;
- } else {
- memcpy((char *)ai->config_desc.virtual_host_addr,
- pBuf, len);
-
- rc = issuecommand(ai, &cmd, &rsp);
- if ((rc & 0xff00) != 0) {
- airo_print_err(ai->dev->name, "%s: Write rid Error %d",
- __func__, rc);
- airo_print_err(ai->dev->name, "%s: Cmd=%04x",
- __func__, cmd.cmd);
- }
-
- if ((rsp.status & 0x7f00))
- rc = rsp.rsp0;
- }
- } else {
- // --- first access so that we can write the rid data
- if ( (status = PC4500_accessrid(ai, rid, CMD_ACCESS)) != 0) {
- rc = status;
- goto done;
- }
- // --- now write the rid data
- if (bap_setup(ai, rid, 0, BAP1) != SUCCESS) {
- rc = ERROR;
- goto done;
- }
- bap_write(ai, pBuf, len, BAP1);
- // ---now commit the rid data
- rc = PC4500_accessrid(ai, rid, 0x100|CMD_ACCESS);
- }
-done:
- if (lock)
- up(&ai->sem);
- return rc;
-}
-
-/* Allocates a FID to be used for transmitting packets. We only use
- one for now. */
-static u16 transmit_allocate(struct airo_info *ai, int lenPayload, int raw)
-{
- unsigned int loop = 3000;
- Cmd cmd;
- Resp rsp;
- u16 txFid;
- __le16 txControl;
-
- cmd.cmd = CMD_ALLOCATETX;
- cmd.parm0 = lenPayload;
- if (down_interruptible(&ai->sem))
- return ERROR;
- if (issuecommand(ai, &cmd, &rsp) != SUCCESS) {
- txFid = ERROR;
- goto done;
- }
- if ( (rsp.status & 0xFF00) != 0) {
- txFid = ERROR;
- goto done;
- }
- /* wait for the allocate event/indication
- * It makes me kind of nervous that this can just sit here and spin,
- * but in practice it only loops like four times. */
- while (((IN4500(ai, EVSTAT) & EV_ALLOC) == 0) && --loop);
- if (!loop) {
- txFid = ERROR;
- goto done;
- }
-
- // get the allocated fid and acknowledge
- txFid = IN4500(ai, TXALLOCFID);
- OUT4500(ai, EVACK, EV_ALLOC);
-
- /* The CARD is pretty cool since it converts the ethernet packet
- * into 802.11. Also note that we don't release the FID since we
- * will be using the same one over and over again. */
- /* We only have to setup the control once since we are not
- * releasing the fid. */
- if (raw)
- txControl = cpu_to_le16(TXCTL_TXOK | TXCTL_TXEX | TXCTL_802_11
- | TXCTL_ETHERNET | TXCTL_NORELEASE);
- else
- txControl = cpu_to_le16(TXCTL_TXOK | TXCTL_TXEX | TXCTL_802_3
- | TXCTL_ETHERNET | TXCTL_NORELEASE);
- if (bap_setup(ai, txFid, 0x0008, BAP1) != SUCCESS)
- txFid = ERROR;
- else
- bap_write(ai, &txControl, sizeof(txControl), BAP1);
-
-done:
- up(&ai->sem);
-
- return txFid;
-}
-
-/* In general BAP1 is dedicated to transmitting packets. However,
- since we need a BAP when accessing RIDs, we also use BAP1 for that.
- Make sure the BAP1 spinlock is held when this is called. */
-static int transmit_802_3_packet(struct airo_info *ai, int len, char *pPacket)
-{
- __le16 payloadLen;
- Cmd cmd;
- Resp rsp;
- int miclen = 0;
- u16 txFid = len;
- MICBuffer pMic;
-
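- /* The caller packs (payload length << 16) | fid into "len";
- split it back apart here */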
- len >>= 16;
-
- if (len <= ETH_ALEN * 2) {
- airo_print_warn(ai->dev->name, "Short packet %d", len);
- return ERROR;
- }
- len -= ETH_ALEN * 2;
-
- if (test_bit(FLAG_MIC_CAPABLE, &ai->flags) && ai->micstats.enabled &&
- (ntohs(((__be16 *)pPacket)[6]) != 0x888E)) {
- if (encapsulate(ai,(etherHead *)pPacket,&pMic,len) != SUCCESS)
- return ERROR;
- miclen = sizeof(pMic);
- }
- // packet is destination[6], source[6], payload[len-12]
- // write the payload length and dst/src/payload
- if (bap_setup(ai, txFid, 0x0036, BAP1) != SUCCESS) return ERROR;
- /* The hardware addresses aren't counted as part of the payload, so
- * we have to subtract off the 12 address bytes */
- payloadLen = cpu_to_le16(len + miclen);
- bap_write(ai, &payloadLen, sizeof(payloadLen),BAP1);
- bap_write(ai, (__le16*)pPacket, sizeof(etherHead), BAP1);
- if (miclen)
- bap_write(ai, (__le16*)&pMic, miclen, BAP1);
- bap_write(ai, (__le16*)(pPacket + sizeof(etherHead)), len, BAP1);
- // issue the transmit command
- memset( &cmd, 0, sizeof( cmd ) );
- cmd.cmd = CMD_TRANSMIT;
- cmd.parm0 = txFid;
- if (issuecommand(ai, &cmd, &rsp) != SUCCESS) return ERROR;
- if ( (rsp.status & 0xFF00) != 0) return ERROR;
- return SUCCESS;
-}
-
-static int transmit_802_11_packet(struct airo_info *ai, int len, char *pPacket)
-{
- __le16 fc, payloadLen;
- Cmd cmd;
- Resp rsp;
- int hdrlen;
- static u8 tail[(30-10) + 2 + 6] = {[30-10] = 6};
- /* padding of header to full size + le16 gaplen (6) + gaplen bytes */
- u16 txFid = len;
- len >>= 16;
-
- fc = *(__le16*)pPacket;
- hdrlen = header_len(fc);
-
- if (len < hdrlen) {
- airo_print_warn(ai->dev->name, "Short packet %d", len);
- return ERROR;
- }
-
- /* packet is 802.11 header + payload
- * write the payload length and dst/src/payload */
- if (bap_setup(ai, txFid, 6, BAP1) != SUCCESS) return ERROR;
- /* The 802.11 header isn't counted as part of the payload, so
- * we have to subtract the header bytes off */
- payloadLen = cpu_to_le16(len-hdrlen);
- bap_write(ai, &payloadLen, sizeof(payloadLen),BAP1);
- if (bap_setup(ai, txFid, 0x0014, BAP1) != SUCCESS) return ERROR;
- bap_write(ai, (__le16 *)pPacket, hdrlen, BAP1);
- bap_write(ai, (__le16 *)(tail + (hdrlen - 10)), 38 - hdrlen, BAP1);
-
- bap_write(ai, (__le16 *)(pPacket + hdrlen), len - hdrlen, BAP1);
- // issue the transmit command
- memset( &cmd, 0, sizeof( cmd ) );
- cmd.cmd = CMD_TRANSMIT;
- cmd.parm0 = txFid;
- if (issuecommand(ai, &cmd, &rsp) != SUCCESS) return ERROR;
- if ( (rsp.status & 0xFF00) != 0) return ERROR;
- return SUCCESS;
-}
-
-/*
- * These are the proc_fs routines. They are a bit messier than I would
- * like! Feel free to clean them up!
- */
-
-static ssize_t proc_read( struct file *file,
- char __user *buffer,
- size_t len,
- loff_t *offset);
-
-static ssize_t proc_write( struct file *file,
- const char __user *buffer,
- size_t len,
- loff_t *offset );
-static int proc_close( struct inode *inode, struct file *file );
-
-static int proc_stats_open( struct inode *inode, struct file *file );
-static int proc_statsdelta_open( struct inode *inode, struct file *file );
-static int proc_status_open( struct inode *inode, struct file *file );
-static int proc_SSID_open( struct inode *inode, struct file *file );
-static int proc_APList_open( struct inode *inode, struct file *file );
-static int proc_BSSList_open( struct inode *inode, struct file *file );
-static int proc_config_open( struct inode *inode, struct file *file );
-static int proc_wepkey_open( struct inode *inode, struct file *file );
-
-static const struct file_operations proc_statsdelta_ops = {
- .owner = THIS_MODULE,
- .read = proc_read,
- .open = proc_statsdelta_open,
- .release = proc_close
-};
-
-static const struct file_operations proc_stats_ops = {
- .owner = THIS_MODULE,
- .read = proc_read,
- .open = proc_stats_open,
- .release = proc_close
-};
-
-static const struct file_operations proc_status_ops = {
- .owner = THIS_MODULE,
- .read = proc_read,
- .open = proc_status_open,
- .release = proc_close
-};
-
-static const struct file_operations proc_SSID_ops = {
- .owner = THIS_MODULE,
- .read = proc_read,
- .write = proc_write,
- .open = proc_SSID_open,
- .release = proc_close
-};
-
-static const struct file_operations proc_BSSList_ops = {
- .owner = THIS_MODULE,
- .read = proc_read,
- .write = proc_write,
- .open = proc_BSSList_open,
- .release = proc_close
-};
-
-static const struct file_operations proc_APList_ops = {
- .owner = THIS_MODULE,
- .read = proc_read,
- .write = proc_write,
- .open = proc_APList_open,
- .release = proc_close
-};
-
-static const struct file_operations proc_config_ops = {
- .owner = THIS_MODULE,
- .read = proc_read,
- .write = proc_write,
- .open = proc_config_open,
- .release = proc_close
-};
-
-static const struct file_operations proc_wepkey_ops = {
- .owner = THIS_MODULE,
- .read = proc_read,
- .write = proc_write,
- .open = proc_wepkey_open,
- .release = proc_close
-};
-
-static struct proc_dir_entry *airo_entry;
-
-struct proc_data {
- int release_buffer;
- int readlen;
- char *rbuffer;
- int writelen;
- int maxwritelen;
- char *wbuffer;
- void (*on_close) (struct inode *, struct file *);
-};
-
-static int setup_proc_entry( struct net_device *dev,
- struct airo_info *apriv ) {
- struct proc_dir_entry *entry;
- /* First setup the device directory */
- strcpy(apriv->proc_name,dev->name);
- apriv->proc_entry = create_proc_entry(apriv->proc_name,
- S_IFDIR|airo_perm,
- airo_entry);
- if (!apriv->proc_entry)
- goto fail;
- apriv->proc_entry->uid = proc_uid;
- apriv->proc_entry->gid = proc_gid;
-
- /* Setup the StatsDelta */
- entry = proc_create_data("StatsDelta",
- S_IFREG | (S_IRUGO&proc_perm),
- apriv->proc_entry, &proc_statsdelta_ops, dev);
- if (!entry)
- goto fail_stats_delta;
- entry->uid = proc_uid;
- entry->gid = proc_gid;
-
- /* Setup the Stats */
- entry = proc_create_data("Stats",
- S_IFREG | (S_IRUGO&proc_perm),
- apriv->proc_entry, &proc_stats_ops, dev);
- if (!entry)
- goto fail_stats;
- entry->uid = proc_uid;
- entry->gid = proc_gid;
-
- /* Setup the Status */
- entry = proc_create_data("Status",
- S_IFREG | (S_IRUGO&proc_perm),
- apriv->proc_entry, &proc_status_ops, dev);
- if (!entry)
- goto fail_status;
- entry->uid = proc_uid;
- entry->gid = proc_gid;
-
- /* Setup the Config */
- entry = proc_create_data("Config",
- S_IFREG | proc_perm,
- apriv->proc_entry, &proc_config_ops, dev);
- if (!entry)
- goto fail_config;
- entry->uid = proc_uid;
- entry->gid = proc_gid;
-
- /* Setup the SSID */
- entry = proc_create_data("SSID",
- S_IFREG | proc_perm,
- apriv->proc_entry, &proc_SSID_ops, dev);
- if (!entry)
- goto fail_ssid;
- entry->uid = proc_uid;
- entry->gid = proc_gid;
-
- /* Setup the APList */
- entry = proc_create_data("APList",
- S_IFREG | proc_perm,
- apriv->proc_entry, &proc_APList_ops, dev);
- if (!entry)
- goto fail_aplist;
- entry->uid = proc_uid;
- entry->gid = proc_gid;
-
- /* Setup the BSSList */
- entry = proc_create_data("BSSList",
- S_IFREG | proc_perm,
- apriv->proc_entry, &proc_BSSList_ops, dev);
- if (!entry)
- goto fail_bsslist;
- entry->uid = proc_uid;
- entry->gid = proc_gid;
-
- /* Setup the WepKey */
- entry = proc_create_data("WepKey",
- S_IFREG | proc_perm,
- apriv->proc_entry, &proc_wepkey_ops, dev);
- if (!entry)
- goto fail_wepkey;
- entry->uid = proc_uid;
- entry->gid = proc_gid;
-
- return 0;
-
-fail_wepkey:
- remove_proc_entry("BSSList", apriv->proc_entry);
-fail_bsslist:
- remove_proc_entry("APList", apriv->proc_entry);
-fail_aplist:
- remove_proc_entry("SSID", apriv->proc_entry);
-fail_ssid:
- remove_proc_entry("Config", apriv->proc_entry);
-fail_config:
- remove_proc_entry("Status", apriv->proc_entry);
-fail_status:
- remove_proc_entry("Stats", apriv->proc_entry);
-fail_stats:
- remove_proc_entry("StatsDelta", apriv->proc_entry);
-fail_stats_delta:
- remove_proc_entry(apriv->proc_name, airo_entry);
-fail:
- return -ENOMEM;
-}
-
-static int takedown_proc_entry( struct net_device *dev,
- struct airo_info *apriv ) {
- if ( !apriv->proc_entry->namelen ) return 0;
- remove_proc_entry("Stats",apriv->proc_entry);
- remove_proc_entry("StatsDelta",apriv->proc_entry);
- remove_proc_entry("Status",apriv->proc_entry);
- remove_proc_entry("Config",apriv->proc_entry);
- remove_proc_entry("SSID",apriv->proc_entry);
- remove_proc_entry("APList",apriv->proc_entry);
- remove_proc_entry("BSSList",apriv->proc_entry);
- remove_proc_entry("WepKey",apriv->proc_entry);
- remove_proc_entry(apriv->proc_name,airo_entry);
- return 0;
-}
-
-/*
- * What we want from the proc_fs is to be able to efficiently read
- * and write the configuration. To do this, we want to read the
- * configuration when the file is opened and write it when the file is
- * closed. So basically we allocate a read buffer at open and fill it
- * with data, and allocate a write buffer that gets parsed at close.
- */
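-
-/*
- * Illustrative only (kept out of the build): a userspace view of the
- * interface described above. The path assumes the "driver/aironet"
- * proc root used at module init and a device named eth0.
- */
-#if 0
-#include <stdio.h>
-
-int main(void)
-{
- char line[256];
- FILE *f = fopen("/proc/driver/aironet/eth0/Status", "r");
-
- if (!f)
- return 1;
- while (fgets(line, sizeof(line), f))
- fputs(line, stdout); /* e.g. "Status: SYN LNK ..." */
- fclose(f);
- return 0;
-}
-#endif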
-
-/*
- * The read routine is generic; it relies on the preallocated rbuffer
- * to supply the data.
- */
-static ssize_t proc_read( struct file *file,
- char __user *buffer,
- size_t len,
- loff_t *offset )
-{
- struct proc_data *priv = file->private_data;
-
- if (!priv->rbuffer)
- return -EINVAL;
-
- return simple_read_from_buffer(buffer, len, offset, priv->rbuffer,
- priv->readlen);
-}
-
-/*
- * The write routine is generic; it fills in the preallocated wbuffer
- * with the data supplied by the user.
- */
-static ssize_t proc_write( struct file *file,
- const char __user *buffer,
- size_t len,
- loff_t *offset )
-{
- loff_t pos = *offset;
- struct proc_data *priv = (struct proc_data*)file->private_data;
-
- if (!priv->wbuffer)
- return -EINVAL;
-
- if (pos < 0)
- return -EINVAL;
- if (pos >= priv->maxwritelen)
- return 0;
- if (len > priv->maxwritelen - pos)
- len = priv->maxwritelen - pos;
- if (copy_from_user(priv->wbuffer + pos, buffer, len))
- return -EFAULT;
- if ( pos + len > priv->writelen )
- priv->writelen = pos + len;
- *offset = pos + len;
- return len;
-}
-
-static int proc_status_open(struct inode *inode, struct file *file)
-{
- struct proc_data *data;
- struct proc_dir_entry *dp = PDE(inode);
- struct net_device *dev = dp->data;
- struct airo_info *apriv = dev->ml_priv;
- CapabilityRid cap_rid;
- StatusRid status_rid;
- u16 mode;
- int i;
-
- if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
- return -ENOMEM;
- data = (struct proc_data *)file->private_data;
- if ((data->rbuffer = kmalloc( 2048, GFP_KERNEL )) == NULL) {
- kfree (file->private_data);
- return -ENOMEM;
- }
-
- readStatusRid(apriv, &status_rid, 1);
- readCapabilityRid(apriv, &cap_rid, 1);
-
- mode = le16_to_cpu(status_rid.mode);
-
- i = sprintf(data->rbuffer, "Status: %s%s%s%s%s%s%s%s%s\n",
- mode & 1 ? "CFG ": "",
- mode & 2 ? "ACT ": "",
- mode & 0x10 ? "SYN ": "",
- mode & 0x20 ? "LNK ": "",
- mode & 0x40 ? "LEAP ": "",
- mode & 0x80 ? "PRIV ": "",
- mode & 0x100 ? "KEY ": "",
- mode & 0x200 ? "WEP ": "",
- mode & 0x8000 ? "ERR ": "");
- sprintf( data->rbuffer+i, "Mode: %x\n"
- "Signal Strength: %d\n"
- "Signal Quality: %d\n"
- "SSID: %-.*s\n"
- "AP: %-.16s\n"
- "Freq: %d\n"
- "BitRate: %dmbs\n"
- "Driver Version: %s\n"
- "Device: %s\nManufacturer: %s\nFirmware Version: %s\n"
- "Radio type: %x\nCountry: %x\nHardware Version: %x\n"
- "Software Version: %x\nSoftware Subversion: %x\n"
- "Boot block version: %x\n",
- le16_to_cpu(status_rid.mode),
- le16_to_cpu(status_rid.normalizedSignalStrength),
- le16_to_cpu(status_rid.signalQuality),
- le16_to_cpu(status_rid.SSIDlen),
- status_rid.SSID,
- status_rid.apName,
- le16_to_cpu(status_rid.channel),
- le16_to_cpu(status_rid.currentXmitRate) / 2,
- version,
- cap_rid.prodName,
- cap_rid.manName,
- cap_rid.prodVer,
- le16_to_cpu(cap_rid.radioType),
- le16_to_cpu(cap_rid.country),
- le16_to_cpu(cap_rid.hardVer),
- le16_to_cpu(cap_rid.softVer),
- le16_to_cpu(cap_rid.softSubVer),
- le16_to_cpu(cap_rid.bootBlockVer));
- data->readlen = strlen( data->rbuffer );
- return 0;
-}
-
-static int proc_stats_rid_open(struct inode*, struct file*, u16);
-static int proc_statsdelta_open( struct inode *inode,
- struct file *file ) {
- if (file->f_mode&FMODE_WRITE) {
- return proc_stats_rid_open(inode, file, RID_STATSDELTACLEAR);
- }
- return proc_stats_rid_open(inode, file, RID_STATSDELTA);
-}
-
-static int proc_stats_open( struct inode *inode, struct file *file ) {
- return proc_stats_rid_open(inode, file, RID_STATS);
-}
-
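-/*
- * Read the stats RID selected by @rid and render it into rbuffer as
- * one "label: value" line per counter, stopping at the end of the
- * RID or of the label table, whichever comes first.
- */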
-static int proc_stats_rid_open( struct inode *inode,
- struct file *file,
- u16 rid )
-{
- struct proc_data *data;
- struct proc_dir_entry *dp = PDE(inode);
- struct net_device *dev = dp->data;
- struct airo_info *apriv = dev->ml_priv;
- StatsRid stats;
- int i, j;
- __le32 *vals = stats.vals;
- int len;
-
- if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
- return -ENOMEM;
- data = (struct proc_data *)file->private_data;
- if ((data->rbuffer = kmalloc( 4096, GFP_KERNEL )) == NULL) {
- kfree (file->private_data);
- return -ENOMEM;
- }
-
- readStatsRid(apriv, &stats, rid, 1);
- len = le16_to_cpu(stats.len);
-
- j = 0;
- for(i=0; statsLabels[i]!=(char *)-1 && i*4<len; i++) {
- if (!statsLabels[i]) continue;
- if (j+strlen(statsLabels[i])+16>4096) {
- airo_print_warn(apriv->dev->name,
- "Potentially disasterous buffer overflow averted!");
- break;
- }
- j+=sprintf(data->rbuffer+j, "%s: %u\n", statsLabels[i],
- le32_to_cpu(vals[i]));
- }
- if (i*4 >= len) {
- airo_print_warn(apriv->dev->name, "Got a short rid");
- }
- data->readlen = j;
- return 0;
-}
-
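-/*
- * Parse an unsigned decimal number from @buffer starting at index
- * *@start and stopping at index @limit; *@start is advanced past
- * the digits consumed. Returns the value, or -1 if no digit was
- * found.
- */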
-static int get_dec_u16( char *buffer, int *start, int limit ) {
- u16 value;
- int valid = 0;
- for( value = 0; *start < limit &&
- buffer[*start] >= '0' &&
- buffer[*start] <= '9'; (*start)++ ) {
- valid = 1;
- value *= 10;
- value += buffer[*start] - '0';
- }
- if ( !valid ) return -1;
- return value;
-}
-
-static int airo_config_commit(struct net_device *dev,
- struct iw_request_info *info, void *zwrq,
- char *extra);
-
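-/* True when the receive mode is one of the monitor (RFMON) modes. */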
-static inline int sniffing_mode(struct airo_info *ai)
-{
- return le16_to_cpu(ai->config.rmode & RXMODE_MASK) >=
- le16_to_cpu(RXMODE_RFMON);
-}
-
-static void proc_config_on_close(struct inode *inode, struct file *file)
-{
- struct proc_data *data = file->private_data;
- struct proc_dir_entry *dp = PDE(inode);
- struct net_device *dev = dp->data;
- struct airo_info *ai = dev->ml_priv;
- char *line;
-
- if ( !data->writelen ) return;
-
- readConfigRid(ai, 1);
- set_bit (FLAG_COMMIT, &ai->flags);
-
- line = data->wbuffer;
- while( line[0] ) {
-/*** Mode processing */
- if ( !strncmp( line, "Mode: ", 6 ) ) {
- line += 6;
- if (sniffing_mode(ai))
- set_bit (FLAG_RESET, &ai->flags);
- ai->config.rmode &= ~RXMODE_FULL_MASK;
- clear_bit (FLAG_802_11, &ai->flags);
- ai->config.opmode &= ~MODE_CFG_MASK;
- ai->config.scanMode = SCANMODE_ACTIVE;
- if ( line[0] == 'a' ) {
- ai->config.opmode |= MODE_STA_IBSS;
- } else {
- ai->config.opmode |= MODE_STA_ESS;
- if ( line[0] == 'r' ) {
- ai->config.rmode |= RXMODE_RFMON | RXMODE_DISABLE_802_3_HEADER;
- ai->config.scanMode = SCANMODE_PASSIVE;
- set_bit (FLAG_802_11, &ai->flags);
- } else if ( line[0] == 'y' ) {
- ai->config.rmode |= RXMODE_RFMON_ANYBSS | RXMODE_DISABLE_802_3_HEADER;
- ai->config.scanMode = SCANMODE_PASSIVE;
- set_bit (FLAG_802_11, &ai->flags);
- } else if ( line[0] == 'l' )
- ai->config.rmode |= RXMODE_LANMON;
- }
- set_bit (FLAG_COMMIT, &ai->flags);
- }
-
-/*** Radio status */
- else if (!strncmp(line,"Radio: ", 7)) {
- line += 7;
- if (!strncmp(line,"off",3)) {
- set_bit (FLAG_RADIO_OFF, &ai->flags);
- } else {
- clear_bit (FLAG_RADIO_OFF, &ai->flags);
- }
- }
-/*** NodeName processing */
- else if ( !strncmp( line, "NodeName: ", 10 ) ) {
- int j;
-
- line += 10;
- memset( ai->config.nodeName, 0, 16 );
-/* Do the name, assume a space between the mode and node name */
- for( j = 0; j < 16 && line[j] != '\n'; j++ ) {
- ai->config.nodeName[j] = line[j];
- }
- set_bit (FLAG_COMMIT, &ai->flags);
- }
-
-/*** PowerMode processing */
- else if ( !strncmp( line, "PowerMode: ", 11 ) ) {
- line += 11;
- if ( !strncmp( line, "PSPCAM", 6 ) ) {
- ai->config.powerSaveMode = POWERSAVE_PSPCAM;
- set_bit (FLAG_COMMIT, &ai->flags);
- } else if ( !strncmp( line, "PSP", 3 ) ) {
- ai->config.powerSaveMode = POWERSAVE_PSP;
- set_bit (FLAG_COMMIT, &ai->flags);
- } else {
- ai->config.powerSaveMode = POWERSAVE_CAM;
- set_bit (FLAG_COMMIT, &ai->flags);
- }
- } else if ( !strncmp( line, "DataRates: ", 11 ) ) {
- int v, i = 0, k = 0; /* i is index into line,
- k is index to rates */
-
- line += 11;
- while((v = get_dec_u16(line, &i, 3))!=-1) {
- ai->config.rates[k++] = (u8)v;
- line += i + 1;
- i = 0;
- }
- set_bit (FLAG_COMMIT, &ai->flags);
- } else if ( !strncmp( line, "Channel: ", 9 ) ) {
- int v, i = 0;
- line += 9;
- v = get_dec_u16(line, &i, i+3);
- if ( v != -1 ) {
- ai->config.channelSet = cpu_to_le16(v);
- set_bit (FLAG_COMMIT, &ai->flags);
- }
- } else if ( !strncmp( line, "XmitPower: ", 11 ) ) {
- int v, i = 0;
- line += 11;
- v = get_dec_u16(line, &i, i+3);
- if ( v != -1 ) {
- ai->config.txPower = cpu_to_le16(v);
- set_bit (FLAG_COMMIT, &ai->flags);
- }
- } else if ( !strncmp( line, "WEP: ", 5 ) ) {
- line += 5;
- switch( line[0] ) {
- case 's':
- ai->config.authType = AUTH_SHAREDKEY;
- break;
- case 'e':
- ai->config.authType = AUTH_ENCRYPT;
- break;
- default:
- ai->config.authType = AUTH_OPEN;
- break;
- }
- set_bit (FLAG_COMMIT, &ai->flags);
- } else if ( !strncmp( line, "LongRetryLimit: ", 16 ) ) {
- int v, i = 0;
-
- line += 16;
- v = get_dec_u16(line, &i, 3);
- v = (v<0) ? 0 : ((v>255) ? 255 : v);
- ai->config.longRetryLimit = cpu_to_le16(v);
- set_bit (FLAG_COMMIT, &ai->flags);
- } else if ( !strncmp( line, "ShortRetryLimit: ", 17 ) ) {
- int v, i = 0;
-
- line += 17;
- v = get_dec_u16(line, &i, 3);
- v = (v<0) ? 0 : ((v>255) ? 255 : v);
- ai->config.shortRetryLimit = cpu_to_le16(v);
- set_bit (FLAG_COMMIT, &ai->flags);
- } else if ( !strncmp( line, "RTSThreshold: ", 14 ) ) {
- int v, i = 0;
-
- line += 14;
- v = get_dec_u16(line, &i, 4);
- v = (v<0) ? 0 : ((v>AIRO_DEF_MTU) ? AIRO_DEF_MTU : v);
- ai->config.rtsThres = cpu_to_le16(v);
- set_bit (FLAG_COMMIT, &ai->flags);
- } else if ( !strncmp( line, "TXMSDULifetime: ", 16 ) ) {
- int v, i = 0;
-
- line += 16;
- v = get_dec_u16(line, &i, 5);
- v = (v<0) ? 0 : v;
- ai->config.txLifetime = cpu_to_le16(v);
- set_bit (FLAG_COMMIT, &ai->flags);
- } else if ( !strncmp( line, "RXMSDULifetime: ", 16 ) ) {
- int v, i = 0;
-
- line += 16;
- v = get_dec_u16(line, &i, 5);
- v = (v<0) ? 0 : v;
- ai->config.rxLifetime = cpu_to_le16(v);
- set_bit (FLAG_COMMIT, &ai->flags);
- } else if ( !strncmp( line, "TXDiversity: ", 13 ) ) {
- ai->config.txDiversity =
- (line[13]=='l') ? 1 :
- ((line[13]=='r')? 2: 3);
- set_bit (FLAG_COMMIT, &ai->flags);
- } else if ( !strncmp( line, "RXDiversity: ", 13 ) ) {
- ai->config.rxDiversity =
- (line[13]=='l') ? 1 :
- ((line[13]=='r')? 2: 3);
- set_bit (FLAG_COMMIT, &ai->flags);
- } else if ( !strncmp( line, "FragThreshold: ", 15 ) ) {
- int v, i = 0;
-
- line += 15;
- v = get_dec_u16(line, &i, 4);
- v = (v<256) ? 256 : ((v>AIRO_DEF_MTU) ? AIRO_DEF_MTU : v);
- v = v & 0xfffe; /* Make sure it's even */
- ai->config.fragThresh = cpu_to_le16(v);
- set_bit (FLAG_COMMIT, &ai->flags);
- } else if (!strncmp(line, "Modulation: ", 12)) {
- line += 12;
- switch(*line) {
- case 'd': ai->config.modulation=MOD_DEFAULT; set_bit(FLAG_COMMIT, &ai->flags); break;
- case 'c': ai->config.modulation=MOD_CCK; set_bit(FLAG_COMMIT, &ai->flags); break;
- case 'm': ai->config.modulation=MOD_MOK; set_bit(FLAG_COMMIT, &ai->flags); break;
- default: airo_print_warn(ai->dev->name, "Unknown modulation");
- }
- } else if (!strncmp(line, "Preamble: ", 10)) {
- line += 10;
- switch(*line) {
- case 'a': ai->config.preamble=PREAMBLE_AUTO; set_bit(FLAG_COMMIT, &ai->flags); break;
- case 'l': ai->config.preamble=PREAMBLE_LONG; set_bit(FLAG_COMMIT, &ai->flags); break;
- case 's': ai->config.preamble=PREAMBLE_SHORT; set_bit(FLAG_COMMIT, &ai->flags); break;
- default: airo_print_warn(ai->dev->name, "Unknown preamble");
- }
- } else {
- airo_print_warn(ai->dev->name, "Couldn't figure out %s", line);
- }
- while( line[0] && line[0] != '\n' ) line++;
- if ( line[0] ) line++;
- }
- airo_config_commit(dev, NULL, NULL, NULL);
-}
-
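-/*
- * Map a receive mode to the string shown on the proc "Mode:" line.
- * The first letter is what proc_config_on_close() keys on when the
- * line is written back.
- */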
-static char *get_rmode(__le16 mode)
-{
- switch(mode & RXMODE_MASK) {
- case RXMODE_RFMON: return "rfmon";
- case RXMODE_RFMON_ANYBSS: return "yna (any) bss rfmon";
- case RXMODE_LANMON: return "lanmon";
- }
- return "ESS";
-}
-
-static int proc_config_open(struct inode *inode, struct file *file)
-{
- struct proc_data *data;
- struct proc_dir_entry *dp = PDE(inode);
- struct net_device *dev = dp->data;
- struct airo_info *ai = dev->ml_priv;
- int i;
- __le16 mode;
-
- if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
- return -ENOMEM;
- data = (struct proc_data *)file->private_data;
- if ((data->rbuffer = kmalloc( 2048, GFP_KERNEL )) == NULL) {
- kfree (file->private_data);
- return -ENOMEM;
- }
- if ((data->wbuffer = kzalloc( 2048, GFP_KERNEL )) == NULL) {
- kfree (data->rbuffer);
- kfree (file->private_data);
- return -ENOMEM;
- }
- data->maxwritelen = 2048;
- data->on_close = proc_config_on_close;
-
- readConfigRid(ai, 1);
-
- mode = ai->config.opmode & MODE_CFG_MASK;
- i = sprintf( data->rbuffer,
- "Mode: %s\n"
- "Radio: %s\n"
- "NodeName: %-16s\n"
- "PowerMode: %s\n"
- "DataRates: %d %d %d %d %d %d %d %d\n"
- "Channel: %d\n"
- "XmitPower: %d\n",
- mode == MODE_STA_IBSS ? "adhoc" :
- mode == MODE_STA_ESS ? get_rmode(ai->config.rmode):
- mode == MODE_AP ? "AP" :
- mode == MODE_AP_RPTR ? "AP RPTR" : "Error",
- test_bit(FLAG_RADIO_OFF, &ai->flags) ? "off" : "on",
- ai->config.nodeName,
- ai->config.powerSaveMode == POWERSAVE_CAM ? "CAM" :
- ai->config.powerSaveMode == POWERSAVE_PSP ? "PSP" :
- ai->config.powerSaveMode == POWERSAVE_PSPCAM ? "PSPCAM" :
- "Error",
- (int)ai->config.rates[0],
- (int)ai->config.rates[1],
- (int)ai->config.rates[2],
- (int)ai->config.rates[3],
- (int)ai->config.rates[4],
- (int)ai->config.rates[5],
- (int)ai->config.rates[6],
- (int)ai->config.rates[7],
- le16_to_cpu(ai->config.channelSet),
- le16_to_cpu(ai->config.txPower)
- );
- sprintf( data->rbuffer + i,
- "LongRetryLimit: %d\n"
- "ShortRetryLimit: %d\n"
- "RTSThreshold: %d\n"
- "TXMSDULifetime: %d\n"
- "RXMSDULifetime: %d\n"
- "TXDiversity: %s\n"
- "RXDiversity: %s\n"
- "FragThreshold: %d\n"
- "WEP: %s\n"
- "Modulation: %s\n"
- "Preamble: %s\n",
- le16_to_cpu(ai->config.longRetryLimit),
- le16_to_cpu(ai->config.shortRetryLimit),
- le16_to_cpu(ai->config.rtsThres),
- le16_to_cpu(ai->config.txLifetime),
- le16_to_cpu(ai->config.rxLifetime),
- ai->config.txDiversity == 1 ? "left" :
- ai->config.txDiversity == 2 ? "right" : "both",
- ai->config.rxDiversity == 1 ? "left" :
- ai->config.rxDiversity == 2 ? "right" : "both",
- le16_to_cpu(ai->config.fragThresh),
- ai->config.authType == AUTH_ENCRYPT ? "encrypt" :
- ai->config.authType == AUTH_SHAREDKEY ? "shared" : "open",
- ai->config.modulation == MOD_DEFAULT ? "default" :
- ai->config.modulation == MOD_CCK ? "cck" :
- ai->config.modulation == MOD_MOK ? "mok" : "error",
- ai->config.preamble == PREAMBLE_AUTO ? "auto" :
- ai->config.preamble == PREAMBLE_LONG ? "long" :
- ai->config.preamble == PREAMBLE_SHORT ? "short" : "error"
- );
- data->readlen = strlen( data->rbuffer );
- return 0;
-}
-
-static void proc_SSID_on_close(struct inode *inode, struct file *file)
-{
- struct proc_data *data = (struct proc_data *)file->private_data;
- struct proc_dir_entry *dp = PDE(inode);
- struct net_device *dev = dp->data;
- struct airo_info *ai = dev->ml_priv;
- SsidRid SSID_rid;
- int i;
- char *p = data->wbuffer;
- char *end = p + data->writelen;
-
- if (!data->writelen)
- return;
-
- *end = '\n'; /* sentinel; we have space for it */
-
- memset(&SSID_rid, 0, sizeof(SSID_rid));
-
- for (i = 0; i < 3 && p < end; i++) {
- int j = 0;
- /* copy up to 32 characters from this line */
- while (*p != '\n' && j < 32)
- SSID_rid.ssids[i].ssid[j++] = *p++;
- if (j == 0)
- break;
- SSID_rid.ssids[i].len = cpu_to_le16(j);
- /* skip to the beginning of the next line */
- while (*p++ != '\n')
- ;
- }
- if (i)
- SSID_rid.len = cpu_to_le16(sizeof(SSID_rid));
- disable_MAC(ai, 1);
- writeSsidRid(ai, &SSID_rid, 1);
- enable_MAC(ai, 1);
-}
-
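-/* Convert an ASCII hex digit to its value; anything else maps to 0. */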
-static inline u8 hexVal(char c) {
- if (c>='0' && c<='9') return c - '0';
- if (c>='a' && c<='f') return c - 'a' + 10;
- if (c>='A' && c<='F') return c - 'A' + 10;
- return 0;
-}
-
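-/*
- * Parse the written buffer as up to four MAC addresses, one per
- * 18-character "xx:xx:xx:xx:xx:xx" line, and program them as the
- * preferred AP list.
- */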
-static void proc_APList_on_close( struct inode *inode, struct file *file ) {
- struct proc_data *data = (struct proc_data *)file->private_data;
- struct proc_dir_entry *dp = PDE(inode);
- struct net_device *dev = dp->data;
- struct airo_info *ai = dev->ml_priv;
- APListRid APList_rid;
- int i;
-
- if ( !data->writelen ) return;
-
- memset( &APList_rid, 0, sizeof(APList_rid) );
- APList_rid.len = cpu_to_le16(sizeof(APList_rid));
-
- for( i = 0; i < 4 && data->writelen >= (i+1)*6*3; i++ ) {
- int j;
- for( j = 0; j < 6*3 && data->wbuffer[j+i*6*3]; j++ ) {
- switch(j%3) {
- case 0:
- APList_rid.ap[i][j/3]=
- hexVal(data->wbuffer[j+i*6*3])<<4;
- break;
- case 1:
- APList_rid.ap[i][j/3]|=
- hexVal(data->wbuffer[j+i*6*3]);
- break;
- }
- }
- }
- disable_MAC(ai, 1);
- writeAPListRid(ai, &APList_rid, 1);
- enable_MAC(ai, 1);
-}
-
-/* This function wraps PC4500_writerid with a MAC disable */
-static int do_writerid( struct airo_info *ai, u16 rid, const void *rid_data,
- int len, int dummy ) {
- int rc;
-
- disable_MAC(ai, 1);
- rc = PC4500_writerid(ai, rid, rid_data, len, 1);
- enable_MAC(ai, 1);
- return rc;
-}
-
-/* Returns the WEP key at the specified index, or -1 if that key does
- * not exist. The buffer is assumed to be at least 16 bytes in length.
- */
-static int get_wep_key(struct airo_info *ai, u16 index, char *buf, u16 buflen)
-{
- WepKeyRid wkr;
- int rc;
- __le16 lastindex;
-
- rc = readWepKeyRid(ai, &wkr, 1, 1);
- if (rc != SUCCESS)
- return -1;
- do {
- lastindex = wkr.kindex;
- if (le16_to_cpu(wkr.kindex) == index) {
- int klen = min_t(int, buflen, le16_to_cpu(wkr.klen));
- memcpy(buf, wkr.key, klen);
- return klen;
- }
- rc = readWepKeyRid(ai, &wkr, 0, 1);
- if (rc != SUCCESS)
- return -1;
- } while (lastindex != wkr.kindex);
- return -1;
-}
-
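-/*
- * Returns the index of the current WEP transmit key, which the
- * firmware reports in the special RID entry with kindex == 0xffff
- * (the index itself lives in mac[0]), or -1 on failure.
- */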
-static int get_wep_tx_idx(struct airo_info *ai)
-{
- WepKeyRid wkr;
- int rc;
- __le16 lastindex;
-
- rc = readWepKeyRid(ai, &wkr, 1, 1);
- if (rc != SUCCESS)
- return -1;
- do {
- lastindex = wkr.kindex;
- if (wkr.kindex == cpu_to_le16(0xffff))
- return wkr.mac[0];
- rc = readWepKeyRid(ai, &wkr, 0, 1);
- if (rc != SUCCESS)
- return -1;
- } while (lastindex != wkr.kindex);
- return -1;
-}
-
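-/*
- * Program the WEP key at @index into the card. If @perm, the key is
- * presumably stored persistently by the firmware, and the MAC is
- * disabled around the write.
- */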
-static int set_wep_key(struct airo_info *ai, u16 index, const char *key,
- u16 keylen, int perm, int lock)
-{
- static const unsigned char macaddr[ETH_ALEN] = { 0x01, 0, 0, 0, 0, 0 };
- WepKeyRid wkr;
- int rc;
-
- if (keylen == 0) {
- airo_print_err(ai->dev->name, "%s: key length to set was zero",
- __func__);
- return -1;
- }
-
- memset(&wkr, 0, sizeof(wkr));
- wkr.len = cpu_to_le16(sizeof(wkr));
- wkr.kindex = cpu_to_le16(index);
- wkr.klen = cpu_to_le16(keylen);
- memcpy(wkr.key, key, keylen);
- memcpy(wkr.mac, macaddr, ETH_ALEN);
-
- if (perm) disable_MAC(ai, lock);
- rc = writeWepKeyRid(ai, &wkr, perm, lock);
- if (perm) enable_MAC(ai, lock);
- return rc;
-}
-
-static int set_wep_tx_idx(struct airo_info *ai, u16 index, int perm, int lock)
-{
- WepKeyRid wkr;
- int rc;
-
- memset(&wkr, 0, sizeof(wkr));
- wkr.len = cpu_to_le16(sizeof(wkr));
- wkr.kindex = cpu_to_le16(0xffff);
- wkr.mac[0] = (char)index;
-
- if (perm) {
- ai->defindex = (char)index;
- disable_MAC(ai, lock);
- }
-
- rc = writeWepKeyRid(ai, &wkr, perm, lock);
-
- if (perm)
- enable_MAC(ai, lock);
- return rc;
-}
-
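-/*
- * Accepted input is "<index>\n" to select the transmit key, or
- * "<index> <hex digits>" to program the key at that index, where
- * <index> is a single digit 0-3.
- */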
-static void proc_wepkey_on_close( struct inode *inode, struct file *file ) {
- struct proc_data *data;
- struct proc_dir_entry *dp = PDE(inode);
- struct net_device *dev = dp->data;
- struct airo_info *ai = dev->ml_priv;
- int i, rc;
- char key[16];
- u16 index = 0;
- int j = 0;
-
- memset(key, 0, sizeof(key));
-
- data = (struct proc_data *)file->private_data;
- if ( !data->writelen ) return;
-
- if (data->wbuffer[0] >= '0' && data->wbuffer[0] <= '3' &&
- (data->wbuffer[1] == ' ' || data->wbuffer[1] == '\n')) {
- index = data->wbuffer[0] - '0';
- if (data->wbuffer[1] == '\n') {
- rc = set_wep_tx_idx(ai, index, 1, 1);
- if (rc < 0) {
- airo_print_err(ai->dev->name, "failed to set "
- "WEP transmit index to %d: %d.",
- index, rc);
- }
- return;
- }
- j = 2;
- } else {
- airo_print_err(ai->dev->name, "WepKey passed invalid key index");
- return;
- }
-
- for( i = 0; i < 16*3 && data->wbuffer[i+j]; i++ ) {
- switch(i%3) {
- case 0:
- key[i/3] = hexVal(data->wbuffer[i+j])<<4;
- break;
- case 1:
- key[i/3] |= hexVal(data->wbuffer[i+j]);
- break;
- }
- }
-
- rc = set_wep_key(ai, index, key, i/3, 1, 1);
- if (rc < 0) {
- airo_print_err(ai->dev->name, "failed to set WEP key at index "
- "%d: %d.", index, rc);
- }
-}
-
-static int proc_wepkey_open( struct inode *inode, struct file *file )
-{
- struct proc_data *data;
- struct proc_dir_entry *dp = PDE(inode);
- struct net_device *dev = dp->data;
- struct airo_info *ai = dev->ml_priv;
- char *ptr;
- WepKeyRid wkr;
- __le16 lastindex;
- int j=0;
- int rc;
-
- if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
- return -ENOMEM;
- memset(&wkr, 0, sizeof(wkr));
- data = (struct proc_data *)file->private_data;
- if ((data->rbuffer = kzalloc( 180, GFP_KERNEL )) == NULL) {
- kfree (file->private_data);
- return -ENOMEM;
- }
- data->writelen = 0;
- data->maxwritelen = 80;
- if ((data->wbuffer = kzalloc( 80, GFP_KERNEL )) == NULL) {
- kfree (data->rbuffer);
- kfree (file->private_data);
- return -ENOMEM;
- }
- data->on_close = proc_wepkey_on_close;
-
- ptr = data->rbuffer;
- strcpy(ptr, "No wep keys\n");
- rc = readWepKeyRid(ai, &wkr, 1, 1);
- if (rc == SUCCESS) do {
- lastindex = wkr.kindex;
- if (wkr.kindex == cpu_to_le16(0xffff)) {
- j += sprintf(ptr+j, "Tx key = %d\n",
- (int)wkr.mac[0]);
- } else {
- j += sprintf(ptr+j, "Key %d set with length = %d\n",
- le16_to_cpu(wkr.kindex),
- le16_to_cpu(wkr.klen));
- }
- readWepKeyRid(ai, &wkr, 0, 1);
- } while((lastindex != wkr.kindex) && (j < 180-30));
-
- data->readlen = strlen( data->rbuffer );
- return 0;
-}
-
-static int proc_SSID_open(struct inode *inode, struct file *file)
-{
- struct proc_data *data;
- struct proc_dir_entry *dp = PDE(inode);
- struct net_device *dev = dp->data;
- struct airo_info *ai = dev->ml_priv;
- int i;
- char *ptr;
- SsidRid SSID_rid;
-
- if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
- return -ENOMEM;
- data = (struct proc_data *)file->private_data;
- if ((data->rbuffer = kmalloc( 104, GFP_KERNEL )) == NULL) {
- kfree (file->private_data);
- return -ENOMEM;
- }
- data->writelen = 0;
- data->maxwritelen = 33*3;
- /* allocate maxwritelen + 1; we'll want a sentinel */
- if ((data->wbuffer = kzalloc(33*3 + 1, GFP_KERNEL)) == NULL) {
- kfree (data->rbuffer);
- kfree (file->private_data);
- return -ENOMEM;
- }
- data->on_close = proc_SSID_on_close;
-
- readSsidRid(ai, &SSID_rid);
- ptr = data->rbuffer;
- for (i = 0; i < 3; i++) {
- int j;
- size_t len = le16_to_cpu(SSID_rid.ssids[i].len);
- if (!len)
- break;
- if (len > 32)
- len = 32;
- for (j = 0; j < len && SSID_rid.ssids[i].ssid[j]; j++)
- *ptr++ = SSID_rid.ssids[i].ssid[j];
- *ptr++ = '\n';
- }
- *ptr = '\0';
- data->readlen = strlen( data->rbuffer );
- return 0;
-}
-
-static int proc_APList_open( struct inode *inode, struct file *file ) {
- struct proc_data *data;
- struct proc_dir_entry *dp = PDE(inode);
- struct net_device *dev = dp->data;
- struct airo_info *ai = dev->ml_priv;
- int i;
- char *ptr;
- APListRid APList_rid;
-
- if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
- return -ENOMEM;
- data = (struct proc_data *)file->private_data;
- if ((data->rbuffer = kmalloc( 104, GFP_KERNEL )) == NULL) {
- kfree (file->private_data);
- return -ENOMEM;
- }
- data->writelen = 0;
- data->maxwritelen = 4*6*3;
- if ((data->wbuffer = kzalloc( data->maxwritelen, GFP_KERNEL )) == NULL) {
- kfree (data->rbuffer);
- kfree (file->private_data);
- return -ENOMEM;
- }
- data->on_close = proc_APList_on_close;
-
- readAPListRid(ai, &APList_rid);
- ptr = data->rbuffer;
- for( i = 0; i < 4; i++ ) {
-// We end when we find a zero MAC
- if ( !*(int*)APList_rid.ap[i] &&
- !*(int*)&APList_rid.ap[i][2]) break;
- ptr += sprintf(ptr, "%pM\n", APList_rid.ap[i]);
- }
- if (i==0) ptr += sprintf(ptr, "Not using specific APs\n");
-
- *ptr = '\0';
- data->readlen = strlen( data->rbuffer );
- return 0;
-}
-
-static int proc_BSSList_open( struct inode *inode, struct file *file ) {
- struct proc_data *data;
- struct proc_dir_entry *dp = PDE(inode);
- struct net_device *dev = dp->data;
- struct airo_info *ai = dev->ml_priv;
- char *ptr;
- BSSListRid BSSList_rid;
- int rc;
- /* If doLoseSync is not 1, we won't do a Lose Sync */
- int doLoseSync = -1;
-
- if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
- return -ENOMEM;
- data = (struct proc_data *)file->private_data;
- if ((data->rbuffer = kmalloc( 1024, GFP_KERNEL )) == NULL) {
- kfree (file->private_data);
- return -ENOMEM;
- }
- data->writelen = 0;
- data->maxwritelen = 0;
- data->wbuffer = NULL;
- data->on_close = NULL;
-
- if (file->f_mode & FMODE_WRITE) {
- if (!(file->f_mode & FMODE_READ)) {
- Cmd cmd;
- Resp rsp;
-
- if (ai->flags & FLAG_RADIO_MASK) return -ENETDOWN;
- memset(&cmd, 0, sizeof(cmd));
- cmd.cmd=CMD_LISTBSS;
- if (down_interruptible(&ai->sem))
- return -ERESTARTSYS;
- issuecommand(ai, &cmd, &rsp);
- up(&ai->sem);
- data->readlen = 0;
- return 0;
- }
- doLoseSync = 1;
- }
- ptr = data->rbuffer;
- /* There is a race condition here if there are concurrent opens.
- Since it is a rare condition, we'll just live with it; otherwise
- we would have to add a spin lock... */
- rc = readBSSListRid(ai, doLoseSync, &BSSList_rid);
- while(rc == 0 && BSSList_rid.index != cpu_to_le16(0xffff)) {
- ptr += sprintf(ptr, "%pM %*s rssi = %d",
- BSSList_rid.bssid,
- (int)BSSList_rid.ssidLen,
- BSSList_rid.ssid,
- le16_to_cpu(BSSList_rid.dBm));
- ptr += sprintf(ptr, " channel = %d %s %s %s %s\n",
- le16_to_cpu(BSSList_rid.dsChannel),
- BSSList_rid.cap & CAP_ESS ? "ESS" : "",
- BSSList_rid.cap & CAP_IBSS ? "adhoc" : "",
- BSSList_rid.cap & CAP_PRIVACY ? "wep" : "",
- BSSList_rid.cap & CAP_SHORTHDR ? "shorthdr" : "");
- rc = readBSSListRid(ai, 0, &BSSList_rid);
- }
- *ptr = '\0';
- data->readlen = strlen( data->rbuffer );
- return 0;
-}
-
-static int proc_close( struct inode *inode, struct file *file )
-{
- struct proc_data *data = file->private_data;
-
- if (data->on_close != NULL)
- data->on_close(inode, file);
- kfree(data->rbuffer);
- kfree(data->wbuffer);
- kfree(data);
- return 0;
-}
-
-/* Since the card doesn't automatically switch to the right WEP mode,
- we will make it do it. If the card isn't associated, every few
- seconds we will switch WEP modes to see if that will help. If the
- card is associated we will check every minute to see if anything
- has changed. */
-static void timer_func( struct net_device *dev ) {
- struct airo_info *apriv = dev->ml_priv;
-
-/* We don't have a link so try changing the authtype */
- readConfigRid(apriv, 0);
- disable_MAC(apriv, 0);
- switch(apriv->config.authType) {
- case AUTH_ENCRYPT:
-/* So drop to OPEN */
- apriv->config.authType = AUTH_OPEN;
- break;
- case AUTH_SHAREDKEY:
- if (apriv->keyindex < auto_wep) {
- set_wep_tx_idx(apriv, apriv->keyindex, 0, 0);
- apriv->config.authType = AUTH_SHAREDKEY;
- apriv->keyindex++;
- } else {
- /* Drop to ENCRYPT */
- apriv->keyindex = 0;
- set_wep_tx_idx(apriv, apriv->defindex, 0, 0);
- apriv->config.authType = AUTH_ENCRYPT;
- }
- break;
- default: /* We'll escalate to SHAREDKEY */
- apriv->config.authType = AUTH_SHAREDKEY;
- }
- set_bit (FLAG_COMMIT, &apriv->flags);
- writeConfigRid(apriv, 0);
- enable_MAC(apriv, 0);
- up(&apriv->sem);
-
-/* Schedule check to see if the change worked */
- clear_bit(JOB_AUTOWEP, &apriv->jobs);
- apriv->expires = RUN_AT(HZ*3);
-}
-
-#ifdef CONFIG_PCI
-static int __devinit airo_pci_probe(struct pci_dev *pdev,
- const struct pci_device_id *pent)
-{
- struct net_device *dev;
-
- if (pci_enable_device(pdev))
- return -ENODEV;
- pci_set_master(pdev);
-
- if (pdev->device == 0x5000 || pdev->device == 0xa504)
- dev = _init_airo_card(pdev->irq, pdev->resource[0].start, 0, pdev, &pdev->dev);
- else
- dev = _init_airo_card(pdev->irq, pdev->resource[2].start, 0, pdev, &pdev->dev);
- if (!dev) {
- pci_disable_device(pdev);
- return -ENODEV;
- }
-
- pci_set_drvdata(pdev, dev);
- return 0;
-}
-
-static void __devexit airo_pci_remove(struct pci_dev *pdev)
-{
- struct net_device *dev = pci_get_drvdata(pdev);
-
- airo_print_info(dev->name, "Unregistering...");
- stop_airo_card(dev, 1);
- pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
-}
-
-static int airo_pci_suspend(struct pci_dev *pdev, pm_message_t state)
-{
- struct net_device *dev = pci_get_drvdata(pdev);
- struct airo_info *ai = dev->ml_priv;
- Cmd cmd;
- Resp rsp;
-
- if (!ai->APList)
- ai->APList = kmalloc(sizeof(APListRid), GFP_KERNEL);
- if (!ai->APList)
- return -ENOMEM;
- if (!ai->SSID)
- ai->SSID = kmalloc(sizeof(SsidRid), GFP_KERNEL);
- if (!ai->SSID)
- return -ENOMEM;
- readAPListRid(ai, ai->APList);
- readSsidRid(ai, ai->SSID);
- memset(&cmd, 0, sizeof(cmd));
- /* the lock will be released at the end of the resume callback */
- if (down_interruptible(&ai->sem))
- return -EAGAIN;
- disable_MAC(ai, 0);
- netif_device_detach(dev);
- ai->power = state;
- cmd.cmd = HOSTSLEEP;
- issuecommand(ai, &cmd, &rsp);
-
- pci_enable_wake(pdev, pci_choose_state(pdev, state), 1);
- pci_save_state(pdev);
- return pci_set_power_state(pdev, pci_choose_state(pdev, state));
-}
-
-static int airo_pci_resume(struct pci_dev *pdev)
-{
- struct net_device *dev = pci_get_drvdata(pdev);
- struct airo_info *ai = dev->ml_priv;
- pci_power_t prev_state = pdev->current_state;
-
- pci_set_power_state(pdev, PCI_D0);
- pci_restore_state(pdev);
- pci_enable_wake(pdev, PCI_D0, 0);
-
- if (prev_state != PCI_D1) {
- reset_card(dev, 0);
- mpi_init_descriptors(ai);
- setup_card(ai, dev->dev_addr, 0);
- clear_bit(FLAG_RADIO_OFF, &ai->flags);
- clear_bit(FLAG_PENDING_XMIT, &ai->flags);
- } else {
- OUT4500(ai, EVACK, EV_AWAKEN);
- OUT4500(ai, EVACK, EV_AWAKEN);
- msleep(100);
- }
-
- set_bit(FLAG_COMMIT, &ai->flags);
- disable_MAC(ai, 0);
- msleep(200);
- if (ai->SSID) {
- writeSsidRid(ai, ai->SSID, 0);
- kfree(ai->SSID);
- ai->SSID = NULL;
- }
- if (ai->APList) {
- writeAPListRid(ai, ai->APList, 0);
- kfree(ai->APList);
- ai->APList = NULL;
- }
- writeConfigRid(ai, 0);
- enable_MAC(ai, 0);
- ai->power = PMSG_ON;
- netif_device_attach(dev);
- netif_wake_queue(dev);
- enable_interrupts(ai);
- up(&ai->sem);
- return 0;
-}
-#endif
-
-static int __init airo_init_module( void )
-{
- int i;
-
- airo_entry = create_proc_entry("driver/aironet",
- S_IFDIR | airo_perm,
- NULL);
-
- if (airo_entry) {
- airo_entry->uid = proc_uid;
- airo_entry->gid = proc_gid;
- }
-
- for (i = 0; i < 4 && io[i] && irq[i]; i++) {
- airo_print_info("", "Trying to configure ISA adapter at irq=%d "
- "io=0x%x", irq[i], io[i] );
- if (init_airo_card( irq[i], io[i], 0, NULL ))
- /* do nothing */ ;
- }
-
-#ifdef CONFIG_PCI
- airo_print_info("", "Probing for PCI adapters");
- i = pci_register_driver(&airo_driver);
- airo_print_info("", "Finished probing for PCI adapters");
-
- if (i) {
- remove_proc_entry("driver/aironet", NULL);
- return i;
- }
-#endif
-
- /* Always exit with success, as we are a library module
- * as well as a driver module
- */
- return 0;
-}
-
-static void __exit airo_cleanup_module( void )
-{
- struct airo_info *ai;
- while(!list_empty(&airo_devices)) {
- ai = list_entry(airo_devices.next, struct airo_info, dev_list);
- airo_print_info(ai->dev->name, "Unregistering...");
- stop_airo_card(ai->dev, 1);
- }
-#ifdef CONFIG_PCI
- pci_unregister_driver(&airo_driver);
-#endif
- remove_proc_entry("driver/aironet", NULL);
-}
-
-/*
- * Initial Wireless Extension code for Aironet driver by :
- * Jean Tourrilhes <jt@hpl.hp.com> - HPL - 17 November 00
- * Conversion to new driver API by :
- * Jean Tourrilhes <jt@hpl.hp.com> - HPL - 26 March 02
- * Javier also did a good amount of work here, adding some new extensions
- * and fixing my code. Let's just say that without him this code just
- * would not work at all... - Jean II
- */
-
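-/*
- * Convert a raw RSSI value to a dBm-style level via the card's RSSI
- * map; returns 0 if no map is present.
- */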
-static u8 airo_rssi_to_dbm (tdsRssiEntry *rssi_rid, u8 rssi)
-{
- if (!rssi_rid)
- return 0;
-
- return (0x100 - rssi_rid[rssi].rssidBm);
-}
-
-static u8 airo_dbm_to_pct (tdsRssiEntry *rssi_rid, u8 dbm)
-{
- int i;
-
- if (!rssi_rid)
- return 0;
-
- for (i = 0; i < 256; i++)
- if (rssi_rid[i].rssidBm == dbm)
- return rssi_rid[i].rssipct;
-
- return 0;
-}
-
-
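-/*
- * Derive a link quality figure from signalQuality, which behaves
- * like a noise measure (lower is better). Non-350 cards scale from
- * a maximum of 0x20, 350-series cards from 0xb0 (clamped to 0xa0);
- * 0 is returned when the card isn't fully in sync or the hardware
- * doesn't report signal quality.
- */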
-static int airo_get_quality (StatusRid *status_rid, CapabilityRid *cap_rid)
-{
- int quality = 0;
- u16 sq;
-
- if ((status_rid->mode & cpu_to_le16(0x3f)) != cpu_to_le16(0x3f))
- return 0;
-
- if (!(cap_rid->hardCap & cpu_to_le16(8)))
- return 0;
-
- sq = le16_to_cpu(status_rid->signalQuality);
- if (memcmp(cap_rid->prodName, "350", 3))
- if (sq > 0x20)
- quality = 0;
- else
- quality = 0x20 - sq;
- else
- if (sq > 0xb0)
- quality = 0;
- else if (sq < 0x10)
- quality = 0xa0;
- else
- quality = 0xb0 - sq;
- return quality;
-}
-
-#define airo_get_max_quality(cap_rid) (memcmp((cap_rid)->prodName, "350", 3) ? 0x20 : 0xa0)
-#define airo_get_avg_quality(cap_rid) (memcmp((cap_rid)->prodName, "350", 3) ? 0x10 : 0x50)
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : get protocol name
- */
-static int airo_get_name(struct net_device *dev,
- struct iw_request_info *info,
- char *cwrq,
- char *extra)
-{
- strcpy(cwrq, "IEEE 802.11-DS");
- return 0;
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : set frequency
- */
-static int airo_set_freq(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_freq *fwrq,
- char *extra)
-{
- struct airo_info *local = dev->ml_priv;
- int rc = -EINPROGRESS; /* Call commit handler */
-
- /* If setting by frequency, convert to a channel */
- if(fwrq->e == 1) {
- int f = fwrq->m / 100000;
-
- /* Hack to fall through... */
- fwrq->e = 0;
- fwrq->m = ieee80211_freq_to_dsss_chan(f);
- }
- /* Setting by channel number */
- if((fwrq->m > 1000) || (fwrq->e > 0))
- rc = -EOPNOTSUPP;
- else {
- int channel = fwrq->m;
- /* We should do a better check than that,
- * based on the card capability !!! */
- if((channel < 1) || (channel > 14)) {
- airo_print_dbg(dev->name, "New channel value of %d is invalid!",
- fwrq->m);
- rc = -EINVAL;
- } else {
- readConfigRid(local, 1);
- /* Yes ! We can set it !!! */
- local->config.channelSet = cpu_to_le16(channel);
- set_bit (FLAG_COMMIT, &local->flags);
- }
- }
- return rc;
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : get frequency
- */
-static int airo_get_freq(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_freq *fwrq,
- char *extra)
-{
- struct airo_info *local = dev->ml_priv;
- StatusRid status_rid; /* Card status info */
- int ch;
-
- readConfigRid(local, 1);
- if ((local->config.opmode & MODE_CFG_MASK) == MODE_STA_ESS)
- status_rid.channel = local->config.channelSet;
- else
- readStatusRid(local, &status_rid, 1);
-
- ch = le16_to_cpu(status_rid.channel);
- if((ch > 0) && (ch < 15)) {
- fwrq->m = ieee80211_dsss_chan_to_freq(ch) * 100000;
- fwrq->e = 1;
- } else {
- fwrq->m = ch;
- fwrq->e = 0;
- }
-
- return 0;
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : set ESSID
- */
-static int airo_set_essid(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *dwrq,
- char *extra)
-{
- struct airo_info *local = dev->ml_priv;
- SsidRid SSID_rid; /* SSIDs */
-
- /* Reload the list of current SSID */
- readSsidRid(local, &SSID_rid);
-
- /* Check if we asked for `any' */
- if(dwrq->flags == 0) {
- /* Just send an empty SSID list */
- memset(&SSID_rid, 0, sizeof(SSID_rid));
- } else {
- int index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
-
- /* Check the size of the string */
- if(dwrq->length > IW_ESSID_MAX_SIZE) {
- return -E2BIG;
- }
- /* Check if index is valid */
- if((index < 0) || (index >= 4)) {
- return -EINVAL;
- }
-
- /* Set the SSID */
- memset(SSID_rid.ssids[index].ssid, 0,
- sizeof(SSID_rid.ssids[index].ssid));
- memcpy(SSID_rid.ssids[index].ssid, extra, dwrq->length);
- SSID_rid.ssids[index].len = cpu_to_le16(dwrq->length);
- }
- SSID_rid.len = cpu_to_le16(sizeof(SSID_rid));
- /* Write it to the card */
- disable_MAC(local, 1);
- writeSsidRid(local, &SSID_rid, 1);
- enable_MAC(local, 1);
-
- return 0;
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : get ESSID
- */
-static int airo_get_essid(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *dwrq,
- char *extra)
-{
- struct airo_info *local = dev->ml_priv;
- StatusRid status_rid; /* Card status info */
-
- readStatusRid(local, &status_rid, 1);
-
- /* Note : if dwrq->flags != 0, we should
- * get the relevant SSID from the SSID list... */
-
- /* Get the current SSID */
- memcpy(extra, status_rid.SSID, le16_to_cpu(status_rid.SSIDlen));
- /* If none, we may want to get the one that was set */
-
- /* Push it out ! */
- dwrq->length = le16_to_cpu(status_rid.SSIDlen);
- dwrq->flags = 1; /* active */
-
- return 0;
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : set AP address
- */
-static int airo_set_wap(struct net_device *dev,
- struct iw_request_info *info,
- struct sockaddr *awrq,
- char *extra)
-{
- struct airo_info *local = dev->ml_priv;
- Cmd cmd;
- Resp rsp;
- APListRid APList_rid;
- static const u8 any[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
- static const u8 off[ETH_ALEN] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
-
- if (awrq->sa_family != ARPHRD_ETHER)
- return -EINVAL;
- else if (!memcmp(any, awrq->sa_data, ETH_ALEN) ||
- !memcmp(off, awrq->sa_data, ETH_ALEN)) {
- memset(&cmd, 0, sizeof(cmd));
- cmd.cmd=CMD_LOSE_SYNC;
- if (down_interruptible(&local->sem))
- return -ERESTARTSYS;
- issuecommand(local, &cmd, &rsp);
- up(&local->sem);
- } else {
- memset(&APList_rid, 0, sizeof(APList_rid));
- APList_rid.len = cpu_to_le16(sizeof(APList_rid));
- memcpy(APList_rid.ap[0], awrq->sa_data, ETH_ALEN);
- disable_MAC(local, 1);
- writeAPListRid(local, &APList_rid, 1);
- enable_MAC(local, 1);
- }
- return 0;
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : get AP address
- */
-static int airo_get_wap(struct net_device *dev,
- struct iw_request_info *info,
- struct sockaddr *awrq,
- char *extra)
-{
- struct airo_info *local = dev->ml_priv;
- StatusRid status_rid; /* Card status info */
-
- readStatusRid(local, &status_rid, 1);
-
- /* Tentative. This seems to work, wow, I'm lucky !!! */
- memcpy(awrq->sa_data, status_rid.bssid[0], ETH_ALEN);
- awrq->sa_family = ARPHRD_ETHER;
-
- return 0;
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : set Nickname
- */
-static int airo_set_nick(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *dwrq,
- char *extra)
-{
- struct airo_info *local = dev->ml_priv;
-
- /* Check the size of the string */
- if(dwrq->length > 16) {
- return -E2BIG;
- }
- readConfigRid(local, 1);
- memset(local->config.nodeName, 0, sizeof(local->config.nodeName));
- memcpy(local->config.nodeName, extra, dwrq->length);
- set_bit (FLAG_COMMIT, &local->flags);
-
- return -EINPROGRESS; /* Call commit handler */
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : get Nickname
- */
-static int airo_get_nick(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *dwrq,
- char *extra)
-{
- struct airo_info *local = dev->ml_priv;
-
- readConfigRid(local, 1);
- strncpy(extra, local->config.nodeName, 16);
- extra[16] = '\0';
- dwrq->length = strlen(extra);
-
- return 0;
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : set Bit-Rate
- */
-static int airo_set_rate(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *vwrq,
- char *extra)
-{
- struct airo_info *local = dev->ml_priv;
- CapabilityRid cap_rid; /* Card capability info */
- u8 brate = 0;
- int i;
-
- /* First : get a valid bit rate value */
- readCapabilityRid(local, &cap_rid, 1);
-
- /* Which type of value ? */
- if((vwrq->value < 8) && (vwrq->value >= 0)) {
- /* Setting by rate index */
- /* Find value in the magic rate table */
- brate = cap_rid.supportedRates[vwrq->value];
- } else {
- /* Setting by frequency value */
- u8 normvalue = (u8) (vwrq->value/500000);
-
- /* Check if rate is valid */
- for(i = 0 ; i < 8 ; i++) {
- if(normvalue == cap_rid.supportedRates[i]) {
- brate = normvalue;
- break;
- }
- }
- }
- /* -1 designates the max rate (mostly auto mode) */
- if(vwrq->value == -1) {
- /* Get the highest available rate */
- for(i = 0 ; i < 8 ; i++) {
- if(cap_rid.supportedRates[i] == 0)
- break;
- }
- if(i != 0)
- brate = cap_rid.supportedRates[i - 1];
- }
- /* Check that it is valid */
- if(brate == 0) {
- return -EINVAL;
- }
-
- readConfigRid(local, 1);
- /* Now, check if we want a fixed or auto value */
- if(vwrq->fixed == 0) {
- /* Fill all the rates up to this max rate */
- memset(local->config.rates, 0, 8);
- for(i = 0 ; i < 8 ; i++) {
- local->config.rates[i] = cap_rid.supportedRates[i];
- if(local->config.rates[i] == brate)
- break;
- }
- } else {
- /* Fixed mode */
- /* One rate, fixed */
- memset(local->config.rates, 0, 8);
- local->config.rates[0] = brate;
- }
- set_bit (FLAG_COMMIT, &local->flags);
-
- return -EINPROGRESS; /* Call commit handler */
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : get Bit-Rate
- */
-static int airo_get_rate(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *vwrq,
- char *extra)
-{
- struct airo_info *local = dev->ml_priv;
- StatusRid status_rid; /* Card status info */
-
- readStatusRid(local, &status_rid, 1);
-
- vwrq->value = le16_to_cpu(status_rid.currentXmitRate) * 500000;
- /* If more than one rate, set auto */
- readConfigRid(local, 1);
- vwrq->fixed = (local->config.rates[1] == 0);
-
- return 0;
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : set RTS threshold
- */
-static int airo_set_rts(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *vwrq,
- char *extra)
-{
- struct airo_info *local = dev->ml_priv;
- int rthr = vwrq->value;
-
- if(vwrq->disabled)
- rthr = AIRO_DEF_MTU;
- if((rthr < 0) || (rthr > AIRO_DEF_MTU)) {
- return -EINVAL;
- }
- readConfigRid(local, 1);
- local->config.rtsThres = cpu_to_le16(rthr);
- set_bit (FLAG_COMMIT, &local->flags);
-
- return -EINPROGRESS; /* Call commit handler */
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : get RTS threshold
- */
-static int airo_get_rts(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *vwrq,
- char *extra)
-{
- struct airo_info *local = dev->ml_priv;
-
- readConfigRid(local, 1);
- vwrq->value = le16_to_cpu(local->config.rtsThres);
- vwrq->disabled = (vwrq->value >= AIRO_DEF_MTU);
- vwrq->fixed = 1;
-
- return 0;
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : set Fragmentation threshold
- */
-static int airo_set_frag(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *vwrq,
- char *extra)
-{
- struct airo_info *local = dev->ml_priv;
- int fthr = vwrq->value;
-
- if(vwrq->disabled)
- fthr = AIRO_DEF_MTU;
- if((fthr < 256) || (fthr > AIRO_DEF_MTU)) {
- return -EINVAL;
- }
- fthr &= ~0x1; /* Get an even value - is it really needed ??? */
- readConfigRid(local, 1);
- local->config.fragThresh = cpu_to_le16(fthr);
- set_bit (FLAG_COMMIT, &local->flags);
-
- return -EINPROGRESS; /* Call commit handler */
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : get Fragmentation threshold
- */
-static int airo_get_frag(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *vwrq,
- char *extra)
-{
- struct airo_info *local = dev->ml_priv;
-
- readConfigRid(local, 1);
- vwrq->value = le16_to_cpu(local->config.fragThresh);
- vwrq->disabled = (vwrq->value >= AIRO_DEF_MTU);
- vwrq->fixed = 1;
-
- return 0;
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : set Mode of Operation
- */
-static int airo_set_mode(struct net_device *dev,
- struct iw_request_info *info,
- __u32 *uwrq,
- char *extra)
-{
- struct airo_info *local = dev->ml_priv;
- int reset = 0;
-
- readConfigRid(local, 1);
- if (sniffing_mode(local))
- reset = 1;
-
- switch(*uwrq) {
- case IW_MODE_ADHOC:
- local->config.opmode &= ~MODE_CFG_MASK;
- local->config.opmode |= MODE_STA_IBSS;
- local->config.rmode &= ~RXMODE_FULL_MASK;
- local->config.scanMode = SCANMODE_ACTIVE;
- clear_bit (FLAG_802_11, &local->flags);
- break;
- case IW_MODE_INFRA:
- local->config.opmode &= ~MODE_CFG_MASK;
- local->config.opmode |= MODE_STA_ESS;
- local->config.rmode &= ~RXMODE_FULL_MASK;
- local->config.scanMode = SCANMODE_ACTIVE;
- clear_bit (FLAG_802_11, &local->flags);
- break;
- case IW_MODE_MASTER:
- local->config.opmode &= ~MODE_CFG_MASK;
- local->config.opmode |= MODE_AP;
- local->config.rmode &= ~RXMODE_FULL_MASK;
- local->config.scanMode = SCANMODE_ACTIVE;
- clear_bit (FLAG_802_11, &local->flags);
- break;
- case IW_MODE_REPEAT:
- local->config.opmode &= ~MODE_CFG_MASK;
- local->config.opmode |= MODE_AP_RPTR;
- local->config.rmode &= ~RXMODE_FULL_MASK;
- local->config.scanMode = SCANMODE_ACTIVE;
- clear_bit (FLAG_802_11, &local->flags);
- break;
- case IW_MODE_MONITOR:
- local->config.opmode &= ~MODE_CFG_MASK;
- local->config.opmode |= MODE_STA_ESS;
- local->config.rmode &= ~RXMODE_FULL_MASK;
- local->config.rmode |= RXMODE_RFMON | RXMODE_DISABLE_802_3_HEADER;
- local->config.scanMode = SCANMODE_PASSIVE;
- set_bit (FLAG_802_11, &local->flags);
- break;
- default:
- return -EINVAL;
- }
- if (reset)
- set_bit (FLAG_RESET, &local->flags);
- set_bit (FLAG_COMMIT, &local->flags);
-
- return -EINPROGRESS; /* Call commit handler */
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : get Mode of Operation
- */
-static int airo_get_mode(struct net_device *dev,
- struct iw_request_info *info,
- __u32 *uwrq,
- char *extra)
-{
- struct airo_info *local = dev->ml_priv;
-
- readConfigRid(local, 1);
- /* If not managed, assume it's ad-hoc */
- switch (local->config.opmode & MODE_CFG_MASK) {
- case MODE_STA_ESS:
- *uwrq = IW_MODE_INFRA;
- break;
- case MODE_AP:
- *uwrq = IW_MODE_MASTER;
- break;
- case MODE_AP_RPTR:
- *uwrq = IW_MODE_REPEAT;
- break;
- default:
- *uwrq = IW_MODE_ADHOC;
- }
-
- return 0;
-}
-
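-/* A key index is valid if it addresses one of the card's WEP key slots. */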
-static inline int valid_index(struct airo_info *ai, int index)
-{
- return (index >= 0) && (index <= ai->max_wep_idx);
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : set Encryption Key
- */
-static int airo_set_encode(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *dwrq,
- char *extra)
-{
- struct airo_info *local = dev->ml_priv;
- int perm = (dwrq->flags & IW_ENCODE_TEMP ? 0 : 1);
- __le16 currentAuthType = local->config.authType;
- int rc = 0;
-
- if (!local->wep_capable)
- return -EOPNOTSUPP;
-
- readConfigRid(local, 1);
-
- /* Basic checking: do we have a key to set ?
- * Note : with the new API, it's impossible to get a NULL pointer.
- * Therefore, we need to check a key size == 0 instead.
- * New versions of iwconfig properly set the IW_ENCODE_NOKEY flag
- * when no key is present (only changing flags), but older versions
- * don't do it. - Jean II */
- if (dwrq->length > 0) {
- wep_key_t key;
- int index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
- int current_index;
-
- /* Check the size of the key */
- if (dwrq->length > MAX_KEY_SIZE) {
- return -EINVAL;
- }
-
- current_index = get_wep_tx_idx(local);
- if (current_index < 0)
- current_index = 0;
-
- /* Check the index (none -> use current) */
- if (!valid_index(local, index))
- index = current_index;
-
- /* Set the length */
- if (dwrq->length > MIN_KEY_SIZE)
- key.len = MAX_KEY_SIZE;
- else
- if (dwrq->length > 0)
- key.len = MIN_KEY_SIZE;
- else
- /* Disable the key */
- key.len = 0;
- /* Check if the key is not marked as invalid */
- if(!(dwrq->flags & IW_ENCODE_NOKEY)) {
- /* Cleanup */
- memset(key.key, 0, MAX_KEY_SIZE);
- /* Copy the key in the driver */
- memcpy(key.key, extra, dwrq->length);
- /* Send the key to the card */
- rc = set_wep_key(local, index, key.key, key.len, perm, 1);
- if (rc < 0) {
- airo_print_err(local->dev->name, "failed to set"
- " WEP key at index %d: %d.",
- index, rc);
- return rc;
- }
- }
- /* WE specify that if a valid key is set, encryption
- * should be enabled (user may turn it off later)
- * This is also how "iwconfig ethX key on" works */
- if((index == current_index) && (key.len > 0) &&
- (local->config.authType == AUTH_OPEN)) {
- local->config.authType = AUTH_ENCRYPT;
- }
- } else {
- /* Do we want to just set the transmit key index ? */
- int index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
- if (valid_index(local, index)) {
- rc = set_wep_tx_idx(local, index, perm, 1);
- if (rc < 0) {
- airo_print_err(local->dev->name, "failed to set"
- " WEP transmit index to %d: %d.",
- index, rc);
- return rc;
- }
- } else {
- /* Don't complain if we only change the mode */
- if (!(dwrq->flags & IW_ENCODE_MODE))
- return -EINVAL;
- }
- }
- /* Read the flags */
- if(dwrq->flags & IW_ENCODE_DISABLED)
- local->config.authType = AUTH_OPEN; // disable encryption
- if(dwrq->flags & IW_ENCODE_RESTRICTED)
- local->config.authType = AUTH_SHAREDKEY; // Only Both
- if(dwrq->flags & IW_ENCODE_OPEN)
- local->config.authType = AUTH_ENCRYPT; // Only Wep
- /* Commit the changes to flags if needed */
- if (local->config.authType != currentAuthType)
- set_bit (FLAG_COMMIT, &local->flags);
- return -EINPROGRESS; /* Call commit handler */
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : get Encryption Key
- */
-static int airo_get_encode(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *dwrq,
- char *extra)
-{
- struct airo_info *local = dev->ml_priv;
- int index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
- int wep_key_len;
- u8 buf[16];
-
- if (!local->wep_capable)
- return -EOPNOTSUPP;
-
- readConfigRid(local, 1);
-
- /* Check encryption mode */
- switch(local->config.authType) {
- case AUTH_ENCRYPT:
- dwrq->flags = IW_ENCODE_OPEN;
- break;
- case AUTH_SHAREDKEY:
- dwrq->flags = IW_ENCODE_RESTRICTED;
- break;
- default:
- case AUTH_OPEN:
- dwrq->flags = IW_ENCODE_DISABLED;
- break;
- }
- /* We can't return the key, so set the proper flag and return zero */
- dwrq->flags |= IW_ENCODE_NOKEY;
- memset(extra, 0, 16);
-
- /* Which key do we want ? -1 -> tx index */
- if (!valid_index(local, index)) {
- index = get_wep_tx_idx(local);
- if (index < 0)
- index = 0;
- }
- dwrq->flags |= index + 1;
-
- /* Copy the key to the user buffer; get_wep_key() returns a
- * negative value on failure, which must not end up in the
- * unsigned dwrq->length. */
- wep_key_len = get_wep_key(local, index, &buf[0], sizeof(buf));
- if (wep_key_len < 0) {
- dwrq->length = 0;
- } else {
- dwrq->length = wep_key_len;
- memcpy(extra, buf, dwrq->length);
- }
-
- return 0;
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : set extended Encryption parameters
- */
-static int airo_set_encodeext(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- struct airo_info *local = dev->ml_priv;
- struct iw_point *encoding = &wrqu->encoding;
- struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
- int perm = ( encoding->flags & IW_ENCODE_TEMP ? 0 : 1 );
- __le16 currentAuthType = local->config.authType;
- int idx, key_len, alg = ext->alg, set_key = 1, rc;
- wep_key_t key;
-
- if (!local->wep_capable)
- return -EOPNOTSUPP;
-
- readConfigRid(local, 1);
-
- /* Determine and validate the key index */
- idx = encoding->flags & IW_ENCODE_INDEX;
- if (idx) {
- if (!valid_index(local, idx - 1))
- return -EINVAL;
- idx--;
- } else {
- idx = get_wep_tx_idx(local);
- if (idx < 0)
- idx = 0;
- }
-
- if (encoding->flags & IW_ENCODE_DISABLED)
- alg = IW_ENCODE_ALG_NONE;
-
- if (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) {
- /* Only set transmit key index here, actual
- * key is set below if needed.
- */
- rc = set_wep_tx_idx(local, idx, perm, 1);
- if (rc < 0) {
- airo_print_err(local->dev->name, "failed to set "
- "WEP transmit index to %d: %d.",
- idx, rc);
- return rc;
- }
- set_key = ext->key_len > 0 ? 1 : 0;
- }
-
- if (set_key) {
- /* Set the requested key first */
- memset(key.key, 0, MAX_KEY_SIZE);
- switch (alg) {
- case IW_ENCODE_ALG_NONE:
- key.len = 0;
- break;
- case IW_ENCODE_ALG_WEP:
- if (ext->key_len > MIN_KEY_SIZE) {
- key.len = MAX_KEY_SIZE;
- } else if (ext->key_len > 0) {
- key.len = MIN_KEY_SIZE;
- } else {
- return -EINVAL;
- }
- key_len = min (ext->key_len, key.len);
- memcpy(key.key, ext->key, key_len);
- break;
- default:
- return -EINVAL;
- }
- /* Send the key to the card */
- rc = set_wep_key(local, idx, key.key, key.len, perm, 1);
- if (rc < 0) {
- airo_print_err(local->dev->name, "failed to set WEP key"
- " at index %d: %d.", idx, rc);
- return rc;
- }
- }
-
- /* Read the flags */
- if(encoding->flags & IW_ENCODE_DISABLED)
- local->config.authType = AUTH_OPEN; // disable encryption
- if(encoding->flags & IW_ENCODE_RESTRICTED)
- local->config.authType = AUTH_SHAREDKEY; // Only Both
- if(encoding->flags & IW_ENCODE_OPEN)
- local->config.authType = AUTH_ENCRYPT; // Only Wep
- /* Commit the changes to flags if needed */
- if (local->config.authType != currentAuthType)
- set_bit (FLAG_COMMIT, &local->flags);
-
- return -EINPROGRESS;
-}
-
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : get extended Encryption parameters
- */
-static int airo_get_encodeext(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- struct airo_info *local = dev->ml_priv;
- struct iw_point *encoding = &wrqu->encoding;
- struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
- int idx, max_key_len, wep_key_len;
- u8 buf[16];
-
- if (!local->wep_capable)
- return -EOPNOTSUPP;
-
- readConfigRid(local, 1);
-
- max_key_len = encoding->length - sizeof(*ext);
- if (max_key_len < 0)
- return -EINVAL;
-
- idx = encoding->flags & IW_ENCODE_INDEX;
- if (idx) {
- if (!valid_index(local, idx - 1))
- return -EINVAL;
- idx--;
- } else {
- idx = get_wep_tx_idx(local);
- if (idx < 0)
- idx = 0;
- }
-
- encoding->flags = idx + 1;
- memset(ext, 0, sizeof(*ext));
-
- /* Check encryption mode */
- switch(local->config.authType) {
- case AUTH_ENCRYPT:
- case AUTH_SHAREDKEY:
- encoding->flags = IW_ENCODE_ALG_WEP | IW_ENCODE_ENABLED;
- break;
- default:
- case AUTH_OPEN:
- encoding->flags = IW_ENCODE_ALG_NONE | IW_ENCODE_DISABLED;
- break;
- }
- /* We can't return the key, so set the proper flag and return zero */
- encoding->flags |= IW_ENCODE_NOKEY;
- memset(extra, 0, 16);
-
- /* Copy the key to the user buffer; as above, keep a failed
- * lookup out of the unsigned ext->key_len. */
- wep_key_len = get_wep_key(local, idx, &buf[0], sizeof(buf));
- if (wep_key_len < 0) {
- ext->key_len = 0;
- } else {
- ext->key_len = wep_key_len;
- memcpy(extra, buf, ext->key_len);
- }
-
- return 0;
-}
-
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : set extended authentication parameters
- */
-static int airo_set_auth(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct airo_info *local = dev->ml_priv;
- struct iw_param *param = &wrqu->param;
- __le16 currentAuthType = local->config.authType;
-
- switch (param->flags & IW_AUTH_INDEX) {
- case IW_AUTH_WPA_VERSION:
- case IW_AUTH_CIPHER_PAIRWISE:
- case IW_AUTH_CIPHER_GROUP:
- case IW_AUTH_KEY_MGMT:
- case IW_AUTH_RX_UNENCRYPTED_EAPOL:
- case IW_AUTH_PRIVACY_INVOKED:
- /*
- * airo does not use these parameters
- */
- break;
-
- case IW_AUTH_DROP_UNENCRYPTED:
- if (param->value) {
- /* Only change auth type if unencrypted */
- if (currentAuthType == AUTH_OPEN)
- local->config.authType = AUTH_ENCRYPT;
- } else {
- local->config.authType = AUTH_OPEN;
- }
-
- /* Commit the changes to flags if needed */
- if (local->config.authType != currentAuthType)
- set_bit (FLAG_COMMIT, &local->flags);
- break;
-
- case IW_AUTH_80211_AUTH_ALG: {
- /* FIXME: What about AUTH_OPEN? This API seems to
- * disallow setting our auth to AUTH_OPEN.
- */
- if (param->value & IW_AUTH_ALG_SHARED_KEY) {
- local->config.authType = AUTH_SHAREDKEY;
- } else if (param->value & IW_AUTH_ALG_OPEN_SYSTEM) {
- local->config.authType = AUTH_ENCRYPT;
- } else
- return -EINVAL;
-
- /* Commit the changes to flags if needed */
- if (local->config.authType != currentAuthType)
- set_bit (FLAG_COMMIT, &local->flags);
- break;
- }
-
- case IW_AUTH_WPA_ENABLED:
- /* Silently accept disable of WPA */
- if (param->value > 0)
- return -EOPNOTSUPP;
- break;
-
- default:
- return -EOPNOTSUPP;
- }
- return -EINPROGRESS;
-}
-
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : get extended authentication parameters
- */
-static int airo_get_auth(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct airo_info *local = dev->ml_priv;
- struct iw_param *param = &wrqu->param;
- __le16 currentAuthType = local->config.authType;
-
- switch (param->flags & IW_AUTH_INDEX) {
- case IW_AUTH_DROP_UNENCRYPTED:
- switch (currentAuthType) {
- case AUTH_SHAREDKEY:
- case AUTH_ENCRYPT:
- param->value = 1;
- break;
- default:
- param->value = 0;
- break;
- }
- break;
-
- case IW_AUTH_80211_AUTH_ALG:
- switch (currentAuthType) {
- case AUTH_SHAREDKEY:
- param->value = IW_AUTH_ALG_SHARED_KEY;
- break;
- case AUTH_ENCRYPT:
- default:
- param->value = IW_AUTH_ALG_OPEN_SYSTEM;
- break;
- }
- break;
-
- case IW_AUTH_WPA_ENABLED:
- param->value = 0;
- break;
-
- default:
- return -EOPNOTSUPP;
- }
- return 0;
-}
-
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : set Tx-Power
- */
-static int airo_set_txpow(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *vwrq,
- char *extra)
-{
- struct airo_info *local = dev->ml_priv;
- CapabilityRid cap_rid; /* Card capability info */
- int i;
- int rc = -EINVAL;
- __le16 v = cpu_to_le16(vwrq->value);
-
- readCapabilityRid(local, &cap_rid, 1);
-
- if (vwrq->disabled) {
- set_bit (FLAG_RADIO_OFF, &local->flags);
- set_bit (FLAG_COMMIT, &local->flags);
- return -EINPROGRESS; /* Call commit handler */
- }
- if (vwrq->flags != IW_TXPOW_MWATT) {
- return -EINVAL;
- }
- clear_bit (FLAG_RADIO_OFF, &local->flags);
- for (i = 0; (i < 8) && cap_rid.txPowerLevels[i]; i++)
- if (v == cap_rid.txPowerLevels[i]) {
- readConfigRid(local, 1);
- local->config.txPower = v;
- set_bit (FLAG_COMMIT, &local->flags);
- rc = -EINPROGRESS; /* Call commit handler */
- break;
- }
- return rc;
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : get Tx-Power
- */
-static int airo_get_txpow(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *vwrq,
- char *extra)
-{
- struct airo_info *local = dev->ml_priv;
-
- readConfigRid(local, 1);
- vwrq->value = le16_to_cpu(local->config.txPower);
- vwrq->fixed = 1; /* No power control */
- vwrq->disabled = test_bit(FLAG_RADIO_OFF, &local->flags);
- vwrq->flags = IW_TXPOW_MWATT;
-
- return 0;
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : set Retry limits
- */
-static int airo_set_retry(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *vwrq,
- char *extra)
-{
- struct airo_info *local = dev->ml_priv;
- int rc = -EINVAL;
-
- if(vwrq->disabled) {
- return -EINVAL;
- }
- readConfigRid(local, 1);
- if(vwrq->flags & IW_RETRY_LIMIT) {
- __le16 v = cpu_to_le16(vwrq->value);
- if(vwrq->flags & IW_RETRY_LONG)
- local->config.longRetryLimit = v;
- else if (vwrq->flags & IW_RETRY_SHORT)
- local->config.shortRetryLimit = v;
- else {
- /* No modifier : set both */
- local->config.longRetryLimit = v;
- local->config.shortRetryLimit = v;
- }
- set_bit (FLAG_COMMIT, &local->flags);
- rc = -EINPROGRESS; /* Call commit handler */
- }
- if(vwrq->flags & IW_RETRY_LIFETIME) {
- local->config.txLifetime = cpu_to_le16(vwrq->value / 1024);
- set_bit (FLAG_COMMIT, &local->flags);
- rc = -EINPROGRESS; /* Call commit handler */
- }
- return rc;
-}
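-
-/*
- * Editor's worked example for the flag encoding above (illustrative):
- * flags = IW_RETRY_LIMIT | IW_RETRY_LONG with value = 7 updates only
- * longRetryLimit, while flags = IW_RETRY_LIFETIME with value = 8192 (us)
- * stores 8192 / 1024 = 8, the firmware apparently keeping the lifetime
- * in 1024-us units.
- */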
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : get Retry limits
- */
-static int airo_get_retry(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *vwrq,
- char *extra)
-{
- struct airo_info *local = dev->ml_priv;
-
- vwrq->disabled = 0; /* Can't be disabled */
-
- readConfigRid(local, 1);
- /* Note : by default, display the min retry number */
- if((vwrq->flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME) {
- vwrq->flags = IW_RETRY_LIFETIME;
- vwrq->value = le16_to_cpu(local->config.txLifetime) * 1024;
- } else if((vwrq->flags & IW_RETRY_LONG)) {
- vwrq->flags = IW_RETRY_LIMIT | IW_RETRY_LONG;
- vwrq->value = le16_to_cpu(local->config.longRetryLimit);
- } else {
- vwrq->flags = IW_RETRY_LIMIT;
- vwrq->value = le16_to_cpu(local->config.shortRetryLimit);
- if(local->config.shortRetryLimit != local->config.longRetryLimit)
- vwrq->flags |= IW_RETRY_SHORT;
- }
-
- return 0;
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : get range info
- */
-static int airo_get_range(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *dwrq,
- char *extra)
-{
- struct airo_info *local = dev->ml_priv;
- struct iw_range *range = (struct iw_range *) extra;
- CapabilityRid cap_rid; /* Card capability info */
- int i;
- int k;
-
- readCapabilityRid(local, &cap_rid, 1);
-
- dwrq->length = sizeof(struct iw_range);
- memset(range, 0, sizeof(*range));
- range->min_nwid = 0x0000;
- range->max_nwid = 0x0000;
- range->num_channels = 14;
- /* Should be based on cap_rid.country to give only
- * what the current card supports */
- k = 0;
- for(i = 0; i < 14; i++) {
- range->freq[k].i = i + 1; /* List index */
- range->freq[k].m = ieee80211_dsss_chan_to_freq(i + 1) * 100000;
- range->freq[k++].e = 1; /* Values in MHz -> * 10^5 * 10 */
- }
- range->num_frequency = k;
-
- range->sensitivity = 65535;
-
- /* Hum... Should put the right values there */
- if (local->rssi)
- range->max_qual.qual = 100; /* % */
- else
- range->max_qual.qual = airo_get_max_quality(&cap_rid);
- range->max_qual.level = 0x100 - 120; /* -120 dBm */
- range->max_qual.noise = 0x100 - 120; /* -120 dBm */
-
- /* Experimental measurements - boundary 11/5.5 Mb/s */
- /* Note : with or without the (local->rssi), results
- * are somewhat different. - Jean II */
- if (local->rssi) {
- range->avg_qual.qual = 50; /* % */
- range->avg_qual.level = 0x100 - 70; /* -70 dBm */
- } else {
- range->avg_qual.qual = airo_get_avg_quality(&cap_rid);
- range->avg_qual.level = 0x100 - 80; /* -80 dBm */
- }
- range->avg_qual.noise = 0x100 - 85; /* -85 dBm */
-
- for(i = 0 ; i < 8 ; i++) {
- range->bitrate[i] = cap_rid.supportedRates[i] * 500000;
- if(range->bitrate[i] == 0)
- break;
- }
- range->num_bitrates = i;
-
- /* Set an indication of the max TCP throughput
- * in bit/s that we can expect using this interface.
- * May be use for QoS stuff... Jean II */
- if(i > 2)
- range->throughput = 5000 * 1000;
- else
- range->throughput = 1500 * 1000;
-
- range->min_rts = 0;
- range->max_rts = AIRO_DEF_MTU;
- range->min_frag = 256;
- range->max_frag = AIRO_DEF_MTU;
-
- if(cap_rid.softCap & cpu_to_le16(2)) {
- // WEP: RC4 40 bits
- range->encoding_size[0] = 5;
- // RC4 ~128 bits
- if (cap_rid.softCap & cpu_to_le16(0x100)) {
- range->encoding_size[1] = 13;
- range->num_encoding_sizes = 2;
- } else
- range->num_encoding_sizes = 1;
- range->max_encoding_tokens =
- cap_rid.softCap & cpu_to_le16(0x80) ? 4 : 1;
- } else {
- range->num_encoding_sizes = 0;
- range->max_encoding_tokens = 0;
- }
- range->min_pmp = 0;
- range->max_pmp = 5000000; /* 5 secs */
- range->min_pmt = 0;
- range->max_pmt = 65535 * 1024; /* ??? */
- range->pmp_flags = IW_POWER_PERIOD;
- range->pmt_flags = IW_POWER_TIMEOUT;
- range->pm_capa = IW_POWER_PERIOD | IW_POWER_TIMEOUT | IW_POWER_ALL_R;
-
- /* Transmit Power - values are in mW */
- for(i = 0 ; i < 8 ; i++) {
- range->txpower[i] = le16_to_cpu(cap_rid.txPowerLevels[i]);
- if(range->txpower[i] == 0)
- break;
- }
- range->num_txpower = i;
- range->txpower_capa = IW_TXPOW_MWATT;
- range->we_version_source = 19;
- range->we_version_compiled = WIRELESS_EXT;
- range->retry_capa = IW_RETRY_LIMIT | IW_RETRY_LIFETIME;
- range->retry_flags = IW_RETRY_LIMIT;
- range->r_time_flags = IW_RETRY_LIFETIME;
- range->min_retry = 1;
- range->max_retry = 65535;
- range->min_r_time = 1024;
- range->max_r_time = 65535 * 1024;
-
- /* Event capability (kernel + driver) */
- range->event_capa[0] = (IW_EVENT_CAPA_K_0 |
- IW_EVENT_CAPA_MASK(SIOCGIWTHRSPY) |
- IW_EVENT_CAPA_MASK(SIOCGIWAP) |
- IW_EVENT_CAPA_MASK(SIOCGIWSCAN));
- range->event_capa[1] = IW_EVENT_CAPA_K_1;
- range->event_capa[4] = IW_EVENT_CAPA_MASK(IWEVTXDROP);
- return 0;
-}
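-
-/*
- * Editor's note on the iw_freq encoding used above: a frequency is the
- * fixed-point value m * 10^e Hz. For channel 1,
- * ieee80211_dsss_chan_to_freq() returns 2412 (MHz), so m = 241200000 and
- * e = 1, i.e. 2.412 GHz. A minimal decoding sketch (illustrative only;
- * iw_freq_to_hz is not a kernel symbol):
- *
- *	static u64 iw_freq_to_hz(const struct iw_freq *f)
- *	{
- *		u64 hz = f->m;
- *		int e;
- *
- *		for (e = 0; e < f->e; e++)
- *			hz *= 10;
- *		return hz;
- *	}
- */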
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : set Power Management
- */
-static int airo_set_power(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *vwrq,
- char *extra)
-{
- struct airo_info *local = dev->ml_priv;
-
- readConfigRid(local, 1);
- if (vwrq->disabled) {
- if (sniffing_mode(local))
- return -EINVAL;
- local->config.powerSaveMode = POWERSAVE_CAM;
- local->config.rmode &= ~RXMODE_MASK;
- local->config.rmode |= RXMODE_BC_MC_ADDR;
- set_bit (FLAG_COMMIT, &local->flags);
- return -EINPROGRESS; /* Call commit handler */
- }
- if ((vwrq->flags & IW_POWER_TYPE) == IW_POWER_TIMEOUT) {
- local->config.fastListenDelay = cpu_to_le16((vwrq->value + 500) / 1024);
- local->config.powerSaveMode = POWERSAVE_PSPCAM;
- set_bit (FLAG_COMMIT, &local->flags);
- } else if ((vwrq->flags & IW_POWER_TYPE) == IW_POWER_PERIOD) {
- local->config.fastListenInterval =
- local->config.listenInterval =
- cpu_to_le16((vwrq->value + 500) / 1024);
- local->config.powerSaveMode = POWERSAVE_PSPCAM;
- set_bit (FLAG_COMMIT, &local->flags);
- }
- switch (vwrq->flags & IW_POWER_MODE) {
- case IW_POWER_UNICAST_R:
- if (sniffing_mode(local))
- return -EINVAL;
- local->config.rmode &= ~RXMODE_MASK;
- local->config.rmode |= RXMODE_ADDR;
- set_bit (FLAG_COMMIT, &local->flags);
- break;
- case IW_POWER_ALL_R:
- if (sniffing_mode(local))
- return -EINVAL;
- local->config.rmode &= ~RXMODE_MASK;
- local->config.rmode |= RXMODE_BC_MC_ADDR;
- set_bit (FLAG_COMMIT, &local->flags);
- break;
- case IW_POWER_ON:
- /* This is broken, fixme ;-) */
- break;
- default:
- return -EINVAL;
- }
- // Note : we may want to factor local->need_commit here
- // Note2 : may also want to factor RXMODE_RFMON test
- return -EINPROGRESS; /* Call commit handler */
-}
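-
-/*
- * Editor's note on the arithmetic above: Wireless Extensions expresses
- * power periods and timeouts in microseconds, while the firmware fields
- * appear to be in 1024-us units, hence (value + 500) / 1024 with the +500
- * giving rough round-to-nearest. E.g. a requested period of 102400 us
- * becomes (102400 + 500) / 1024 = 100.
- */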
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : get Power Management
- */
-static int airo_get_power(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *vwrq,
- char *extra)
-{
- struct airo_info *local = dev->ml_priv;
- __le16 mode;
-
- readConfigRid(local, 1);
- mode = local->config.powerSaveMode;
- if ((vwrq->disabled = (mode == POWERSAVE_CAM)))
- return 0;
- if ((vwrq->flags & IW_POWER_TYPE) == IW_POWER_TIMEOUT) {
- vwrq->value = le16_to_cpu(local->config.fastListenDelay) * 1024;
- vwrq->flags = IW_POWER_TIMEOUT;
- } else {
- vwrq->value = le16_to_cpu(local->config.fastListenInterval) * 1024;
- vwrq->flags = IW_POWER_PERIOD;
- }
- if ((local->config.rmode & RXMODE_MASK) == RXMODE_ADDR)
- vwrq->flags |= IW_POWER_UNICAST_R;
- else
- vwrq->flags |= IW_POWER_ALL_R;
-
- return 0;
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : set Sensitivity
- */
-static int airo_set_sens(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *vwrq,
- char *extra)
-{
- struct airo_info *local = dev->ml_priv;
-
- readConfigRid(local, 1);
- local->config.rssiThreshold =
- cpu_to_le16(vwrq->disabled ? RSSI_DEFAULT : vwrq->value);
- set_bit (FLAG_COMMIT, &local->flags);
-
- return -EINPROGRESS; /* Call commit handler */
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : get Sensitivity
- */
-static int airo_get_sens(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *vwrq,
- char *extra)
-{
- struct airo_info *local = dev->ml_priv;
-
- readConfigRid(local, 1);
- vwrq->value = le16_to_cpu(local->config.rssiThreshold);
- vwrq->disabled = (vwrq->value == 0);
- vwrq->fixed = 1;
-
- return 0;
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : get AP List
- * Note : this is deprecated in favor of IWSCAN
- */
-static int airo_get_aplist(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *dwrq,
- char *extra)
-{
- struct airo_info *local = dev->ml_priv;
- struct sockaddr *address = (struct sockaddr *) extra;
- struct iw_quality *qual;
- BSSListRid BSSList;
- int i;
- int loseSync = capable(CAP_NET_ADMIN) ? 1: -1;
-
- qual = kmalloc(IW_MAX_AP * sizeof(*qual), GFP_KERNEL);
- if (!qual)
- return -ENOMEM;
-
- for (i = 0; i < IW_MAX_AP; i++) {
- u16 dBm;
- if (readBSSListRid(local, loseSync, &BSSList))
- break;
- loseSync = 0;
- memcpy(address[i].sa_data, BSSList.bssid, ETH_ALEN);
- address[i].sa_family = ARPHRD_ETHER;
- dBm = le16_to_cpu(BSSList.dBm);
- if (local->rssi) {
- qual[i].level = 0x100 - dBm;
- qual[i].qual = airo_dbm_to_pct(local->rssi, dBm);
- qual[i].updated = IW_QUAL_QUAL_UPDATED
- | IW_QUAL_LEVEL_UPDATED
- | IW_QUAL_DBM;
- } else {
- qual[i].level = (dBm + 321) / 2;
- qual[i].qual = 0;
- qual[i].updated = IW_QUAL_QUAL_INVALID
- | IW_QUAL_LEVEL_UPDATED
- | IW_QUAL_DBM;
- }
- qual[i].noise = local->wstats.qual.noise;
- if (BSSList.index == cpu_to_le16(0xffff))
- break;
- }
- if (!i) {
- StatusRid status_rid; /* Card status info */
- readStatusRid(local, &status_rid, 1);
- for (i = 0;
- i < min(IW_MAX_AP, 4) &&
- (status_rid.bssid[i][0]
- & status_rid.bssid[i][1]
- & status_rid.bssid[i][2]
- & status_rid.bssid[i][3]
- & status_rid.bssid[i][4]
- & status_rid.bssid[i][5])!=0xff &&
- (status_rid.bssid[i][0]
- | status_rid.bssid[i][1]
- | status_rid.bssid[i][2]
- | status_rid.bssid[i][3]
- | status_rid.bssid[i][4]
- | status_rid.bssid[i][5]);
- i++) {
- memcpy(address[i].sa_data,
- status_rid.bssid[i], ETH_ALEN);
- address[i].sa_family = ARPHRD_ETHER;
- }
- } else {
- dwrq->flags = 1; /* Should be define'd */
- memcpy(extra + sizeof(struct sockaddr)*i,
- qual, sizeof(struct iw_quality)*i);
- }
- dwrq->length = i;
-
- kfree(qual);
- return 0;
-}
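-
-/*
- * Editor's note: the dense loop condition in the fallback path above only
- * accepts BSSIDs that are neither broadcast (all 0xff) nor all-zero. An
- * equivalent sketch (illustrative only; bssid_is_interesting is not a
- * driver symbol):
- *
- *	static int bssid_is_interesting(const u8 *bssid)
- *	{
- *		u8 acc_and = 0xff, acc_or = 0;
- *		int i;
- *
- *		for (i = 0; i < ETH_ALEN; i++) {
- *			acc_and &= bssid[i];
- *			acc_or |= bssid[i];
- *		}
- *		return acc_and != 0xff && acc_or != 0;
- *	}
- */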
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : Initiate Scan
- */
-static int airo_set_scan(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *dwrq,
- char *extra)
-{
- struct airo_info *ai = dev->ml_priv;
- Cmd cmd;
- Resp rsp;
- int wake = 0;
-
- /* Note : you may have realised that, as this is a SET operation,
- * this is privileged and therefore a normal user can't
- * perform scanning.
- * This is not an error: while the device performs scanning,
- * traffic doesn't flow, so unrestricted scanning would be a perfect DoS...
- * Jean II */
- if (ai->flags & FLAG_RADIO_MASK) return -ENETDOWN;
-
- if (down_interruptible(&ai->sem))
- return -ERESTARTSYS;
-
- /* If there's already a scan in progress, don't
- * trigger another one. */
- if (ai->scan_timeout > 0)
- goto out;
-
- /* Initiate a scan command */
- ai->scan_timeout = RUN_AT(3*HZ);
- memset(&cmd, 0, sizeof(cmd));
- cmd.cmd=CMD_LISTBSS;
- issuecommand(ai, &cmd, &rsp);
- wake = 1;
-
-out:
- up(&ai->sem);
- if (wake)
- wake_up_interruptible(&ai->thr_wait);
- return 0;
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Translate scan data returned from the card to a card independent
- * format that the Wireless Tools will understand - Jean II
- */
-static inline char *airo_translate_scan(struct net_device *dev,
- struct iw_request_info *info,
- char *current_ev,
- char *end_buf,
- BSSListRid *bss)
-{
- struct airo_info *ai = dev->ml_priv;
- struct iw_event iwe; /* Temporary buffer */
- __le16 capabilities;
- char * current_val; /* For rates */
- int i;
- char * buf;
- u16 dBm;
-
- /* First entry *MUST* be the AP MAC address */
- iwe.cmd = SIOCGIWAP;
- iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
- memcpy(iwe.u.ap_addr.sa_data, bss->bssid, ETH_ALEN);
- current_ev = iwe_stream_add_event(info, current_ev, end_buf,
- &iwe, IW_EV_ADDR_LEN);
-
- /* Other entries will be displayed in the order we give them */
-
- /* Add the ESSID */
- iwe.u.data.length = bss->ssidLen;
- if(iwe.u.data.length > 32)
- iwe.u.data.length = 32;
- iwe.cmd = SIOCGIWESSID;
- iwe.u.data.flags = 1;
- current_ev = iwe_stream_add_point(info, current_ev, end_buf,
- &iwe, bss->ssid);
-
- /* Add mode */
- iwe.cmd = SIOCGIWMODE;
- capabilities = bss->cap;
- if(capabilities & (CAP_ESS | CAP_IBSS)) {
- if(capabilities & CAP_ESS)
- iwe.u.mode = IW_MODE_MASTER;
- else
- iwe.u.mode = IW_MODE_ADHOC;
- current_ev = iwe_stream_add_event(info, current_ev, end_buf,
- &iwe, IW_EV_UINT_LEN);
- }
-
- /* Add frequency */
- iwe.cmd = SIOCGIWFREQ;
- iwe.u.freq.m = le16_to_cpu(bss->dsChannel);
- iwe.u.freq.m = ieee80211_dsss_chan_to_freq(iwe.u.freq.m) * 100000;
- iwe.u.freq.e = 1;
- current_ev = iwe_stream_add_event(info, current_ev, end_buf,
- &iwe, IW_EV_FREQ_LEN);
-
- dBm = le16_to_cpu(bss->dBm);
-
- /* Add quality statistics */
- iwe.cmd = IWEVQUAL;
- if (ai->rssi) {
- iwe.u.qual.level = 0x100 - dBm;
- iwe.u.qual.qual = airo_dbm_to_pct(ai->rssi, dBm);
- iwe.u.qual.updated = IW_QUAL_QUAL_UPDATED
- | IW_QUAL_LEVEL_UPDATED
- | IW_QUAL_DBM;
- } else {
- iwe.u.qual.level = (dBm + 321) / 2;
- iwe.u.qual.qual = 0;
- iwe.u.qual.updated = IW_QUAL_QUAL_INVALID
- | IW_QUAL_LEVEL_UPDATED
- | IW_QUAL_DBM;
- }
- iwe.u.qual.noise = ai->wstats.qual.noise;
- current_ev = iwe_stream_add_event(info, current_ev, end_buf,
- &iwe, IW_EV_QUAL_LEN);
-
- /* Add encryption capability */
- iwe.cmd = SIOCGIWENCODE;
- if(capabilities & CAP_PRIVACY)
- iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
- else
- iwe.u.data.flags = IW_ENCODE_DISABLED;
- iwe.u.data.length = 0;
- current_ev = iwe_stream_add_point(info, current_ev, end_buf,
- &iwe, bss->ssid);
-
- /* Rate : stuffing multiple values in a single event requires a bit
- * more magic - Jean II */
- current_val = current_ev + iwe_stream_lcp_len(info);
-
- iwe.cmd = SIOCGIWRATE;
- /* Those two flags are ignored... */
- iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0;
- /* Max 8 values */
- for(i = 0 ; i < 8 ; i++) {
- /* NULL terminated */
- if(bss->rates[i] == 0)
- break;
- /* Bit rate given in 500 kb/s units (+ 0x80) */
- iwe.u.bitrate.value = ((bss->rates[i] & 0x7f) * 500000);
- /* Add new value to event */
- current_val = iwe_stream_add_value(info, current_ev,
- current_val, end_buf,
- &iwe, IW_EV_PARAM_LEN);
- }
- /* Check if we added any event */
- if ((current_val - current_ev) > iwe_stream_lcp_len(info))
- current_ev = current_val;
-
- /* Beacon interval */
- buf = kmalloc(30, GFP_KERNEL);
- if (buf) {
- iwe.cmd = IWEVCUSTOM;
- sprintf(buf, "bcn_int=%d", bss->beaconInterval);
- iwe.u.data.length = strlen(buf);
- current_ev = iwe_stream_add_point(info, current_ev, end_buf,
- &iwe, buf);
- kfree(buf);
- }
-
- /* Put WPA/RSN Information Elements into the event stream */
- if (test_bit(FLAG_WPA_CAPABLE, &ai->flags)) {
- unsigned int num_null_ies = 0;
- u16 length = sizeof (bss->extra.iep);
- u8 *ie = (void *)&bss->extra.iep;
-
- while ((length >= 2) && (num_null_ies < 2)) {
- if (2 + ie[1] > length) {
- /* Invalid element, don't continue parsing IE */
- break;
- }
-
- switch (ie[0]) {
- case WLAN_EID_SSID:
- /* Two zero-length SSID elements
- * mean we're done parsing elements */
- if (!ie[1])
- num_null_ies++;
- break;
-
- case WLAN_EID_GENERIC:
- if (ie[1] >= 4 &&
- ie[2] == 0x00 &&
- ie[3] == 0x50 &&
- ie[4] == 0xf2 &&
- ie[5] == 0x01) {
- iwe.cmd = IWEVGENIE;
- /* 64 is an arbitrary cut-off */
- iwe.u.data.length = min(ie[1] + 2,
- 64);
- current_ev = iwe_stream_add_point(
- info, current_ev,
- end_buf, &iwe, ie);
- }
- break;
-
- case WLAN_EID_RSN:
- iwe.cmd = IWEVGENIE;
- /* 64 is an arbitrary cut-off */
- iwe.u.data.length = min(ie[1] + 2, 64);
- current_ev = iwe_stream_add_point(
- info, current_ev, end_buf,
- &iwe, ie);
- break;
-
- default:
- break;
- }
-
- length -= 2 + ie[1];
- ie += 2 + ie[1];
- }
- }
- return current_ev;
-}
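-
-/*
- * Editor's note on the vendor IE test above: a WPA information element is
- * a "generic" (vendor-specific) element carrying the Microsoft OUI,
- *
- *	ie[0] = 0xdd (WLAN_EID_GENERIC)   ie[1] = length
- *	ie[2..4] = 00:50:f2 (OUI)         ie[5] = 0x01 (OUI type: WPA)
- *
- * whereas RSN (WPA2) has its own element ID (WLAN_EID_RSN) and therefore
- * needs no OUI check.
- */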
-
-/*------------------------------------------------------------------*/
-/*
- * Wireless Handler : Read Scan Results
- */
-static int airo_get_scan(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *dwrq,
- char *extra)
-{
- struct airo_info *ai = dev->ml_priv;
- BSSListElement *net;
- int err = 0;
- char *current_ev = extra;
-
- /* If a scan is in-progress, return -EAGAIN */
- if (ai->scan_timeout > 0)
- return -EAGAIN;
-
- if (down_interruptible(&ai->sem))
- return -EAGAIN;
-
- list_for_each_entry (net, &ai->network_list, list) {
- /* Translate to WE format this entry */
- current_ev = airo_translate_scan(dev, info, current_ev,
- extra + dwrq->length,
- &net->bss);
-
- /* Check if there is space for one more entry */
- if((extra + dwrq->length - current_ev) <= IW_EV_ADDR_LEN) {
- /* Ask user space to try again with a bigger buffer */
- err = -E2BIG;
- goto out;
- }
- }
-
- /* Length of data */
- dwrq->length = (current_ev - extra);
- dwrq->flags = 0; /* todo */
-
-out:
- up(&ai->sem);
- return err;
-}
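-
-/*
- * Editor's sketch of the user-space side of the -E2BIG protocol above
- * (illustrative only; "eth0", the initial size, and skfd, an AF_INET
- * socket fd, are placeholders):
- *
- *	struct iwreq wrq;
- *	size_t len = 4096;
- *	char *buf = malloc(len);
- *
- *	memset(&wrq, 0, sizeof(wrq));
- *	strncpy(wrq.ifr_name, "eth0", IFNAMSIZ);
- *	wrq.u.data.pointer = buf;
- *	wrq.u.data.length = len;
- *	while (ioctl(skfd, SIOCGIWSCAN, &wrq) < 0 && errno == E2BIG) {
- *		len *= 2;
- *		buf = realloc(buf, len);
- *		wrq.u.data.pointer = buf;
- *		wrq.u.data.length = len;
- *	}
- */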
-
-/*------------------------------------------------------------------*/
-/*
- * Commit handler : called after a bunch of SET operations
- */
-static int airo_config_commit(struct net_device *dev,
- struct iw_request_info *info, /* NULL */
- void *zwrq, /* NULL */
- char *extra) /* NULL */
-{
- struct airo_info *local = dev->ml_priv;
-
- if (!test_bit (FLAG_COMMIT, &local->flags))
- return 0;
-
- /* Some of the "SET" function may have modified some of the
- * parameters. It's now time to commit them in the card */
- disable_MAC(local, 1);
- if (test_bit (FLAG_RESET, &local->flags)) {
- APListRid APList_rid;
- SsidRid SSID_rid;
-
- readAPListRid(local, &APList_rid);
- readSsidRid(local, &SSID_rid);
- if (test_bit(FLAG_MPI,&local->flags))
- setup_card(local, dev->dev_addr, 1 );
- else
- reset_airo_card(dev);
- disable_MAC(local, 1);
- writeSsidRid(local, &SSID_rid, 1);
- writeAPListRid(local, &APList_rid, 1);
- }
- if (down_interruptible(&local->sem))
- return -ERESTARTSYS;
- writeConfigRid(local, 0);
- enable_MAC(local, 0);
- if (test_bit (FLAG_RESET, &local->flags))
- airo_set_promisc(local);
- else
- up(&local->sem);
-
- return 0;
-}
-
-/*------------------------------------------------------------------*/
-/*
- * Structures to export the Wireless Handlers
- */
-
-static const struct iw_priv_args airo_private_args[] = {
-/*{ cmd, set_args, get_args, name } */
- { AIROIOCTL, IW_PRIV_TYPE_BYTE | IW_PRIV_SIZE_FIXED | sizeof (aironet_ioctl),
- IW_PRIV_TYPE_BYTE | 2047, "airoioctl" },
- { AIROIDIFC, IW_PRIV_TYPE_BYTE | IW_PRIV_SIZE_FIXED | sizeof (aironet_ioctl),
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "airoidifc" },
-};
-
-static const iw_handler airo_handler[] =
-{
- (iw_handler) airo_config_commit, /* SIOCSIWCOMMIT */
- (iw_handler) airo_get_name, /* SIOCGIWNAME */
- (iw_handler) NULL, /* SIOCSIWNWID */
- (iw_handler) NULL, /* SIOCGIWNWID */
- (iw_handler) airo_set_freq, /* SIOCSIWFREQ */
- (iw_handler) airo_get_freq, /* SIOCGIWFREQ */
- (iw_handler) airo_set_mode, /* SIOCSIWMODE */
- (iw_handler) airo_get_mode, /* SIOCGIWMODE */
- (iw_handler) airo_set_sens, /* SIOCSIWSENS */
- (iw_handler) airo_get_sens, /* SIOCGIWSENS */
- (iw_handler) NULL, /* SIOCSIWRANGE */
- (iw_handler) airo_get_range, /* SIOCGIWRANGE */
- (iw_handler) NULL, /* SIOCSIWPRIV */
- (iw_handler) NULL, /* SIOCGIWPRIV */
- (iw_handler) NULL, /* SIOCSIWSTATS */
- (iw_handler) NULL, /* SIOCGIWSTATS */
- iw_handler_set_spy, /* SIOCSIWSPY */
- iw_handler_get_spy, /* SIOCGIWSPY */
- iw_handler_set_thrspy, /* SIOCSIWTHRSPY */
- iw_handler_get_thrspy, /* SIOCGIWTHRSPY */
- (iw_handler) airo_set_wap, /* SIOCSIWAP */
- (iw_handler) airo_get_wap, /* SIOCGIWAP */
- (iw_handler) NULL, /* -- hole -- */
- (iw_handler) airo_get_aplist, /* SIOCGIWAPLIST */
- (iw_handler) airo_set_scan, /* SIOCSIWSCAN */
- (iw_handler) airo_get_scan, /* SIOCGIWSCAN */
- (iw_handler) airo_set_essid, /* SIOCSIWESSID */
- (iw_handler) airo_get_essid, /* SIOCGIWESSID */
- (iw_handler) airo_set_nick, /* SIOCSIWNICKN */
- (iw_handler) airo_get_nick, /* SIOCGIWNICKN */
- (iw_handler) NULL, /* -- hole -- */
- (iw_handler) NULL, /* -- hole -- */
- (iw_handler) airo_set_rate, /* SIOCSIWRATE */
- (iw_handler) airo_get_rate, /* SIOCGIWRATE */
- (iw_handler) airo_set_rts, /* SIOCSIWRTS */
- (iw_handler) airo_get_rts, /* SIOCGIWRTS */
- (iw_handler) airo_set_frag, /* SIOCSIWFRAG */
- (iw_handler) airo_get_frag, /* SIOCGIWFRAG */
- (iw_handler) airo_set_txpow, /* SIOCSIWTXPOW */
- (iw_handler) airo_get_txpow, /* SIOCGIWTXPOW */
- (iw_handler) airo_set_retry, /* SIOCSIWRETRY */
- (iw_handler) airo_get_retry, /* SIOCGIWRETRY */
- (iw_handler) airo_set_encode, /* SIOCSIWENCODE */
- (iw_handler) airo_get_encode, /* SIOCGIWENCODE */
- (iw_handler) airo_set_power, /* SIOCSIWPOWER */
- (iw_handler) airo_get_power, /* SIOCGIWPOWER */
- (iw_handler) NULL, /* -- hole -- */
- (iw_handler) NULL, /* -- hole -- */
- (iw_handler) NULL, /* SIOCSIWGENIE */
- (iw_handler) NULL, /* SIOCGIWGENIE */
- (iw_handler) airo_set_auth, /* SIOCSIWAUTH */
- (iw_handler) airo_get_auth, /* SIOCGIWAUTH */
- (iw_handler) airo_set_encodeext, /* SIOCSIWENCODEEXT */
- (iw_handler) airo_get_encodeext, /* SIOCGIWENCODEEXT */
- (iw_handler) NULL, /* SIOCSIWPMKSA */
-};
-
-/* Note : don't describe AIROIDIFC and AIROOLDIDIFC in here.
- * We want to force the use of the ioctl code, because those won't
- * work through the iw_handler code (they simultaneously read
- * and write data and iw_handler can't do that).
- * Note that it's perfectly legal to read/write on a single ioctl command,
- * you just can't use iwpriv and need to force it via the ioctl handler.
- * Jean II */
-static const iw_handler airo_private_handler[] =
-{
- NULL, /* SIOCIWFIRSTPRIV */
-};
-
-static const struct iw_handler_def airo_handler_def =
-{
- .num_standard = ARRAY_SIZE(airo_handler),
- .num_private = ARRAY_SIZE(airo_private_handler),
- .num_private_args = ARRAY_SIZE(airo_private_args),
- .standard = airo_handler,
- .private = airo_private_handler,
- .private_args = airo_private_args,
- .get_wireless_stats = airo_get_wireless_stats,
-};
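-
-/*
- * Editor's note: the Wireless Extensions core dispatches standard ioctls
- * by array index, roughly standard[cmd - SIOCIWFIRST], which is why the
- * NULL "-- hole --" entries above must stay in place: they keep the later
- * handlers aligned with their ioctl numbers.
- */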
-
-/*
- * This defines the configuration part of the Wireless Extensions
- * Note : irq and spinlock protection will occur in the subroutines
- *
- * TODO :
- * o Check input value more carefully and fill correct values in range
- * o Test and shake out the bugs (if any)
- *
- * Jean II
- *
- * Javier Achirica did a great job of merging code from the unnamed CISCO
- * developer that added support for flashing the card.
- */
-static int airo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-{
- int rc = 0;
- struct airo_info *ai = dev->ml_priv;
-
- if (ai->power.event)
- return 0;
-
- switch (cmd) {
-#ifdef CISCO_EXT
- case AIROIDIFC:
-#ifdef AIROOLDIDIFC
- case AIROOLDIDIFC:
-#endif
- {
- int val = AIROMAGIC;
- aironet_ioctl com;
- if (copy_from_user(&com,rq->ifr_data,sizeof(com)))
- rc = -EFAULT;
- else if (copy_to_user(com.data,(char *)&val,sizeof(val)))
- rc = -EFAULT;
- }
- break;
-
- case AIROIOCTL:
-#ifdef AIROOLDIOCTL
- case AIROOLDIOCTL:
-#endif
- /* Get the command struct and hand it off for evaluation by
- * the proper subfunction
- */
- {
- aironet_ioctl com;
- if (copy_from_user(&com,rq->ifr_data,sizeof(com))) {
- rc = -EFAULT;
- break;
- }
-
- /* Separate R/W functions bracket legality here
- */
- if ( com.command == AIRORSWVERSION ) {
- if (copy_to_user(com.data, swversion, sizeof(swversion)))
- rc = -EFAULT;
- else
- rc = 0;
- }
- else if ( com.command <= AIRORRID)
- rc = readrids(dev,&com);
- else if ( com.command >= AIROPCAP && com.command <= (AIROPLEAPUSR+2) )
- rc = writerids(dev,&com);
- else if ( com.command >= AIROFLSHRST && com.command <= AIRORESTART )
- rc = flashcard(dev,&com);
- else
- rc = -EINVAL; /* Bad command in ioctl */
- }
- break;
-#endif /* CISCO_EXT */
-
- // All other calls are currently unsupported
- default:
- rc = -EOPNOTSUPP;
- }
- return rc;
-}
-
-/*
- * Get the Wireless stats out of the driver
- * Note : irq and spinlock protection will occur in the subroutines
- *
- * TODO :
- * o Check if it works in Ad-Hoc mode (otherwise, use SPY, as in wvlan_cs)
- *
- * Jean
- */
-static void airo_read_wireless_stats(struct airo_info *local)
-{
- StatusRid status_rid;
- StatsRid stats_rid;
- CapabilityRid cap_rid;
- __le32 *vals = stats_rid.vals;
-
- /* Get stats out of the card */
- clear_bit(JOB_WSTATS, &local->jobs);
- if (local->power.event) {
- up(&local->sem);
- return;
- }
- readCapabilityRid(local, &cap_rid, 0);
- readStatusRid(local, &status_rid, 0);
- readStatsRid(local, &stats_rid, RID_STATS, 0);
- up(&local->sem);
-
- /* The status */
- local->wstats.status = le16_to_cpu(status_rid.mode);
-
- /* Signal quality and co */
- if (local->rssi) {
- local->wstats.qual.level =
- airo_rssi_to_dbm(local->rssi,
- le16_to_cpu(status_rid.sigQuality));
- /* normalizedSignalStrength appears to be a percentage */
- local->wstats.qual.qual =
- le16_to_cpu(status_rid.normalizedSignalStrength);
- } else {
- local->wstats.qual.level =
- (le16_to_cpu(status_rid.normalizedSignalStrength) + 321) / 2;
- local->wstats.qual.qual = airo_get_quality(&status_rid, &cap_rid);
- }
- if (le16_to_cpu(status_rid.len) >= 124) {
- local->wstats.qual.noise = 0x100 - status_rid.noisedBm;
- local->wstats.qual.updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM;
- } else {
- local->wstats.qual.noise = 0;
- local->wstats.qual.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED | IW_QUAL_NOISE_INVALID | IW_QUAL_DBM;
- }
-
- /* Packets discarded in the wireless adapter due to wireless
- * specific problems */
- local->wstats.discard.nwid = le32_to_cpu(vals[56]) +
- le32_to_cpu(vals[57]) +
- le32_to_cpu(vals[58]); /* SSID Mismatch */
- local->wstats.discard.code = le32_to_cpu(vals[6]);/* RxWepErr */
- local->wstats.discard.fragment = le32_to_cpu(vals[30]);
- local->wstats.discard.retries = le32_to_cpu(vals[10]);
- local->wstats.discard.misc = le32_to_cpu(vals[1]) +
- le32_to_cpu(vals[32]);
- local->wstats.miss.beacon = le32_to_cpu(vals[34]);
-}
-
-static struct iw_statistics *airo_get_wireless_stats(struct net_device *dev)
-{
- struct airo_info *local = dev->ml_priv;
-
- if (!test_bit(JOB_WSTATS, &local->jobs)) {
- /* Get stats out of the card if available */
- if (down_trylock(&local->sem) != 0) {
- set_bit(JOB_WSTATS, &local->jobs);
- wake_up_interruptible(&local->thr_wait);
- } else
- airo_read_wireless_stats(local);
- }
-
- return &local->wstats;
-}
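-
-/*
- * Editor's note: wireless stats may be requested from contexts where
- * blocking on ai->sem is undesirable, hence the pattern above: on
- * contention the JOB_WSTATS bit hands the RID reads off to the driver
- * thread (woken via thr_wait) and the previously cached counters are
- * returned immediately.
- */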
-
-#ifdef CISCO_EXT
-/*
- * This just translates from driver IOCTL codes to the command codes to
- * feed to the radio's host interface. Things can be added/deleted
- * as needed. This represents the READ side of control I/O to
- * the card
- */
-static int readrids(struct net_device *dev, aironet_ioctl *comp) {
- unsigned short ridcode;
- unsigned char *iobuf;
- int len;
- struct airo_info *ai = dev->ml_priv;
-
- if (test_bit(FLAG_FLASHING, &ai->flags))
- return -EIO;
-
- switch(comp->command)
- {
- case AIROGCAP: ridcode = RID_CAPABILITIES; break;
- case AIROGCFG: ridcode = RID_CONFIG;
- if (test_bit(FLAG_COMMIT, &ai->flags)) {
- disable_MAC (ai, 1);
- writeConfigRid (ai, 1);
- enable_MAC(ai, 1);
- }
- break;
- case AIROGSLIST: ridcode = RID_SSID; break;
- case AIROGVLIST: ridcode = RID_APLIST; break;
- case AIROGDRVNAM: ridcode = RID_DRVNAME; break;
- case AIROGEHTENC: ridcode = RID_ETHERENCAP; break;
- case AIROGWEPKTMP: ridcode = RID_WEP_TEMP;
- /* Only super-user can read WEP keys */
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
- break;
- case AIROGWEPKNV: ridcode = RID_WEP_PERM;
- /* Only super-user can read WEP keys */
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
- break;
- case AIROGSTAT: ridcode = RID_STATUS; break;
- case AIROGSTATSD32: ridcode = RID_STATSDELTA; break;
- case AIROGSTATSC32: ridcode = RID_STATS; break;
- case AIROGMICSTATS:
- if (copy_to_user(comp->data, &ai->micstats,
- min((int)comp->len,(int)sizeof(ai->micstats))))
- return -EFAULT;
- return 0;
- case AIRORRID: ridcode = comp->ridnum; break;
- default:
- return -EINVAL;
- }
-
- if ((iobuf = kmalloc(RIDSIZE, GFP_KERNEL)) == NULL)
- return -ENOMEM;
-
- PC4500_readrid(ai,ridcode,iobuf,RIDSIZE, 1);
- /* The RID docs say the first two bytes hold the byte count,
- * which would then be returned to the user.
- * 9/22/2000 Honor user given length instead.
- */
- len = comp->len;
-
- if (copy_to_user(comp->data, iobuf, min(len, (int)RIDSIZE))) {
- kfree (iobuf);
- return -EFAULT;
- }
- kfree (iobuf);
- return 0;
-}
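-
-/*
- * Editor's note: per the comment in readrids() above, a RID buffer starts
- * with a little-endian 16-bit word holding its total length, so honoring
- * the on-card length instead of the user-given one would look like this
- * sketch (not driver code):
- *
- *	u16 rid_len = le16_to_cpu(*(__le16 *)iobuf);
- *	len = min_t(int, len, rid_len);
- */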
-
-/*
- * Danger, Will Robinson! The RIDs get written here.
- */
-
-static int writerids(struct net_device *dev, aironet_ioctl *comp) {
- struct airo_info *ai = dev->ml_priv;
- int ridcode;
- int enabled;
- static int (* writer)(struct airo_info *, u16 rid, const void *, int, int);
- unsigned char *iobuf;
-
- /* Only super-user can write RIDs */
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
-
- if (test_bit(FLAG_FLASHING, &ai->flags))
- return -EIO;
-
- ridcode = 0;
- writer = do_writerid;
-
- switch(comp->command)
- {
- case AIROPSIDS: ridcode = RID_SSID; break;
- case AIROPCAP: ridcode = RID_CAPABILITIES; break;
- case AIROPAPLIST: ridcode = RID_APLIST; break;
- case AIROPCFG: ai->config.len = 0;
- clear_bit(FLAG_COMMIT, &ai->flags);
- ridcode = RID_CONFIG; break;
- case AIROPWEPKEYNV: ridcode = RID_WEP_PERM; break;
- case AIROPLEAPUSR: ridcode = RID_LEAPUSERNAME; break;
- case AIROPLEAPPWD: ridcode = RID_LEAPPASSWORD; break;
- case AIROPWEPKEY: ridcode = RID_WEP_TEMP; writer = PC4500_writerid;
- break;
- case AIROPLEAPUSR+1: ridcode = 0xFF2A; break;
- case AIROPLEAPUSR+2: ridcode = 0xFF2B; break;
-
- /* This is not really a RID but a command given to the card;
- * the same goes for MAC off below.
- */
- case AIROPMACON:
- if (enable_MAC(ai, 1) != 0)
- return -EIO;
- return 0;
-
- /*
- * Evidently this code in the airo driver does not get its own symbol
- * for disable_MAC; it's probably so short that the compiler doesn't
- * generate one.
- */
- case AIROPMACOFF:
- disable_MAC(ai, 1);
- return 0;
-
- /* This command merely clears the counts; it does not actually store
- * any data, only reads a RID. But as it changes the card's state, I
- * put it in the writerid routines.
- */
- case AIROPSTCLR:
- if ((iobuf = kmalloc(RIDSIZE, GFP_KERNEL)) == NULL)
- return -ENOMEM;
-
- PC4500_readrid(ai,RID_STATSDELTACLEAR,iobuf,RIDSIZE, 1);
-
- enabled = ai->micstats.enabled;
- memset(&ai->micstats,0,sizeof(ai->micstats));
- ai->micstats.enabled = enabled;
-
- if (copy_to_user(comp->data, iobuf,
- min((int)comp->len, (int)RIDSIZE))) {
- kfree (iobuf);
- return -EFAULT;
- }
- kfree (iobuf);
- return 0;
-
- default:
- return -EOPNOTSUPP; /* Blarg! */
- }
- if(comp->len > RIDSIZE)
- return -EINVAL;
-
- if ((iobuf = kmalloc(RIDSIZE, GFP_KERNEL)) == NULL)
- return -ENOMEM;
-
- if (copy_from_user(iobuf,comp->data,comp->len)) {
- kfree (iobuf);
- return -EFAULT;
- }
-
- if (comp->command == AIROPCFG) {
- ConfigRid *cfg = (ConfigRid *)iobuf;
-
- if (test_bit(FLAG_MIC_CAPABLE, &ai->flags))
- cfg->opmode |= MODE_MIC;
-
- if ((cfg->opmode & MODE_CFG_MASK) == MODE_STA_IBSS)
- set_bit (FLAG_ADHOC, &ai->flags);
- else
- clear_bit (FLAG_ADHOC, &ai->flags);
- }
-
- if((*writer)(ai, ridcode, iobuf,comp->len,1)) {
- kfree (iobuf);
- return -EIO;
- }
- kfree (iobuf);
- return 0;
-}
-
-/*****************************************************************************
- * Ancillary flash / mod functions; much black magic lurks here *
- *****************************************************************************
- */
-
-/*
- * Flash command switch table
- */
-
-static int flashcard(struct net_device *dev, aironet_ioctl *comp) {
- int z;
-
- /* Only super-user can modify flash */
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
-
- switch(comp->command)
- {
- case AIROFLSHRST:
- return cmdreset((struct airo_info *)dev->ml_priv);
-
- case AIROFLSHSTFL:
- if (!AIRO_FLASH(dev) &&
- (AIRO_FLASH(dev) = kmalloc(FLASHSIZE, GFP_KERNEL)) == NULL)
- return -ENOMEM;
- return setflashmode((struct airo_info *)dev->ml_priv);
-
- case AIROFLSHGCHR: /* Get char from aux */
- if(comp->len != sizeof(int))
- return -EINVAL;
- if (copy_from_user(&z,comp->data,comp->len))
- return -EFAULT;
- return flashgchar((struct airo_info *)dev->ml_priv, z, 8000);
-
- case AIROFLSHPCHR: /* Send char to card. */
- if(comp->len != sizeof(int))
- return -EINVAL;
- if (copy_from_user(&z,comp->data,comp->len))
- return -EFAULT;
- return flashpchar((struct airo_info *)dev->ml_priv, z, 8000);
-
- case AIROFLPUTBUF: /* Send 32k to card */
- if (!AIRO_FLASH(dev))
- return -ENOMEM;
- if(comp->len > FLASHSIZE)
- return -EINVAL;
- if (copy_from_user(AIRO_FLASH(dev), comp->data, comp->len))
- return -EFAULT;
-
- flashputbuf((struct airo_info *)dev->ml_priv);
- return 0;
-
- case AIRORESTART:
- if (flashrestart((struct airo_info *)dev->ml_priv, dev))
- return -EIO;
- return 0;
- }
- return -EINVAL;
-}
-
-#define FLASH_COMMAND 0x7e7e
-
-/*
- * STEP 1)
- * Disable MAC and do soft reset on
- * card.
- */
-
-static int cmdreset(struct airo_info *ai) {
- disable_MAC(ai, 1);
-
- if(!waitbusy (ai)){
- airo_print_info(ai->dev->name, "Waitbusy hang before RESET");
- return -EBUSY;
- }
-
- OUT4500(ai,COMMAND,CMD_SOFTRESET);
-
- ssleep(1); /* WAS 600 12/7/00 */
-
- if(!waitbusy (ai)){
- airo_print_info(ai->dev->name, "Waitbusy hang AFTER RESET");
- return -EBUSY;
- }
- return 0;
-}
-
-/* STEP 2)
- * Put the card in legendary flash
- * mode
- */
-
-static int setflashmode (struct airo_info *ai) {
- set_bit (FLAG_FLASHING, &ai->flags);
-
- OUT4500(ai, SWS0, FLASH_COMMAND);
- OUT4500(ai, SWS1, FLASH_COMMAND);
- if (probe) {
- OUT4500(ai, SWS0, FLASH_COMMAND);
- OUT4500(ai, COMMAND,0x10);
- } else {
- OUT4500(ai, SWS2, FLASH_COMMAND);
- OUT4500(ai, SWS3, FLASH_COMMAND);
- OUT4500(ai, COMMAND,0);
- }
- msleep(500); /* 500ms delay */
-
- if(!waitbusy(ai)) {
- clear_bit (FLAG_FLASHING, &ai->flags);
- airo_print_info(ai->dev->name, "Waitbusy hang after setflash mode");
- return -EIO;
- }
- return 0;
-}
-
-/* Put a character to SWS0 and wait up to dwelltime
- * microseconds (polling every 50us) for it to echo back.
- */
-
-static int flashpchar(struct airo_info *ai,int byte,int dwelltime) {
- int echo;
- int waittime;
-
- byte |= 0x8000;
-
- if(dwelltime == 0 )
- dwelltime = 200;
-
- waittime=dwelltime;
-
- /* Wait for busy bit d15 to go false indicating buffer empty */
- while ((IN4500 (ai, SWS0) & 0x8000) && waittime > 0) {
- udelay (50);
- waittime -= 50;
- }
-
- /* timeout for busy clear wait */
- if(waittime <= 0 ){
- airo_print_info(ai->dev->name, "flash putchar busywait timeout!");
- return -EBUSY;
- }
-
- /* Port is clear now write byte and wait for it to echo back */
- do {
- OUT4500(ai,SWS0,byte);
- udelay(50);
- dwelltime -= 50;
- echo = IN4500(ai,SWS1);
- } while (dwelltime >= 0 && echo != byte);
-
- OUT4500(ai,SWS1,0);
-
- return (echo == byte) ? 0 : -EIO;
-}
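-
-/*
- * Editor's note: as the two loops above suggest, SWS0/SWS1 carry a 16-bit
- * handshake word while flashing: bit 15 is the valid/busy flag and bits
- * 7:0 are the data byte. 'byte |= 0x8000' posts a byte as valid and
- * '0xff & rchar' strips the flag off an echoed byte.
- */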
-
-/*
- * STEP 3)
- * Get a character from the card, matching matchbyte.
- */
-static int flashgchar(struct airo_info *ai,int matchbyte,int dwelltime){
- int rchar;
- unsigned char rbyte=0;
-
- do {
- rchar = IN4500(ai,SWS1);
-
- if(dwelltime && !(0x8000 & rchar)){
- dwelltime -= 10;
- mdelay(10);
- continue;
- }
- rbyte = 0xff & rchar;
-
- if( (rbyte == matchbyte) && (0x8000 & rchar) ){
- OUT4500(ai,SWS1,0);
- return 0;
- }
- if( rbyte == 0x81 || rbyte == 0x82 || rbyte == 0x83 || rbyte == 0x1a || 0xffff == rchar)
- break;
- OUT4500(ai,SWS1,0);
-
- }while(dwelltime > 0);
- return -EIO;
-}
-
-/*
- * Transfer 32k of firmware data from user buffer to our buffer and
- * send to the card
- */
-
-static int flashputbuf(struct airo_info *ai){
- int nwords;
-
- /* Write stuff */
- if (test_bit(FLAG_MPI,&ai->flags))
- memcpy_toio(ai->pciaux + 0x8000, ai->flash, FLASHSIZE);
- else {
- OUT4500(ai,AUXPAGE,0x100);
- OUT4500(ai,AUXOFF,0);
-
- for(nwords=0;nwords != FLASHSIZE / 2;nwords++){
- OUT4500(ai,AUXDATA,ai->flash[nwords] & 0xffff);
- }
- }
- OUT4500(ai,SWS0,0x8000);
-
- return 0;
-}
-
-/*
- * Restart the card after the firmware has been flashed.
- */
-static int flashrestart(struct airo_info *ai,struct net_device *dev){
- int i,status;
-
- ssleep(1); /* Added 12/7/00 */
- clear_bit (FLAG_FLASHING, &ai->flags);
- if (test_bit(FLAG_MPI, &ai->flags)) {
- status = mpi_init_descriptors(ai);
- if (status != SUCCESS)
- return status;
- }
- status = setup_card(ai, dev->dev_addr, 1);
-
- if (!test_bit(FLAG_MPI,&ai->flags))
- for( i = 0; i < MAX_FIDS; i++ ) {
- ai->fids[i] = transmit_allocate
- ( ai, AIRO_DEF_MTU, i >= MAX_FIDS / 2 );
- }
-
- ssleep(1); /* Added 12/7/00 */
- return status;
-}
-#endif /* CISCO_EXT */
-
-/*
- This program is free software; you can redistribute it and/or
- modify it under the terms of the GNU General Public License
- as published by the Free Software Foundation; either version 2
- of the License, or (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- In addition:
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- 1. Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote
- products derived from this software without specific prior written
- permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
- INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
- SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- POSSIBILITY OF SUCH DAMAGE.
-*/
-
-module_init(airo_init_module);
-module_exit(airo_cleanup_module);
diff --git a/drivers/net/wireless/airo.h b/drivers/net/wireless/airo.h
deleted file mode 100644
index e480adf86be6..000000000000
--- a/drivers/net/wireless/airo.h
+++ /dev/null
@@ -1,9 +0,0 @@
-#ifndef _AIRO_H_
-#define _AIRO_H_
-
-struct net_device *init_airo_card(unsigned short irq, int port, int is_pcmcia,
- struct device *dmdev);
-int reset_airo_card(struct net_device *dev);
-void stop_airo_card(struct net_device *dev, int freeres);
-
-#endif /* _AIRO_H_ */
diff --git a/drivers/net/wireless/airo_cs.c b/drivers/net/wireless/airo_cs.c
deleted file mode 100644
index d0593ed9170e..000000000000
--- a/drivers/net/wireless/airo_cs.c
+++ /dev/null
@@ -1,485 +0,0 @@
-/*======================================================================
-
- Aironet driver for 4500 and 4800 series cards
-
- This code is released under both the GPL version 2 and BSD licenses.
- Either license may be used. The respective licenses are found at
- the end of this file.
-
- This code was developed by Benjamin Reed <breed@users.sourceforge.net>
- including portions of which come from the Aironet PC4500
- Developer's Reference Manual and used with permission. Copyright
- (C) 1999 Benjamin Reed. All Rights Reserved. Permission to use
- code in the Developer's manual was granted for this driver by
- Aironet.
-
- In addition this module was derived from dummy_cs.
- The initial developer of dummy_cs is David A. Hinds
- <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
- are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
-
-======================================================================*/
-
-#ifdef __IN_PCMCIA_PACKAGE__
-#include <pcmcia/k_compat.h>
-#endif
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/ptrace.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/timer.h>
-#include <linux/netdevice.h>
-
-#include <pcmcia/cs_types.h>
-#include <pcmcia/cs.h>
-#include <pcmcia/cistpl.h>
-#include <pcmcia/cisreg.h>
-#include <pcmcia/ds.h>
-
-#include <linux/io.h>
-#include <asm/system.h>
-
-#include "airo.h"
-
-/*
- All the PCMCIA modules use PCMCIA_DEBUG to control debugging. If
- you do not define PCMCIA_DEBUG at all, all the debug code will be
- left out. If you compile with PCMCIA_DEBUG=0, the debug code will
- be present but disabled -- but it can then be enabled for specific
- modules at load time with a 'pc_debug=#' option to insmod.
-*/
-#ifdef PCMCIA_DEBUG
-static int pc_debug = PCMCIA_DEBUG;
-module_param(pc_debug, int, 0);
-static char *version = "$Revision: 1.2 $";
-#define DEBUG(n, args...) if (pc_debug > (n)) printk(KERN_DEBUG args);
-#else
-#define DEBUG(n, args...)
-#endif
-
-/*====================================================================*/
-
-MODULE_AUTHOR("Benjamin Reed");
-MODULE_DESCRIPTION("Support for Cisco/Aironet 802.11 wireless ethernet "
- "cards. This is the module that links the PCMCIA card "
- "with the airo module.");
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_SUPPORTED_DEVICE("Aironet 4500, 4800 and Cisco 340 PCMCIA cards");
-
-/*====================================================================*/
-
-/*
- The event() function is this driver's Card Services event handler.
- It will be called by Card Services when an appropriate card status
- event is received. The config() and release() entry points are
- used to configure or release a socket, in response to card
- insertion and ejection events. They are invoked from the airo_cs
- event handler.
-*/
-
-static int airo_config(struct pcmcia_device *link);
-static void airo_release(struct pcmcia_device *link);
-
-/*
- The attach() and detach() entry points are used to create and destroy
- "instances" of the driver, where each instance represents everything
- needed to manage one actual PCMCIA card.
-*/
-
-static void airo_detach(struct pcmcia_device *p_dev);
-
-/*
- You'll also need to prototype all the functions that will actually
- be used to talk to your device. See 'pcmem_cs' for a good example
- of a fully self-sufficient driver; the other drivers rely more or
- less on other parts of the kernel.
-*/
-
-/*
- A linked list of "instances" of the aironet device. Each actual
- PCMCIA card corresponds to one device instance, and is described
- by one struct pcmcia_device structure (defined in ds.h).
-
- You may not want to use a linked list for this -- for example, the
- memory card driver uses an array of struct pcmcia_device pointers,
- where minor device numbers are used to derive the corresponding
- array index.
-*/
-
-/*
- A driver needs to provide a dev_node_t structure for each device
- on a card. In some cases, there is only one device per card (for
- example, ethernet cards, modems). In other cases, there may be
- many actual or logical devices (SCSI adapters, memory cards with
- multiple partitions). The dev_node_t structures need to be kept
- in a linked list starting at the 'dev' field of a struct pcmcia_device
- structure. We allocate them in the card's private data structure,
- because they generally shouldn't be allocated dynamically.
-
- In this case, we also provide a flag to indicate if a device is
- "stopped" due to a power management event, or card ejection. The
- device IO routines can use a flag like this to throttle IO to a
- card that is not ready to accept it.
-*/
-
-typedef struct local_info_t {
- dev_node_t node;
- struct net_device *eth_dev;
-} local_info_t;
-
-/*======================================================================
-
- airo_attach() creates an "instance" of the driver, allocating
- local data structures for one device. The device is registered
- with Card Services.
-
- The dev_link structure is initialized, but we don't actually
- configure the card at this point -- we wait until we receive a
- card insertion event.
-
- ======================================================================*/
-
-static int airo_probe(struct pcmcia_device *p_dev)
-{
- local_info_t *local;
-
- DEBUG(0, "airo_attach()\n");
-
- /* Interrupt setup */
- p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
- p_dev->irq.IRQInfo1 = IRQ_LEVEL_ID;
- p_dev->irq.Handler = NULL;
-
- /*
- General socket configuration defaults can go here. In this
- client, we assume very little, and rely on the CIS for almost
- everything. In most clients, many details (i.e., number, sizes,
- and attributes of IO windows) are fixed by the nature of the
- device, and can be hard-wired here.
- */
- p_dev->conf.Attributes = 0;
- p_dev->conf.IntType = INT_MEMORY_AND_IO;
-
- /* Allocate space for private device-specific data */
- local = kzalloc(sizeof(local_info_t), GFP_KERNEL);
- if (!local) {
- printk(KERN_ERR "airo_cs: no memory for new device\n");
- return -ENOMEM;
- }
- p_dev->priv = local;
-
- return airo_config(p_dev);
-} /* airo_attach */
-
-/*======================================================================
-
- This deletes a driver "instance". The device is de-registered
- with Card Services. If it has been released, all local data
- structures are freed. Otherwise, the structures will be freed
- when the device is released.
-
- ======================================================================*/
-
-static void airo_detach(struct pcmcia_device *link)
-{
- DEBUG(0, "airo_detach(0x%p)\n", link);
-
- airo_release(link);
-
- if (((local_info_t *)link->priv)->eth_dev) {
- stop_airo_card(((local_info_t *)link->priv)->eth_dev, 0);
- }
- ((local_info_t *)link->priv)->eth_dev = NULL;
-
- kfree(link->priv);
-} /* airo_detach */
-
-/*======================================================================
-
- airo_config() is scheduled to run after a CARD_INSERTION event
- is received, to configure the PCMCIA socket, and to make the
- device available to the system.
-
- ======================================================================*/
-
-#define CS_CHECK(fn, ret) \
-do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
-
-static int airo_cs_config_check(struct pcmcia_device *p_dev,
- cistpl_cftable_entry_t *cfg,
- cistpl_cftable_entry_t *dflt,
- unsigned int vcc,
- void *priv_data)
-{
- win_req_t *req = priv_data;
-
- if (cfg->index == 0)
- return -ENODEV;
-
- /* Does this card need audio output? */
- if (cfg->flags & CISTPL_CFTABLE_AUDIO) {
- p_dev->conf.Attributes |= CONF_ENABLE_SPKR;
- p_dev->conf.Status = CCSR_AUDIO_ENA;
- }
-
- /* Use power settings for Vcc and Vpp if present */
- /* Note that the CIS values need to be rescaled */
- if (cfg->vpp1.present & (1<<CISTPL_POWER_VNOM))
- p_dev->conf.Vpp = cfg->vpp1.param[CISTPL_POWER_VNOM]/10000;
- else if (dflt->vpp1.present & (1<<CISTPL_POWER_VNOM))
- p_dev->conf.Vpp = dflt->vpp1.param[CISTPL_POWER_VNOM]/10000;
-
- /* Do we need to allocate an interrupt? */
- if (cfg->irq.IRQInfo1 || dflt->irq.IRQInfo1)
- p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
-
- /* IO window settings */
- p_dev->io.NumPorts1 = p_dev->io.NumPorts2 = 0;
- if ((cfg->io.nwin > 0) || (dflt->io.nwin > 0)) {
- cistpl_io_t *io = (cfg->io.nwin) ? &cfg->io : &dflt->io;
- p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
- if (!(io->flags & CISTPL_IO_8BIT))
- p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_16;
- if (!(io->flags & CISTPL_IO_16BIT))
- p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
- p_dev->io.BasePort1 = io->win[0].base;
- p_dev->io.NumPorts1 = io->win[0].len;
- if (io->nwin > 1) {
- p_dev->io.Attributes2 = p_dev->io.Attributes1;
- p_dev->io.BasePort2 = io->win[1].base;
- p_dev->io.NumPorts2 = io->win[1].len;
- }
- }
-
- /* This reserves IO space but doesn't actually enable it */
- if (pcmcia_request_io(p_dev, &p_dev->io) != 0)
- return -ENODEV;
-
- /*
- Now set up a common memory window, if needed. There is room
- in the struct pcmcia_device structure for one memory window handle,
- but if the base addresses need to be saved, or if multiple
- windows are needed, the info should go in the private data
- structure for this device.
-
- Note that the memory window base is a physical address, and
- needs to be mapped to virtual space with ioremap() before it
- is used.
- */
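- /*
- * Editor's sketch of the ioremap() step mentioned above (illustrative
- * only; this driver leaves the window unmapped):
- *
- *	void __iomem *base = ioremap(req->Base, req->Size);
- *	if (base) {
- *		u16 w = readw(base);
- *		iounmap(base);
- *	}
- */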
- if ((cfg->mem.nwin > 0) || (dflt->mem.nwin > 0)) {
- cistpl_mem_t *mem = (cfg->mem.nwin) ? &cfg->mem : &dflt->mem;
- memreq_t map;
- req->Attributes = WIN_DATA_WIDTH_16|WIN_MEMORY_TYPE_CM;
- req->Base = mem->win[0].host_addr;
- req->Size = mem->win[0].len;
- req->AccessSpeed = 0;
- if (pcmcia_request_window(&p_dev, req, &p_dev->win) != 0)
- return -ENODEV;
- map.Page = 0;
- map.CardOffset = mem->win[0].card_addr;
- if (pcmcia_map_mem_page(p_dev->win, &map) != 0)
- return -ENODEV;
- }
- /* If we got this far, we're cool! */
- return 0;
-}
-
-
-static int airo_config(struct pcmcia_device *link)
-{
- local_info_t *dev;
- win_req_t *req;
- int last_fn, last_ret;
-
- dev = link->priv;
-
- DEBUG(0, "airo_config(0x%p)\n", link);
-
- req = kzalloc(sizeof(win_req_t), GFP_KERNEL);
- if (!req)
- return -ENOMEM;
-
- /*
- * In this loop, we scan the CIS for configuration table
- * entries, each of which describes a valid card
- * configuration, including voltage, IO window, memory window,
- * and interrupt settings.
- *
- * We make no assumptions about the card to be configured: we
- * use just the information available in the CIS. In an ideal
- * world, this would work for any PCMCIA card, but it requires
- * a complete and accurate CIS. In practice, a driver usually
- * "knows" most of these things without consulting the CIS,
- * and most client drivers will only use the CIS to fill in
- * implementation-defined details.
- */
- last_ret = pcmcia_loop_config(link, airo_cs_config_check, req);
- if (last_ret)
- goto failed;
-
- /*
- Allocate an interrupt line. Note that this does not assign a
- handler to the interrupt, unless the 'Handler' member of the
- irq structure is initialized.
- */
- if (link->conf.Attributes & CONF_ENABLE_IRQ)
- CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq));
-
- /*
- This actually configures the PCMCIA socket -- setting up
- the I/O windows and the interrupt mapping, and putting the
- card and host interface into "Memory and IO" mode.
- */
- CS_CHECK(RequestConfiguration,
- pcmcia_request_configuration(link, &link->conf));
- ((local_info_t *)link->priv)->eth_dev =
- init_airo_card(link->irq.AssignedIRQ,
- link->io.BasePort1, 1, &handle_to_dev(link));
- if (!((local_info_t *)link->priv)->eth_dev)
- goto cs_failed;
-
- /*
- At this point, the dev_node_t structure(s) need to be
- initialized and arranged in a linked list at link->dev_node.
- */
- strcpy(dev->node.dev_name, ((local_info_t *)link->priv)->eth_dev->name);
- dev->node.major = dev->node.minor = 0;
- link->dev_node = &dev->node;
-
- /* Finally, report what we've done */
- printk(KERN_INFO "%s: index 0x%02x: ",
- dev->node.dev_name, link->conf.ConfigIndex);
- if (link->conf.Vpp)
- printk(", Vpp %d.%d", link->conf.Vpp/10, link->conf.Vpp%10);
- if (link->conf.Attributes & CONF_ENABLE_IRQ)
- printk(", irq %d", link->irq.AssignedIRQ);
- if (link->io.NumPorts1)
- printk(", io 0x%04x-0x%04x", link->io.BasePort1,
- link->io.BasePort1+link->io.NumPorts1-1);
- if (link->io.NumPorts2)
- printk(" & 0x%04x-0x%04x", link->io.BasePort2,
- link->io.BasePort2+link->io.NumPorts2-1);
- if (link->win)
- printk(", mem 0x%06lx-0x%06lx", req->Base,
- req->Base+req->Size-1);
- printk("\n");
- kfree(req);
- return 0;
-
- cs_failed:
- cs_error(link, last_fn, last_ret);
- failed:
- airo_release(link);
- kfree(req);
- return -ENODEV;
-} /* airo_config */
-
-/*======================================================================
-
- After a card is removed, airo_release() will unregister the
- device, and release the PCMCIA configuration. If the device is
- still open, this will be postponed until it is closed.
-
- ======================================================================*/
-
-static void airo_release(struct pcmcia_device *link)
-{
- DEBUG(0, "airo_release(0x%p)\n", link);
- pcmcia_disable_device(link);
-}
-
-static int airo_suspend(struct pcmcia_device *link)
-{
- local_info_t *local = link->priv;
-
- netif_device_detach(local->eth_dev);
-
- return 0;
-}
-
-static int airo_resume(struct pcmcia_device *link)
-{
- local_info_t *local = link->priv;
-
- if (link->open) {
- reset_airo_card(local->eth_dev);
- netif_device_attach(local->eth_dev);
- }
-
- return 0;
-}
-
-static struct pcmcia_device_id airo_ids[] = {
- PCMCIA_DEVICE_MANF_CARD(0x015f, 0x000a),
- PCMCIA_DEVICE_MANF_CARD(0x015f, 0x0005),
- PCMCIA_DEVICE_MANF_CARD(0x015f, 0x0007),
- PCMCIA_DEVICE_MANF_CARD(0x0105, 0x0007),
- PCMCIA_DEVICE_NULL,
-};
-MODULE_DEVICE_TABLE(pcmcia, airo_ids);
-
-static struct pcmcia_driver airo_driver = {
- .owner = THIS_MODULE,
- .drv = {
- .name = "airo_cs",
- },
- .probe = airo_probe,
- .remove = airo_detach,
- .id_table = airo_ids,
- .suspend = airo_suspend,
- .resume = airo_resume,
-};
-
-static int airo_cs_init(void)
-{
- return pcmcia_register_driver(&airo_driver);
-}
-
-static void airo_cs_cleanup(void)
-{
- pcmcia_unregister_driver(&airo_driver);
-}
-
-/*
- This program is free software; you can redistribute it and/or
- modify it under the terms of the GNU General Public License
- as published by the Free Software Foundation; either version 2
- of the License, or (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- In addition:
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions
- are met:
-
- 1. Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
- 3. The name of the author may not be used to endorse or promote
- products derived from this software without specific prior written
- permission.
-
- THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
- INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
- SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
- STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
- IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- POSSIBILITY OF SUCH DAMAGE.
-*/
-
-module_init(airo_cs_init);
-module_exit(airo_cs_cleanup);
diff --git a/drivers/net/wireless/arlan-main.c b/drivers/net/wireless/arlan-main.c
deleted file mode 100644
index a54a67c425c8..000000000000
--- a/drivers/net/wireless/arlan-main.c
+++ /dev/null
@@ -1,1887 +0,0 @@
-/*
- * Copyright (C) 1997 Cullen Jennings
- * Copyright (C) 1998 Elmer Joandiu, elmer@ylenurme.ee
- * GNU General Public License applies
- * This module provides support for the Arlan 655 card made by Aironet
- */
-
-#include "arlan.h"
-
-#if BITS_PER_LONG != 32
-# error FIXME: this driver requires a 32-bit platform
-#endif
-
-static const char *arlan_version = "C.Jennings 97 & Elmer.Joandi@ut.ee Oct'98, http://www.ylenurme.ee/~elmer/655/";
-
-struct net_device *arlan_device[MAX_ARLANS];
-
-static int SID = SIDUNKNOWN;
-static int radioNodeId = radioNodeIdUNKNOWN;
-static char encryptionKey[12] = {'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'};
-int arlan_debug = debugUNKNOWN;
-static int spreadingCode = spreadingCodeUNKNOWN;
-static int channelNumber = channelNumberUNKNOWN;
-static int channelSet = channelSetUNKNOWN;
-static int systemId = systemIdUNKNOWN;
-static int registrationMode = registrationModeUNKNOWN;
-static int keyStart;
-static int tx_delay_ms;
-static int retries = 5;
-static int tx_queue_len = 1;
-static int arlan_EEPROM_bad;
-
-#ifdef ARLAN_DEBUGGING
-
-static int testMemory = testMemoryUNKNOWN;
-static int irq = irqUNKNOWN;
-static int txScrambled = 1;
-static int mdebug;
-
-module_param(irq, int, 0);
-module_param(mdebug, int, 0);
-module_param(testMemory, int, 0);
-module_param(txScrambled, int, 0);
-MODULE_PARM_DESC(irq, "(unused)");
-MODULE_PARM_DESC(testMemory, "(unused)");
-MODULE_PARM_DESC(mdebug, "Arlan multicast debugging (0-1)");
-#endif
-
-module_param_named(debug, arlan_debug, int, 0);
-module_param(spreadingCode, int, 0);
-module_param(channelNumber, int, 0);
-module_param(channelSet, int, 0);
-module_param(systemId, int, 0);
-module_param(registrationMode, int, 0);
-module_param(radioNodeId, int, 0);
-module_param(SID, int, 0);
-module_param(keyStart, int, 0);
-module_param(tx_delay_ms, int, 0);
-module_param(retries, int, 0);
-module_param(tx_queue_len, int, 0);
-module_param_named(EEPROM_bad, arlan_EEPROM_bad, int, 0);
-MODULE_PARM_DESC(debug, "Arlan debug enable (0-1)");
-MODULE_PARM_DESC(retries, "Arlan maximum packet retransmissions");
-#ifdef ARLAN_ENTRY_EXIT_DEBUGGING
-static int arlan_entry_debug;
-static int arlan_exit_debug;
-static int arlan_entry_and_exit_debug;
-module_param_named(entry_debug, arlan_entry_debug, int, 0);
-module_param_named(exit_debug, arlan_exit_debug, int, 0);
-module_param_named(entry_and_exit_debug, arlan_entry_and_exit_debug, int, 0);
-MODULE_PARM_DESC(entry_debug, "Arlan driver function entry debugging");
-MODULE_PARM_DESC(exit_debug, "Arlan driver function exit debugging");
-MODULE_PARM_DESC(entry_and_exit_debug, "Arlan driver function entry and exit debugging");
-#endif
-
-struct arlan_conf_stru arlan_conf[MAX_ARLANS];
-static int arlans_found;
-
-static int arlan_open(struct net_device *dev);
-static int arlan_tx(struct sk_buff *skb, struct net_device *dev);
-static irqreturn_t arlan_interrupt(int irq, void *dev_id);
-static int arlan_close(struct net_device *dev);
-static struct net_device_stats *
- arlan_statistics (struct net_device *dev);
-static void arlan_set_multicast (struct net_device *dev);
-static int arlan_hw_tx (struct net_device* dev, char *buf, int length );
-static int arlan_hw_config (struct net_device * dev);
-static void arlan_tx_done_interrupt (struct net_device * dev, int status);
-static void arlan_rx_interrupt (struct net_device * dev, u_char rxStatus, u_short rxOffset, u_short pkt_len);
-static void arlan_process_interrupt (struct net_device * dev);
-static void arlan_tx_timeout (struct net_device *dev);
-
-static inline long us2ticks(int us)
-{
- return us * (1000000 / HZ);
-}
-
-
-#ifdef ARLAN_ENTRY_EXIT_DEBUGGING
-#define ARLAN_DEBUG_ENTRY(name) \
- {\
- struct timeval timev;\
- do_gettimeofday(&timev);\
- if (arlan_entry_debug || arlan_entry_and_exit_debug)\
- printk("--->>>" name " %ld " "\n",((long int) timev.tv_sec * 1000000 + timev.tv_usec));\
- }
-#define ARLAN_DEBUG_EXIT(name) \
- {\
- struct timeval timev;\
- do_gettimeofday(&timev);\
- if (arlan_exit_debug || arlan_entry_and_exit_debug)\
- printk("<<<---" name " %ld " "\n",((long int) timev.tv_sec * 1000000 + timev.tv_usec) );\
- }
-#else
-#define ARLAN_DEBUG_ENTRY(name)
-#define ARLAN_DEBUG_EXIT(name)
-#endif
-
-
-/* acknowledge an interrupt by pulsing the clear-interrupt line;
-   wrapped in do/while(0) so the multi-statement macro is safe in any context */
-#define arlan_interrupt_ack(dev) \
- do { \
- clearClearInterrupt(dev); \
- setClearInterrupt(dev); \
- } while (0)
-
-static inline int arlan_drop_tx(struct net_device *dev)
-{
- struct arlan_private *priv = netdev_priv(dev);
-
- dev->stats.tx_errors++;
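- /* either defer the drop via the registration timer (tx_delay_ms set)
-  * or clear the TX ring immediately and wake the queue */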
- if (priv->Conf->tx_delay_ms)
- {
- priv->tx_done_delayed = jiffies + priv->Conf->tx_delay_ms * HZ / 1000 + 1;
- }
- else
- {
- priv->waiting_command_mask &= ~ARLAN_COMMAND_TX;
- TXHEAD(dev).offset = 0;
- TXTAIL(dev).offset = 0;
- priv->txLast = 0;
- priv->bad = 0;
- if (!priv->under_reset && !priv->under_config)
- netif_wake_queue (dev);
- }
- return 1;
-}
-
-
-int arlan_command(struct net_device *dev, int command_p)
-{
- struct arlan_private *priv = netdev_priv(dev);
- volatile struct arlan_shmem __iomem *arlan = priv->card;
- struct arlan_conf_stru *conf = priv->Conf;
- int udelayed = 0;
- int i = 0;
- unsigned long flags;
-
- ARLAN_DEBUG_ENTRY("arlan_command");
-
- if (priv->card_polling_interval)
- priv->card_polling_interval = 1;
-
- if (arlan_debug & ARLAN_DEBUG_CHAIN_LOCKS)
- printk(KERN_DEBUG "arlan_command, %lx commandByte %x waiting %lx incoming %x \n",
- jiffies, READSHMB(arlan->commandByte),
- priv->waiting_command_mask, command_p);
-
- priv->waiting_command_mask |= command_p;
-
- if (priv->waiting_command_mask & ARLAN_COMMAND_RESET)
- if (time_after(jiffies, priv->lastReset + 5 * HZ))
- priv->waiting_command_mask &= ~ARLAN_COMMAND_RESET;
-
- if (priv->waiting_command_mask & ARLAN_COMMAND_INT_ACK)
- {
- arlan_interrupt_ack(dev);
- priv->waiting_command_mask &= ~ARLAN_COMMAND_INT_ACK;
- }
- if (priv->waiting_command_mask & ARLAN_COMMAND_INT_ENABLE)
- {
- setInterruptEnable(dev);
- priv->waiting_command_mask &= ~ARLAN_COMMAND_INT_ENABLE;
- }
-
- /* Card access serializing lock */
- spin_lock_irqsave(&priv->lock, flags);
-
- /* Check cards status and waiting */
-
- if (priv->waiting_command_mask & (ARLAN_COMMAND_LONG_WAIT_NOW | ARLAN_COMMAND_WAIT_NOW))
- {
- while (priv->waiting_command_mask & (ARLAN_COMMAND_LONG_WAIT_NOW | ARLAN_COMMAND_WAIT_NOW))
- {
- if (READSHMB(arlan->resetFlag) ||
- READSHMB(arlan->commandByte)) /* ||
- (readControlRegister(dev) & ARLAN_ACCESS))
- */
- udelay(40);
- else
- priv->waiting_command_mask &= ~(ARLAN_COMMAND_LONG_WAIT_NOW | ARLAN_COMMAND_WAIT_NOW);
-
- udelayed++;
-
- if (priv->waiting_command_mask & ARLAN_COMMAND_LONG_WAIT_NOW)
- {
- if (udelayed * 40 > 1000000)
- {
- printk(KERN_ERR "%s long wait too long \n", dev->name);
- priv->waiting_command_mask |= ARLAN_COMMAND_RESET;
- break;
- }
- }
- else if (priv->waiting_command_mask & ARLAN_COMMAND_WAIT_NOW)
- {
- if (udelayed * 40 > 1000)
- {
- printk(KERN_ERR "%s short wait too long \n", dev->name);
- goto bad_end;
- }
- }
- }
- }
- else
- {
- i = 0;
- while ((READSHMB(arlan->resetFlag) ||
- READSHMB(arlan->commandByte)) &&
- conf->pre_Command_Wait > (i++) * 10)
- udelay(10);
-
-
- if ((READSHMB(arlan->resetFlag) ||
- READSHMB(arlan->commandByte)) &&
- !(priv->waiting_command_mask & ARLAN_COMMAND_RESET))
- {
- goto card_busy_end;
- }
- }
- if (priv->waiting_command_mask & ARLAN_COMMAND_RESET)
- priv->under_reset = 1;
- if (priv->waiting_command_mask & ARLAN_COMMAND_CONF)
- priv->under_config = 1;
-
- /* Issuing command */
- arlan_lock_card_access(dev);
- if (priv->waiting_command_mask & ARLAN_COMMAND_POWERUP)
- {
- // if (readControlRegister(dev) & (ARLAN_ACCESS && ARLAN_POWER))
- setPowerOn(dev);
- arlan_interrupt_lancpu(dev);
- priv->waiting_command_mask &= ~ARLAN_COMMAND_POWERUP;
- priv->waiting_command_mask |= ARLAN_COMMAND_RESET;
- priv->card_polling_interval = HZ / 10;
- }
- else if (priv->waiting_command_mask & ARLAN_COMMAND_ACTIVATE)
- {
- WRITESHMB(arlan->commandByte, ARLAN_COM_ACTIVATE);
- arlan_interrupt_lancpu(dev);
- priv->waiting_command_mask &= ~ARLAN_COMMAND_ACTIVATE;
- priv->card_polling_interval = HZ / 10;
- }
- else if (priv->waiting_command_mask & ARLAN_COMMAND_RX_ABORT)
- {
- if (priv->rx_command_given)
- {
- WRITESHMB(arlan->commandByte, ARLAN_COM_RX_ABORT);
- arlan_interrupt_lancpu(dev);
- priv->rx_command_given = 0;
- }
- priv->waiting_command_mask &= ~ARLAN_COMMAND_RX_ABORT;
- priv->card_polling_interval = 1;
- }
- else if (priv->waiting_command_mask & ARLAN_COMMAND_TX_ABORT)
- {
- if (priv->tx_command_given)
- {
- WRITESHMB(arlan->commandByte, ARLAN_COM_TX_ABORT);
- arlan_interrupt_lancpu(dev);
- priv->tx_command_given = 0;
- }
- priv->waiting_command_mask &= ~ARLAN_COMMAND_TX_ABORT;
- priv->card_polling_interval = 1;
- }
- else if (priv->waiting_command_mask & ARLAN_COMMAND_RESET)
- {
- priv->under_reset=1;
- netif_stop_queue (dev);
-
- arlan_drop_tx(dev);
- if (priv->tx_command_given || priv->rx_command_given)
- {
- printk(KERN_ERR "%s: Reset under tx or rx command \n", dev->name);
- }
- netif_stop_queue (dev);
- if (arlan_debug & ARLAN_DEBUG_RESET)
- printk(KERN_ERR "%s: Doing chip reset\n", dev->name);
- priv->lastReset = jiffies;
- WRITESHM(arlan->commandByte, 0, u_char);
- /* hold card in reset state */
- setHardwareReset(dev);
- /* set reset flag and then release reset */
- WRITESHM(arlan->resetFlag, 0xff, u_char);
- clearChannelAttention(dev);
- clearHardwareReset(dev);
- priv->card_polling_interval = HZ / 4;
- priv->waiting_command_mask &= ~ARLAN_COMMAND_RESET;
- priv->waiting_command_mask |= ARLAN_COMMAND_INT_RACK;
-// priv->waiting_command_mask |= ARLAN_COMMAND_INT_RENABLE;
-// priv->waiting_command_mask |= ARLAN_COMMAND_RX;
- }
- else if (priv->waiting_command_mask & ARLAN_COMMAND_INT_RACK)
- {
- clearHardwareReset(dev);
- clearClearInterrupt(dev);
- setClearInterrupt(dev);
- setInterruptEnable(dev);
- priv->waiting_command_mask &= ~ARLAN_COMMAND_INT_RACK;
- priv->waiting_command_mask |= ARLAN_COMMAND_CONF;
- priv->under_config = 1;
- priv->under_reset = 0;
- }
- else if (priv->waiting_command_mask & ARLAN_COMMAND_INT_RENABLE)
- {
- setInterruptEnable(dev);
- priv->waiting_command_mask &= ~ARLAN_COMMAND_INT_RENABLE;
- }
- else if (priv->waiting_command_mask & ARLAN_COMMAND_CONF)
- {
- if (priv->tx_command_given || priv->rx_command_given)
- {
- printk(KERN_ERR "%s: Reset under tx or rx command \n", dev->name);
- }
- arlan_drop_tx(dev);
- setInterruptEnable(dev);
- arlan_hw_config(dev);
- arlan_interrupt_lancpu(dev);
- priv->waiting_command_mask &= ~ARLAN_COMMAND_CONF;
- priv->card_polling_interval = HZ / 10;
-// priv->waiting_command_mask |= ARLAN_COMMAND_INT_RACK;
-// priv->waiting_command_mask |= ARLAN_COMMAND_INT_ENABLE;
- priv->waiting_command_mask |= ARLAN_COMMAND_CONF_WAIT;
- }
- else if (priv->waiting_command_mask & ARLAN_COMMAND_CONF_WAIT)
- {
- if (READSHMB(arlan->configuredStatusFlag) != 0 &&
- READSHMB(arlan->diagnosticInfo) == 0xff)
- {
- priv->waiting_command_mask &= ~ARLAN_COMMAND_CONF_WAIT;
- priv->waiting_command_mask |= ARLAN_COMMAND_RX;
- priv->waiting_command_mask |= ARLAN_COMMAND_TBUSY_CLEAR;
- priv->card_polling_interval = HZ / 10;
- priv->tx_command_given = 0;
- priv->under_config = 0;
- }
- else
- {
- priv->card_polling_interval = 1;
- if (arlan_debug & ARLAN_DEBUG_TIMING)
- printk(KERN_ERR "configure delayed \n");
- }
- }
- else if (priv->waiting_command_mask & ARLAN_COMMAND_RX)
- {
- if (!registrationBad(dev))
- {
- setInterruptEnable(dev);
- memset_io(arlan->commandParameter, 0, 0xf);
- WRITESHMB(arlan->commandByte, ARLAN_COM_INT | ARLAN_COM_RX_ENABLE);
- WRITESHMB(arlan->commandParameter[0], conf->rxParameter);
- arlan_interrupt_lancpu(dev);
- priv->rx_command_given = 0; // mnjah, bad
- priv->waiting_command_mask &= ~ARLAN_COMMAND_RX;
- priv->card_polling_interval = 1;
- }
- else
- priv->card_polling_interval = 2;
- }
- else if (priv->waiting_command_mask & ARLAN_COMMAND_TBUSY_CLEAR)
- {
- if ( !registrationBad(dev) &&
- (netif_queue_stopped(dev) || !netif_running(dev)) )
- {
- priv->waiting_command_mask &= ~ARLAN_COMMAND_TBUSY_CLEAR;
- netif_wake_queue (dev);
- }
- }
- else if (priv->waiting_command_mask & ARLAN_COMMAND_TX)
- {
- if (!test_and_set_bit(0, (void *) &priv->tx_command_given))
- {
- if (time_after(jiffies,
- priv->tx_last_sent + us2ticks(conf->rx_tweak1))
- || time_before(jiffies,
- priv->last_rx_int_ack_time + us2ticks(conf->rx_tweak2)))
- {
- setInterruptEnable(dev);
- memset_io(arlan->commandParameter, 0, 0xf);
- WRITESHMB(arlan->commandByte, ARLAN_COM_TX_ENABLE | ARLAN_COM_INT);
- memcpy_toio(arlan->commandParameter, &TXLAST(dev), 14);
-// for ( i=1 ; i < 15 ; i++) printk("%02x:",READSHMB(arlan->commandParameter[i]));
- priv->tx_last_sent = jiffies;
- arlan_interrupt_lancpu(dev);
- priv->tx_command_given = 1;
- priv->waiting_command_mask &= ~ARLAN_COMMAND_TX;
- priv->card_polling_interval = 1;
- }
- else
- {
- priv->tx_command_given = 0;
- priv->card_polling_interval = 1;
- }
- }
- else if (arlan_debug & ARLAN_DEBUG_CHAIN_LOCKS)
- printk(KERN_ERR "tx command when tx chain locked \n");
- }
- else if (priv->waiting_command_mask & ARLAN_COMMAND_NOOPINT)
- {
- WRITESHMB(arlan->commandByte, ARLAN_COM_NOP | ARLAN_COM_INT);
- arlan_interrupt_lancpu(dev);
- priv->waiting_command_mask &= ~ARLAN_COMMAND_NOOPINT;
- priv->card_polling_interval = HZ / 3;
- }
- else if (priv->waiting_command_mask & ARLAN_COMMAND_NOOP)
- {
- WRITESHMB(arlan->commandByte, ARLAN_COM_NOP);
- arlan_interrupt_lancpu(dev);
- priv->waiting_command_mask &= ~ARLAN_COMMAND_NOOP;
- priv->card_polling_interval = HZ / 3;
- }
- else if (priv->waiting_command_mask & ARLAN_COMMAND_SLOW_POLL)
- {
- WRITESHMB(arlan->commandByte, ARLAN_COM_GOTO_SLOW_POLL);
- arlan_interrupt_lancpu(dev);
- priv->waiting_command_mask &= ~ARLAN_COMMAND_SLOW_POLL;
- priv->card_polling_interval = HZ / 3;
- }
- else if (priv->waiting_command_mask & ARLAN_COMMAND_POWERDOWN)
- {
- setPowerOff(dev);
- if (arlan_debug & ARLAN_DEBUG_CARD_STATE)
- printk(KERN_WARNING "%s: Arlan Going Standby\n", dev->name);
- priv->waiting_command_mask &= ~ARLAN_COMMAND_POWERDOWN;
- priv->card_polling_interval = 3 * HZ;
- }
- arlan_unlock_card_access(dev);
- for (i = 0; READSHMB(arlan->commandByte) && i < 20; i++)
- udelay(10);
- if (READSHMB(arlan->commandByte))
- if (arlan_debug & ARLAN_DEBUG_CARD_STATE)
- printk(KERN_ERR "card busy leaving command %lx\n", priv->waiting_command_mask);
-
- spin_unlock_irqrestore(&priv->lock, flags);
- ARLAN_DEBUG_EXIT("arlan_command");
- priv->last_command_buff_free_time = jiffies;
- return 0;
-
-card_busy_end:
- if (time_after(jiffies, priv->last_command_buff_free_time + HZ))
- priv->waiting_command_mask |= ARLAN_COMMAND_CLEAN_AND_RESET;
-
- if (arlan_debug & ARLAN_DEBUG_CARD_STATE)
- printk(KERN_ERR "%s arlan_command card busy end \n", dev->name);
- spin_unlock_irqrestore(&priv->lock, flags);
- ARLAN_DEBUG_EXIT("arlan_command");
- return 1;
-
-bad_end:
- printk(KERN_ERR "%s arlan_command bad end \n", dev->name);
-
- spin_unlock_irqrestore(&priv->lock, flags);
- ARLAN_DEBUG_EXIT("arlan_command");
-
- return -1;
-}
-
-static inline void arlan_command_process(struct net_device *dev)
-{
- struct arlan_private *priv = netdev_priv(dev);
-
- int times = 0;
- while (priv->waiting_command_mask && times < 8)
- {
- if (arlan_command(dev, 0))
- break;
- times++;
- /* if long command, we won't repeat trying */
- if (priv->card_polling_interval > 1)
- break;
- times++;
- }
-}
-
-
-static inline void arlan_retransmit_now(struct net_device *dev)
-{
- struct arlan_private *priv = netdev_priv(dev);
-
-
- ARLAN_DEBUG_ENTRY("arlan_retransmit_now");
- if (TXLAST(dev).offset == 0)
- {
- if (TXHEAD(dev).offset)
- {
- priv->txLast = 0;
- IFDEBUG(ARLAN_DEBUG_TX_CHAIN) printk(KERN_DEBUG "TX buff switch to head \n");
-
- }
- else if (TXTAIL(dev).offset)
- {
- IFDEBUG(ARLAN_DEBUG_TX_CHAIN) printk(KERN_DEBUG "TX buff switch to tail \n");
- priv->txLast = 1;
- }
- else
- IFDEBUG(ARLAN_DEBUG_TX_CHAIN) printk(KERN_ERR "ReTransmit buff empty");
- netif_wake_queue (dev);
- return;
-
- }
- arlan_command(dev, ARLAN_COMMAND_TX);
-
- priv->Conf->driverRetransmissions++;
- priv->retransmissions++;
-
- IFDEBUG(ARLAN_DEBUG_TX_CHAIN) printk("Retransmit %d bytes \n", TXLAST(dev).length);
-
- ARLAN_DEBUG_EXIT("arlan_retransmit_now");
-}
-
-
-
-static void arlan_registration_timer(unsigned long data)
-{
- struct net_device *dev = (struct net_device *) data;
- struct arlan_private *priv = netdev_priv(dev);
- int bh_mark_needed = 0;
- int next_tick = 1;
- long lostTime = ((long)jiffies - (long)priv->registrationLastSeen)
- * (1000/HZ);
-
- if (registrationBad(dev))
- {
- priv->registrationLostCount++;
- if (lostTime > 7000 && lostTime < 7200)
- {
- printk(KERN_NOTICE "%s registration Lost \n", dev->name);
- }
- if (lostTime / priv->reRegisterExp > 2000)
- arlan_command(dev, ARLAN_COMMAND_CLEAN_AND_CONF);
- if (lostTime / (priv->reRegisterExp) > 3500)
- arlan_command(dev, ARLAN_COMMAND_CLEAN_AND_RESET);
- if (priv->reRegisterExp < 400)
- priv->reRegisterExp += 2;
- if (lostTime > 7200)
- {
- next_tick = HZ;
- arlan_command(dev, ARLAN_COMMAND_CLEAN_AND_RESET);
- }
- }
- else
- {
- if (priv->Conf->registrationMode && lostTime > 10000 &&
- priv->registrationLostCount)
- {
- printk(KERN_NOTICE "%s registration is back after %ld milliseconds\n",
- dev->name, lostTime);
- }
- priv->registrationLastSeen = jiffies;
- priv->registrationLostCount = 0;
- priv->reRegisterExp = 1;
- if (!netif_running(dev) )
- netif_wake_queue(dev);
- if (time_after(priv->tx_last_sent, priv->tx_last_cleared) &&
- time_after(jiffies, priv->tx_last_sent + 5*HZ)) {
- arlan_command(dev, ARLAN_COMMAND_CLEAN_AND_RESET);
- priv->tx_last_cleared = jiffies;
- }
- }
-
-
- if (!registrationBad(dev) && priv->ReTransmitRequested)
- {
- IFDEBUG(ARLAN_DEBUG_TX_CHAIN)
- printk(KERN_ERR "Retransmit from timer \n");
- priv->ReTransmitRequested = 0;
- arlan_retransmit_now(dev);
- }
- if (!registrationBad(dev) &&
- time_after(jiffies, priv->tx_done_delayed) &&
- priv->tx_done_delayed != 0)
- {
- TXLAST(dev).offset = 0;
- if (priv->txLast)
- priv->txLast = 0;
- else if (TXTAIL(dev).offset)
- priv->txLast = 1;
- if (TXLAST(dev).offset)
- {
- arlan_retransmit_now(dev);
- dev->trans_start = jiffies;
- }
- if (!(TXHEAD(dev).offset && TXTAIL(dev).offset))
- {
- netif_wake_queue (dev);
- }
- priv->tx_done_delayed = 0;
- bh_mark_needed = 1;
- }
- if (bh_mark_needed)
- {
- netif_wake_queue (dev);
- }
- arlan_process_interrupt(dev);
-
- if (next_tick < priv->card_polling_interval)
- next_tick = priv->card_polling_interval;
-
- priv->timer.expires = jiffies + next_tick;
-
- add_timer(&priv->timer);
-}
-
-
-#ifdef ARLAN_DEBUGGING
-
-static void arlan_print_registers(struct net_device *dev, int line)
-{
- struct arlan_private *priv = netdev_priv(dev);
- volatile struct arlan_shmem *arlan = priv->card;
-
- u_char hostcpuLock, lancpuLock, controlRegister, cntrlRegImage,
- txStatus, rxStatus, interruptInProgress, commandByte;
-
-
- ARLAN_DEBUG_ENTRY("arlan_print_registers");
- READSHM(interruptInProgress, arlan->interruptInProgress, u_char);
- READSHM(hostcpuLock, arlan->hostcpuLock, u_char);
- READSHM(lancpuLock, arlan->lancpuLock, u_char);
- READSHM(controlRegister, arlan->controlRegister, u_char);
- READSHM(cntrlRegImage, arlan->cntrlRegImage, u_char);
- READSHM(txStatus, arlan->txStatus, u_char);
- READSHM(rxStatus, arlan->rxStatus, u_char);
- READSHM(commandByte, arlan->commandByte, u_char);
-
- printk(KERN_WARNING "line %04d IP %02x HL %02x LL %02x CB %02x CR %02x CRI %02x TX %02x RX %02x\n",
- line, interruptInProgress, hostcpuLock, lancpuLock, commandByte,
- controlRegister, cntrlRegImage, txStatus, rxStatus);
-
- ARLAN_DEBUG_EXIT("arlan_print_registers");
-}
-#endif
-
-
-static int arlan_hw_tx(struct net_device *dev, char *buf, int length)
-{
- int i;
-
- struct arlan_private *priv = netdev_priv(dev);
- volatile struct arlan_shmem __iomem *arlan = priv->card;
- struct arlan_conf_stru *conf = priv->Conf;
-
- int tailStarts = 0x800;
- int headEnds = 0x0;
-
-
- ARLAN_DEBUG_ENTRY("arlan_hw_tx");
- if (TXHEAD(dev).offset)
- headEnds = (((TXHEAD(dev).offset + TXHEAD(dev).length - offsetof(struct arlan_shmem, txBuffer)) / 64) + 1) * 64;
- if (TXTAIL(dev).offset)
- tailStarts = 0x800 - (((TXTAIL(dev).offset - offsetof(struct arlan_shmem, txBuffer)) / 64) + 2) * 64;
-
-
- if (!TXHEAD(dev).offset && length < tailStarts)
- {
- IFDEBUG(ARLAN_DEBUG_TX_CHAIN)
- printk(KERN_ERR "TXHEAD insert, tailStart %d\n", tailStarts);
-
- TXHEAD(dev).offset =
- offsetof(struct arlan_shmem, txBuffer);
- TXHEAD(dev).length = length - ARLAN_FAKE_HDR_LEN;
- for (i = 0; i < 6; i++)
- TXHEAD(dev).dest[i] = buf[i];
- TXHEAD(dev).clear = conf->txClear;
- TXHEAD(dev).retries = conf->txRetries; /* 0 is use default */
- TXHEAD(dev).routing = conf->txRouting;
- TXHEAD(dev).scrambled = conf->txScrambled;
- memcpy_toio((char __iomem *)arlan + TXHEAD(dev).offset, buf + ARLAN_FAKE_HDR_LEN, TXHEAD(dev).length);
- }
- else if (!TXTAIL(dev).offset && length < (0x800 - headEnds))
- {
- IFDEBUG(ARLAN_DEBUG_TX_CHAIN)
- printk(KERN_ERR "TXTAIL insert, headEnd %d\n", headEnds);
-
- TXTAIL(dev).offset =
- offsetof(struct arlan_shmem, txBuffer) + 0x800 - (length / 64 + 2) * 64;
- TXTAIL(dev).length = length - ARLAN_FAKE_HDR_LEN;
- for (i = 0; i < 6; i++)
- TXTAIL(dev).dest[i] = buf[i];
- TXTAIL(dev).clear = conf->txClear;
- TXTAIL(dev).retries = conf->txRetries;
- TXTAIL(dev).routing = conf->txRouting;
- TXTAIL(dev).scrambled = conf->txScrambled;
- memcpy_toio(((char __iomem *)arlan + TXTAIL(dev).offset), buf + ARLAN_FAKE_HDR_LEN, TXTAIL(dev).length);
- }
- else
- {
- netif_stop_queue (dev);
- IFDEBUG(ARLAN_DEBUG_TX_CHAIN)
- printk(KERN_ERR "TX TAIL & HEAD full, return, tailStart %d headEnd %d\n", tailStarts, headEnds);
- return -1;
- }
- priv->out_bytes += length;
- priv->out_bytes10 += length;
- if (conf->measure_rate < 1)
- conf->measure_rate = 1;
- if (time_after(jiffies, priv->out_time + conf->measure_rate * HZ))
- {
- conf->out_speed = priv->out_bytes / conf->measure_rate;
- priv->out_bytes = 0;
- priv->out_time = jiffies;
- }
- if (time_after(jiffies, priv->out_time10 + conf->measure_rate * 10*HZ))
- {
- conf->out_speed10 = priv->out_bytes10 / (10 * conf->measure_rate);
- priv->out_bytes10 = 0;
- priv->out_time10 = jiffies;
- }
- if (TXHEAD(dev).offset && TXTAIL(dev).offset)
- {
- netif_stop_queue (dev);
- return 0;
- }
- else
- netif_start_queue (dev);
-
-
- IFDEBUG(ARLAN_DEBUG_HEADER_DUMP)
- printk(KERN_WARNING "%s Transmit t %2x:%2x:%2x:%2x:%2x:%2x f %2x:%2x:%2x:%2x:%2x:%2x \n", dev->name,
- (unsigned char) buf[0], (unsigned char) buf[1], (unsigned char) buf[2], (unsigned char) buf[3],
- (unsigned char) buf[4], (unsigned char) buf[5], (unsigned char) buf[6], (unsigned char) buf[7],
- (unsigned char) buf[8], (unsigned char) buf[9], (unsigned char) buf[10], (unsigned char) buf[11]);
-
- IFDEBUG(ARLAN_DEBUG_TX_CHAIN) printk(KERN_ERR "TX command prepare for buffer %d\n", priv->txLast);
-
- arlan_command(dev, ARLAN_COMMAND_TX);
-
- priv->tx_last_sent = jiffies;
-
- IFDEBUG(ARLAN_DEBUG_TX_CHAIN) printk("%s TX Qued %d bytes \n", dev->name, length);
-
- ARLAN_DEBUG_EXIT("arlan_hw_tx");
-
- return 0;
-}
-
-
-static int arlan_hw_config(struct net_device *dev)
-{
- struct arlan_private *priv = netdev_priv(dev);
- volatile struct arlan_shmem __iomem *arlan = priv->card;
- struct arlan_conf_stru *conf = priv->Conf;
-
- ARLAN_DEBUG_ENTRY("arlan_hw_config");
-
- printk(KERN_NOTICE "%s arlan configure called \n", dev->name);
- if (arlan_EEPROM_bad)
- printk(KERN_NOTICE "arlan configure with eeprom bad option \n");
-
-
- WRITESHM(arlan->spreadingCode, conf->spreadingCode, u_char);
- WRITESHM(arlan->channelSet, conf->channelSet, u_char);
-
- if (arlan_EEPROM_bad)
- WRITESHM(arlan->defaultChannelSet, conf->channelSet, u_char);
-
- WRITESHM(arlan->channelNumber, conf->channelNumber, u_char);
-
- WRITESHM(arlan->scramblingDisable, conf->scramblingDisable, u_char);
- WRITESHM(arlan->txAttenuation, conf->txAttenuation, u_char);
-
- WRITESHM(arlan->systemId, conf->systemId, u_int);
-
- WRITESHM(arlan->maxRetries, conf->maxRetries, u_char);
- WRITESHM(arlan->receiveMode, conf->receiveMode, u_char);
- WRITESHM(arlan->priority, conf->priority, u_char);
- WRITESHM(arlan->rootOrRepeater, conf->rootOrRepeater, u_char);
- WRITESHM(arlan->SID, conf->SID, u_int);
-
- WRITESHM(arlan->registrationMode, conf->registrationMode, u_char);
-
- WRITESHM(arlan->registrationFill, conf->registrationFill, u_char);
- WRITESHM(arlan->localTalkAddress, conf->localTalkAddress, u_char);
- WRITESHM(arlan->codeFormat, conf->codeFormat, u_char);
- WRITESHM(arlan->numChannels, conf->numChannels, u_char);
- WRITESHM(arlan->channel1, conf->channel1, u_char);
- WRITESHM(arlan->channel2, conf->channel2, u_char);
- WRITESHM(arlan->channel3, conf->channel3, u_char);
- WRITESHM(arlan->channel4, conf->channel4, u_char);
- WRITESHM(arlan->radioNodeId, conf->radioNodeId, u_short);
- WRITESHM(arlan->SID, conf->SID, u_int);
- WRITESHM(arlan->waitTime, conf->waitTime, u_short);
- WRITESHM(arlan->lParameter, conf->lParameter, u_short);
- memcpy_toio(&(arlan->_15), &(conf->_15), 3);
- WRITESHM(arlan->_15, conf->_15, u_short);
- WRITESHM(arlan->headerSize, conf->headerSize, u_short);
- if (arlan_EEPROM_bad)
- WRITESHM(arlan->hardwareType, conf->hardwareType, u_char);
- WRITESHM(arlan->radioType, conf->radioType, u_char);
- if (arlan_EEPROM_bad)
- WRITESHM(arlan->radioModule, conf->radioType, u_char);
-
- memcpy_toio(arlan->encryptionKey + keyStart, encryptionKey, 8);
- memcpy_toio(arlan->name, conf->siteName, 16);
-
- WRITESHMB(arlan->commandByte, ARLAN_COM_INT | ARLAN_COM_CONF); /* do configure */
- memset_io(arlan->commandParameter, 0, 0xf); /* 0xf */
- memset_io(arlan->commandParameter + 1, 0, 2);
- if (conf->writeEEPROM)
- {
- memset_io(arlan->commandParameter, conf->writeEEPROM, 1);
-// conf->writeEEPROM=0;
- }
- if (conf->registrationMode && conf->registrationInterrupts)
- memset_io(arlan->commandParameter + 3, 1, 1);
- else
- memset_io(arlan->commandParameter + 3, 0, 1);
-
- priv->irq_test_done = 0;
-
- if (conf->tx_queue_len)
- dev->tx_queue_len = conf->tx_queue_len;
- udelay(100);
-
- ARLAN_DEBUG_EXIT("arlan_hw_config");
- return 0;
-}
-
-
-static int arlan_read_card_configuration(struct net_device *dev)
-{
- u_char tlx415;
- struct arlan_private *priv = netdev_priv(dev);
- volatile struct arlan_shmem __iomem *arlan = priv->card;
- struct arlan_conf_stru *conf = priv->Conf;
-
- ARLAN_DEBUG_ENTRY("arlan_read_card_configuration");
-
- if (radioNodeId == radioNodeIdUNKNOWN)
- {
- READSHM(conf->radioNodeId, arlan->radioNodeId, u_short);
- }
- else
- conf->radioNodeId = radioNodeId;
-
- if (SID == SIDUNKNOWN)
- {
- READSHM(conf->SID, arlan->SID, u_int);
- }
- else conf->SID = SID;
-
- if (spreadingCode == spreadingCodeUNKNOWN)
- {
- READSHM(conf->spreadingCode, arlan->spreadingCode, u_char);
- }
- else
- conf->spreadingCode = spreadingCode;
-
- if (channelSet == channelSetUNKNOWN)
- {
- READSHM(conf->channelSet, arlan->channelSet, u_char);
- }
- else conf->channelSet = channelSet;
-
- if (channelNumber == channelNumberUNKNOWN)
- {
- READSHM(conf->channelNumber, arlan->channelNumber, u_char);
- }
- else conf->channelNumber = channelNumber;
-
- READSHM(conf->scramblingDisable, arlan->scramblingDisable, u_char);
- READSHM(conf->txAttenuation, arlan->txAttenuation, u_char);
-
- if (systemId == systemIdUNKNOWN)
- {
- READSHM(conf->systemId, arlan->systemId, u_int);
- }
- else conf->systemId = systemId;
-
- READSHM(conf->maxDatagramSize, arlan->maxDatagramSize, u_short);
- READSHM(conf->maxFrameSize, arlan->maxFrameSize, u_short);
- READSHM(conf->maxRetries, arlan->maxRetries, u_char);
- READSHM(conf->receiveMode, arlan->receiveMode, u_char);
- READSHM(conf->priority, arlan->priority, u_char);
- READSHM(conf->rootOrRepeater, arlan->rootOrRepeater, u_char);
-
- if (SID == SIDUNKNOWN)
- {
- READSHM(conf->SID, arlan->SID, u_int);
- }
- else conf->SID = SID;
-
- if (registrationMode == registrationModeUNKNOWN)
- {
- READSHM(conf->registrationMode, arlan->registrationMode, u_char);
- }
- else conf->registrationMode = registrationMode;
-
- READSHM(conf->registrationFill, arlan->registrationFill, u_char);
- READSHM(conf->localTalkAddress, arlan->localTalkAddress, u_char);
- READSHM(conf->codeFormat, arlan->codeFormat, u_char);
- READSHM(conf->numChannels, arlan->numChannels, u_char);
- READSHM(conf->channel1, arlan->channel1, u_char);
- READSHM(conf->channel2, arlan->channel2, u_char);
- READSHM(conf->channel3, arlan->channel3, u_char);
- READSHM(conf->channel4, arlan->channel4, u_char);
- READSHM(conf->waitTime, arlan->waitTime, u_short);
- READSHM(conf->lParameter, arlan->lParameter, u_short);
- READSHM(conf->_15, arlan->_15, u_short);
- READSHM(conf->headerSize, arlan->headerSize, u_short);
- READSHM(conf->hardwareType, arlan->hardwareType, u_char);
- READSHM(conf->radioType, arlan->radioModule, u_char);
-
- if (conf->radioType == 0)
- conf->radioType = 0xc;
-
- WRITESHM(arlan->configStatus, 0xA5, u_char);
- READSHM(tlx415, arlan->configStatus, u_char);
-
- if (tlx415 != 0xA5)
- printk(KERN_INFO "%s tlx415 chip \n", dev->name);
-
- conf->txClear = 0;
- conf->txRetries = 1;
- conf->txRouting = 1;
- conf->txScrambled = 0;
- conf->rxParameter = 1;
- conf->txTimeoutMs = 4000;
- conf->waitCardTimeout = 100000;
- conf->receiveMode = ARLAN_RCV_CLEAN;
- memcpy_fromio(conf->siteName, arlan->name, 16);
- conf->siteName[16] = '\0';
- conf->retries = retries;
- conf->tx_delay_ms = tx_delay_ms;
- conf->ReTransmitPacketMaxSize = 200;
- conf->waitReTransmitPacketMaxSize = 200;
- conf->txAckTimeoutMs = 900;
- conf->fastReTransCount = 3;
-
- ARLAN_DEBUG_EXIT("arlan_read_card_configuration");
-
- return 0;
-}
-
-
-static int lastFoundAt = 0xbe000;
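-/* physical address of the last card found; probing resumes above it,
-   scanning up to 0xDE000 (see arlan_probe) */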
-
-
-/*
- * This is the real probe routine. Linux has a history of friendly device
- * probes on the ISA bus. A good device probe avoids doing writes and
- * verifies that the correct device exists and functions.
- */
-#define ARLAN_SHMEM_SIZE 0x2000
-static int __init arlan_check_fingerprint(unsigned long memaddr)
-{
- static const char probeText[] = "TELESYSTEM SLW INC. ARLAN \0";
- volatile struct arlan_shmem __iomem *arlan = (struct arlan_shmem *) memaddr;
- unsigned long paddr = virt_to_phys((void *) memaddr);
- char tempBuf[49];
-
- ARLAN_DEBUG_ENTRY("arlan_check_fingerprint");
-
- if (!request_mem_region(paddr, ARLAN_SHMEM_SIZE, "arlan")) {
- // printk(KERN_WARNING "arlan: memory region %lx excluded from probing \n",paddr);
- return -ENODEV;
- }
-
- memcpy_fromio(tempBuf, arlan->textRegion, 29);
- tempBuf[29] = 0;
-
- /* check for card at this address */
- if (0 != strncmp(tempBuf, probeText, 29)){
- release_mem_region(paddr, ARLAN_SHMEM_SIZE);
- return -ENODEV;
- }
-
-// printk(KERN_INFO "arlan found at 0x%x \n",memaddr);
- ARLAN_DEBUG_EXIT("arlan_check_fingerprint");
-
- return 0;
-}
-
-static int arlan_change_mtu(struct net_device *dev, int new_mtu)
-{
- struct arlan_private *priv = netdev_priv(dev);
- struct arlan_conf_stru *conf = priv->Conf;
-
- ARLAN_DEBUG_ENTRY("arlan_change_mtu");
- if (new_mtu > 2032)
- return -EINVAL;
- dev->mtu = new_mtu;
- if (new_mtu < 256)
- new_mtu = 256; /* cards book suggests 1600 */
- conf->maxDatagramSize = new_mtu;
- conf->maxFrameSize = new_mtu + 48;
-
- arlan_command(dev, ARLAN_COMMAND_CLEAN_AND_CONF);
- printk(KERN_NOTICE "%s mtu changed to %d \n", dev->name, new_mtu);
-
- ARLAN_DEBUG_EXIT("arlan_change_mtu");
-
- return 0;
-}
-
-static int arlan_mac_addr(struct net_device *dev, void *p)
-{
- struct sockaddr *addr = p;
-
-
- ARLAN_DEBUG_ENTRY("arlan_mac_addr");
- return -EINVAL;
-
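- /* the early return above disables changing the MAC address;
-  * the code below is intentionally unreachable */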
- if (!netif_running(dev))
- return -EBUSY;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
-
- ARLAN_DEBUG_EXIT("arlan_mac_addr");
- return 0;
-}
-
-static const struct net_device_ops arlan_netdev_ops = {
- .ndo_open = arlan_open,
- .ndo_stop = arlan_close,
- .ndo_start_xmit = arlan_tx,
- .ndo_get_stats = arlan_statistics,
- .ndo_set_multicast_list = arlan_set_multicast,
- .ndo_change_mtu = arlan_change_mtu,
- .ndo_set_mac_address = arlan_mac_addr,
- .ndo_tx_timeout = arlan_tx_timeout,
- .ndo_validate_addr = eth_validate_addr,
-};
-
-static int __init arlan_setup_device(struct net_device *dev, int num)
-{
- struct arlan_private *ap = netdev_priv(dev);
- int err;
-
- ARLAN_DEBUG_ENTRY("arlan_setup_device");
-
- ap->conf = (struct arlan_shmem *)(ap+1);
-
- dev->tx_queue_len = tx_queue_len;
- dev->netdev_ops = &arlan_netdev_ops;
- dev->watchdog_timeo = 3*HZ;
-
- ap->irq_test_done = 0;
- ap->Conf = &arlan_conf[num];
-
- ap->Conf->pre_Command_Wait = 40;
- ap->Conf->rx_tweak1 = 30;
- ap->Conf->rx_tweak2 = 0;
-
-
- err = register_netdev(dev);
- if (err) {
- release_mem_region(virt_to_phys((void *) dev->mem_start),
- ARLAN_SHMEM_SIZE);
- free_netdev(dev);
- return err;
- }
- arlan_device[num] = dev;
- ARLAN_DEBUG_EXIT("arlan_setup_device");
- return 0;
-}
-
-static int __init arlan_probe_here(struct net_device *dev,
- unsigned long memaddr)
-{
- struct arlan_private *ap = netdev_priv(dev);
-
- ARLAN_DEBUG_ENTRY("arlan_probe_here");
-
- if (arlan_check_fingerprint(memaddr))
- return -ENODEV;
-
- printk(KERN_NOTICE "%s: Arlan found at %llx, \n ", dev->name,
- (u64) virt_to_phys((void*)memaddr));
-
- ap->card = (void *) memaddr;
- dev->mem_start = memaddr;
- dev->mem_end = memaddr + ARLAN_SHMEM_SIZE-1;
-
- if (dev->irq < 2)
- {
- READSHM(dev->irq, ap->card->irqLevel, u_char);
- } else if (dev->irq == 2)
- dev->irq = 9;
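- /* ISA IRQ 2 is cascaded to IRQ 9 on AT-compatible machines */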
-
- arlan_read_card_configuration(dev);
-
- ARLAN_DEBUG_EXIT("arlan_probe_here");
- return 0;
-}
-
-
-static int arlan_open(struct net_device *dev)
-{
- struct arlan_private *priv = netdev_priv(dev);
- volatile struct arlan_shmem __iomem *arlan = priv->card;
- int ret = 0;
-
- ARLAN_DEBUG_ENTRY("arlan_open");
-
- ret = request_irq(dev->irq, &arlan_interrupt, 0, dev->name, dev);
- if (ret)
- {
- printk(KERN_ERR "%s: unable to get IRQ %d .\n",
- dev->name, dev->irq);
- return ret;
- }
-
-
- priv->bad = 0;
- priv->lastReset = 0;
- priv->reset = 0;
- memcpy_fromio(dev->dev_addr, arlan->lanCardNodeId, 6);
- memset(dev->broadcast, 0xff, 6);
- dev->tx_queue_len = tx_queue_len;
- priv->interrupt_processing_active = 0;
- spin_lock_init(&priv->lock);
-
- netif_start_queue (dev);
-
- priv->registrationLostCount = 0;
- priv->registrationLastSeen = jiffies;
- priv->txLast = 0;
- priv->tx_command_given = 0;
- priv->rx_command_given = 0;
-
- priv->reRegisterExp = 1;
- priv->tx_last_sent = jiffies - 1;
- priv->tx_last_cleared = jiffies;
- priv->Conf->writeEEPROM = 0;
- priv->Conf->registrationInterrupts = 1;
-
- init_timer(&priv->timer);
- priv->timer.expires = jiffies + HZ / 10;
- priv->timer.data = (unsigned long) dev;
- priv->timer.function = &arlan_registration_timer; /* timer handler */
-
- arlan_command(dev, ARLAN_COMMAND_POWERUP | ARLAN_COMMAND_LONG_WAIT_NOW);
- mdelay(200);
- add_timer(&priv->timer);
-
- ARLAN_DEBUG_EXIT("arlan_open");
- return 0;
-}
-
-
-static void arlan_tx_timeout (struct net_device *dev)
-{
- printk(KERN_ERR "%s: arlan transmit timed out, kernel decided\n", dev->name);
- /* Try to restart the adaptor. */
- arlan_command(dev, ARLAN_COMMAND_CLEAN_AND_RESET);
- // dev->trans_start = jiffies;
- // netif_start_queue (dev);
-}
-
-
-static int arlan_tx(struct sk_buff *skb, struct net_device *dev)
-{
- short length;
- unsigned char *buf;
-
- ARLAN_DEBUG_ENTRY("arlan_tx");
-
- length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
- buf = skb->data;
-
- if (length + 0x12 > 0x800) {
- printk(KERN_ERR "TX RING overflow \n");
- netif_stop_queue (dev);
- }
-
- if (arlan_hw_tx(dev, buf, length) == -1)
- goto bad_end;
-
- dev->trans_start = jiffies;
-
- dev_kfree_skb(skb);
-
- arlan_process_interrupt(dev);
- ARLAN_DEBUG_EXIT("arlan_tx");
- return 0;
-
-bad_end:
- arlan_process_interrupt(dev);
- netif_stop_queue (dev);
- ARLAN_DEBUG_EXIT("arlan_tx");
- return 1;
-}
-
-
-static inline int DoNotReTransmitCrap(struct net_device *dev)
-{
- struct arlan_private *priv = netdev_priv(dev);
-
- if (TXLAST(dev).length < priv->Conf->ReTransmitPacketMaxSize)
- return 1;
- return 0;
-
-}
-
-static inline int DoNotWaitReTransmitCrap(struct net_device *dev)
-{
- struct arlan_private *priv = netdev_priv(dev);
-
- if (TXLAST(dev).length < priv->Conf->waitReTransmitPacketMaxSize)
- return 1;
- return 0;
-}
-
-static inline void arlan_queue_retransmit(struct net_device *dev)
-{
- struct arlan_private *priv = netdev_priv(dev);
-
- ARLAN_DEBUG_ENTRY("arlan_queue_retransmit");
-
- if (DoNotWaitReTransmitCrap(dev))
- {
- arlan_drop_tx(dev);
- } else
- priv->ReTransmitRequested++;
-
- ARLAN_DEBUG_EXIT("arlan_queue_retransmit");
-}
-
-static inline void RetryOrFail(struct net_device *dev)
-{
- struct arlan_private *priv = netdev_priv(dev);
-
- ARLAN_DEBUG_ENTRY("RetryOrFail");
-
- if (priv->retransmissions > priv->Conf->retries ||
- DoNotReTransmitCrap(dev))
- {
- arlan_drop_tx(dev);
- }
- else if (priv->bad <= priv->Conf->fastReTransCount)
- {
- arlan_retransmit_now(dev);
- }
- else arlan_queue_retransmit(dev);
-
- ARLAN_DEBUG_EXIT("RetryOrFail");
-}
-
-
-static void arlan_tx_done_interrupt(struct net_device *dev, int status)
-{
- struct arlan_private *priv = netdev_priv(dev);
-
- ARLAN_DEBUG_ENTRY("arlan_tx_done_interrupt");
-
- priv->tx_last_cleared = jiffies;
- priv->tx_command_given = 0;
- switch (status)
- {
- case 1:
- {
- IFDEBUG(ARLAN_DEBUG_TX_CHAIN)
- printk("arlan intr: transmit OK\n");
- dev->stats.tx_packets++;
- priv->bad = 0;
- priv->reset = 0;
- priv->retransmissions = 0;
- if (priv->Conf->tx_delay_ms)
- {
- priv->tx_done_delayed = jiffies + (priv->Conf->tx_delay_ms * HZ) / 1000 + 1;
- }
- else
- {
- TXLAST(dev).offset = 0;
- if (priv->txLast)
- priv->txLast = 0;
- else if (TXTAIL(dev).offset)
- priv->txLast = 1;
- if (TXLAST(dev).offset)
- {
- arlan_retransmit_now(dev);
- dev->trans_start = jiffies;
- }
- if (!TXHEAD(dev).offset || !TXTAIL(dev).offset)
- {
- netif_wake_queue (dev);
- }
- }
- }
- break;
-
- case 2:
- {
- IFDEBUG(ARLAN_DEBUG_TX_CHAIN)
- printk("arlan intr: transmit timed out\n");
- priv->bad += 1;
- //arlan_queue_retransmit(dev);
- RetryOrFail(dev);
- }
- break;
-
- case 3:
- {
- IFDEBUG(ARLAN_DEBUG_TX_CHAIN)
- printk("arlan intr: transmit max retries\n");
- priv->bad += 1;
- priv->reset = 0;
- //arlan_queue_retransmit(dev);
- RetryOrFail(dev);
- }
- break;
-
- case 4:
- {
- IFDEBUG(ARLAN_DEBUG_TX_CHAIN)
- printk("arlan intr: transmit aborted\n");
- priv->bad += 1;
- arlan_queue_retransmit(dev);
- //RetryOrFail(dev);
- }
- break;
-
- case 5:
- {
- IFDEBUG(ARLAN_DEBUG_TX_CHAIN)
- printk("arlan intr: transmit not registered\n");
- priv->bad += 1;
- //debug=101;
- arlan_queue_retransmit(dev);
- }
- break;
-
- case 6:
- {
- IFDEBUG(ARLAN_DEBUG_TX_CHAIN)
- printk("arlan intr: transmit destination full\n");
- priv->bad += 1;
- priv->reset = 0;
- //arlan_drop_tx(dev);
- arlan_queue_retransmit(dev);
- }
- break;
-
- case 7:
- {
- IFDEBUG(ARLAN_DEBUG_TX_CHAIN)
- printk("arlan intr: transmit unknown ack\n");
- priv->bad += 1;
- priv->reset = 0;
- arlan_queue_retransmit(dev);
- }
- break;
-
- case 8:
- {
- IFDEBUG(ARLAN_DEBUG_TX_CHAIN)
- printk("arlan intr: transmit dest mail box full\n");
- priv->bad += 1;
- priv->reset = 0;
- //arlan_drop_tx(dev);
- arlan_queue_retransmit(dev);
- }
- break;
-
- case 9:
- {
- IFDEBUG(ARLAN_DEBUG_TX_CHAIN)
- printk("arlan intr: transmit root dest not reg.\n");
- priv->bad += 1;
- priv->reset = 1;
- //arlan_drop_tx(dev);
- arlan_queue_retransmit(dev);
- }
- break;
-
- default:
- {
- printk(KERN_ERR "arlan intr: transmit status unknown\n");
- priv->bad += 1;
- priv->reset = 1;
- arlan_drop_tx(dev);
- }
- }
-
- ARLAN_DEBUG_EXIT("arlan_tx_done_interrupt");
-}
-
-
-static void arlan_rx_interrupt(struct net_device *dev, u_char rxStatus, u_short rxOffset, u_short pkt_len)
-{
- char *skbtmp;
- int i = 0;
-
- struct arlan_private *priv = netdev_priv(dev);
- volatile struct arlan_shmem __iomem *arlan = priv->card;
- struct arlan_conf_stru *conf = priv->Conf;
-
-
- ARLAN_DEBUG_ENTRY("arlan_rx_interrupt");
- /* by spec, not prohibited here:
-  *   WRITESHMB(arlan->rxStatus, 0x00);
-  *   arlan_command(dev, ARLAN_COMMAND_RX);
-  * (both are issued later, once the packet has been copied out)
-  */
-
- if (pkt_len < 10 || pkt_len > 2048)
- {
- printk(KERN_WARNING "%s: got too short or long packet, len %d \n", dev->name, pkt_len);
- return;
- }
- if (rxOffset + pkt_len > 0x2000)
- {
- printk("%s: got too long packet, len %d offset %x\n", dev->name, pkt_len, rxOffset);
- return;
- }
- priv->in_bytes += pkt_len;
- priv->in_bytes10 += pkt_len;
- if (conf->measure_rate < 1)
- conf->measure_rate = 1;
- if (time_after(jiffies, priv->in_time + conf->measure_rate * HZ))
- {
- conf->in_speed = priv->in_bytes / conf->measure_rate;
- priv->in_bytes = 0;
- priv->in_time = jiffies;
- }
- if (time_after(jiffies, priv->in_time10 + conf->measure_rate * 10*HZ))
- {
- conf->in_speed10 = priv->in_bytes10 / (10 * conf->measure_rate);
- priv->in_bytes10 = 0;
- priv->in_time10 = jiffies;
- }
- DEBUGSHM(1, "arlan rcv pkt rxStatus= %d ", arlan->rxStatus, u_char);
- switch (rxStatus)
- {
- case 1:
- case 2:
- case 3:
- {
- /* Malloc up new buffer. */
- struct sk_buff *skb;
-
- DEBUGSHM(50, "arlan recv pkt offs=%d\n", arlan->rxOffset, u_short);
- DEBUGSHM(1, "arlan rxFrmType = %d \n", arlan->rxFrmType, u_char);
- DEBUGSHM(1, KERN_INFO "arlan rx scrambled = %d \n", arlan->scrambled, u_char);
-
- /* here we do multicast filtering to avoid slow 8-bit memcopy */
-#ifdef ARLAN_MULTICAST
- if (!(dev->flags & IFF_ALLMULTI) &&
- !(dev->flags & IFF_PROMISC) &&
- dev->mc_list)
- {
- char hw_dst_addr[6];
- struct dev_mc_list *dmi = dev->mc_list;
- int i;
-
- memcpy_fromio(hw_dst_addr, arlan->ultimateDestAddress, 6);
- if (hw_dst_addr[0] == 0x01)
- {
- if (mdebug) {
- if (hw_dst_addr[1] == 0x00)
- printk(KERN_ERR "%s mcast 0x0100 \n", dev->name);
- else if (hw_dst_addr[1] == 0x40)
- printk(KERN_ERR "%s m/bcast 0x0140 \n", dev->name);
- }
- while (dmi)
- {
- if (dmi->dmi_addrlen == 6) {
- if (arlan_debug & ARLAN_DEBUG_HEADER_DUMP)
- printk(KERN_ERR "%s mcl %pM\n",
- dev->name, dmi->dmi_addr);
- for (i = 0; i < 6; i++)
- if (dmi->dmi_addr[i] != hw_dst_addr[i])
- break;
- if (i == 6)
- break;
- } else
- printk(KERN_ERR "%s: invalid multicast address length given.\n", dev->name);
- dmi = dmi->next;
- }
- /* we reach here if multicast filtering is on and packet
- * is multicast and not for receive */
- goto end_of_interrupt;
- }
- }
-#endif // ARLAN_MULTICAST
- /* multicast filtering ends here */
- pkt_len += ARLAN_FAKE_HDR_LEN;
-
- skb = dev_alloc_skb(pkt_len + 4);
- if (skb == NULL)
- {
- printk(KERN_ERR "%s: Memory squeeze, dropping packet.\n", dev->name);
- dev->stats.rx_dropped++;
- break;
- }
- skb_reserve(skb, 2);
- skbtmp = skb_put(skb, pkt_len);
-
- memcpy_fromio(skbtmp + ARLAN_FAKE_HDR_LEN, ((char __iomem *) arlan) + rxOffset, pkt_len - ARLAN_FAKE_HDR_LEN);
- memcpy_fromio(skbtmp, arlan->ultimateDestAddress, 6);
- memcpy_fromio(skbtmp + 6, arlan->rxSrc, 6);
- WRITESHMB(arlan->rxStatus, 0x00);
- arlan_command(dev, ARLAN_COMMAND_RX);
-
- IFDEBUG(ARLAN_DEBUG_HEADER_DUMP)
- {
- char immedDestAddress[6];
- char immedSrcAddress[6];
- memcpy_fromio(immedDestAddress, arlan->immedDestAddress, 6);
- memcpy_fromio(immedSrcAddress, arlan->immedSrcAddress, 6);
-
- printk(KERN_WARNING "%s t %pM f %pM imd %pM ims %pM\n",
- dev->name, skbtmp,
- &skbtmp[6],
- immedDestAddress,
- immedSrcAddress);
- }
- skb->protocol = eth_type_trans(skb, dev);
- IFDEBUG(ARLAN_DEBUG_HEADER_DUMP)
- if (skb->protocol != 0x608 && skb->protocol != 0x8)
- {
- for (i = 0; i <= 22; i++)
- printk("%02x:", (u_char) skbtmp[i + 12]);
- printk(KERN_ERR "\n");
- printk(KERN_WARNING "arlan kernel pkt type trans %x \n", skb->protocol);
- }
- netif_rx(skb);
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += pkt_len;
- }
- break;
-
- default:
- printk(KERN_ERR "arlan intr: received unknown status\n");
- dev->stats.rx_crc_errors++;
- break;
- }
- ARLAN_DEBUG_EXIT("arlan_rx_interrupt");
-}
-
-static void arlan_process_interrupt(struct net_device *dev)
-{
- struct arlan_private *priv = netdev_priv(dev);
- volatile struct arlan_shmem __iomem *arlan = priv->card;
- u_char rxStatus = READSHMB(arlan->rxStatus);
- u_char txStatus = READSHMB(arlan->txStatus);
- u_short rxOffset = READSHMS(arlan->rxOffset);
- u_short pkt_len = READSHMS(arlan->rxLength);
- int interrupt_count = 0;
-
- ARLAN_DEBUG_ENTRY("arlan_process_interrupt");
-
- if (test_and_set_bit(0, (void *) &priv->interrupt_processing_active))
- {
- if (arlan_debug & ARLAN_DEBUG_CHAIN_LOCKS)
- printk(KERN_ERR "interrupt chain reentering \n");
- goto end_int_process;
- }
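- /* service pending rx/tx events, at most 5 per invocation,
-  * to bound the time spent in interrupt processing */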
- while ((rxStatus || txStatus || priv->interrupt_ack_requested)
- && (interrupt_count < 5))
- {
- if (rxStatus)
- priv->last_rx_int_ack_time = jiffies;
-
- arlan_command(dev, ARLAN_COMMAND_INT_ACK);
- arlan_command(dev, ARLAN_COMMAND_INT_ENABLE);
-
- IFDEBUG(ARLAN_DEBUG_INTERRUPT)
- printk(KERN_ERR "%s: got IRQ rx %x tx %x comm %x rxOff %x rxLen %x \n",
- dev->name, rxStatus, txStatus, READSHMB(arlan->commandByte),
- rxOffset, pkt_len);
-
- if (rxStatus == 0 && txStatus == 0)
- {
- if (priv->irq_test_done)
- {
- if (!registrationBad(dev))
- IFDEBUG(ARLAN_DEBUG_INTERRUPT) printk(KERN_ERR "%s unknown interrupt(nop? regLost ?) reason tx %d rx %d ",
- dev->name, txStatus, rxStatus);
- } else {
- IFDEBUG(ARLAN_DEBUG_INTERRUPT)
- printk(KERN_INFO "%s irq $%d test OK \n", dev->name, dev->irq);
-
- }
- priv->interrupt_ack_requested = 0;
- goto ends;
- }
- if (txStatus != 0)
- {
- WRITESHMB(arlan->txStatus, 0x00);
- arlan_tx_done_interrupt(dev, txStatus);
- goto ends;
- }
- if (rxStatus == 1 || rxStatus == 2)
- { /* a packet waiting */
- arlan_rx_interrupt(dev, rxStatus, rxOffset, pkt_len);
- goto ends;
- }
- if (rxStatus > 2 && rxStatus < 0xff)
- {
- WRITESHMB(arlan->rxStatus, 0x00);
- printk(KERN_ERR "%s unknown rxStatus reason tx %d rx %d ",
- dev->name, txStatus, rxStatus);
- goto ends;
- }
- if (rxStatus == 0xff)
- {
- WRITESHMB(arlan->rxStatus, 0x00);
- arlan_command(dev, ARLAN_COMMAND_RX);
- if (registrationBad(dev))
- netif_device_detach(dev);
- if (!registrationBad(dev))
- {
- priv->registrationLastSeen = jiffies;
- if (!netif_queue_stopped(dev) && !priv->under_reset && !priv->under_config)
- netif_wake_queue (dev);
- }
- goto ends;
- }
-ends:
-
- arlan_command_process(dev);
-
- rxStatus = READSHMB(arlan->rxStatus);
- txStatus = READSHMB(arlan->txStatus);
- rxOffset = READSHMS(arlan->rxOffset);
- pkt_len = READSHMS(arlan->rxLength);
-
-
- priv->irq_test_done = 1;
-
- interrupt_count++;
- }
- priv->interrupt_processing_active = 0;
-
-end_int_process:
- arlan_command_process(dev);
-
- ARLAN_DEBUG_EXIT("arlan_process_interrupt");
- return;
-}
-
-static irqreturn_t arlan_interrupt(int irq, void *dev_id)
-{
- struct net_device *dev = dev_id;
- struct arlan_private *priv = netdev_priv(dev);
- volatile struct arlan_shmem __iomem *arlan = priv->card;
- u_char rxStatus = READSHMB(arlan->rxStatus);
- u_char txStatus = READSHMB(arlan->txStatus);
-
- ARLAN_DEBUG_ENTRY("arlan_interrupt");
-
-
- if (!rxStatus && !txStatus)
- priv->interrupt_ack_requested++;
-
- arlan_process_interrupt(dev);
-
- priv->irq_test_done = 1;
-
- ARLAN_DEBUG_EXIT("arlan_interrupt");
- return IRQ_HANDLED;
-
-}
-
-
-static int arlan_close(struct net_device *dev)
-{
- struct arlan_private *priv = netdev_priv(dev);
-
- ARLAN_DEBUG_ENTRY("arlan_close");
-
- del_timer_sync(&priv->timer);
-
- arlan_command(dev, ARLAN_COMMAND_POWERDOWN);
-
- IFDEBUG(ARLAN_DEBUG_STARTUP)
- printk(KERN_NOTICE "%s: Closing device\n", dev->name);
-
- netif_stop_queue(dev);
- free_irq(dev->irq, dev);
-
- ARLAN_DEBUG_EXIT("arlan_close");
- return 0;
-}
-
-#ifdef ARLAN_DEBUGGING
-static long alignLong(volatile u_char * ptr)
-{
- long ret;
- memcpy_fromio(&ret, (void *) ptr, 4);
- return ret;
-}
-#endif
-
-/*
- * Get the current statistics.
- * This may be called with the card open or closed.
- */
-
-static struct net_device_stats *arlan_statistics(struct net_device *dev)
-{
- struct arlan_private *priv = netdev_priv(dev);
- volatile struct arlan_shmem __iomem *arlan = priv->card;
-
-
- ARLAN_DEBUG_ENTRY("arlan_statistics");
-
- /* Update the statistics from the device registers. */
-
- READSHM(dev->stats.collisions, arlan->numReTransmissions, u_int);
- READSHM(dev->stats.rx_crc_errors, arlan->numCRCErrors, u_int);
- READSHM(dev->stats.rx_dropped, arlan->numFramesDiscarded, u_int);
- READSHM(dev->stats.rx_fifo_errors, arlan->numRXBufferOverflows, u_int);
- READSHM(dev->stats.rx_frame_errors, arlan->numReceiveFramesLost, u_int);
- READSHM(dev->stats.rx_over_errors, arlan->numRXOverruns, u_int);
- READSHM(dev->stats.rx_packets, arlan->numDatagramsReceived, u_int);
- READSHM(dev->stats.tx_aborted_errors, arlan->numAbortErrors, u_int);
- READSHM(dev->stats.tx_carrier_errors, arlan->numStatusTimeouts, u_int);
- READSHM(dev->stats.tx_dropped, arlan->numDatagramsDiscarded, u_int);
- READSHM(dev->stats.tx_fifo_errors, arlan->numTXUnderruns, u_int);
- READSHM(dev->stats.tx_packets, arlan->numDatagramsTransmitted, u_int);
- READSHM(dev->stats.tx_window_errors, arlan->numHoldOffs, u_int);
-
- ARLAN_DEBUG_EXIT("arlan_statistics");
-
- return &dev->stats;
-}
-
-
-static void arlan_set_multicast(struct net_device *dev)
-{
- struct arlan_private *priv = netdev_priv(dev);
- volatile struct arlan_shmem __iomem *arlan = priv->card;
- struct arlan_conf_stru *conf = priv->Conf;
- int board_conf_needed = 0;
-
-
- ARLAN_DEBUG_ENTRY("arlan_set_multicast");
-
- if (dev->flags & IFF_PROMISC)
- {
- unsigned char recMode;
- READSHM(recMode, arlan->receiveMode, u_char);
- conf->receiveMode = (ARLAN_RCV_PROMISC | ARLAN_RCV_CONTROL);
- if (conf->receiveMode != recMode)
- board_conf_needed = 1;
- }
- else
- {
- /* turn off promiscuous mode */
- unsigned char recMode;
- READSHM(recMode, arlan->receiveMode, u_char);
- conf->receiveMode = ARLAN_RCV_CLEAN | ARLAN_RCV_CONTROL;
- if (conf->receiveMode != recMode)
- board_conf_needed = 1;
- }
- if (board_conf_needed)
- arlan_command(dev, ARLAN_COMMAND_CONF);
-
- ARLAN_DEBUG_EXIT("arlan_set_multicast");
-}
-
-
-struct net_device * __init arlan_probe(int unit)
-{
- struct net_device *dev;
- int err;
- int m;
-
- ARLAN_DEBUG_ENTRY("arlan_probe");
-
- if (arlans_found == MAX_ARLANS)
- return ERR_PTR(-ENODEV);
-
- /*
- * Reserve space for local data and a copy of the shared memory
- * that is used by the /proc interface.
- */
- dev = alloc_etherdev(sizeof(struct arlan_private)
- + sizeof(struct arlan_shmem));
- if (!dev)
- return ERR_PTR(-ENOMEM);
-
- if (unit >= 0) {
- sprintf(dev->name, "eth%d", unit);
- netdev_boot_setup_check(dev);
-
- if (dev->mem_start) {
- if (arlan_probe_here(dev, dev->mem_start) == 0)
- goto found;
- goto not_found;
- }
-
- }
-
-
- for (m = (int)phys_to_virt(lastFoundAt) + ARLAN_SHMEM_SIZE;
- m <= (int)phys_to_virt(0xDE000);
- m += ARLAN_SHMEM_SIZE)
- {
- if (arlan_probe_here(dev, m) == 0)
- {
- lastFoundAt = (int)virt_to_phys((void*)m);
- goto found;
- }
- }
-
- if (lastFoundAt == 0xbe000)
- printk(KERN_ERR "arlan: No Arlan devices found \n");
-
- not_found:
- free_netdev(dev);
- return ERR_PTR(-ENODEV);
-
- found:
- err = arlan_setup_device(dev, arlans_found);
- if (err)
- dev = ERR_PTR(err);
- else if (!arlans_found++)
- printk(KERN_INFO "Arlan driver %s\n", arlan_version);
-
- return dev;
-}
-
-#ifdef MODULE
-int __init init_module(void)
-{
- int i = 0;
-
- ARLAN_DEBUG_ENTRY("init_module");
-
- if (channelSet != channelSetUNKNOWN || channelNumber != channelNumberUNKNOWN || systemId != systemIdUNKNOWN)
- return -EINVAL;
-
- for (i = 0; i < MAX_ARLANS; i++) {
- struct net_device *dev = arlan_probe(i);
-
- if (IS_ERR(dev))
- return PTR_ERR(dev);
- }
- init_arlan_proc();
- printk(KERN_INFO "Arlan driver %s\n", arlan_version);
- ARLAN_DEBUG_EXIT("init_module");
- return 0;
-}
-
-
-void __exit cleanup_module(void)
-{
- int i = 0;
- struct net_device *dev;
-
- ARLAN_DEBUG_ENTRY("cleanup_module");
-
- IFDEBUG(ARLAN_DEBUG_SHUTDOWN)
- printk(KERN_INFO "arlan: unloading module\n");
-
- cleanup_arlan_proc();
-
- for (i = 0; i < MAX_ARLANS; i++)
- {
- dev = arlan_device[i];
- if (dev) {
- arlan_command(dev, ARLAN_COMMAND_POWERDOWN );
-
- unregister_netdev(dev);
- release_mem_region(virt_to_phys((void *) dev->mem_start),
- ARLAN_SHMEM_SIZE);
- free_netdev(dev);
- arlan_device[i] = NULL;
- }
- }
-
- ARLAN_DEBUG_EXIT("cleanup_module");
-}
-
-
-#endif
-MODULE_LICENSE("GPL");
diff --git a/drivers/net/wireless/arlan-proc.c b/drivers/net/wireless/arlan-proc.c
deleted file mode 100644
index 2ab1d59870f4..000000000000
--- a/drivers/net/wireless/arlan-proc.c
+++ /dev/null
@@ -1,1253 +0,0 @@
-#include "arlan.h"
-
-#include <linux/sysctl.h>
-
-#ifdef CONFIG_PROC_FS
-
-/* void enableReceive(struct net_device* dev);
-*/
-
-
-
-#define ARLAN_STR_SIZE 0x2ff0
-#define DEV_ARLAN_INFO 1
-#define DEV_ARLAN 1
-#define SARLG(type,var) {\
- pos += sprintf(arlan_drive_info+pos, "%s\t=\t0x%x\n", #var, READSHMB(priva->card->var)); \
- }
-
-#define SARLBN(type,var,nn) {\
- pos += sprintf(arlan_drive_info+pos, "%s\t=\t0x",#var);\
- for (i=0; i < nn; i++ ) pos += sprintf(arlan_drive_info+pos, "%02x",READSHMB(priva->card->var[i]));\
- pos += sprintf(arlan_drive_info+pos, "\n"); \
- }
-
-#define SARLBNpln(type,var,nn) {\
- for (i=0; i < nn; i++ ) pos += sprintf(arlan_drive_info+pos, "%02x",READSHMB(priva->card->var[i]));\
- }
-
-#define SARLSTR(var,nn) {\
- char tmpStr[400];\
- int tmpLn = nn;\
- if (nn > 399 ) tmpLn = 399; \
- memcpy(tmpStr,(char *) priva->conf->var,tmpLn);\
- tmpStr[tmpLn] = 0; \
- pos += sprintf(arlan_drive_info+pos, "%s\t=\t%s \n",#var,tmpStr);\
- }
-
-#define SARLUC(var) SARLG(u_char, var)
-#define SARLUCN(var,nn) SARLBN(u_char,var, nn)
-#define SARLUS(var) SARLG(u_short, var)
-#define SARLUSN(var,nn) SARLBN(u_short,var, nn)
-#define SARLUI(var) SARLG(u_int, var)
-
-#define SARLUSA(var) {\
- u_short tmpVar;\
- memcpy(&tmpVar, (short *) priva->conf->var,2); \
- pos += sprintf(arlan_drive_info+pos, "%s\t=\t0x%x\n",#var, tmpVar);\
-}
-
-#define SARLUIA(var) {\
- u_int tmpVar;\
- memcpy(&tmpVar, (int* )priva->conf->var,4); \
- pos += sprintf(arlan_drive_info+pos, "%s\t=\t0x%x\n",#var, tmpVar);\
-}
-
-
-static const char *arlan_diagnostic_info_string(struct net_device *dev)
-{
-
- struct arlan_private *priv = netdev_priv(dev);
- volatile struct arlan_shmem __iomem *arlan = priv->card;
- u_char diagnosticInfo;
-
- READSHM(diagnosticInfo, arlan->diagnosticInfo, u_char);
-
- switch (diagnosticInfo)
- {
- case 0xFF:
- return "Diagnostic info is OK";
- case 0xFE:
- return "ERROR EPROM Checksum error ";
- case 0xFD:
- return "ERROR Local Ram Test Failed ";
- case 0xFC:
- return "ERROR SCC failure ";
- case 0xFB:
- return "ERROR BackBone failure ";
- case 0xFA:
- return "ERROR transceiver not found ";
- case 0xF9:
- return "ERROR no more address space ";
- case 0xF8:
- return "ERROR Checksum error ";
- case 0xF7:
- return "ERROR Missing SS Code";
- case 0xF6:
- return "ERROR Invalid config format";
- case 0xF5:
- return "ERROR Reserved errorcode F5";
- case 0xF4:
- return "ERROR Invalid spreading code/channel number";
- case 0xF3:
- return "ERROR Load Code Error";
- case 0xF2:
- return "ERROR Reserved errorcode F2 ";
- case 0xF1:
- return "ERROR Invalid command received by LAN card ";
- case 0xF0:
- return "ERROR Invalid parameter found in command ";
- case 0xEF:
- return "ERROR On-chip timer failure ";
- case 0xEE:
- return "ERROR T410 timer failure ";
- case 0xED:
- return "ERROR Too Many TxEnable commands ";
- case 0xEC:
- return "ERROR EEPROM error on radio module ";
- default:
- return "ERROR unknown Diagnostic info reply code ";
- }
-}
-
-static const char *arlan_hardware_type_string(struct net_device *dev)
-{
- u_char hardwareType;
- struct arlan_private *priv = netdev_priv(dev);
- volatile struct arlan_shmem __iomem *arlan = priv->card;
-
- READSHM(hardwareType, arlan->hardwareType, u_char);
- switch (hardwareType)
- {
- case 0x00:
- return "type A450";
- case 0x01:
- return "type A650 ";
- case 0x04:
- return "type TMA coproc";
- case 0x0D:
- return "type A650E ";
- case 0x18:
- return "type TMA coproc Australian";
- case 0x19:
- return "type A650A ";
- case 0x26:
- return "type TMA coproc European";
- case 0x2E:
- return "type A655 ";
- case 0x2F:
- return "type A655A ";
- case 0x30:
- return "type A655E ";
- case 0x0B:
- return "type A670 ";
- case 0x0C:
- return "type A670E ";
- case 0x2D:
- return "type A670A ";
- case 0x0F:
- return "type A411T";
- case 0x16:
- return "type A411TA";
- case 0x1B:
- return "type A440T";
- case 0x1C:
- return "type A412T";
- case 0x1E:
- return "type A412TA";
- case 0x22:
- return "type A411TE";
- case 0x24:
- return "type A412TE";
- case 0x27:
- return "type A671T ";
- case 0x29:
- return "type A671TA ";
- case 0x2B:
- return "type A671TE ";
- case 0x31:
- return "type A415T ";
- case 0x33:
- return "type A415TA ";
- case 0x35:
- return "type A415TE ";
- case 0x37:
- return "type A672";
- case 0x39:
- return "type A672A ";
- case 0x3B:
- return "type A672T";
- case 0x6B:
- return "type IC2200";
- default:
- return "type A672T";
- }
-}
-#ifdef ARLAN_DEBUGGING
-static void arlan_print_diagnostic_info(struct net_device *dev)
-{
- int i;
- u_char diagnosticInfo;
- u_short diagnosticOffset;
- u_char hardwareType;
- struct arlan_private *priv = netdev_priv(dev);
- volatile struct arlan_shmem __iomem *arlan = priv->card;
-
- // ARLAN_DEBUG_ENTRY("arlan_print_diagnostic_info");
-
- if (READSHMB(arlan->configuredStatusFlag) == 0)
- printk("Arlan: Card NOT configured\n");
- else
- printk("Arlan: Card is configured\n");
-
- READSHM(diagnosticInfo, arlan->diagnosticInfo, u_char);
- READSHM(diagnosticOffset, arlan->diagnosticOffset, u_short);
-
- printk(KERN_INFO "%s\n", arlan_diagnostic_info_string(dev));
-
- if (diagnosticInfo != 0xff)
- printk("%s arlan: Diagnostic Offset %d \n", dev->name, diagnosticOffset);
-
- printk("arlan: LAN CODE ID = ");
- for (i = 0; i < 6; i++)
- DEBUGSHM(1, "%03d:", arlan->lanCardNodeId[i], u_char);
- printk("\n");
-
- printk("arlan: Arlan BroadCast address = ");
- for (i = 0; i < 6; i++)
- DEBUGSHM(1, "%03d:", arlan->broadcastAddress[i], u_char);
- printk("\n");
-
- READSHM(hardwareType, arlan->hardwareType, u_char);
- printk(KERN_INFO "%s\n", arlan_hardware_type_string(dev));
-
-
- DEBUGSHM(1, "arlan: channelNumber=%d\n", arlan->channelNumber, u_char);
- DEBUGSHM(1, "arlan: channelSet=%d\n", arlan->channelSet, u_char);
- DEBUGSHM(1, "arlan: spreadingCode=%d\n", arlan->spreadingCode, u_char);
- DEBUGSHM(1, "arlan: radioNodeId=%d\n", arlan->radioNodeId, u_short);
- DEBUGSHM(1, "arlan: SID =%d\n", arlan->SID, u_short);
- DEBUGSHM(1, "arlan: rxOffset=%d\n", arlan->rxOffset, u_short);
-
- DEBUGSHM(1, "arlan: registration mode is %d\n", arlan->registrationMode, u_char);
-
- printk("arlan: name= ");
- IFDEBUG(1)
-
- for (i = 0; i < 16; i++)
- {
- char c;
- READSHM(c, arlan->name[i], char);
- if (c)
- printk("%c", c);
- }
- printk("\n");
-
-// ARLAN_DEBUG_EXIT("arlan_print_diagnostic_info");
-
-}
-
-
-/****************************** TEST MEMORY **************/
-
-static int arlan_hw_test_memory(struct net_device *dev)
-{
- u_char *ptr;
- int i;
- int memlen = sizeof(struct arlan_shmem) - 0xF; /* avoid control register */
- volatile char *arlan_mem = (char *) (dev->mem_start);
- struct arlan_private *priv = netdev_priv(dev);
- volatile struct arlan_shmem __iomem *arlan = priv->card;
- char pattern;
-
- ptr = NULL;
-
- /* hold card in reset state */
- setHardwareReset(dev);
-
- /* test memory */
- pattern = 0;
- for (i = 0; i < memlen; i++)
- WRITESHM(arlan_mem[i], ((u_char) pattern++), u_char);
-
- pattern = 0;
- for (i = 0; i < memlen; i++)
- {
- char res;
- READSHM(res, arlan_mem[i], char);
- if (res != pattern++)
- {
- printk(KERN_ERR "Arlan driver memory test 1 failed \n");
- return -1;
- }
- }
-
- pattern = 0;
- for (i = 0; i < memlen; i++)
- WRITESHM(arlan_mem[i], ~(pattern++), char);
-
- pattern = 0;
- for (i = 0; i < memlen; i++)
- {
- char res;
- READSHM(res, arlan_mem[i], char);
- if (res != ~(pattern++))
- {
- printk(KERN_ERR "Arlan driver memory test 2 failed \n");
- return -1;
- }
- }
-
- /* zero memory */
- for (i = 0; i < memlen; i++)
- WRITESHM(arlan_mem[i], 0x00, char);
-
- IFDEBUG(1) printk(KERN_INFO "Arlan: memory tests ok\n");
-
- /* set reset flag and then release reset */
- WRITESHM(arlan->resetFlag, 0xff, u_char);
-
- clearChannelAttention(dev);
- clearHardwareReset(dev);
-
- /* wait for reset flag to become zero, we'll wait for two seconds */
- if (arlan_command(dev, ARLAN_COMMAND_LONG_WAIT_NOW))
- {
- printk(KERN_ERR "%s arlan: failed to come back from memory test\n", dev->name);
- return -1;
- }
- return 0;
-}
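arlan_hw_test_memory() above is a classic two-pass pattern test: write an incrementing byte pattern and verify it, repeat with the complemented pattern so every bit is exercised both set and clear, then zero the region. A self-contained userspace sketch of the same algorithm over an ordinary buffer (the real driver goes through the READSHM/WRITESHM I/O-memory accessors):

    #include <stdio.h>
    #include <string.h>

    /* Two passes: incrementing pattern, then its complement, so every
     * cell is checked with each bit both set and clear. */
    static int pattern_test(unsigned char *mem, size_t len)
    {
        unsigned char pattern;
        size_t i;

        pattern = 0;
        for (i = 0; i < len; i++)
            mem[i] = pattern++;
        pattern = 0;
        for (i = 0; i < len; i++)
            if (mem[i] != pattern++)
                return -1;      /* stuck bit or addressing fault */

        pattern = 0;
        for (i = 0; i < len; i++)
            mem[i] = (unsigned char)~pattern++;
        pattern = 0;
        for (i = 0; i < len; i++)
            if (mem[i] != (unsigned char)~pattern++)
                return -1;

        memset(mem, 0, len);    /* leave the region zeroed, as the driver does */
        return 0;
    }

    int main(void)
    {
        static unsigned char buf[0x2000 - 0xF]; /* ARLAN_SHMEM_SIZE minus control regs */

        printf("memory test %s\n",
               pattern_test(buf, sizeof(buf)) ? "failed" : "ok");
        return 0;
    }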
-
-static int arlan_setup_card_by_book(struct net_device *dev)
-{
- u_char irqLevel, configuredStatusFlag;
- struct arlan_private *priv = netdev_priv(dev);
- volatile struct arlan_shmem __iomem *arlan = priv->card;
-
-// ARLAN_DEBUG_ENTRY("arlan_setup_card");
-
- READSHM(configuredStatusFlag, arlan->configuredStatusFlag, u_char);
-
- IFDEBUG(10)
- if (configuredStatusFlag != 0)
- IFDEBUG(10) printk("arlan: CARD IS CONFIGURED\n");
- else
- IFDEBUG(10) printk("arlan: card is NOT configured\n");
-
- if (testMemory || (READSHMB(arlan->diagnosticInfo) != 0xff))
- if (arlan_hw_test_memory(dev))
- return -1;
-
- DEBUGSHM(4, "arlan configuredStatus = %d \n", arlan->configuredStatusFlag, u_char);
- DEBUGSHM(4, "arlan driver diagnostic: 0x%2x\n", arlan->diagnosticInfo, u_char);
-
- /* issue nop command - no interrupt */
- arlan_command(dev, ARLAN_COMMAND_NOOP);
- if (arlan_command(dev, ARLAN_COMMAND_WAIT_NOW) != 0)
- return -1;
-
- IFDEBUG(50) printk("1st Noop successfully executed !!\n");
-
- /* try to turn on the arlan interrupts */
- clearClearInterrupt(dev);
- setClearInterrupt(dev);
- setInterruptEnable(dev);
-
- /* issue nop command - with interrupt */
-
- arlan_command(dev, ARLAN_COMMAND_NOOPINT);
- if (arlan_command(dev, ARLAN_COMMAND_WAIT_NOW) != 0)
- return -1;
-
-
- IFDEBUG(50) printk("2nd Noop successfully executed !!\n");
-
- READSHM(irqLevel, arlan->irqLevel, u_char)
-
- if (irqLevel != dev->irq)
- {
- IFDEBUG(1) printk(KERN_WARNING "arlan dip switches set irq to %d\n", irqLevel);
- printk(KERN_WARNING "device driver irq set to %d - does not match\n", dev->irq);
- dev->irq = irqLevel;
- }
- else
- IFDEBUG(2) printk("irq level is OK\n");
-
-
- IFDEBUG(3) arlan_print_diagnostic_info(dev);
-
- arlan_command(dev, ARLAN_COMMAND_CONF);
-
- READSHM(configuredStatusFlag, arlan->configuredStatusFlag, u_char);
- if (configuredStatusFlag == 0)
- {
- printk(KERN_WARNING "arlan configure failed\n");
- return -1;
- }
- arlan_command(dev, ARLAN_COMMAND_LONG_WAIT_NOW);
- arlan_command(dev, ARLAN_COMMAND_RX);
- arlan_command(dev, ARLAN_COMMAND_LONG_WAIT_NOW);
- printk(KERN_NOTICE "%s: arlan driver version %s loaded\n",
- dev->name, arlan_version);
-
-// ARLAN_DEBUG_EXIT("arlan_setup_card");
-
- return 0; /* no errors */
-}
-#endif
-
-#ifdef ARLAN_PROC_INTERFACE
-#ifdef ARLAN_PROC_SHM_DUMP
-
-static char arlan_drive_info[ARLAN_STR_SIZE] = "A655\n\0";
-
-static int arlan_sysctl_info(ctl_table * ctl, int write, struct file *filp,
- void __user *buffer, size_t * lenp, loff_t *ppos)
-{
- int i;
- int retv, pos, devnum;
- struct arlan_private *priva = NULL;
- struct net_device *dev;
- pos = 0;
- if (write)
- {
- printk("wrirte: ");
- for (i = 0; i < 100; i++)
- printk("adi %x \n", arlan_drive_info[i]);
- }
- if (ctl->procname == NULL || arlan_drive_info == NULL)
- {
- printk(KERN_WARNING " procname is NULL in sysctl_table or arlan_drive_info is NULL \n at arlan module\n ");
- return -1;
- }
- devnum = ctl->procname[5] - '0';
- if (devnum < 0 || devnum > MAX_ARLANS - 1)
- {
- printk(KERN_WARNING "too strange devnum in procfs parse\n ");
- return -1;
- }
- else if (arlan_device[devnum] == NULL)
- {
- if (ctl->procname)
- pos += sprintf(arlan_drive_info + pos, "\t%s\n\n", ctl->procname);
- pos += sprintf(arlan_drive_info + pos, "No device found here \n");
- goto final;
- }
- else
- priva = netdev_priv(arlan_device[devnum]);
-
- if (priva == NULL)
- {
- printk(KERN_WARNING " Could not find the device private in arlan procsys, bad\n ");
- return -1;
- }
- dev = arlan_device[devnum];
-
- memcpy_fromio(priva->conf, priva->card, sizeof(struct arlan_shmem));
-
- pos = sprintf(arlan_drive_info, "Arlan info \n");
- /* Header Signature */
- SARLSTR(textRegion, 48);
- SARLUC(resetFlag);
- pos += sprintf(arlan_drive_info + pos, "diagnosticInfo\t=\t%s \n", arlan_diagnostic_info_string(dev));
- SARLUC(diagnosticInfo);
- SARLUS(diagnosticOffset);
- SARLUCN(_1, 12);
- SARLUCN(lanCardNodeId, 6);
- SARLUCN(broadcastAddress, 6);
- pos += sprintf(arlan_drive_info + pos, "hardwareType =\t %s \n", arlan_hardware_type_string(dev));
- SARLUC(hardwareType);
- SARLUC(majorHardwareVersion);
- SARLUC(minorHardwareVersion);
- SARLUC(radioModule);
- SARLUC(defaultChannelSet);
- SARLUCN(_2, 47);
-
- /* Control/Status Block - 0x0080 */
- SARLUC(interruptInProgress);
- SARLUC(cntrlRegImage);
-
- SARLUCN(_3, 14);
- SARLUC(commandByte);
- SARLUCN(commandParameter, 15);
-
- /* Receive Status - 0x00a0 */
- SARLUC(rxStatus);
- SARLUC(rxFrmType);
- SARLUS(rxOffset);
- SARLUS(rxLength);
- SARLUCN(rxSrc, 6);
- SARLUC(rxBroadcastFlag);
- SARLUC(rxQuality);
- SARLUC(scrambled);
- SARLUCN(_4, 1);
-
- /* Transmit Status - 0x00b0 */
- SARLUC(txStatus);
- SARLUC(txAckQuality);
- SARLUC(numRetries);
- SARLUCN(_5, 14);
- SARLUCN(registeredRouter, 6);
- SARLUCN(backboneRouter, 6);
- SARLUC(registrationStatus);
- SARLUC(configuredStatusFlag);
- SARLUCN(_6, 1);
- SARLUCN(ultimateDestAddress, 6);
- SARLUCN(immedDestAddress, 6);
- SARLUCN(immedSrcAddress, 6);
- SARLUS(rxSequenceNumber);
- SARLUC(assignedLocaltalkAddress);
- SARLUCN(_7, 27);
-
- /* System Parameter Block */
-
- /* - Driver Parameters (Novell Specific) */
-
- SARLUS(txTimeout);
- SARLUS(transportTime);
- SARLUCN(_8, 4);
-
- /* - Configuration Parameters */
- SARLUC(irqLevel);
- SARLUC(spreadingCode);
- SARLUC(channelSet);
- SARLUC(channelNumber);
- SARLUS(radioNodeId);
- SARLUCN(_9, 2);
- SARLUC(scramblingDisable);
- SARLUC(radioType);
- SARLUS(routerId);
- SARLUCN(_10, 9);
- SARLUC(txAttenuation);
- SARLUIA(systemId);
- SARLUS(globalChecksum);
- SARLUCN(_11, 4);
- SARLUS(maxDatagramSize);
- SARLUS(maxFrameSize);
- SARLUC(maxRetries);
- SARLUC(receiveMode);
- SARLUC(priority);
- SARLUC(rootOrRepeater);
- SARLUCN(specifiedRouter, 6);
- SARLUS(fastPollPeriod);
- SARLUC(pollDecay);
- SARLUSA(fastPollDelay);
- SARLUC(arlThreshold);
- SARLUC(arlDecay);
- SARLUCN(_12, 1);
- SARLUS(specRouterTimeout);
- SARLUCN(_13, 5);
-
- /* Scrambled Area */
- SARLUIA(SID);
- SARLUCN(encryptionKey, 12);
- SARLUIA(_14);
- SARLUSA(waitTime);
- SARLUSA(lParameter);
- SARLUCN(_15, 3);
- SARLUS(headerSize);
- SARLUS(sectionChecksum);
-
- SARLUC(registrationMode);
- SARLUC(registrationFill);
- SARLUS(pollPeriod);
- SARLUS(refreshPeriod);
- SARLSTR(name, 16);
- SARLUCN(NID, 6);
- SARLUC(localTalkAddress);
- SARLUC(codeFormat);
- SARLUC(numChannels);
- SARLUC(channel1);
- SARLUC(channel2);
- SARLUC(channel3);
- SARLUC(channel4);
- SARLUCN(SSCode, 59);
-
-/* SARLUCN( _16, 0x140);
- */
- /* Statistics Block - 0x0300 */
- SARLUC(hostcpuLock);
- SARLUC(lancpuLock);
- SARLUCN(resetTime, 18);
- SARLUIA(numDatagramsTransmitted);
- SARLUIA(numReTransmissions);
- SARLUIA(numFramesDiscarded);
- SARLUIA(numDatagramsReceived);
- SARLUIA(numDuplicateReceivedFrames);
- SARLUIA(numDatagramsDiscarded);
- SARLUS(maxNumReTransmitDatagram);
- SARLUS(maxNumReTransmitFrames);
- SARLUS(maxNumConsecutiveDuplicateFrames);
- /* misaligned here so we have to go to characters */
- SARLUIA(numBytesTransmitted);
- SARLUIA(numBytesReceived);
- SARLUIA(numCRCErrors);
- SARLUIA(numLengthErrors);
- SARLUIA(numAbortErrors);
- SARLUIA(numTXUnderruns);
- SARLUIA(numRXOverruns);
- SARLUIA(numHoldOffs);
- SARLUIA(numFramesTransmitted);
- SARLUIA(numFramesReceived);
- SARLUIA(numReceiveFramesLost);
- SARLUIA(numRXBufferOverflows);
- SARLUIA(numFramesDiscardedAddrMismatch);
- SARLUIA(numFramesDiscardedSIDMismatch);
- SARLUIA(numPollsTransmistted);
- SARLUIA(numPollAcknowledges);
- SARLUIA(numStatusTimeouts);
- SARLUIA(numNACKReceived);
- SARLUS(auxCmd);
- SARLUCN(dumpPtr, 4);
- SARLUC(dumpVal);
- SARLUC(wireTest);
-
- /* next 4 seems too long for procfs, over single page ?
- SARLUCN( _17, 0x86);
- SARLUCN( txBuffer, 0x800);
- SARLUCN( rxBuffer, 0x800);
- SARLUCN( _18, 0x0bff);
- */
-
- pos += sprintf(arlan_drive_info + pos, "rxRing\t=\t0x");
- for (i = 0; i < 0x50; i++)
- pos += sprintf(arlan_drive_info + pos, "%02x", ((char *) priva->conf)[priva->conf->rxOffset + i]);
- pos += sprintf(arlan_drive_info + pos, "\n");
-
- SARLUC(configStatus);
- SARLUC(_22);
- SARLUC(progIOCtrl);
- SARLUC(shareMBase);
- SARLUC(controlRegister);
-
- pos += sprintf(arlan_drive_info + pos, " total %d chars\n", pos);
- if (ctl)
- if (ctl->procname)
- pos += sprintf(arlan_drive_info + pos, " driver name : %s\n", ctl->procname);
-final:
- *lenp = pos;
-
- if (!write)
- retv = proc_dostring(ctl, write, filp, buffer, lenp, ppos);
- else
- {
- *lenp = 0;
- return -1;
- }
- return retv;
-}
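A fragility worth noting in the handler above: every SARL* expansion is an unchecked sprintf() into the fixed-size arlan_drive_info buffer, so a sufficiently long dump can overrun ARLAN_STR_SIZE. The kernel's scnprintf() returns the number of characters actually stored and never writes past the given size; a sketch of a bounded variant of the accumulation, assuming the same buffer and pos variables as above:

    /* A bounds-checked variant of the accumulation above: scnprintf()
     * never writes past the remaining space and returns the number of
     * characters actually stored, so pos cannot run off the buffer. */
    #define SARL_BOUNDED(fmt, ...)                                      \
        (pos += scnprintf(arlan_drive_info + pos,                       \
                          ARLAN_STR_SIZE - pos, fmt, ##__VA_ARGS__))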
-
-
-static int arlan_sysctl_info161719(ctl_table * ctl, int write, struct file *filp,
- void __user *buffer, size_t * lenp, loff_t *ppos)
-{
- int i;
- int retv, pos, devnum;
- struct arlan_private *priva = NULL;
-
- pos = 0;
- devnum = ctl->procname[5] - '0';
- if (arlan_device[devnum] == NULL)
- {
- pos += sprintf(arlan_drive_info + pos, "No device found here \n");
- goto final;
- }
- else
- priva = netdev_priv(arlan_device[devnum]);
- if (priva == NULL)
- {
- printk(KERN_WARNING " Could not find the device private in arlan procsys, bad\n ");
- return -1;
- }
- memcpy_fromio(priva->conf, priva->card, sizeof(struct arlan_shmem));
- SARLUCN(_16, 0xC0);
- SARLUCN(_17, 0x6A);
- SARLUCN(_18, 14);
- SARLUCN(_19, 0x86);
- SARLUCN(_21, 0x3fd);
-
-final:
- *lenp = pos;
- retv = proc_dostring(ctl, write, filp, buffer, lenp, ppos);
- return retv;
-}
-
-static int arlan_sysctl_infotxRing(ctl_table * ctl, int write, struct file *filp,
- void __user *buffer, size_t * lenp, loff_t *ppos)
-{
- int i;
- int retv, pos, devnum;
- struct arlan_private *priva = NULL;
-
- pos = 0;
- devnum = ctl->procname[5] - '0';
- if (arlan_device[devnum] == NULL)
- {
- pos += sprintf(arlan_drive_info + pos, "No device found here \n");
- goto final;
- }
- else
- priva = netdev_priv(arlan_device[devnum]);
- if (priva == NULL)
- {
- printk(KERN_WARNING " Could not find the device private in arlan procsys, bad\n ");
- return -1;
- }
- memcpy_fromio(priva->conf, priva->card, sizeof(struct arlan_shmem));
- SARLBNpln(u_char, txBuffer, 0x800);
-final:
- *lenp = pos;
- retv = proc_dostring(ctl, write, filp, buffer, lenp, ppos);
- return retv;
-}
-
-static int arlan_sysctl_inforxRing(ctl_table * ctl, int write, struct file *filp,
- void __user *buffer, size_t * lenp, loff_t *ppos)
-{
- int i;
- int retv, pos, devnum;
- struct arlan_private *priva = NULL;
-
- pos = 0;
- devnum = ctl->procname[5] - '0';
- if (arlan_device[devnum] == NULL)
- {
- pos += sprintf(arlan_drive_info + pos, "No device found here \n");
- goto final;
- } else
- priva = netdev_priv(arlan_device[devnum]);
- if (priva == NULL)
- {
- printk(KERN_WARNING " Could not find the device private in arlan procsys, bad\n ");
- return -1;
- }
- memcpy_fromio(priva->conf, priva->card, sizeof(struct arlan_shmem));
- SARLBNpln(u_char, rxBuffer, 0x800);
-final:
- *lenp = pos;
- retv = proc_dostring(ctl, write, filp, buffer, lenp, ppos);
- return retv;
-}
-
-static int arlan_sysctl_info18(ctl_table * ctl, int write, struct file *filp,
- void __user *buffer, size_t * lenp, loff_t *ppos)
-{
- int i;
- int retv, pos, devnum;
- struct arlan_private *priva = NULL;
-
- pos = 0;
- devnum = ctl->procname[5] - '0';
- if (arlan_device[devnum] == NULL)
- {
- pos += sprintf(arlan_drive_info + pos, "No device found here \n");
- goto final;
- }
- else
- priva = netdev_priv(arlan_device[devnum]);
- if (priva == NULL)
- {
- printk(KERN_WARNING " Could not find the device private in arlan procsys, bad\n ");
- return -1;
- }
- memcpy_fromio(priva->conf, priva->card, sizeof(struct arlan_shmem));
- SARLBNpln(u_char, _18, 0x800);
-
-final:
- *lenp = pos;
- retv = proc_dostring(ctl, write, filp, buffer, lenp, ppos);
- return retv;
-}
-
-
-#endif /* #ifdef ARLAN_PROC_SHM_DUMP */
-
-
-static char conf_reset_result[200];
-
-static int arlan_configure(ctl_table * ctl, int write, struct file *filp,
- void __user *buffer, size_t * lenp, loff_t *ppos)
-{
- int pos = 0;
- int devnum = ctl->procname[6] - '0';
- struct arlan_private *priv;
-
- if (devnum < 0 || devnum > MAX_ARLANS - 1)
- {
- printk(KERN_WARNING "too strange devnum in procfs parse\n ");
- return -1;
- }
- else if (arlan_device[devnum] != NULL)
- {
- priv = netdev_priv(arlan_device[devnum]);
-
- arlan_command(arlan_device[devnum], ARLAN_COMMAND_CLEAN_AND_CONF);
- }
- else
- return -1;
-
- *lenp = pos;
- return proc_dostring(ctl, write, filp, buffer, lenp, ppos);
-}
-
-static int arlan_sysctl_reset(ctl_table * ctl, int write, struct file *filp,
- void __user *buffer, size_t * lenp, loff_t *ppos)
-{
- int pos = 0;
- int devnum = ctl->procname[5] - '0';
- struct arlan_private *priv;
-
- if (devnum < 0 || devnum > MAX_ARLANS - 1)
- {
- printk(KERN_WARNING "too strange devnum in procfs parse\n ");
- return -1;
- }
- else if (arlan_device[devnum] != NULL)
- {
- priv = netdev_priv(arlan_device[devnum]);
- arlan_command(arlan_device[devnum], ARLAN_COMMAND_CLEAN_AND_RESET);
-
- } else
- return -1;
- *lenp = pos + 3;
- return proc_dostring(ctl, write, filp, buffer, lenp, ppos);
-}
-
-
-/* Place files in /proc/sys/dev/arlan */
-#define CTBLN(num,card,nam) \
- { .ctl_name = num,\
- .procname = #nam,\
- .data = &(arlan_conf[card].nam),\
- .maxlen = sizeof(int), .mode = 0600, .proc_handler = &proc_dointvec}
-#ifdef ARLAN_DEBUGGING
-
-#define ARLAN_PROC_DEBUG_ENTRIES \
- { .ctl_name = 48, .procname = "entry_exit_debug",\
- .data = &arlan_entry_and_exit_debug,\
- .maxlen = sizeof(int), .mode = 0600, .proc_handler = &proc_dointvec},\
- { .ctl_name = 49, .procname = "debug", .data = &arlan_debug,\
- .maxlen = sizeof(int), .mode = 0600, .proc_handler = &proc_dointvec},
-#else
-#define ARLAN_PROC_DEBUG_ENTRIES
-#endif
-
-#define ARLAN_SYSCTL_TABLE_TOTAL(cardNo)\
- CTBLN(1,cardNo,spreadingCode),\
- CTBLN(2,cardNo, channelNumber),\
- CTBLN(3,cardNo, scramblingDisable),\
- CTBLN(4,cardNo, txAttenuation),\
- CTBLN(5,cardNo, systemId), \
- CTBLN(6,cardNo, maxDatagramSize),\
- CTBLN(7,cardNo, maxFrameSize),\
- CTBLN(8,cardNo, maxRetries),\
- CTBLN(9,cardNo, receiveMode),\
- CTBLN(10,cardNo, priority),\
- CTBLN(11,cardNo, rootOrRepeater),\
- CTBLN(12,cardNo, SID),\
- CTBLN(13,cardNo, registrationMode),\
- CTBLN(14,cardNo, registrationFill),\
- CTBLN(15,cardNo, localTalkAddress),\
- CTBLN(16,cardNo, codeFormat),\
- CTBLN(17,cardNo, numChannels),\
- CTBLN(18,cardNo, channel1),\
- CTBLN(19,cardNo, channel2),\
- CTBLN(20,cardNo, channel3),\
- CTBLN(21,cardNo, channel4),\
- CTBLN(22,cardNo, txClear),\
- CTBLN(23,cardNo, txRetries),\
- CTBLN(24,cardNo, txRouting),\
- CTBLN(25,cardNo, txScrambled),\
- CTBLN(26,cardNo, rxParameter),\
- CTBLN(27,cardNo, txTimeoutMs),\
- CTBLN(28,cardNo, waitCardTimeout),\
- CTBLN(29,cardNo, channelSet), \
- {.ctl_name = 30, .procname = "name",\
- .data = arlan_conf[cardNo].siteName,\
- .maxlen = 16, .mode = 0600, .proc_handler = &proc_dostring},\
- CTBLN(31,cardNo,waitTime),\
- CTBLN(32,cardNo,lParameter),\
- CTBLN(33,cardNo,_15),\
- CTBLN(34,cardNo,headerSize),\
- CTBLN(36,cardNo,tx_delay_ms),\
- CTBLN(37,cardNo,retries),\
- CTBLN(38,cardNo,ReTransmitPacketMaxSize),\
- CTBLN(39,cardNo,waitReTransmitPacketMaxSize),\
- CTBLN(40,cardNo,fastReTransCount),\
- CTBLN(41,cardNo,driverRetransmissions),\
- CTBLN(42,cardNo,txAckTimeoutMs),\
- CTBLN(43,cardNo,registrationInterrupts),\
- CTBLN(44,cardNo,hardwareType),\
- CTBLN(45,cardNo,radioType),\
- CTBLN(46,cardNo,writeEEPROM),\
- CTBLN(47,cardNo,writeRadioType),\
- ARLAN_PROC_DEBUG_ENTRIES\
- CTBLN(50,cardNo,in_speed),\
- CTBLN(51,cardNo,out_speed),\
- CTBLN(52,cardNo,in_speed10),\
- CTBLN(53,cardNo,out_speed10),\
- CTBLN(54,cardNo,in_speed_max),\
- CTBLN(55,cardNo,out_speed_max),\
- CTBLN(56,cardNo,measure_rate),\
- CTBLN(57,cardNo,pre_Command_Wait),\
- CTBLN(58,cardNo,rx_tweak1),\
- CTBLN(59,cardNo,rx_tweak2),\
- CTBLN(60,cardNo,tx_queue_len),\
-
-
-
-static ctl_table arlan_conf_table0[] =
-{
- ARLAN_SYSCTL_TABLE_TOTAL(0)
-
-#ifdef ARLAN_PROC_SHM_DUMP
- {
- .ctl_name = 150,
- .procname = "arlan0-txRing",
- .data = &arlan_drive_info,
- .maxlen = ARLAN_STR_SIZE,
- .mode = 0400,
- .proc_handler = &arlan_sysctl_infotxRing,
- },
- {
- .ctl_name = 151,
- .procname = "arlan0-rxRing",
- .data = &arlan_drive_info,
- .maxlen = ARLAN_STR_SIZE,
- .mode = 0400,
- .proc_handler = &arlan_sysctl_inforxRing,
- },
- {
- .ctl_name = 152,
- .procname = "arlan0-18",
- .data = &arlan_drive_info,
- .maxlen = ARLAN_STR_SIZE,
- .mode = 0400,
- .proc_handler = &arlan_sysctl_info18,
- },
- {
- .ctl_name = 153,
- .procname = "arlan0-ring",
- .data = &arlan_drive_info,
- .maxlen = ARLAN_STR_SIZE,
- .mode = 0400,
- .proc_handler = &arlan_sysctl_info161719,
- },
- {
- .ctl_name = 154,
- .procname = "arlan0-shm-cpy",
- .data = &arlan_drive_info,
- .maxlen = ARLAN_STR_SIZE,
- .mode = 0400,
- .proc_handler = &arlan_sysctl_info,
- },
-#endif
- {
- .ctl_name = 155,
- .procname = "config0",
- .data = &conf_reset_result,
- .maxlen = 100,
- .mode = 0400,
- .proc_handler = &arlan_configure
- },
- {
- .ctl_name = 156,
- .procname = "reset0",
- .data = &conf_reset_result,
- .maxlen = 100,
- .mode = 0400,
- .proc_handler = &arlan_sysctl_reset,
- },
- { .ctl_name = 0 }
-};
-
-static ctl_table arlan_conf_table1[] =
-{
-
- ARLAN_SYSCTL_TABLE_TOTAL(1)
-
-#ifdef ARLAN_PROC_SHM_DUMP
- {
- .ctl_name = 150,
- .procname = "arlan1-txRing",
- .data = &arlan_drive_info,
- .maxlen = ARLAN_STR_SIZE,
- .mode = 0400,
- .proc_handler = &arlan_sysctl_infotxRing,
- },
- {
- .ctl_name = 151,
- .procname = "arlan1-rxRing",
- .data = &arlan_drive_info,
- .maxlen = ARLAN_STR_SIZE,
- .mode = 0400,
- .proc_handler = &arlan_sysctl_inforxRing,
- },
- {
- .ctl_name = 152,
- .procname = "arlan1-18",
- .data = &arlan_drive_info,
- .maxlen = ARLAN_STR_SIZE,
- .mode = 0400,
- .proc_handler = &arlan_sysctl_info18,
- },
- {
- .ctl_name = 153,
- .procname = "arlan1-ring",
- .data = &arlan_drive_info,
- .maxlen = ARLAN_STR_SIZE,
- .mode = 0400,
- .proc_handler = &arlan_sysctl_info161719,
- },
- {
- .ctl_name = 154,
- .procname = "arlan1-shm-cpy",
- .data = &arlan_drive_info,
- .maxlen = ARLAN_STR_SIZE,
- .mode = 0400,
- .proc_handler = &arlan_sysctl_info,
- },
-#endif
- {
- .ctl_name = 155,
- .procname = "config1",
- .data = &conf_reset_result,
- .maxlen = 100,
- .mode = 0400,
- .proc_handler = &arlan_configure,
- },
- {
- .ctl_name = 156,
- .procname = "reset1",
- .data = &conf_reset_result,
- .maxlen = 100,
- .mode = 0400,
- .proc_handler = &arlan_sysctl_reset,
- },
- { .ctl_name = 0 }
-};
-
-static ctl_table arlan_conf_table2[] =
-{
-
- ARLAN_SYSCTL_TABLE_TOTAL(2)
-
-#ifdef ARLAN_PROC_SHM_DUMP
- {
- .ctl_name = 150,
- .procname = "arlan2-txRing",
- .data = &arlan_drive_info,
- .maxlen = ARLAN_STR_SIZE,
- .mode = 0400,
- .proc_handler = &arlan_sysctl_infotxRing,
- },
- {
- .ctl_name = 151,
- .procname = "arlan2-rxRing",
- .data = &arlan_drive_info,
- .maxlen = ARLAN_STR_SIZE,
- .mode = 0400,
- .proc_handler = &arlan_sysctl_inforxRing,
- },
- {
- .ctl_name = 152,
- .procname = "arlan2-18",
- .data = &arlan_drive_info,
- .maxlen = ARLAN_STR_SIZE,
- .mode = 0400,
- .proc_handler = &arlan_sysctl_info18,
- },
- {
- .ctl_name = 153,
- .procname = "arlan2-ring",
- .data = &arlan_drive_info,
- .maxlen = ARLAN_STR_SIZE,
- .mode = 0400,
- .proc_handler = &arlan_sysctl_info161719,
- },
- {
- .ctl_name = 154,
- .procname = "arlan2-shm-cpy",
- .data = &arlan_drive_info,
- .maxlen = ARLAN_STR_SIZE,
- .mode = 0400,
- .proc_handler = &arlan_sysctl_info,
- },
-#endif
- {
- .ctl_name = 155,
- .procname = "config2",
- .data = &conf_reset_result,
- .maxlen = 100,
- .mode = 0400,
- .proc_handler = &arlan_configure,
- },
- {
- .ctl_name = 156,
- .procname = "reset2",
- .data = &conf_reset_result,
- .maxlen = 100,
- .mode = 0400,
- .proc_handler = &arlan_sysctl_reset,
- },
- { .ctl_name = 0 }
-};
-
-static ctl_table arlan_conf_table3[] =
-{
-
- ARLAN_SYSCTL_TABLE_TOTAL(3)
-
-#ifdef ARLAN_PROC_SHM_DUMP
- {
- .ctl_name = 150,
- .procname = "arlan3-txRing",
- .data = &arlan_drive_info,
- .maxlen = ARLAN_STR_SIZE,
- .mode = 0400,
- .proc_handler = &arlan_sysctl_infotxRing,
- },
- {
- .ctl_name = 151,
- .procname = "arlan3-rxRing",
- .data = &arlan_drive_info,
- .maxlen = ARLAN_STR_SIZE,
- .mode = 0400,
- .proc_handler = &arlan_sysctl_inforxRing,
- },
- {
- .ctl_name = 152,
- .procname = "arlan3-18",
- .data = &arlan_drive_info,
- .maxlen = ARLAN_STR_SIZE,
- .mode = 0400,
- .proc_handler = &arlan_sysctl_info18,
- },
- {
- .ctl_name = 153,
- .procname = "arlan3-ring",
- .data = &arlan_drive_info,
- .maxlen = ARLAN_STR_SIZE,
- .mode = 0400,
- .proc_handler = &arlan_sysctl_info161719,
- },
- {
- .ctl_name = 154,
- .procname = "arlan3-shm-cpy",
- .data = &arlan_drive_info,
- .maxlen = ARLAN_STR_SIZE,
- .mode = 0400,
- .proc_handler = &arlan_sysctl_info,
- },
-#endif
- {
- .ctl_name = 155,
- .procname = "config3",
- .data = &conf_reset_result,
- .maxlen = 100,
- .mode = 0400,
- .proc_handler = &arlan_configure,
- },
- {
- .ctl_name = 156,
- .procname = "reset3",
- .data = &conf_reset_result,
- .maxlen = 100,
- .mode = 0400,
- .proc_handler = &arlan_sysctl_reset,
- },
- { .ctl_name = 0 }
-};
-
-
-
-static ctl_table arlan_table[] =
-{
- {
- .ctl_name = 0,
- .procname = "arlan0",
- .maxlen = 0,
- .mode = 0600,
- .child = arlan_conf_table0,
- },
- {
- .ctl_name = 0,
- .procname = "arlan1",
- .maxlen = 0,
- .mode = 0600,
- .child = arlan_conf_table1,
- },
- {
- .ctl_name = 0,
- .procname = "arlan2",
- .maxlen = 0,
- .mode = 0600,
- .child = arlan_conf_table2,
- },
- {
- .ctl_name = 0,
- .procname = "arlan3",
- .maxlen = 0,
- .mode = 0600,
- .child = arlan_conf_table3,
- },
- { .ctl_name = 0 }
-};
-
-#else
-
-static ctl_table arlan_table[MAX_ARLANS + 1] =
-{
- { .ctl_name = 0 }
-};
-#endif
-
-
-// static int mmtu = 1234;
-
-static ctl_table arlan_root_table[] =
-{
- {
- .ctl_name = CTL_ARLAN,
- .procname = "arlan",
- .maxlen = 0,
- .mode = 0555,
- .child = arlan_table,
- },
- { .ctl_name = 0 }
-};
-
-/* Make sure that /proc/sys/dev is there */
-//static ctl_table arlan_device_root_table[] =
-//{
-// {CTL_DEV, "dev", NULL, 0, 0555, arlan_root_table},
-// {0}
-//};
-
-
-static struct ctl_table_header *arlan_device_sysctl_header;
-
-int __init init_arlan_proc(void)
-{
-
- int i = 0;
- if (arlan_device_sysctl_header)
- return 0;
- for (i = 0; i < MAX_ARLANS && arlan_device[i]; i++)
- arlan_table[i].ctl_name = i + 1;
- arlan_device_sysctl_header = register_sysctl_table(arlan_root_table);
- if (!arlan_device_sysctl_header)
- return -1;
-
- return 0;
-
-}
-
-void __exit cleanup_arlan_proc(void)
-{
- unregister_sysctl_table(arlan_device_sysctl_header);
- arlan_device_sysctl_header = NULL;
-
-}
-#endif
diff --git a/drivers/net/wireless/arlan.h b/drivers/net/wireless/arlan.h
deleted file mode 100644
index fb3ad51a1caf..000000000000
--- a/drivers/net/wireless/arlan.h
+++ /dev/null
@@ -1,539 +0,0 @@
-/*
- * Copyright (C) 1997 Cullen Jennings
- * Copyright (C) 1998 Elmer.Joandi@ut.ee, +37-255-13500
- * GNU General Public License applies
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/skbuff.h>
-#include <linux/if_ether.h> /* For the statistics structure. */
-#include <linux/if_arp.h> /* For ARPHRD_ETHER */
-#include <linux/ptrace.h>
-#include <linux/ioport.h>
-#include <linux/in.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/timer.h>
-
-#include <linux/init.h>
-#include <linux/bitops.h>
-#include <asm/system.h>
-#include <asm/io.h>
-#include <linux/errno.h>
-#include <linux/delay.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-
-
-//#define ARLAN_DEBUGGING 1
-
-#define ARLAN_PROC_INTERFACE
-#define MAX_ARLANS 4 /* not more than 4 ! */
-#define ARLAN_PROC_SHM_DUMP /* shows all card registers, makes driver way larger */
-
-#define ARLAN_MAX_MULTICAST_ADDRS 16
-#define ARLAN_RCV_CLEAN 0
-#define ARLAN_RCV_PROMISC 1
-#define ARLAN_RCV_CONTROL 2
-
-#ifdef CONFIG_PROC_FS
-extern int init_arlan_proc(void);
-extern void cleanup_arlan_proc(void);
-#else
-#define init_arlan_proc() ({ 0; })
-#define cleanup_arlan_proc() do { } while (0)
-#endif
-
-extern struct net_device *arlan_device[MAX_ARLANS];
-extern int arlan_debug;
-extern int arlan_entry_debug;
-extern int arlan_exit_debug;
-extern int testMemory;
-extern int arlan_command(struct net_device * dev, int command);
-
-#define SIDUNKNOWN -1
-#define radioNodeIdUNKNOWN -1
-#define irqUNKNOWN 0
-#define debugUNKNOWN 0
-#define testMemoryUNKNOWN 1
-#define spreadingCodeUNKNOWN 0
-#define channelNumberUNKNOWN 0
-#define channelSetUNKNOWN 0
-#define systemIdUNKNOWN -1
-#define registrationModeUNKNOWN -1
-
-
-#define IFDEBUG( L ) if ( (L) & arlan_debug )
-#define ARLAN_FAKE_HDR_LEN 12
-
-#ifdef ARLAN_DEBUGGING
- #define DEBUG 1
- #define ARLAN_ENTRY_EXIT_DEBUGGING 1
- #define ARLAN_DEBUG(a,b) printk(KERN_DEBUG a, b)
-#else
- #define ARLAN_DEBUG(a,b)
-#endif
-
-#define ARLAN_SHMEM_SIZE 0x2000
-
-struct arlan_shmem
-{
- /* Header Signature */
- volatile char textRegion[48];
- volatile u_char resetFlag;
- volatile u_char diagnosticInfo;
- volatile u_short diagnosticOffset;
- volatile u_char _1[12];
- volatile u_char lanCardNodeId[6];
- volatile u_char broadcastAddress[6];
- volatile u_char hardwareType;
- volatile u_char majorHardwareVersion;
- volatile u_char minorHardwareVersion;
- volatile u_char radioModule;// shows EEPROM, can be overridden at 0x111
- volatile u_char defaultChannelSet; // shows EEPROM, can be overridden at 0x10A
- volatile u_char _2[47];
-
- /* Control/Status Block - 0x0080 */
- volatile u_char interruptInProgress; /* not used by lancpu */
- volatile u_char cntrlRegImage; /* not used by lancpu */
- volatile u_char _3[13];
- volatile u_char dumpByte;
- volatile u_char commandByte; /* non-zero = active */
- volatile u_char commandParameter[15];
-
- /* Receive Status - 0x00a0 */
- volatile u_char rxStatus; /* 1- data, 2-control, 0xff - registr change */
- volatile u_char rxFrmType;
- volatile u_short rxOffset;
- volatile u_short rxLength;
- volatile u_char rxSrc[6];
- volatile u_char rxBroadcastFlag;
- volatile u_char rxQuality;
- volatile u_char scrambled;
- volatile u_char _4[1];
-
- /* Transmit Status - 0x00b0 */
- volatile u_char txStatus;
- volatile u_char txAckQuality;
- volatile u_char numRetries;
- volatile u_char _5[14];
- volatile u_char registeredRouter[6];
- volatile u_char backboneRouter[6];
- volatile u_char registrationStatus;
- volatile u_char configuredStatusFlag;
- volatile u_char _6[1];
- volatile u_char ultimateDestAddress[6];
- volatile u_char immedDestAddress[6];
- volatile u_char immedSrcAddress[6];
- volatile u_short rxSequenceNumber;
- volatile u_char assignedLocaltalkAddress;
- volatile u_char _7[27];
-
- /* System Parameter Block */
-
- /* - Driver Parameters (Novell Specific) */
-
- volatile u_short txTimeout;
- volatile u_short transportTime;
- volatile u_char _8[4];
-
- /* - Configuration Parameters */
- volatile u_char irqLevel;
- volatile u_char spreadingCode;
- volatile u_char channelSet;
- volatile u_char channelNumber;
- volatile u_short radioNodeId;
- volatile u_char _9[2];
- volatile u_char scramblingDisable;
- volatile u_char radioType;
- volatile u_short routerId;
- volatile u_char _10[9];
- volatile u_char txAttenuation;
- volatile u_char systemId[4];
- volatile u_short globalChecksum;
- volatile u_char _11[4];
- volatile u_short maxDatagramSize;
- volatile u_short maxFrameSize;
- volatile u_char maxRetries;
- volatile u_char receiveMode;
- volatile u_char priority;
- volatile u_char rootOrRepeater;
- volatile u_char specifiedRouter[6];
- volatile u_short fastPollPeriod;
- volatile u_char pollDecay;
- volatile u_char fastPollDelay[2];
- volatile u_char arlThreshold;
- volatile u_char arlDecay;
- volatile u_char _12[1];
- volatile u_short specRouterTimeout;
- volatile u_char _13[5];
-
- /* Scrambled Area */
- volatile u_char SID[4];
- volatile u_char encryptionKey[12];
- volatile u_char _14[2];
- volatile u_char waitTime[2];
- volatile u_char lParameter[2];
- volatile u_char _15[3];
- volatile u_short headerSize;
- volatile u_short sectionChecksum;
-
- volatile u_char registrationMode;
- volatile u_char registrationFill;
- volatile u_short pollPeriod;
- volatile u_short refreshPeriod;
- volatile u_char name[16];
- volatile u_char NID[6];
- volatile u_char localTalkAddress;
- volatile u_char codeFormat;
- volatile u_char numChannels;
- volatile u_char channel1;
- volatile u_char channel2;
- volatile u_char channel3;
- volatile u_char channel4;
- volatile u_char SSCode[59];
-
- volatile u_char _16[0xC0];
- volatile u_short auxCmd;
- volatile u_char dumpPtr[4];
- volatile u_char dumpVal;
- volatile u_char _17[0x6A];
- volatile u_char wireTest;
- volatile u_char _18[14];
-
- /* Statistics Block - 0x0300 */
- volatile u_char hostcpuLock;
- volatile u_char lancpuLock;
- volatile u_char resetTime[18];
-
- volatile u_char numDatagramsTransmitted[4];
- volatile u_char numReTransmissions[4];
- volatile u_char numFramesDiscarded[4];
- volatile u_char numDatagramsReceived[4];
- volatile u_char numDuplicateReceivedFrames[4];
- volatile u_char numDatagramsDiscarded[4];
-
- volatile u_short maxNumReTransmitDatagram;
- volatile u_short maxNumReTransmitFrames;
- volatile u_short maxNumConsecutiveDuplicateFrames;
- /* misaligned here so we have to go to characters */
-
- volatile u_char numBytesTransmitted[4];
- volatile u_char numBytesReceived[4];
- volatile u_char numCRCErrors[4];
- volatile u_char numLengthErrors[4];
- volatile u_char numAbortErrors[4];
- volatile u_char numTXUnderruns[4];
- volatile u_char numRXOverruns[4];
- volatile u_char numHoldOffs[4];
- volatile u_char numFramesTransmitted[4];
- volatile u_char numFramesReceived[4];
- volatile u_char numReceiveFramesLost[4];
- volatile u_char numRXBufferOverflows[4];
- volatile u_char numFramesDiscardedAddrMismatch[4];
- volatile u_char numFramesDiscardedSIDMismatch[4];
- volatile u_char numPollsTransmistted[4];
- volatile u_char numPollAcknowledges[4];
- volatile u_char numStatusTimeouts[4];
- volatile u_char numNACKReceived[4];
-
- volatile u_char _19[0x86];
-
- volatile u_char txBuffer[0x800];
- volatile u_char rxBuffer[0x800];
-
- volatile u_char _20[0x800];
- volatile u_char _21[0x3fb];
- volatile u_char configStatus;
- volatile u_char _22;
- volatile u_char progIOCtrl;
- volatile u_char shareMBase;
- volatile u_char controlRegister;
-};
-
-struct arlan_conf_stru {
- int spreadingCode;
- int channelSet;
- int channelNumber;
- int scramblingDisable;
- int txAttenuation;
- int systemId;
- int maxDatagramSize;
- int maxFrameSize;
- int maxRetries;
- int receiveMode;
- int priority;
- int rootOrRepeater;
- int SID;
- int radioNodeId;
- int registrationMode;
- int registrationFill;
- int localTalkAddress;
- int codeFormat;
- int numChannels;
- int channel1;
- int channel2;
- int channel3;
- int channel4;
- int txClear;
- int txRetries;
- int txRouting;
- int txScrambled;
- int rxParameter;
- int txTimeoutMs;
- int txAckTimeoutMs;
- int waitCardTimeout;
- int waitTime;
- int lParameter;
- int _15;
- int headerSize;
- int retries;
- int tx_delay_ms;
- int waitReTransmitPacketMaxSize;
- int ReTransmitPacketMaxSize;
- int fastReTransCount;
- int driverRetransmissions;
- int registrationInterrupts;
- int hardwareType;
- int radioType;
- int writeRadioType;
- int writeEEPROM;
- char siteName[17];
- int measure_rate;
- int in_speed;
- int out_speed;
- int in_speed10;
- int out_speed10;
- int in_speed_max;
- int out_speed_max;
- int pre_Command_Wait;
- int rx_tweak1;
- int rx_tweak2;
- int tx_queue_len;
-};
-
-extern struct arlan_conf_stru arlan_conf[MAX_ARLANS];
-
-struct TxParam
-{
- volatile short offset;
- volatile short length;
- volatile u_char dest[6];
- volatile unsigned char clear;
- volatile unsigned char retries;
- volatile unsigned char routing;
- volatile unsigned char scrambled;
-};
-
-#define TX_RING_SIZE 2
-/* Information that need to be kept for each board. */
-struct arlan_private {
- struct arlan_shmem __iomem * card;
- struct arlan_shmem * conf;
-
- struct arlan_conf_stru * Conf;
- int bad;
- int reset;
- unsigned long lastReset;
- struct timer_list timer;
- struct timer_list tx_delay_timer;
- struct timer_list tx_retry_timer;
- struct timer_list rx_check_timer;
-
- int registrationLostCount;
- int reRegisterExp;
- int irq_test_done;
-
- struct TxParam txRing[TX_RING_SIZE];
- char reTransmitBuff[0x800];
- int txLast;
- unsigned ReTransmitRequested;
- unsigned long tx_done_delayed;
- unsigned long registrationLastSeen;
-
- unsigned long tx_last_sent;
- unsigned long tx_last_cleared;
- unsigned long retransmissions;
- unsigned long interrupt_ack_requested;
- spinlock_t lock;
- unsigned long waiting_command_mask;
- unsigned long card_polling_interval;
- unsigned long last_command_buff_free_time;
-
- int under_reset;
- int under_config;
- int rx_command_given;
- int tx_command_given;
- unsigned long interrupt_processing_active;
- unsigned long last_rx_int_ack_time;
- unsigned long in_bytes;
- unsigned long out_bytes;
- unsigned long in_time;
- unsigned long out_time;
- unsigned long in_time10;
- unsigned long out_time10;
- unsigned long in_bytes10;
- unsigned long out_bytes10;
- int init_etherdev_alloc;
-};
-
-
-
-#define ARLAN_CLEAR 0x00
-#define ARLAN_RESET 0x01
-#define ARLAN_CHANNEL_ATTENTION 0x02
-#define ARLAN_INTERRUPT_ENABLE 0x04
-#define ARLAN_CLEAR_INTERRUPT 0x08
-#define ARLAN_POWER 0x40
-#define ARLAN_ACCESS 0x80
-
-#define ARLAN_COM_CONF 0x01
-#define ARLAN_COM_RX_ENABLE 0x03
-#define ARLAN_COM_RX_ABORT 0x04
-#define ARLAN_COM_TX_ENABLE 0x05
-#define ARLAN_COM_TX_ABORT 0x06
-#define ARLAN_COM_NOP 0x07
-#define ARLAN_COM_STANDBY 0x08
-#define ARLAN_COM_ACTIVATE 0x09
-#define ARLAN_COM_GOTO_SLOW_POLL 0x0a
-#define ARLAN_COM_INT 0x80
-
-
-#define TXLAST(dev) (((struct arlan_private *)netdev_priv(dev))->txRing[((struct arlan_private *)netdev_priv(dev))->txLast])
-#define TXHEAD(dev) (((struct arlan_private *)netdev_priv(dev))->txRing[0])
-#define TXTAIL(dev) (((struct arlan_private *)netdev_priv(dev))->txRing[1])
-
-#define TXBuffStart(dev) offsetof(struct arlan_shmem, txBuffer)
-#define TXBuffEnd(dev) offsetof(struct arlan_shmem, rxBuffer)
-
-#define READSHM(to,from,atype) {\
- atype tmp;\
- memcpy_fromio(&(tmp),&(from),sizeof(atype));\
- to = tmp;\
- }
-
-#define READSHMEM(from,atype)\
- atype from; \
- READSHM(from, arlan->from, atype);
-
-#define WRITESHM(to,from,atype) \
- { atype tmpSHM = from;\
- memcpy_toio(&(to),&tmpSHM,sizeof(atype));\
- }
-
-#define DEBUGSHM(levelSHM,stringSHM,stuff,atype) \
- { atype tmpSHM; \
- memcpy_fromio(&tmpSHM,&(stuff),sizeof(atype));\
- IFDEBUG(levelSHM) printk(stringSHM,tmpSHM);\
- }
-
-#define WRITESHMB(to, val) \
- writeb(val,&(to))
-#define READSHMB(to) \
- readb(&(to))
-#define WRITESHMS(to, val) \
- writew(val,&(to))
-#define READSHMS(to) \
- readw(&(to))
-#define WRITESHMI(to, val) \
- writel(val,&(to))
-#define READSHMI(to) \
- readl(&(to))
-
-
-
-
-
-#define registrationBad(dev)\
- ( ( READSHMB(((struct arlan_private *)netdev_priv(dev))->card->registrationMode) > 0) && \
- ( READSHMB(((struct arlan_private *)netdev_priv(dev))->card->registrationStatus) == 0) )
-
-
-#define readControlRegister(dev)\
- READSHMB(((struct arlan_private *)netdev_priv(dev))->card->cntrlRegImage)
-
-#define writeControlRegister(dev, v){\
- WRITESHMB(((struct arlan_private *)netdev_priv(dev))->card->cntrlRegImage ,((v) &0xF) );\
- WRITESHMB(((struct arlan_private *)netdev_priv(dev))->card->controlRegister ,(v) );}
-
-
-#define arlan_interrupt_lancpu(dev) {\
- int cr; \
- \
- cr = readControlRegister(dev);\
- if (cr & ARLAN_CHANNEL_ATTENTION){ \
- writeControlRegister(dev, (cr & ~ARLAN_CHANNEL_ATTENTION));\
- }else \
- writeControlRegister(dev, (cr | ARLAN_CHANNEL_ATTENTION));\
-}
-
-#define clearChannelAttention(dev){ \
- writeControlRegister(dev,readControlRegister(dev) & ~ARLAN_CHANNEL_ATTENTION);}
-#define setHardwareReset(dev) {\
- writeControlRegister(dev,readControlRegister(dev) | ARLAN_RESET);}
-#define clearHardwareReset(dev) {\
- writeControlRegister(dev,readControlRegister(dev) & ~ARLAN_RESET);}
-#define setInterruptEnable(dev){\
- writeControlRegister(dev,readControlRegister(dev) | ARLAN_INTERRUPT_ENABLE) ;}
-#define clearInterruptEnable(dev){\
- writeControlRegister(dev,readControlRegister(dev) & ~ARLAN_INTERRUPT_ENABLE) ;}
-#define setClearInterrupt(dev){\
- writeControlRegister(dev,readControlRegister(dev) | ARLAN_CLEAR_INTERRUPT) ;}
-#define clearClearInterrupt(dev){\
- writeControlRegister(dev,readControlRegister(dev) & ~ARLAN_CLEAR_INTERRUPT);}
-#define setPowerOff(dev){\
- writeControlRegister(dev,readControlRegister(dev) | (ARLAN_POWER | ARLAN_ACCESS));\
- writeControlRegister(dev,readControlRegister(dev) & ~ARLAN_ACCESS);}
-#define setPowerOn(dev){\
- writeControlRegister(dev,readControlRegister(dev) & ~(ARLAN_POWER)); }
-#define arlan_lock_card_access(dev){\
- writeControlRegister(dev,readControlRegister(dev) & ~ARLAN_ACCESS);}
-#define arlan_unlock_card_access(dev){\
- writeControlRegister(dev,readControlRegister(dev) | ARLAN_ACCESS ); }
-
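readControlRegister()/writeControlRegister() above implement a shadow register: the hardware control register is treated as write-only, so every write also updates cntrlRegImage in shared memory and reads come from that image, which is what makes the set*/clear* read-modify-write helpers possible at all. The idiom in isolation (hypothetical names):

    /* Shadow a write-only hardware register: keep the last value
     * written in RAM so read-modify-write sequences are possible. */
    struct shadowed_reg {
        unsigned char image;            /* last value written */
        volatile unsigned char *hw;     /* write-only hardware register */
    };

    static unsigned char reg_read(struct shadowed_reg *r)
    {
        return r->image;                /* hardware cannot be read back */
    }

    static void reg_write(struct shadowed_reg *r, unsigned char v)
    {
        r->image = v;                   /* update the shadow first */
        *r->hw = v;                     /* then the real register */
    }

    static void reg_set_bits(struct shadowed_reg *r, unsigned char bits)
    {
        reg_write(r, reg_read(r) | bits);
    }

    static void reg_clear_bits(struct shadowed_reg *r, unsigned char bits)
    {
        reg_write(r, reg_read(r) & ~bits);
    }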
-
-
-
-#define ARLAN_COMMAND_RX 0x000001
-#define ARLAN_COMMAND_NOOP 0x000002
-#define ARLAN_COMMAND_NOOPINT 0x000004
-#define ARLAN_COMMAND_TX 0x000008
-#define ARLAN_COMMAND_CONF 0x000010
-#define ARLAN_COMMAND_RESET 0x000020
-#define ARLAN_COMMAND_TX_ABORT 0x000040
-#define ARLAN_COMMAND_RX_ABORT 0x000080
-#define ARLAN_COMMAND_POWERDOWN 0x000100
-#define ARLAN_COMMAND_POWERUP 0x000200
-#define ARLAN_COMMAND_SLOW_POLL 0x000400
-#define ARLAN_COMMAND_ACTIVATE 0x000800
-#define ARLAN_COMMAND_INT_ACK 0x001000
-#define ARLAN_COMMAND_INT_ENABLE 0x002000
-#define ARLAN_COMMAND_WAIT_NOW 0x004000
-#define ARLAN_COMMAND_LONG_WAIT_NOW 0x008000
-#define ARLAN_COMMAND_STANDBY 0x010000
-#define ARLAN_COMMAND_INT_RACK 0x020000
-#define ARLAN_COMMAND_INT_RENABLE 0x040000
-#define ARLAN_COMMAND_CONF_WAIT 0x080000
-#define ARLAN_COMMAND_TBUSY_CLEAR 0x100000
-#define ARLAN_COMMAND_CLEAN_AND_CONF (ARLAN_COMMAND_TX_ABORT\
- | ARLAN_COMMAND_RX_ABORT\
- | ARLAN_COMMAND_CONF)
-#define ARLAN_COMMAND_CLEAN_AND_RESET (ARLAN_COMMAND_TX_ABORT\
- | ARLAN_COMMAND_RX_ABORT\
- | ARLAN_COMMAND_RESET)
-
-
-
-#define ARLAN_DEBUG_CHAIN_LOCKS 0x00001
-#define ARLAN_DEBUG_RESET 0x00002
-#define ARLAN_DEBUG_TIMING 0x00004
-#define ARLAN_DEBUG_CARD_STATE 0x00008
-#define ARLAN_DEBUG_TX_CHAIN 0x00010
-#define ARLAN_DEBUG_MULTICAST 0x00020
-#define ARLAN_DEBUG_HEADER_DUMP 0x00040
-#define ARLAN_DEBUG_INTERRUPT 0x00080
-#define ARLAN_DEBUG_STARTUP 0x00100
-#define ARLAN_DEBUG_SHUTDOWN 0x00200
-
diff --git a/drivers/net/wireless/ath/Kconfig b/drivers/net/wireless/ath/Kconfig
index d26e7b485315..910c10028b14 100644
--- a/drivers/net/wireless/ath/Kconfig
+++ b/drivers/net/wireless/ath/Kconfig
@@ -1,8 +1,68 @@
+# SPDX-License-Identifier: ISC
config ATH_COMMON
- tristate "Atheros Wireless Cards"
- depends on ATH5K || ATH9K || AR9170_USB
+ tristate
+
+config WLAN_VENDOR_ATH
+ bool "Atheros/Qualcomm devices"
+ default y
+ help
+ If you have a wireless card belonging to this class, say Y.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all the
+ questions about these cards. If you say Y, you will be asked for
+ your specific card in the following questions.
+
+ For more information and documentation on this module you can visit:
+
+ https://wireless.wiki.kernel.org/en/users/Drivers/ath
+
+ For information on all Atheros wireless drivers visit:
+
+ https://wireless.wiki.kernel.org/en/users/Drivers/Atheros
+
+if WLAN_VENDOR_ATH
+
+config ATH_DEBUG
+ bool "Atheros wireless debugging"
+ help
+ Say Y, if you want to debug atheros wireless drivers.
+ Right now only ath9k makes use of this.
+
+config ATH_TRACEPOINTS
+ bool "Atheros wireless tracing"
+ depends on ATH_DEBUG
+ depends on EVENT_TRACING
+ help
+ This option enables tracepoints for atheros wireless drivers.
+ Currently, ath9k makes use of this facility.
+
+config ATH_REG_DYNAMIC_USER_REG_HINTS
+ bool "Atheros dynamic user regulatory hints"
+ depends on CFG80211_CERTIFICATION_ONUS
+ default n
+ help
+ Say N. This should only be enabled in countries where
+ this feature is explicitly allowed and only on cards that
+ specifically have been tested for this.
+
+config ATH_REG_DYNAMIC_USER_CERT_TESTING
+ bool "Atheros dynamic user regulatory testing"
+ depends on ATH_REG_DYNAMIC_USER_REG_HINTS && CFG80211_CERTIFICATION_ONUS
+ default n
+ help
+ Say N. This should only be enabled on systems
+ undergoing certification testing.
source "drivers/net/wireless/ath/ath5k/Kconfig"
source "drivers/net/wireless/ath/ath9k/Kconfig"
-source "drivers/net/wireless/ath/ar9170/Kconfig"
+source "drivers/net/wireless/ath/carl9170/Kconfig"
+source "drivers/net/wireless/ath/ath6kl/Kconfig"
+source "drivers/net/wireless/ath/ar5523/Kconfig"
+source "drivers/net/wireless/ath/wil6210/Kconfig"
+source "drivers/net/wireless/ath/ath10k/Kconfig"
+source "drivers/net/wireless/ath/wcn36xx/Kconfig"
+source "drivers/net/wireless/ath/ath11k/Kconfig"
+source "drivers/net/wireless/ath/ath12k/Kconfig"
+endif
diff --git a/drivers/net/wireless/ath/Makefile b/drivers/net/wireless/ath/Makefile
index 4bb0132ada37..8d6e6e218d24 100644
--- a/drivers/net/wireless/ath/Makefile
+++ b/drivers/net/wireless/ath/Makefile
@@ -1,6 +1,25 @@
+# SPDX-License-Identifier: ISC
obj-$(CONFIG_ATH5K) += ath5k/
-obj-$(CONFIG_ATH9K) += ath9k/
-obj-$(CONFIG_AR9170_USB) += ar9170/
+obj-$(CONFIG_ATH9K_HW) += ath9k/
+obj-$(CONFIG_CARL9170) += carl9170/
+obj-$(CONFIG_ATH6KL) += ath6kl/
+obj-$(CONFIG_AR5523) += ar5523/
+obj-$(CONFIG_WIL6210) += wil6210/
+obj-$(CONFIG_ATH10K) += ath10k/
+obj-$(CONFIG_WCN36XX) += wcn36xx/
+obj-$(CONFIG_ATH11K) += ath11k/
+obj-$(CONFIG_ATH12K) += ath12k/
obj-$(CONFIG_ATH_COMMON) += ath.o
-ath-objs := main.o regd.o
+
+ath-objs := main.o \
+ regd.o \
+ hw.o \
+ key.o \
+ dfs_pattern_detector.o \
+ dfs_pri_detector.o
+
+ath-$(CONFIG_ATH_DEBUG) += debug.o
+ath-$(CONFIG_ATH_TRACEPOINTS) += trace.o
+
+CFLAGS_trace.o := -I$(src)
diff --git a/drivers/net/wireless/ath/ar5523/Kconfig b/drivers/net/wireless/ath/ar5523/Kconfig
new file mode 100644
index 000000000000..0d838c1e7b12
--- /dev/null
+++ b/drivers/net/wireless/ath/ar5523/Kconfig
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: ISC
+config AR5523
+ tristate "Atheros AR5523 wireless driver support"
+ depends on MAC80211 && USB
+ select ATH_COMMON
+ select FW_LOADER
+ help
+ This module adds support for AR5523 based USB dongles such as D-Link
+ DWL-G132, Netgear WPN111 and many more.
diff --git a/drivers/net/wireless/ath/ar5523/Makefile b/drivers/net/wireless/ath/ar5523/Makefile
new file mode 100644
index 000000000000..34efa5772096
--- /dev/null
+++ b/drivers/net/wireless/ath/ar5523/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: ISC
+obj-$(CONFIG_AR5523) := ar5523.o
diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c
new file mode 100644
index 000000000000..1230e6278f23
--- /dev/null
+++ b/drivers/net/wireless/ath/ar5523/ar5523.c
@@ -0,0 +1,1827 @@
+/*
+ * Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr>
+ * Copyright (c) 2006 Sam Leffler, Errno Consulting
+ * Copyright (c) 2007 Christoph Hellwig <hch@lst.de>
+ * Copyright (c) 2008-2009 Weongyo Jeong <weongyo@freebsd.org>
+ * Copyright (c) 2012 Pontus Fuchs <pontus.fuchs@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This driver is based on the uath driver written by Damien Bergamini for
+ * OpenBSD, who did black-box analysis of the Windows binary driver to find
+ * out how the hardware works. It contains a lot of magic numbers because of
+ * that and only has minimal functionality.
+ */
+#include <linux/compiler.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/completion.h>
+#include <linux/firmware.h>
+#include <linux/skbuff.h>
+#include <linux/usb.h>
+#include <net/mac80211.h>
+
+#include "ar5523.h"
+#include "ar5523_hw.h"
+
+/*
+ * Various supported device vendors/products.
+ * UB51: AR5005UG 802.11b/g, UB52: AR5005UX 802.11a/b/g
+ */
+
+static int ar5523_submit_rx_cmd(struct ar5523 *ar);
+static void ar5523_data_tx_pkt_put(struct ar5523 *ar);
+
+static void ar5523_read_reply(struct ar5523 *ar, struct ar5523_cmd_hdr *hdr,
+ struct ar5523_tx_cmd *cmd)
+{
+ int dlen, olen;
+ __be32 *rp;
+
+ dlen = be32_to_cpu(hdr->len) - sizeof(*hdr);
+
+ if (dlen < 0) {
+ WARN_ON(1);
+ goto out;
+ }
+
+ ar5523_dbg(ar, "Code = %d len = %d\n", be32_to_cpu(hdr->code) & 0xff,
+ dlen);
+
+ rp = (__be32 *)(hdr + 1);
+ if (dlen >= sizeof(u32)) {
+ olen = be32_to_cpu(rp[0]);
+ dlen -= sizeof(u32);
+ if (olen == 0) {
+ /* convention: 0 means one word */
+ olen = sizeof(u32);
+ }
+ } else
+ olen = 0;
+
+ if (cmd->odata) {
+ if (cmd->olen < olen) {
+ ar5523_err(ar, "olen too small %d < %d\n",
+ cmd->olen, olen);
+ cmd->olen = 0;
+ cmd->res = -EOVERFLOW;
+ } else {
+ cmd->olen = olen;
+ memcpy(cmd->odata, &rp[1], olen);
+ cmd->res = 0;
+ }
+ }
+
+out:
+ complete(&cmd->done);
+}
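The length handling in ar5523_read_reply() encodes a small wire convention: the first big-endian word of a read reply carries the payload length, and zero means exactly one 32-bit word. The same logic as a standalone helper, in the driver's context (a hypothetical function, for illustration only):

    /* Decode the reply-length convention used above: the first
     * big-endian word of a read reply is the payload length, and by
     * convention 0 means exactly one 32-bit word. */
    static int reply_payload_len(const __be32 *rp, int dlen)
    {
        int olen;

        if (dlen < (int)sizeof(u32))
            return 0;               /* reply carries no length word */
        olen = be32_to_cpu(rp[0]);
        return olen ? olen : (int)sizeof(u32);
    }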
+
+static void ar5523_cmd_rx_cb(struct urb *urb)
+{
+ struct ar5523 *ar = urb->context;
+ struct ar5523_tx_cmd *cmd = &ar->tx_cmd;
+ struct ar5523_cmd_hdr *hdr = ar->rx_cmd_buf;
+ int dlen;
+ u32 code, hdrlen;
+
+ if (urb->status) {
+ if (urb->status != -ESHUTDOWN)
+ ar5523_err(ar, "RX USB error %d.\n", urb->status);
+ goto skip;
+ }
+
+ if (urb->actual_length < sizeof(struct ar5523_cmd_hdr)) {
+ ar5523_err(ar, "RX USB too short.\n");
+ goto skip;
+ }
+
+ ar5523_dbg(ar, "%s code %02x priv %d\n", __func__,
+ be32_to_cpu(hdr->code) & 0xff, hdr->priv);
+
+ code = be32_to_cpu(hdr->code);
+ hdrlen = be32_to_cpu(hdr->len);
+
+ switch (code & 0xff) {
+ default:
+ /* reply to a read command */
+ if (hdr->priv != AR5523_CMD_ID) {
+ ar5523_err(ar, "Unexpected command id: %02x\n",
+ code & 0xff);
+ goto skip;
+ }
+ ar5523_read_reply(ar, hdr, cmd);
+ break;
+
+ case WDCMSG_DEVICE_AVAIL:
+ ar5523_dbg(ar, "WDCMSG_DEVICE_AVAIL\n");
+ cmd->res = 0;
+ cmd->olen = 0;
+ complete(&cmd->done);
+ break;
+
+ case WDCMSG_SEND_COMPLETE:
+ ar5523_dbg(ar, "WDCMSG_SEND_COMPLETE: %d pending\n",
+ atomic_read(&ar->tx_nr_pending));
+ if (!test_bit(AR5523_HW_UP, &ar->flags))
+ ar5523_dbg(ar, "Unexpected WDCMSG_SEND_COMPLETE\n");
+ else {
+ mod_timer(&ar->tx_wd_timer,
+ jiffies + AR5523_TX_WD_TIMEOUT);
+ ar5523_data_tx_pkt_put(ar);
+
+ }
+ break;
+
+ case WDCMSG_TARGET_START:
+ /* This command returns a bogus id so it needs special
+ handling */
+ dlen = hdrlen - sizeof(*hdr);
+ if (dlen != (int)sizeof(u32)) {
+ ar5523_err(ar, "Invalid reply to WDCMSG_TARGET_START");
+ return;
+ }
+ if (!cmd->odata) {
+ ar5523_err(ar, "Unexpected WDCMSG_TARGET_START reply");
+ return;
+ }
+ memcpy(cmd->odata, hdr + 1, sizeof(u32));
+ cmd->olen = sizeof(u32);
+ cmd->res = 0;
+ complete(&cmd->done);
+ break;
+
+ case WDCMSG_STATS_UPDATE:
+ ar5523_dbg(ar, "WDCMSG_STATS_UPDATE\n");
+ break;
+ }
+
+skip:
+ ar5523_submit_rx_cmd(ar);
+}
+
+static int ar5523_alloc_rx_cmd(struct ar5523 *ar)
+{
+ ar->rx_cmd_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!ar->rx_cmd_urb)
+ return -ENOMEM;
+
+ ar->rx_cmd_buf = usb_alloc_coherent(ar->dev, AR5523_MAX_RXCMDSZ,
+ GFP_KERNEL,
+ &ar->rx_cmd_urb->transfer_dma);
+ if (!ar->rx_cmd_buf) {
+ usb_free_urb(ar->rx_cmd_urb);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static void ar5523_cancel_rx_cmd(struct ar5523 *ar)
+{
+ usb_kill_urb(ar->rx_cmd_urb);
+}
+
+static void ar5523_free_rx_cmd(struct ar5523 *ar)
+{
+ usb_free_coherent(ar->dev, AR5523_MAX_RXCMDSZ,
+ ar->rx_cmd_buf, ar->rx_cmd_urb->transfer_dma);
+ usb_free_urb(ar->rx_cmd_urb);
+}
+
+static int ar5523_submit_rx_cmd(struct ar5523 *ar)
+{
+ int error;
+
+ usb_fill_bulk_urb(ar->rx_cmd_urb, ar->dev,
+ ar5523_cmd_rx_pipe(ar->dev), ar->rx_cmd_buf,
+ AR5523_MAX_RXCMDSZ, ar5523_cmd_rx_cb, ar);
+ ar->rx_cmd_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+
+ error = usb_submit_urb(ar->rx_cmd_urb, GFP_ATOMIC);
+ if (error) {
+ if (error != -ENODEV)
+ ar5523_err(ar, "error %d when submitting rx urb\n",
+ error);
+ return error;
+ }
+ return 0;
+}
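The command RX path is one persistent bulk-in URB: the coherent buffer is allocated once, the completion callback does its work and then falls through to ar5523_submit_rx_cmd() via the skip: label, re-arming the URB, and teardown is simply usb_kill_urb(). The submit/complete/resubmit cycle reduced to a skeleton (a hypothetical callback, not the driver's):

    #include <linux/usb.h>

    /* Persistent bulk-in URB: the completion handler always falls
     * through to a resubmit; usb_kill_urb() at teardown ends the cycle. */
    static void rx_complete(struct urb *urb)
    {
        if (urb->status) {
            if (urb->status != -ESHUTDOWN)
                pr_err("rx urb error %d\n", urb->status);
            goto resubmit;
        }

        /* ... consume urb->transfer_buffer, urb->actual_length ... */

    resubmit:
        usb_submit_urb(urb, GFP_ATOMIC);  /* fails harmlessly once the device is gone */
    }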
+
+/*
+ * Command submitted cb
+ */
+static void ar5523_cmd_tx_cb(struct urb *urb)
+{
+ struct ar5523_tx_cmd *cmd = urb->context;
+ struct ar5523 *ar = cmd->ar;
+
+ if (urb->status) {
+ ar5523_err(ar, "Failed to TX command. Status = %d\n",
+ urb->status);
+ cmd->res = urb->status;
+ complete(&cmd->done);
+ return;
+ }
+
+ if (!(cmd->flags & AR5523_CMD_FLAG_READ)) {
+ cmd->res = 0;
+ complete(&cmd->done);
+ }
+}
+
+static void ar5523_cancel_tx_cmd(struct ar5523 *ar)
+{
+ usb_kill_urb(ar->tx_cmd.urb_tx);
+}
+
+static int ar5523_cmd(struct ar5523 *ar, u32 code, const void *idata,
+ int ilen, void *odata, int olen, int flags)
+{
+ struct ar5523_cmd_hdr *hdr;
+ struct ar5523_tx_cmd *cmd = &ar->tx_cmd;
+ int xferlen, error;
+
+ /* always bulk-out a multiple of 4 bytes */
+ xferlen = (sizeof(struct ar5523_cmd_hdr) + ilen + 3) & ~3;
+
+ hdr = cmd->buf_tx;
+ memset(hdr, 0, sizeof(struct ar5523_cmd_hdr));
+ hdr->len = cpu_to_be32(xferlen);
+ hdr->code = cpu_to_be32(code);
+ hdr->priv = AR5523_CMD_ID;
+
+ if (flags & AR5523_CMD_FLAG_MAGIC)
+ hdr->magic = cpu_to_be32(1 << 24);
+ if (ilen)
+ memcpy(hdr + 1, idata, ilen);
+
+ cmd->odata = odata;
+ cmd->olen = olen;
+ cmd->flags = flags;
+
+ ar5523_dbg(ar, "do cmd %02x\n", code);
+
+ usb_fill_bulk_urb(cmd->urb_tx, ar->dev, ar5523_cmd_tx_pipe(ar->dev),
+ cmd->buf_tx, xferlen, ar5523_cmd_tx_cb, cmd);
+ cmd->urb_tx->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+
+ error = usb_submit_urb(cmd->urb_tx, GFP_KERNEL);
+ if (error) {
+ ar5523_err(ar, "could not send command 0x%x, error=%d\n",
+ code, error);
+ return error;
+ }
+
+ if (!wait_for_completion_timeout(&cmd->done, 2 * HZ)) {
+ ar5523_cancel_tx_cmd(ar);
+ cmd->odata = NULL;
+ ar5523_err(ar, "timeout waiting for command %02x reply\n",
+ code);
+ cmd->res = -ETIMEDOUT;
+ }
+ return cmd->res;
+}
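ar5523_cmd() turns the asynchronous USB machinery into a synchronous call through a struct completion: the submitter waits on cmd->done with a two-second timeout, whichever callback finishes the exchange calls complete(), and on timeout the TX URB is killed so a late completion cannot touch stale state. The core pattern, reduced to its essentials (hypothetical names, assuming the same 2 * HZ timeout as above):

    #include <linux/completion.h>
    #include <linux/errno.h>
    #include <linux/jiffies.h>

    struct sync_cmd {
        struct completion done;
        int res;
    };

    /* Waiter side: submit async work, then sleep until the callback
     * completes us or the timeout expires. */
    static int run_cmd(struct sync_cmd *cmd)
    {
        init_completion(&cmd->done);
        /* ... submit the asynchronous work (e.g. a USB URB) here ... */
        if (!wait_for_completion_timeout(&cmd->done, 2 * HZ)) {
            /* ... cancel the async work before returning ... */
            cmd->res = -ETIMEDOUT;
        }
        return cmd->res;
    }

    /* Completion side, called from the asynchronous callback. */
    static void finish_cmd(struct sync_cmd *cmd, int result)
    {
        cmd->res = result;
        complete(&cmd->done);
    }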
+
+static int ar5523_cmd_write(struct ar5523 *ar, u32 code, const void *data,
+ int len, int flags)
+{
+ flags &= ~AR5523_CMD_FLAG_READ;
+ return ar5523_cmd(ar, code, data, len, NULL, 0, flags);
+}
+
+static int ar5523_cmd_read(struct ar5523 *ar, u32 code, const void *idata,
+ int ilen, void *odata, int olen, int flags)
+{
+ flags |= AR5523_CMD_FLAG_READ;
+ return ar5523_cmd(ar, code, idata, ilen, odata, olen, flags);
+}
+
+static int ar5523_config(struct ar5523 *ar, u32 reg, u32 val)
+{
+ struct ar5523_write_mac write;
+ int error;
+
+ write.reg = cpu_to_be32(reg);
+ write.len = cpu_to_be32(0); /* 0 = single write */
+ *(__be32 *)write.data = cpu_to_be32(val);
+
+ error = ar5523_cmd_write(ar, WDCMSG_TARGET_SET_CONFIG, &write,
+ 3 * sizeof(u32), 0);
+ if (error != 0)
+ ar5523_err(ar, "could not write register 0x%02x\n", reg);
+ return error;
+}
+
+static int ar5523_config_multi(struct ar5523 *ar, u32 reg, const void *data,
+ int len)
+{
+ struct ar5523_write_mac write;
+ int error;
+
+ write.reg = cpu_to_be32(reg);
+ write.len = cpu_to_be32(len);
+ memcpy(write.data, data, len);
+
+ /* properly handle the case where len is zero (reset) */
+ error = ar5523_cmd_write(ar, WDCMSG_TARGET_SET_CONFIG, &write,
+ (len == 0) ? sizeof(u32) : 2 * sizeof(u32) + len, 0);
+ if (error != 0)
+ ar5523_err(ar, "could not write %d bytes to register 0x%02x\n",
+ len, reg);
+ return error;
+}
+
+static int ar5523_get_status(struct ar5523 *ar, u32 which, void *odata,
+ int olen)
+{
+ int error;
+ __be32 which_be;
+
+ which_be = cpu_to_be32(which);
+ error = ar5523_cmd_read(ar, WDCMSG_TARGET_GET_STATUS,
+ &which_be, sizeof(which_be), odata, olen, AR5523_CMD_FLAG_MAGIC);
+ if (error != 0)
+ ar5523_err(ar, "could not read EEPROM offset 0x%02x\n", which);
+ return error;
+}
+
+static int ar5523_get_capability(struct ar5523 *ar, u32 cap, u32 *val)
+{
+ int error;
+ __be32 cap_be, val_be;
+
+ cap_be = cpu_to_be32(cap);
+ error = ar5523_cmd_read(ar, WDCMSG_TARGET_GET_CAPABILITY, &cap_be,
+ sizeof(cap_be), &val_be, sizeof(__be32),
+ AR5523_CMD_FLAG_MAGIC);
+ if (error != 0) {
+ ar5523_err(ar, "could not read capability %u\n", cap);
+ return error;
+ }
+ *val = be32_to_cpu(val_be);
+ return error;
+}
+
+static int ar5523_get_devcap(struct ar5523 *ar)
+{
+#define GETCAP(x) do { \
+ error = ar5523_get_capability(ar, x, &cap); \
+ if (error != 0) \
+ return error; \
+ ar5523_info(ar, "Cap: " \
+ "%s=0x%08x\n", #x, cap); \
+} while (0)
+ int error;
+ u32 cap;
+
+ /* collect device capabilities */
+ GETCAP(CAP_TARGET_VERSION);
+ GETCAP(CAP_TARGET_REVISION);
+ GETCAP(CAP_MAC_VERSION);
+ GETCAP(CAP_MAC_REVISION);
+ GETCAP(CAP_PHY_REVISION);
+ GETCAP(CAP_ANALOG_5GHz_REVISION);
+ GETCAP(CAP_ANALOG_2GHz_REVISION);
+
+ GETCAP(CAP_REG_DOMAIN);
+ GETCAP(CAP_REG_CAP_BITS);
+ GETCAP(CAP_WIRELESS_MODES);
+ GETCAP(CAP_CHAN_SPREAD_SUPPORT);
+ GETCAP(CAP_COMPRESS_SUPPORT);
+ GETCAP(CAP_BURST_SUPPORT);
+ GETCAP(CAP_FAST_FRAMES_SUPPORT);
+ GETCAP(CAP_CHAP_TUNING_SUPPORT);
+ GETCAP(CAP_TURBOG_SUPPORT);
+ GETCAP(CAP_TURBO_PRIME_SUPPORT);
+ GETCAP(CAP_DEVICE_TYPE);
+ GETCAP(CAP_WME_SUPPORT);
+ GETCAP(CAP_TOTAL_QUEUES);
+ GETCAP(CAP_CONNECTION_ID_MAX);
+
+ GETCAP(CAP_LOW_5GHZ_CHAN);
+ GETCAP(CAP_HIGH_5GHZ_CHAN);
+ GETCAP(CAP_LOW_2GHZ_CHAN);
+ GETCAP(CAP_HIGH_2GHZ_CHAN);
+ GETCAP(CAP_TWICE_ANTENNAGAIN_5G);
+ GETCAP(CAP_TWICE_ANTENNAGAIN_2G);
+
+ GETCAP(CAP_CIPHER_AES_CCM);
+ GETCAP(CAP_CIPHER_TKIP);
+ GETCAP(CAP_MIC_TKIP);
+ return 0;
+}
+
+static int ar5523_set_ledsteady(struct ar5523 *ar, int lednum, int ledmode)
+{
+ struct ar5523_cmd_ledsteady led;
+
+ led.lednum = cpu_to_be32(lednum);
+ led.ledmode = cpu_to_be32(ledmode);
+
+ ar5523_dbg(ar, "set %s led %s (steady)\n",
+ (lednum == UATH_LED_LINK) ? "link" : "activity",
+ ledmode ? "on" : "off");
+ return ar5523_cmd_write(ar, WDCMSG_SET_LED_STEADY, &led, sizeof(led),
+ 0);
+}
+
+static int ar5523_set_rxfilter(struct ar5523 *ar, u32 bits, u32 op)
+{
+ struct ar5523_cmd_rx_filter rxfilter;
+
+ rxfilter.bits = cpu_to_be32(bits);
+ rxfilter.op = cpu_to_be32(op);
+
+ ar5523_dbg(ar, "setting Rx filter=0x%x flags=0x%x\n", bits, op);
+ return ar5523_cmd_write(ar, WDCMSG_RX_FILTER, &rxfilter,
+ sizeof(rxfilter), 0);
+}
+
+static int ar5523_reset_tx_queues(struct ar5523 *ar)
+{
+ __be32 qid = cpu_to_be32(0);
+
+ ar5523_dbg(ar, "resetting Tx queue\n");
+ return ar5523_cmd_write(ar, WDCMSG_RELEASE_TX_QUEUE,
+ &qid, sizeof(qid), 0);
+}
+
+static int ar5523_set_chan(struct ar5523 *ar)
+{
+	struct ieee80211_conf *conf = &ar->hw->conf;
+	struct ar5523_cmd_reset reset;
+
+ memset(&reset, 0, sizeof(reset));
+ reset.flags |= cpu_to_be32(UATH_CHAN_2GHZ);
+ reset.flags |= cpu_to_be32(UATH_CHAN_OFDM);
+ reset.freq = cpu_to_be32(conf->chandef.chan->center_freq);
+ reset.maxrdpower = cpu_to_be32(50); /* XXX */
+ reset.channelchange = cpu_to_be32(1);
+ reset.keeprccontent = cpu_to_be32(0);
+
+ ar5523_dbg(ar, "set chan flags 0x%x freq %d\n",
+ be32_to_cpu(reset.flags),
+ conf->chandef.chan->center_freq);
+ return ar5523_cmd_write(ar, WDCMSG_RESET, &reset, sizeof(reset), 0);
+}
+
+static int ar5523_queue_init(struct ar5523 *ar)
+{
+ struct ar5523_cmd_txq_setup qinfo;
+
+ ar5523_dbg(ar, "setting up Tx queue\n");
+ qinfo.qid = cpu_to_be32(0);
+ qinfo.len = cpu_to_be32(sizeof(qinfo.attr));
+ qinfo.attr.priority = cpu_to_be32(0); /* XXX */
+ qinfo.attr.aifs = cpu_to_be32(3);
+ qinfo.attr.logcwmin = cpu_to_be32(4);
+ qinfo.attr.logcwmax = cpu_to_be32(10);
+ qinfo.attr.bursttime = cpu_to_be32(0);
+ qinfo.attr.mode = cpu_to_be32(0);
+ qinfo.attr.qflags = cpu_to_be32(1); /* XXX? */
+ return ar5523_cmd_write(ar, WDCMSG_SETUP_TX_QUEUE, &qinfo,
+ sizeof(qinfo), 0);
+}
+
+static int ar5523_switch_chan(struct ar5523 *ar)
+{
+ int error;
+
+ error = ar5523_set_chan(ar);
+ if (error) {
+ ar5523_err(ar, "could not set chan, error %d\n", error);
+ goto out_err;
+ }
+
+ /* reset Tx rings */
+ error = ar5523_reset_tx_queues(ar);
+ if (error) {
+ ar5523_err(ar, "could not reset Tx queues, error %d\n",
+ error);
+ goto out_err;
+ }
+ /* set Tx rings WME properties */
+ error = ar5523_queue_init(ar);
+ if (error)
+ ar5523_err(ar, "could not init wme, error %d\n", error);
+
+out_err:
+ return error;
+}
+
+static void ar5523_rx_data_put(struct ar5523 *ar,
+ struct ar5523_rx_data *data)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&ar->rx_data_list_lock, flags);
+ list_move(&data->list, &ar->rx_data_free);
+ spin_unlock_irqrestore(&ar->rx_data_list_lock, flags);
+}
+
+static void ar5523_data_rx_cb(struct urb *urb)
+{
+ struct ar5523_rx_data *data = urb->context;
+ struct ar5523 *ar = data->ar;
+ struct ar5523_rx_desc *desc;
+ struct ar5523_chunk *chunk;
+ struct ieee80211_hw *hw = ar->hw;
+ struct ieee80211_rx_status *rx_status;
+ u32 rxlen;
+ int usblen = urb->actual_length;
+ int hdrlen, pad;
+
+ ar5523_dbg(ar, "%s\n", __func__);
+ /* sync/async unlink faults aren't errors */
+ if (urb->status) {
+ if (urb->status != -ESHUTDOWN)
+ ar5523_err(ar, "%s: USB err: %d\n", __func__,
+ urb->status);
+ goto skip;
+ }
+
+ if (usblen < AR5523_MIN_RXBUFSZ) {
+ ar5523_err(ar, "RX: wrong xfer size (usblen=%d)\n", usblen);
+ goto skip;
+ }
+
+ chunk = (struct ar5523_chunk *) data->skb->data;
+
+ if (((chunk->flags & UATH_CFLAGS_FINAL) == 0) ||
+ chunk->seqnum != 0) {
+ ar5523_dbg(ar, "RX: No final flag. s: %d f: %02x l: %d\n",
+ chunk->seqnum, chunk->flags,
+ be16_to_cpu(chunk->length));
+ goto skip;
+ }
+
+ /* Rx descriptor is located at the end, 32-bit aligned */
+ desc = (struct ar5523_rx_desc *)
+ (data->skb->data + usblen - sizeof(struct ar5523_rx_desc));
+
+ rxlen = be32_to_cpu(desc->len);
+ if (rxlen > ar->rxbufsz) {
+ ar5523_dbg(ar, "RX: Bad descriptor (len=%d)\n",
+ be32_to_cpu(desc->len));
+ goto skip;
+ }
+
+ if (!rxlen) {
+ ar5523_dbg(ar, "RX: rxlen is 0\n");
+ goto skip;
+ }
+
+ if (be32_to_cpu(desc->status) != 0) {
+ ar5523_dbg(ar, "Bad RX status (0x%x len = %d). Skip\n",
+ be32_to_cpu(desc->status), be32_to_cpu(desc->len));
+ goto skip;
+ }
+
+ skb_reserve(data->skb, sizeof(*chunk));
+ skb_put(data->skb, rxlen - sizeof(struct ar5523_rx_desc));
+
+ hdrlen = ieee80211_get_hdrlen_from_skb(data->skb);
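+	/* keep the payload 4-byte aligned for mac80211 by shifting the
+	 * 802.11 header forward when its length is not a multiple of 4 */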
+ if (!IS_ALIGNED(hdrlen, 4)) {
+ ar5523_dbg(ar, "eek, alignment workaround activated\n");
+ pad = ALIGN(hdrlen, 4) - hdrlen;
+ memmove(data->skb->data + pad, data->skb->data, hdrlen);
+ skb_pull(data->skb, pad);
+ skb_put(data->skb, pad);
+ }
+
+ rx_status = IEEE80211_SKB_RXCB(data->skb);
+ memset(rx_status, 0, sizeof(*rx_status));
+ rx_status->freq = be32_to_cpu(desc->channel);
+ rx_status->band = hw->conf.chandef.chan->band;
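+	/* scale the firmware RSSI to dBm (a -95 dBm floor is assumed here) */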
+ rx_status->signal = -95 + be32_to_cpu(desc->rssi);
+
+ ieee80211_rx_irqsafe(hw, data->skb);
+ data->skb = NULL;
+
+skip:
+ if (data->skb) {
+ dev_kfree_skb_irq(data->skb);
+ data->skb = NULL;
+ }
+
+ ar5523_rx_data_put(ar, data);
+ if (atomic_inc_return(&ar->rx_data_free_cnt) >=
+ AR5523_RX_DATA_REFILL_COUNT &&
+ test_bit(AR5523_HW_UP, &ar->flags))
+ queue_work(ar->wq, &ar->rx_refill_work);
+}
+
+static void ar5523_rx_refill_work(struct work_struct *work)
+{
+ struct ar5523 *ar = container_of(work, struct ar5523, rx_refill_work);
+ struct ar5523_rx_data *data;
+ unsigned long flags;
+ int error;
+
+ ar5523_dbg(ar, "%s\n", __func__);
+ do {
+ spin_lock_irqsave(&ar->rx_data_list_lock, flags);
+
+ if (!list_empty(&ar->rx_data_free))
+ data = (struct ar5523_rx_data *) ar->rx_data_free.next;
+ else
+ data = NULL;
+ spin_unlock_irqrestore(&ar->rx_data_list_lock, flags);
+
+ if (!data)
+ goto done;
+
+ data->skb = alloc_skb(ar->rxbufsz, GFP_KERNEL);
+ if (!data->skb) {
+ ar5523_err(ar, "could not allocate rx skbuff\n");
+ return;
+ }
+
+ usb_fill_bulk_urb(data->urb, ar->dev,
+ ar5523_data_rx_pipe(ar->dev), data->skb->data,
+ ar->rxbufsz, ar5523_data_rx_cb, data);
+
+ spin_lock_irqsave(&ar->rx_data_list_lock, flags);
+ list_move(&data->list, &ar->rx_data_used);
+ spin_unlock_irqrestore(&ar->rx_data_list_lock, flags);
+ atomic_dec(&ar->rx_data_free_cnt);
+
+ error = usb_submit_urb(data->urb, GFP_KERNEL);
+ if (error) {
+ kfree_skb(data->skb);
+ if (error != -ENODEV)
+ ar5523_err(ar, "Err sending rx data urb %d\n",
+ error);
+ ar5523_rx_data_put(ar, data);
+ atomic_inc(&ar->rx_data_free_cnt);
+ return;
+ }
+
+ } while (true);
+done:
+ return;
+}
+
+static void ar5523_cancel_rx_bufs(struct ar5523 *ar)
+{
+ struct ar5523_rx_data *data;
+ unsigned long flags;
+
+ do {
+ spin_lock_irqsave(&ar->rx_data_list_lock, flags);
+ if (!list_empty(&ar->rx_data_used))
+ data = (struct ar5523_rx_data *) ar->rx_data_used.next;
+ else
+ data = NULL;
+ spin_unlock_irqrestore(&ar->rx_data_list_lock, flags);
+
+ if (!data)
+ break;
+
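+		/* usb_kill_urb() may sleep, so the list lock was dropped above */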
+ usb_kill_urb(data->urb);
+ list_move(&data->list, &ar->rx_data_free);
+ atomic_inc(&ar->rx_data_free_cnt);
+ } while (data);
+}
+
+static void ar5523_free_rx_bufs(struct ar5523 *ar)
+{
+ struct ar5523_rx_data *data;
+
+ ar5523_cancel_rx_bufs(ar);
+ while (!list_empty(&ar->rx_data_free)) {
+ data = (struct ar5523_rx_data *) ar->rx_data_free.next;
+ list_del(&data->list);
+ usb_free_urb(data->urb);
+ }
+}
+
+static int ar5523_alloc_rx_bufs(struct ar5523 *ar)
+{
+ int i;
+
+ for (i = 0; i < AR5523_RX_DATA_COUNT; i++) {
+ struct ar5523_rx_data *data = &ar->rx_data[i];
+
+ data->ar = ar;
+ data->urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!data->urb)
+ goto err;
+ list_add_tail(&data->list, &ar->rx_data_free);
+ atomic_inc(&ar->rx_data_free_cnt);
+ }
+ return 0;
+
+err:
+ ar5523_free_rx_bufs(ar);
+ return -ENOMEM;
+}
+
+static void ar5523_data_tx_pkt_put(struct ar5523 *ar)
+{
+ atomic_dec(&ar->tx_nr_total);
+ if (!atomic_dec_return(&ar->tx_nr_pending)) {
+ timer_delete(&ar->tx_wd_timer);
+ wake_up(&ar->tx_flush_waitq);
+ }
+
+ if (atomic_read(&ar->tx_nr_total) < AR5523_TX_DATA_RESTART_COUNT) {
+ ar5523_dbg(ar, "restart tx queue\n");
+ ieee80211_wake_queues(ar->hw);
+ }
+}
+
+static void ar5523_data_tx_cb(struct urb *urb)
+{
+ struct sk_buff *skb = urb->context;
+ struct ieee80211_tx_info *txi = IEEE80211_SKB_CB(skb);
+ struct ar5523_tx_data *data = (struct ar5523_tx_data *)
+ txi->driver_data;
+ struct ar5523 *ar = data->ar;
+ unsigned long flags;
+
+ ar5523_dbg(ar, "data tx urb completed: %d\n", urb->status);
+
+ spin_lock_irqsave(&ar->tx_data_list_lock, flags);
+ list_del(&data->list);
+ spin_unlock_irqrestore(&ar->tx_data_list_lock, flags);
+
+ if (urb->status) {
+ ar5523_dbg(ar, "%s: urb status: %d\n", __func__, urb->status);
+ ar5523_data_tx_pkt_put(ar);
+ ieee80211_free_txskb(ar->hw, skb);
+ } else {
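+		/* strip the 4-byte chunk header and TX descriptor prepended
+		 * at submit time before handing the skb back to mac80211 */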
+ skb_pull(skb, sizeof(struct ar5523_tx_desc) + sizeof(__be32));
+ ieee80211_tx_status_irqsafe(ar->hw, skb);
+ }
+ usb_free_urb(urb);
+}
+
+static void ar5523_tx(struct ieee80211_hw *hw,
+ struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *txi = IEEE80211_SKB_CB(skb);
+ struct ar5523_tx_data *data = (struct ar5523_tx_data *)
+ txi->driver_data;
+ struct ar5523 *ar = hw->priv;
+ unsigned long flags;
+
+ ar5523_dbg(ar, "tx called\n");
+ if (atomic_inc_return(&ar->tx_nr_total) >= AR5523_TX_DATA_COUNT) {
+ ar5523_dbg(ar, "tx queue full\n");
+ ar5523_dbg(ar, "stop queues (tot %d pend %d)\n",
+ atomic_read(&ar->tx_nr_total),
+ atomic_read(&ar->tx_nr_pending));
+ ieee80211_stop_queues(hw);
+ }
+
+ spin_lock_irqsave(&ar->tx_data_list_lock, flags);
+ list_add_tail(&data->list, &ar->tx_queue_pending);
+ spin_unlock_irqrestore(&ar->tx_data_list_lock, flags);
+
+ ieee80211_queue_work(ar->hw, &ar->tx_work);
+}
+
+static void ar5523_tx_work_locked(struct ar5523 *ar)
+{
+ struct ar5523_tx_data *data;
+ struct ar5523_tx_desc *desc;
+ struct ar5523_chunk *chunk;
+ struct ieee80211_tx_info *txi;
+ struct urb *urb;
+ struct sk_buff *skb;
+ int error = 0, paylen;
+ u32 txqid;
+ unsigned long flags;
+
+ BUILD_BUG_ON(sizeof(struct ar5523_tx_data) >
+ IEEE80211_TX_INFO_DRIVER_DATA_SIZE);
+
+ ar5523_dbg(ar, "%s\n", __func__);
+ do {
+ spin_lock_irqsave(&ar->tx_data_list_lock, flags);
+ if (!list_empty(&ar->tx_queue_pending)) {
+ data = (struct ar5523_tx_data *)
+ ar->tx_queue_pending.next;
+ list_del(&data->list);
+ } else
+ data = NULL;
+ spin_unlock_irqrestore(&ar->tx_data_list_lock, flags);
+
+ if (!data)
+ break;
+
+ txi = container_of((void *)data, struct ieee80211_tx_info,
+ driver_data);
+ txqid = 0;
+
+ skb = container_of((void *)txi, struct sk_buff, cb);
+ paylen = skb->len;
+
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!urb) {
+ ieee80211_free_txskb(ar->hw, skb);
+ continue;
+ }
+
+ data->ar = ar;
+ data->urb = urb;
+
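+		/* each frame goes out as: chunk header | TX descriptor |
+		 * 802.11 frame, in a single bulk transfer */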
+ desc = skb_push(skb, sizeof(*desc));
+ chunk = skb_push(skb, sizeof(*chunk));
+
+ chunk->seqnum = 0;
+ chunk->flags = UATH_CFLAGS_FINAL;
+ chunk->length = cpu_to_be16(skb->len);
+
+ desc->msglen = cpu_to_be32(skb->len);
+ desc->msgid = AR5523_DATA_ID;
+ desc->buflen = cpu_to_be32(paylen);
+ desc->type = cpu_to_be32(WDCMSG_SEND);
+ desc->flags = cpu_to_be32(UATH_TX_NOTIFY);
+
+ if (test_bit(AR5523_CONNECTED, &ar->flags))
+ desc->connid = cpu_to_be32(AR5523_ID_BSS);
+ else
+ desc->connid = cpu_to_be32(AR5523_ID_BROADCAST);
+
+ if (txi->flags & IEEE80211_TX_CTL_USE_MINRATE)
+ txqid |= UATH_TXQID_MINRATE;
+
+ desc->txqid = cpu_to_be32(txqid);
+
+ urb->transfer_flags = URB_ZERO_PACKET;
+ usb_fill_bulk_urb(urb, ar->dev, ar5523_data_tx_pipe(ar->dev),
+ skb->data, skb->len, ar5523_data_tx_cb, skb);
+
+ spin_lock_irqsave(&ar->tx_data_list_lock, flags);
+ list_add_tail(&data->list, &ar->tx_queue_submitted);
+ spin_unlock_irqrestore(&ar->tx_data_list_lock, flags);
+ mod_timer(&ar->tx_wd_timer, jiffies + AR5523_TX_WD_TIMEOUT);
+ atomic_inc(&ar->tx_nr_pending);
+
+ ar5523_dbg(ar, "TX Frame (%d pending)\n",
+ atomic_read(&ar->tx_nr_pending));
+ error = usb_submit_urb(urb, GFP_KERNEL);
+ if (error) {
+ ar5523_err(ar, "error %d when submitting tx urb\n",
+ error);
+ spin_lock_irqsave(&ar->tx_data_list_lock, flags);
+ list_del(&data->list);
+ spin_unlock_irqrestore(&ar->tx_data_list_lock, flags);
+ atomic_dec(&ar->tx_nr_pending);
+ ar5523_data_tx_pkt_put(ar);
+ usb_free_urb(urb);
+ ieee80211_free_txskb(ar->hw, skb);
+ }
+ } while (true);
+}
+
+static void ar5523_tx_work(struct work_struct *work)
+{
+ struct ar5523 *ar = container_of(work, struct ar5523, tx_work);
+
+ ar5523_dbg(ar, "%s\n", __func__);
+ mutex_lock(&ar->mutex);
+ ar5523_tx_work_locked(ar);
+ mutex_unlock(&ar->mutex);
+}
+
+static void ar5523_tx_wd_timer(struct timer_list *t)
+{
+ struct ar5523 *ar = timer_container_of(ar, t, tx_wd_timer);
+
+ ar5523_dbg(ar, "TX watchdog timer triggered\n");
+ ieee80211_queue_work(ar->hw, &ar->tx_wd_work);
+}
+
+static void ar5523_tx_wd_work(struct work_struct *work)
+{
+ struct ar5523 *ar = container_of(work, struct ar5523, tx_wd_work);
+
+ /* Occasionally the TX queues stop responding. The only way to
+ * recover seems to be to reset the dongle.
+ */
+
+ mutex_lock(&ar->mutex);
+ ar5523_err(ar, "TX queue stuck (tot %d pend %d)\n",
+ atomic_read(&ar->tx_nr_total),
+ atomic_read(&ar->tx_nr_pending));
+
+ ar5523_err(ar, "Will restart dongle.\n");
+ ar5523_cmd_write(ar, WDCMSG_TARGET_RESET, NULL, 0, 0);
+ mutex_unlock(&ar->mutex);
+}
+
+static void ar5523_flush_tx(struct ar5523 *ar)
+{
+ ar5523_tx_work_locked(ar);
+
+ /* Don't waste time trying to flush if USB is disconnected */
+ if (test_bit(AR5523_USB_DISCONNECTED, &ar->flags))
+ return;
+ if (!wait_event_timeout(ar->tx_flush_waitq,
+ !atomic_read(&ar->tx_nr_pending), AR5523_FLUSH_TIMEOUT))
+ ar5523_err(ar, "flush timeout (tot %d pend %d)\n",
+ atomic_read(&ar->tx_nr_total),
+ atomic_read(&ar->tx_nr_pending));
+}
+
+static void ar5523_free_tx_cmd(struct ar5523 *ar)
+{
+ struct ar5523_tx_cmd *cmd = &ar->tx_cmd;
+
+	usb_free_coherent(ar->dev, AR5523_MAX_TXCMDSZ, cmd->buf_tx,
+ cmd->urb_tx->transfer_dma);
+ usb_free_urb(cmd->urb_tx);
+}
+
+static int ar5523_alloc_tx_cmd(struct ar5523 *ar)
+{
+ struct ar5523_tx_cmd *cmd = &ar->tx_cmd;
+
+ cmd->ar = ar;
+ init_completion(&cmd->done);
+
+ cmd->urb_tx = usb_alloc_urb(0, GFP_KERNEL);
+ if (!cmd->urb_tx)
+ return -ENOMEM;
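+	/* coherent buffer, paired with URB_NO_TRANSFER_DMA_MAP in ar5523_cmd() */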
+ cmd->buf_tx = usb_alloc_coherent(ar->dev, AR5523_MAX_TXCMDSZ,
+ GFP_KERNEL,
+ &cmd->urb_tx->transfer_dma);
+ if (!cmd->buf_tx) {
+ usb_free_urb(cmd->urb_tx);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+/*
+ * This function is called periodically (every second) when associated to
+ * query device statistics.
+ */
+static void ar5523_stat_work(struct work_struct *work)
+{
+ struct ar5523 *ar = container_of(work, struct ar5523, stat_work.work);
+ int error;
+
+ ar5523_dbg(ar, "%s\n", __func__);
+ mutex_lock(&ar->mutex);
+
+ /*
+ * Send request for statistics asynchronously once a second. This
+ * seems to be important. Throughput is a lot better if this is done.
+ */
+ error = ar5523_cmd_write(ar, WDCMSG_TARGET_GET_STATS, NULL, 0, 0);
+ if (error)
+ ar5523_err(ar, "could not query stats, error %d\n", error);
+ mutex_unlock(&ar->mutex);
+ ieee80211_queue_delayed_work(ar->hw, &ar->stat_work, HZ);
+}
+
+/*
+ * Interface routines to the mac80211 stack.
+ */
+static int ar5523_start(struct ieee80211_hw *hw)
+{
+ struct ar5523 *ar = hw->priv;
+ int error;
+ __be32 val;
+
+ ar5523_dbg(ar, "start called\n");
+
+ mutex_lock(&ar->mutex);
+ val = cpu_to_be32(0);
+ ar5523_cmd_write(ar, WDCMSG_BIND, &val, sizeof(val), 0);
+
+ /* set MAC address */
+ ar5523_config_multi(ar, CFG_MAC_ADDR, &ar->hw->wiphy->perm_addr,
+ ETH_ALEN);
+
+ /* XXX honor net80211 state */
+ ar5523_config(ar, CFG_RATE_CONTROL_ENABLE, 0x00000001);
+ ar5523_config(ar, CFG_DIVERSITY_CTL, 0x00000001);
+ ar5523_config(ar, CFG_ABOLT, 0x0000003f);
+ ar5523_config(ar, CFG_WME_ENABLED, 0x00000000);
+
+ ar5523_config(ar, CFG_SERVICE_TYPE, 1);
+ ar5523_config(ar, CFG_TP_SCALE, 0x00000000);
+ ar5523_config(ar, CFG_TPC_HALF_DBM5, 0x0000003c);
+ ar5523_config(ar, CFG_TPC_HALF_DBM2, 0x0000003c);
+ ar5523_config(ar, CFG_OVERRD_TX_POWER, 0x00000000);
+ ar5523_config(ar, CFG_GMODE_PROTECTION, 0x00000000);
+ ar5523_config(ar, CFG_GMODE_PROTECT_RATE_INDEX, 0x00000003);
+ ar5523_config(ar, CFG_PROTECTION_TYPE, 0x00000000);
+ ar5523_config(ar, CFG_MODE_CTS, 0x00000002);
+
+ error = ar5523_cmd_read(ar, WDCMSG_TARGET_START, NULL, 0,
+ &val, sizeof(val), AR5523_CMD_FLAG_MAGIC);
+ if (error) {
+ ar5523_dbg(ar, "could not start target, error %d\n", error);
+ goto err;
+ }
+ ar5523_dbg(ar, "WDCMSG_TARGET_START returns handle: 0x%x\n",
+ be32_to_cpu(val));
+
+ ar5523_switch_chan(ar);
+
+ val = cpu_to_be32(TARGET_DEVICE_AWAKE);
+ ar5523_cmd_write(ar, WDCMSG_SET_PWR_MODE, &val, sizeof(val), 0);
+ /* XXX? check */
+ ar5523_cmd_write(ar, WDCMSG_RESET_KEY_CACHE, NULL, 0, 0);
+
+ set_bit(AR5523_HW_UP, &ar->flags);
+ queue_work(ar->wq, &ar->rx_refill_work);
+
+ /* enable Rx */
+ ar5523_set_rxfilter(ar, 0, UATH_FILTER_OP_INIT);
+ ar5523_set_rxfilter(ar,
+ UATH_FILTER_RX_UCAST | UATH_FILTER_RX_MCAST |
+ UATH_FILTER_RX_BCAST | UATH_FILTER_RX_BEACON,
+ UATH_FILTER_OP_SET);
+
+ ar5523_set_ledsteady(ar, UATH_LED_ACTIVITY, UATH_LED_ON);
+ ar5523_dbg(ar, "start OK\n");
+
+err:
+ mutex_unlock(&ar->mutex);
+ return error;
+}
+
+static void ar5523_stop(struct ieee80211_hw *hw, bool suspend)
+{
+ struct ar5523 *ar = hw->priv;
+
+ ar5523_dbg(ar, "stop called\n");
+
+ cancel_delayed_work_sync(&ar->stat_work);
+ mutex_lock(&ar->mutex);
+ clear_bit(AR5523_HW_UP, &ar->flags);
+
+ ar5523_set_ledsteady(ar, UATH_LED_LINK, UATH_LED_OFF);
+ ar5523_set_ledsteady(ar, UATH_LED_ACTIVITY, UATH_LED_OFF);
+
+ ar5523_cmd_write(ar, WDCMSG_TARGET_STOP, NULL, 0, 0);
+
+ timer_delete_sync(&ar->tx_wd_timer);
+ cancel_work_sync(&ar->tx_wd_work);
+ cancel_work_sync(&ar->rx_refill_work);
+ ar5523_cancel_rx_bufs(ar);
+ mutex_unlock(&ar->mutex);
+}
+
+static int ar5523_set_rts_threshold(struct ieee80211_hw *hw, int radio_idx,
+ u32 value)
+{
+ struct ar5523 *ar = hw->priv;
+ int ret;
+
+ ar5523_dbg(ar, "set_rts_threshold called\n");
+ mutex_lock(&ar->mutex);
+
+ ret = ar5523_config(ar, CFG_USER_RTS_THRESHOLD, value);
+
+ mutex_unlock(&ar->mutex);
+ return ret;
+}
+
+static void ar5523_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u32 queues, bool drop)
+{
+ struct ar5523 *ar = hw->priv;
+
+ ar5523_dbg(ar, "flush called\n");
+ ar5523_flush_tx(ar);
+}
+
+static int ar5523_add_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ar5523 *ar = hw->priv;
+
+ ar5523_dbg(ar, "add interface called\n");
+
+ if (ar->vif) {
+ ar5523_dbg(ar, "invalid add_interface\n");
+ return -EOPNOTSUPP;
+ }
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_STATION:
+ ar->vif = vif;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
+static void ar5523_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ar5523 *ar = hw->priv;
+
+ ar5523_dbg(ar, "remove interface called\n");
+ ar->vif = NULL;
+}
+
+static int ar5523_hwconfig(struct ieee80211_hw *hw, int radio_idx, u32 changed)
+{
+ struct ar5523 *ar = hw->priv;
+
+ ar5523_dbg(ar, "config called\n");
+ mutex_lock(&ar->mutex);
+ if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+ ar5523_dbg(ar, "Do channel switch\n");
+ ar5523_flush_tx(ar);
+ ar5523_switch_chan(ar);
+ }
+ mutex_unlock(&ar->mutex);
+ return 0;
+}
+
+static int ar5523_get_wlan_mode(struct ar5523 *ar,
+ struct ieee80211_bss_conf *bss_conf)
+{
+ struct ieee80211_supported_band *band;
+ int bit;
+ struct ieee80211_sta *sta;
+ u32 sta_rate_set;
+
+ band = ar->hw->wiphy->bands[ar->hw->conf.chandef.chan->band];
+ sta = ieee80211_find_sta(ar->vif, bss_conf->bssid);
+ if (!sta) {
+ ar5523_info(ar, "STA not found!\n");
+ return WLAN_MODE_11b;
+ }
+ sta_rate_set = sta->deflink.supp_rates[ar->hw->conf.chandef.chan->band];
+
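+	/* any OFDM rate (6-54 Mb/s) in the peer's rate set implies 11g */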
+ for (bit = 0; bit < band->n_bitrates; bit++) {
+ if (sta_rate_set & 1) {
+ int rate = band->bitrates[bit].bitrate;
+ switch (rate) {
+ case 60:
+ case 90:
+ case 120:
+ case 180:
+ case 240:
+ case 360:
+ case 480:
+ case 540:
+ return WLAN_MODE_11g;
+ }
+ }
+ sta_rate_set >>= 1;
+ }
+ return WLAN_MODE_11b;
+}
+
+static void ar5523_create_rateset(struct ar5523 *ar,
+ struct ieee80211_bss_conf *bss_conf,
+ struct ar5523_cmd_rateset *rs,
+ bool basic)
+{
+ struct ieee80211_supported_band *band;
+ struct ieee80211_sta *sta;
+ int bit, i = 0;
+ u32 sta_rate_set, basic_rate_set;
+
+ sta = ieee80211_find_sta(ar->vif, bss_conf->bssid);
+ basic_rate_set = bss_conf->basic_rates;
+ if (!sta) {
+ ar5523_info(ar, "STA not found. Cannot set rates\n");
+ sta_rate_set = bss_conf->basic_rates;
+ } else
+ sta_rate_set = sta->deflink.supp_rates[ar->hw->conf.chandef.chan->band];
+
+ ar5523_dbg(ar, "sta rate_set = %08x\n", sta_rate_set);
+
+ band = ar->hw->wiphy->bands[ar->hw->conf.chandef.chan->band];
+ for (bit = 0; bit < band->n_bitrates; bit++) {
+ BUG_ON(i >= AR5523_MAX_NRATES);
+ ar5523_dbg(ar, "Considering rate %d : %d\n",
+ band->bitrates[bit].hw_value, sta_rate_set & 1);
+ if (sta_rate_set & 1) {
+ rs->set[i] = band->bitrates[bit].hw_value;
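+			/* bit 7 marks a basic rate, as in 802.11
+			 * supported-rates elements */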
+ if (basic_rate_set & 1 && basic)
+ rs->set[i] |= 0x80;
+ i++;
+ }
+ sta_rate_set >>= 1;
+ basic_rate_set >>= 1;
+ }
+
+ rs->length = i;
+}
+
+static int ar5523_set_basic_rates(struct ar5523 *ar,
+ struct ieee80211_bss_conf *bss)
+{
+ struct ar5523_cmd_rates rates;
+
+ memset(&rates, 0, sizeof(rates));
+ rates.connid = cpu_to_be32(2); /* XXX */
+ rates.size = cpu_to_be32(sizeof(struct ar5523_cmd_rateset));
+ ar5523_create_rateset(ar, bss, &rates.rateset, true);
+
+ return ar5523_cmd_write(ar, WDCMSG_SET_BASIC_RATE, &rates,
+ sizeof(rates), 0);
+}
+
+static int ar5523_create_connection(struct ar5523 *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss)
+{
+ struct ar5523_cmd_create_connection create;
+ int wlan_mode;
+
+ memset(&create, 0, sizeof(create));
+ create.connid = cpu_to_be32(2);
+ create.bssid = cpu_to_be32(0);
+ /* XXX packed or not? */
+ create.size = cpu_to_be32(sizeof(struct ar5523_cmd_rateset));
+
+ ar5523_create_rateset(ar, bss, &create.connattr.rateset, false);
+
+ wlan_mode = ar5523_get_wlan_mode(ar, bss);
+ create.connattr.wlanmode = cpu_to_be32(wlan_mode);
+
+ return ar5523_cmd_write(ar, WDCMSG_CREATE_CONNECTION, &create,
+ sizeof(create), 0);
+}
+
+static int ar5523_write_associd(struct ar5523 *ar, struct ieee80211_vif *vif)
+{
+ struct ieee80211_bss_conf *bss = &vif->bss_conf;
+ struct ar5523_cmd_set_associd associd;
+
+ memset(&associd, 0, sizeof(associd));
+ associd.defaultrateix = cpu_to_be32(0); /* XXX */
+ associd.associd = cpu_to_be32(vif->cfg.aid);
+ associd.timoffset = cpu_to_be32(0x3b); /* XXX */
+ memcpy(associd.bssid, bss->bssid, ETH_ALEN);
+ return ar5523_cmd_write(ar, WDCMSG_WRITE_ASSOCID, &associd,
+ sizeof(associd), 0);
+}
+
+static void ar5523_bss_info_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss,
+ u64 changed)
+{
+ struct ar5523 *ar = hw->priv;
+ int error;
+
+ ar5523_dbg(ar, "bss_info_changed called\n");
+ mutex_lock(&ar->mutex);
+
+ if (!(changed & BSS_CHANGED_ASSOC))
+ goto out_unlock;
+
+ if (vif->cfg.assoc) {
+ error = ar5523_create_connection(ar, vif, bss);
+ if (error) {
+ ar5523_err(ar, "could not create connection\n");
+ goto out_unlock;
+ }
+
+ error = ar5523_set_basic_rates(ar, bss);
+ if (error) {
+ ar5523_err(ar, "could not set negotiated rate set\n");
+ goto out_unlock;
+ }
+
+ error = ar5523_write_associd(ar, vif);
+ if (error) {
+ ar5523_err(ar, "could not set association\n");
+ goto out_unlock;
+ }
+
+ /* turn link LED on */
+ ar5523_set_ledsteady(ar, UATH_LED_LINK, UATH_LED_ON);
+ set_bit(AR5523_CONNECTED, &ar->flags);
+ ieee80211_queue_delayed_work(hw, &ar->stat_work, HZ);
+
+ } else {
+ cancel_delayed_work(&ar->stat_work);
+ clear_bit(AR5523_CONNECTED, &ar->flags);
+ ar5523_set_ledsteady(ar, UATH_LED_LINK, UATH_LED_OFF);
+ }
+
+out_unlock:
+ mutex_unlock(&ar->mutex);
+}
+
+#define AR5523_SUPPORTED_FILTERS (FIF_ALLMULTI | \
+ FIF_FCSFAIL | \
+ FIF_OTHER_BSS)
+
+static void ar5523_configure_filter(struct ieee80211_hw *hw,
+ unsigned int changed_flags,
+ unsigned int *total_flags,
+ u64 multicast)
+{
+ struct ar5523 *ar = hw->priv;
+ u32 filter = 0;
+
+ ar5523_dbg(ar, "configure_filter called\n");
+ mutex_lock(&ar->mutex);
+ ar5523_flush_tx(ar);
+
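+	/* mac80211 expects unsupported filter flags to be cleared here */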
+ *total_flags &= AR5523_SUPPORTED_FILTERS;
+
+	/* The filters seem strange. UATH_FILTER_RX_BCAST and
+	 * UATH_FILTER_RX_MCAST do not result in those frames being RXed.
+ * The only way I have found to get [mb]cast frames seems to be
+ * to set UATH_FILTER_RX_PROM. */
+ filter |= UATH_FILTER_RX_UCAST | UATH_FILTER_RX_MCAST |
+ UATH_FILTER_RX_BCAST | UATH_FILTER_RX_BEACON |
+ UATH_FILTER_RX_PROM;
+
+ ar5523_set_rxfilter(ar, 0, UATH_FILTER_OP_INIT);
+ ar5523_set_rxfilter(ar, filter, UATH_FILTER_OP_SET);
+
+ mutex_unlock(&ar->mutex);
+}
+
+static const struct ieee80211_ops ar5523_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
+ .start = ar5523_start,
+ .stop = ar5523_stop,
+ .tx = ar5523_tx,
+ .wake_tx_queue = ieee80211_handle_wake_tx_queue,
+ .set_rts_threshold = ar5523_set_rts_threshold,
+ .add_interface = ar5523_add_interface,
+ .remove_interface = ar5523_remove_interface,
+ .config = ar5523_hwconfig,
+ .bss_info_changed = ar5523_bss_info_changed,
+ .configure_filter = ar5523_configure_filter,
+ .flush = ar5523_flush,
+};
+
+static int ar5523_host_available(struct ar5523 *ar)
+{
+ struct ar5523_cmd_host_available setup;
+
+ /* inform target the host is available */
+ setup.sw_ver_major = cpu_to_be32(ATH_SW_VER_MAJOR);
+ setup.sw_ver_minor = cpu_to_be32(ATH_SW_VER_MINOR);
+ setup.sw_ver_patch = cpu_to_be32(ATH_SW_VER_PATCH);
+ setup.sw_ver_build = cpu_to_be32(ATH_SW_VER_BUILD);
+ return ar5523_cmd_read(ar, WDCMSG_HOST_AVAILABLE,
+ &setup, sizeof(setup), NULL, 0, 0);
+}
+
+static int ar5523_get_devstatus(struct ar5523 *ar)
+{
+ u8 macaddr[ETH_ALEN];
+ int error;
+
+ /* retrieve MAC address */
+ error = ar5523_get_status(ar, ST_MAC_ADDR, macaddr, ETH_ALEN);
+ if (error) {
+ ar5523_err(ar, "could not read MAC address\n");
+ return error;
+ }
+
+ SET_IEEE80211_PERM_ADDR(ar->hw, macaddr);
+
+ error = ar5523_get_status(ar, ST_SERIAL_NUMBER,
+ &ar->serial[0], sizeof(ar->serial));
+ if (error) {
+ ar5523_err(ar, "could not read device serial number\n");
+ return error;
+ }
+ return 0;
+}
+
+#define AR5523_SANE_RXBUFSZ 2000
+
+static int ar5523_get_max_rxsz(struct ar5523 *ar)
+{
+ int error;
+ __be32 rxsize;
+
+ /* Get max rx size */
+ error = ar5523_get_status(ar, ST_WDC_TRANSPORT_CHUNK_SIZE, &rxsize,
+ sizeof(rxsize));
+ if (error != 0) {
+ ar5523_err(ar, "could not read max RX size\n");
+ return error;
+ }
+
+ ar->rxbufsz = be32_to_cpu(rxsize);
+
+ if (!ar->rxbufsz || ar->rxbufsz > AR5523_SANE_RXBUFSZ) {
+ ar5523_err(ar, "Bad rxbufsz from device. Using %d instead\n",
+ AR5523_SANE_RXBUFSZ);
+ ar->rxbufsz = AR5523_SANE_RXBUFSZ;
+ }
+
+ ar5523_dbg(ar, "Max RX buf size: %d\n", ar->rxbufsz);
+ return 0;
+}
+
+/*
+ * This is copied from rtl818x, but we should probably move this
+ * to common code as in OpenBSD.
+ */
+static const struct ieee80211_rate ar5523_rates[] = {
+ { .bitrate = 10, .hw_value = 2, },
+ { .bitrate = 20, .hw_value = 4 },
+ { .bitrate = 55, .hw_value = 11, },
+ { .bitrate = 110, .hw_value = 22, },
+ { .bitrate = 60, .hw_value = 12, },
+ { .bitrate = 90, .hw_value = 18, },
+ { .bitrate = 120, .hw_value = 24, },
+ { .bitrate = 180, .hw_value = 36, },
+ { .bitrate = 240, .hw_value = 48, },
+ { .bitrate = 360, .hw_value = 72, },
+ { .bitrate = 480, .hw_value = 96, },
+ { .bitrate = 540, .hw_value = 108, },
+};
+
+static const struct ieee80211_channel ar5523_channels[] = {
+ { .center_freq = 2412 },
+ { .center_freq = 2417 },
+ { .center_freq = 2422 },
+ { .center_freq = 2427 },
+ { .center_freq = 2432 },
+ { .center_freq = 2437 },
+ { .center_freq = 2442 },
+ { .center_freq = 2447 },
+ { .center_freq = 2452 },
+ { .center_freq = 2457 },
+ { .center_freq = 2462 },
+ { .center_freq = 2467 },
+ { .center_freq = 2472 },
+ { .center_freq = 2484 },
+};
+
+static int ar5523_init_modes(struct ar5523 *ar)
+{
+ BUILD_BUG_ON(sizeof(ar->channels) != sizeof(ar5523_channels));
+ BUILD_BUG_ON(sizeof(ar->rates) != sizeof(ar5523_rates));
+
+ memcpy(ar->channels, ar5523_channels, sizeof(ar5523_channels));
+ memcpy(ar->rates, ar5523_rates, sizeof(ar5523_rates));
+
+ ar->band.band = NL80211_BAND_2GHZ;
+ ar->band.channels = ar->channels;
+ ar->band.n_channels = ARRAY_SIZE(ar5523_channels);
+ ar->band.bitrates = ar->rates;
+ ar->band.n_bitrates = ARRAY_SIZE(ar5523_rates);
+ ar->hw->wiphy->bands[NL80211_BAND_2GHZ] = &ar->band;
+ return 0;
+}
+
+/*
+ * Load the MIPS R4000 microcode into the device. Once the image is loaded,
+ * the device will detach itself from the bus and reattach later with a new
+ * product ID (a la ezusb).
+ */
+static int ar5523_load_firmware(struct usb_device *dev)
+{
+ struct ar5523_fwblock *txblock, *rxblock;
+ const struct firmware *fw;
+ void *fwbuf;
+ int len, offset;
+ int foolen; /* XXX(hch): handle short transfers */
+ int error = -ENXIO;
+
+ if (request_firmware(&fw, AR5523_FIRMWARE_FILE, &dev->dev)) {
+ dev_err(&dev->dev, "no firmware found: %s\n",
+ AR5523_FIRMWARE_FILE);
+ return -ENOENT;
+ }
+
+ txblock = kzalloc(sizeof(*txblock), GFP_KERNEL);
+ if (!txblock)
+ goto out;
+
+ rxblock = kmalloc(sizeof(*rxblock), GFP_KERNEL);
+ if (!rxblock)
+ goto out_free_txblock;
+
+ fwbuf = kmalloc(AR5523_MAX_FWBLOCK_SIZE, GFP_KERNEL);
+ if (!fwbuf)
+ goto out_free_rxblock;
+
+ txblock->flags = cpu_to_be32(AR5523_WRITE_BLOCK);
+ txblock->total = cpu_to_be32(fw->size);
+
+ offset = 0;
+ len = fw->size;
+ while (len > 0) {
+ int mlen = min(len, AR5523_MAX_FWBLOCK_SIZE);
+
+ txblock->remain = cpu_to_be32(len - mlen);
+ txblock->len = cpu_to_be32(mlen);
+
+ /* send firmware block meta-data */
+ error = usb_bulk_msg(dev, ar5523_cmd_tx_pipe(dev),
+ txblock, sizeof(*txblock), &foolen,
+ AR5523_CMD_TIMEOUT);
+ if (error) {
+ dev_err(&dev->dev,
+ "could not send firmware block info\n");
+ goto out_free_fwbuf;
+ }
+
+ /* send firmware block data */
+ memcpy(fwbuf, fw->data + offset, mlen);
+ error = usb_bulk_msg(dev, ar5523_data_tx_pipe(dev),
+ fwbuf, mlen, &foolen,
+ AR5523_DATA_TIMEOUT);
+ if (error) {
+ dev_err(&dev->dev,
+ "could not send firmware block data\n");
+ goto out_free_fwbuf;
+ }
+
+ /* wait for ack from firmware */
+ error = usb_bulk_msg(dev, ar5523_cmd_rx_pipe(dev),
+ rxblock, sizeof(*rxblock), &foolen,
+ AR5523_CMD_TIMEOUT);
+ if (error) {
+ dev_err(&dev->dev,
+ "could not read firmware answer\n");
+ goto out_free_fwbuf;
+ }
+
+ len -= mlen;
+ offset += mlen;
+ }
+
+ /*
+ * Set the error to -ENXIO to make sure we continue probing for
+ * a driver.
+ */
+ error = -ENXIO;
+
+ out_free_fwbuf:
+ kfree(fwbuf);
+ out_free_rxblock:
+ kfree(rxblock);
+ out_free_txblock:
+ kfree(txblock);
+ out:
+ release_firmware(fw);
+ return error;
+}
+
+static int ar5523_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ struct usb_device *dev = interface_to_usbdev(intf);
+ struct ieee80211_hw *hw;
+ struct ar5523 *ar;
+ int error = -ENOMEM;
+
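+	/* all four expected bulk endpoints must exist before we go further */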
+ static const u8 bulk_ep_addr[] = {
+ AR5523_CMD_TX_PIPE | USB_DIR_OUT,
+ AR5523_DATA_TX_PIPE | USB_DIR_OUT,
+ AR5523_CMD_RX_PIPE | USB_DIR_IN,
+ AR5523_DATA_RX_PIPE | USB_DIR_IN,
+ 0};
+
+ if (!usb_check_bulk_endpoints(intf, bulk_ep_addr)) {
+ dev_err(&dev->dev,
+ "Could not find all expected endpoints\n");
+ error = -ENODEV;
+ goto out;
+ }
+
+ /*
+ * Load firmware if the device requires it. This will return
+	 * -ENXIO on success and we'll get called back after the USB
+	 * ID changes to indicate that the firmware is present.
+ */
+ if (id->driver_info & AR5523_FLAG_PRE_FIRMWARE)
+ return ar5523_load_firmware(dev);
+
+ hw = ieee80211_alloc_hw(sizeof(*ar), &ar5523_ops);
+ if (!hw)
+ goto out;
+ SET_IEEE80211_DEV(hw, &intf->dev);
+
+ ar = hw->priv;
+ ar->hw = hw;
+ ar->dev = dev;
+ mutex_init(&ar->mutex);
+
+ INIT_DELAYED_WORK(&ar->stat_work, ar5523_stat_work);
+ timer_setup(&ar->tx_wd_timer, ar5523_tx_wd_timer, 0);
+ INIT_WORK(&ar->tx_wd_work, ar5523_tx_wd_work);
+ INIT_WORK(&ar->tx_work, ar5523_tx_work);
+ INIT_LIST_HEAD(&ar->tx_queue_pending);
+ INIT_LIST_HEAD(&ar->tx_queue_submitted);
+ spin_lock_init(&ar->tx_data_list_lock);
+ atomic_set(&ar->tx_nr_total, 0);
+ atomic_set(&ar->tx_nr_pending, 0);
+ init_waitqueue_head(&ar->tx_flush_waitq);
+
+ atomic_set(&ar->rx_data_free_cnt, 0);
+ INIT_WORK(&ar->rx_refill_work, ar5523_rx_refill_work);
+ INIT_LIST_HEAD(&ar->rx_data_free);
+ INIT_LIST_HEAD(&ar->rx_data_used);
+ spin_lock_init(&ar->rx_data_list_lock);
+
+ ar->wq = create_singlethread_workqueue("ar5523");
+ if (!ar->wq) {
+ ar5523_err(ar, "Could not create wq\n");
+ goto out_free_ar;
+ }
+
+ error = ar5523_alloc_rx_bufs(ar);
+ if (error) {
+ ar5523_err(ar, "Could not allocate rx buffers\n");
+ goto out_free_wq;
+ }
+
+ error = ar5523_alloc_rx_cmd(ar);
+ if (error) {
+ ar5523_err(ar, "Could not allocate rx command buffers\n");
+ goto out_free_rx_bufs;
+ }
+
+ error = ar5523_alloc_tx_cmd(ar);
+ if (error) {
+ ar5523_err(ar, "Could not allocate tx command buffers\n");
+ goto out_free_rx_cmd;
+ }
+
+ error = ar5523_submit_rx_cmd(ar);
+ if (error) {
+ ar5523_err(ar, "Failed to submit rx cmd\n");
+ goto out_free_tx_cmd;
+ }
+
+ /*
+ * We're now ready to send/receive firmware commands.
+ */
+ error = ar5523_host_available(ar);
+ if (error) {
+ ar5523_err(ar, "could not initialize adapter\n");
+ goto out_cancel_rx_cmd;
+ }
+
+ error = ar5523_get_max_rxsz(ar);
+ if (error) {
+ ar5523_err(ar, "could not get caps from adapter\n");
+ goto out_cancel_rx_cmd;
+ }
+
+ error = ar5523_get_devcap(ar);
+ if (error) {
+ ar5523_err(ar, "could not get caps from adapter\n");
+ goto out_cancel_rx_cmd;
+ }
+
+ error = ar5523_get_devstatus(ar);
+ if (error != 0) {
+ ar5523_err(ar, "could not get device status\n");
+ goto out_cancel_rx_cmd;
+ }
+
+ ar5523_info(ar, "MAC/BBP AR5523, RF AR%c112\n",
+ (id->driver_info & AR5523_FLAG_ABG) ? '5' : '2');
+
+ ar->vif = NULL;
+ ieee80211_hw_set(hw, HAS_RATE_CONTROL);
+ ieee80211_hw_set(hw, RX_INCLUDES_FCS);
+ ieee80211_hw_set(hw, SIGNAL_DBM);
+ hw->extra_tx_headroom = sizeof(struct ar5523_tx_desc) +
+ sizeof(struct ar5523_chunk);
+ hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
+ hw->queues = 1;
+
+ error = ar5523_init_modes(ar);
+ if (error)
+ goto out_cancel_rx_cmd;
+
+ wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
+
+ usb_set_intfdata(intf, hw);
+
+ error = ieee80211_register_hw(hw);
+ if (error) {
+ ar5523_err(ar, "could not register device\n");
+ goto out_cancel_rx_cmd;
+ }
+
+ ar5523_info(ar, "Found and initialized AR5523 device\n");
+ return 0;
+
+out_cancel_rx_cmd:
+ ar5523_cancel_rx_cmd(ar);
+out_free_tx_cmd:
+ ar5523_free_tx_cmd(ar);
+out_free_rx_cmd:
+ ar5523_free_rx_cmd(ar);
+out_free_rx_bufs:
+ ar5523_free_rx_bufs(ar);
+out_free_wq:
+ destroy_workqueue(ar->wq);
+out_free_ar:
+ ieee80211_free_hw(hw);
+out:
+ return error;
+}
+
+static void ar5523_disconnect(struct usb_interface *intf)
+{
+ struct ieee80211_hw *hw = usb_get_intfdata(intf);
+ struct ar5523 *ar = hw->priv;
+
+ ar5523_dbg(ar, "detaching\n");
+ set_bit(AR5523_USB_DISCONNECTED, &ar->flags);
+
+ ieee80211_unregister_hw(hw);
+
+ ar5523_cancel_rx_cmd(ar);
+ ar5523_free_tx_cmd(ar);
+ ar5523_free_rx_cmd(ar);
+ ar5523_free_rx_bufs(ar);
+
+ destroy_workqueue(ar->wq);
+
+ ieee80211_free_hw(hw);
+ usb_set_intfdata(intf, NULL);
+}
+
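+/*
+ * Each device appears under two product IDs: PID + 1 before the
+ * firmware is loaded and PID once it has re-enumerated with it.
+ */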
+#define AR5523_DEVICE_UG(vendor, device) \
+ { USB_DEVICE((vendor), (device)) }, \
+ { USB_DEVICE((vendor), (device) + 1), \
+ .driver_info = AR5523_FLAG_PRE_FIRMWARE }
+#define AR5523_DEVICE_UX(vendor, device) \
+ { USB_DEVICE((vendor), (device)), \
+ .driver_info = AR5523_FLAG_ABG }, \
+ { USB_DEVICE((vendor), (device) + 1), \
+ .driver_info = AR5523_FLAG_ABG|AR5523_FLAG_PRE_FIRMWARE }
+
+static const struct usb_device_id ar5523_id_table[] = {
+ AR5523_DEVICE_UG(0x168c, 0x0001), /* Atheros / AR5523 */
+ AR5523_DEVICE_UG(0x0cf3, 0x0001), /* Atheros2 / AR5523_1 */
+ AR5523_DEVICE_UG(0x0cf3, 0x0003), /* Atheros2 / AR5523_2 */
+ AR5523_DEVICE_UX(0x0cf3, 0x0005), /* Atheros2 / AR5523_3 */
+ AR5523_DEVICE_UG(0x0d8e, 0x7801), /* Conceptronic / AR5523_1 */
+ AR5523_DEVICE_UX(0x0d8e, 0x7811), /* Conceptronic / AR5523_2 */
+ AR5523_DEVICE_UX(0x2001, 0x3a00), /* Dlink / DWLAG132 */
+ AR5523_DEVICE_UG(0x2001, 0x3a02), /* Dlink / DWLG132 */
+ AR5523_DEVICE_UX(0x2001, 0x3a04), /* Dlink / DWLAG122 */
+ AR5523_DEVICE_UG(0x07d1, 0x3a07), /* D-Link / WUA-2340 rev A1 */
+ AR5523_DEVICE_UG(0x1690, 0x0712), /* Gigaset / AR5523 */
+ AR5523_DEVICE_UG(0x1690, 0x0710), /* Gigaset / SMCWUSBTG */
+ AR5523_DEVICE_UG(0x129b, 0x160b), /* Gigaset / USB stick 108
+ (CyberTAN Technology) */
+ AR5523_DEVICE_UG(0x16ab, 0x7801), /* Globalsun / AR5523_1 */
+ AR5523_DEVICE_UX(0x16ab, 0x7811), /* Globalsun / AR5523_2 */
+ AR5523_DEVICE_UG(0x0d8e, 0x7802), /* Globalsun / AR5523_3 */
+ AR5523_DEVICE_UX(0x0846, 0x4300), /* Netgear / WG111U */
+ AR5523_DEVICE_UG(0x0846, 0x4250), /* Netgear / WG111T */
+ AR5523_DEVICE_UG(0x0846, 0x5f00), /* Netgear / WPN111 */
+ AR5523_DEVICE_UG(0x083a, 0x4506), /* SMC / EZ Connect
+ SMCWUSBT-G2 */
+ AR5523_DEVICE_UG(0x157e, 0x3006), /* Umedia / AR5523_1, TEW444UBEU*/
+ AR5523_DEVICE_UX(0x157e, 0x3205), /* Umedia / AR5523_2 */
+ AR5523_DEVICE_UG(0x1435, 0x0826), /* Wistronneweb / AR5523_1 */
+ AR5523_DEVICE_UX(0x1435, 0x0828), /* Wistronneweb / AR5523_2 */
+ AR5523_DEVICE_UG(0x0cde, 0x0012), /* Zcom / AR5523 */
+ AR5523_DEVICE_UG(0x1385, 0x4250), /* Netgear3 / WG111T (2) */
+ AR5523_DEVICE_UG(0x1385, 0x5f00), /* Netgear / WPN111 */
+ AR5523_DEVICE_UG(0x1385, 0x5f02), /* Netgear / WPN111 */
+ { }
+};
+MODULE_DEVICE_TABLE(usb, ar5523_id_table);
+
+static struct usb_driver ar5523_driver = {
+ .name = "ar5523",
+ .id_table = ar5523_id_table,
+ .probe = ar5523_probe,
+ .disconnect = ar5523_disconnect,
+};
+
+module_usb_driver(ar5523_driver);
+
+MODULE_DESCRIPTION("Atheros AR5523 wireless driver");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_FIRMWARE(AR5523_FIRMWARE_FILE);
diff --git a/drivers/net/wireless/ath/ar5523/ar5523.h b/drivers/net/wireless/ath/ar5523/ar5523.h
new file mode 100644
index 000000000000..9a322a65cdb5
--- /dev/null
+++ b/drivers/net/wireless/ath/ar5523/ar5523.h
@@ -0,0 +1,151 @@
+/*
+ * Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr>
+ * Copyright (c) 2006 Sam Leffler, Errno Consulting
+ * Copyright (c) 2007 Christoph Hellwig <hch@lst.de>
+ * Copyright (c) 2008-2009 Weongyo Jeong <weongyo@freebsd.org>
+ * Copyright (c) 2012 Pontus Fuchs <pontus.fuchs@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#define AR5523_FLAG_PRE_FIRMWARE (1 << 0)
+#define AR5523_FLAG_ABG (1 << 1)
+
+#define AR5523_FIRMWARE_FILE "ar5523.bin"
+
+#define AR5523_CMD_TX_PIPE 0x01
+#define AR5523_DATA_TX_PIPE 0x02
+#define AR5523_CMD_RX_PIPE 0x81
+#define AR5523_DATA_RX_PIPE 0x82
+
+#define ar5523_cmd_tx_pipe(dev) \
+ usb_sndbulkpipe((dev), AR5523_CMD_TX_PIPE)
+#define ar5523_data_tx_pipe(dev) \
+ usb_sndbulkpipe((dev), AR5523_DATA_TX_PIPE)
+#define ar5523_cmd_rx_pipe(dev) \
+ usb_rcvbulkpipe((dev), AR5523_CMD_RX_PIPE)
+#define ar5523_data_rx_pipe(dev) \
+ usb_rcvbulkpipe((dev), AR5523_DATA_RX_PIPE)
+
+#define AR5523_DATA_TIMEOUT 10000
+#define AR5523_CMD_TIMEOUT 1000
+
+#define AR5523_TX_DATA_COUNT 8
+#define AR5523_TX_DATA_RESTART_COUNT 2
+#define AR5523_RX_DATA_COUNT 16
+#define AR5523_RX_DATA_REFILL_COUNT 8
+
+#define AR5523_CMD_ID 1
+#define AR5523_DATA_ID 2
+
+#define AR5523_TX_WD_TIMEOUT (HZ * 2)
+#define AR5523_FLUSH_TIMEOUT (HZ * 3)
+
+enum AR5523_flags {
+ AR5523_HW_UP,
+ AR5523_USB_DISCONNECTED,
+ AR5523_CONNECTED
+};
+
+struct ar5523_tx_cmd {
+ struct ar5523 *ar;
+ struct urb *urb_tx;
+ void *buf_tx;
+ void *odata;
+ int olen;
+ int flags;
+ int res;
+ struct completion done;
+};
+
+/* This struct is placed in tx_info->driver_data. It must not be larger
+ * than IEEE80211_TX_INFO_DRIVER_DATA_SIZE.
+ */
+struct ar5523_tx_data {
+ struct list_head list;
+ struct ar5523 *ar;
+ struct urb *urb;
+};
+
+struct ar5523_rx_data {
+ struct list_head list;
+ struct ar5523 *ar;
+ struct urb *urb;
+ struct sk_buff *skb;
+};
+
+struct ar5523 {
+ struct usb_device *dev;
+ struct ieee80211_hw *hw;
+
+ unsigned long flags;
+ struct mutex mutex;
+ struct workqueue_struct *wq;
+
+ struct ar5523_tx_cmd tx_cmd;
+
+ struct delayed_work stat_work;
+
+ struct timer_list tx_wd_timer;
+ struct work_struct tx_wd_work;
+ struct work_struct tx_work;
+ struct list_head tx_queue_pending;
+ struct list_head tx_queue_submitted;
+ spinlock_t tx_data_list_lock;
+ wait_queue_head_t tx_flush_waitq;
+
+ /* Queued + Submitted TX frames */
+ atomic_t tx_nr_total;
+
+ /* Submitted TX frames */
+ atomic_t tx_nr_pending;
+
+ void *rx_cmd_buf;
+ struct urb *rx_cmd_urb;
+
+ struct ar5523_rx_data rx_data[AR5523_RX_DATA_COUNT];
+ spinlock_t rx_data_list_lock;
+ struct list_head rx_data_free;
+ struct list_head rx_data_used;
+ atomic_t rx_data_free_cnt;
+
+ struct work_struct rx_refill_work;
+
+ unsigned int rxbufsz;
+ u8 serial[16];
+
+ struct ieee80211_channel channels[14];
+ struct ieee80211_rate rates[12];
+ struct ieee80211_supported_band band;
+ struct ieee80211_vif *vif;
+};
+
+/* flags for sending firmware commands */
+#define AR5523_CMD_FLAG_READ (1 << 1)
+#define AR5523_CMD_FLAG_MAGIC (1 << 2)
+
+#define ar5523_dbg(ar, format, arg...) \
+ dev_dbg(&(ar)->dev->dev, format, ## arg)
+
+/* On USB hot-unplug there can be a lot of URBs in flight and they'll all
+ * fail. Instead of dealing with them in every possible place just suppress
+ * any messages on USB disconnect.
+ */
+#define ar5523_err(ar, format, arg...) \
+do { \
+ if (!test_bit(AR5523_USB_DISCONNECTED, &ar->flags)) { \
+ dev_err(&(ar)->dev->dev, format, ## arg); \
+ } \
+} while (0)
+#define ar5523_info(ar, format, arg...) \
+ dev_info(&(ar)->dev->dev, format, ## arg)
diff --git a/drivers/net/wireless/ath/ar5523/ar5523_hw.h b/drivers/net/wireless/ath/ar5523/ar5523_hw.h
new file mode 100644
index 000000000000..0fe2c803f48f
--- /dev/null
+++ b/drivers/net/wireless/ath/ar5523/ar5523_hw.h
@@ -0,0 +1,431 @@
+/*
+ * Copyright (c) 2006 Damien Bergamini <damien.bergamini@free.fr>
+ * Copyright (c) 2006 Sam Leffler, Errno Consulting
+ * Copyright (c) 2007 Christoph Hellwig <hch@lst.de>
+ * Copyright (c) 2008-2009 Weongyo Jeong <weongyo@freebsd.org>
+ * Copyright (c) 2012 Pontus Fuchs <pontus.fuchs@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/* all fields are big endian */
+struct ar5523_fwblock {
+ __be32 flags;
+#define AR5523_WRITE_BLOCK (1 << 4)
+
+ __be32 len;
+#define AR5523_MAX_FWBLOCK_SIZE 2048
+
+ __be32 total;
+ __be32 remain;
+ __be32 rxtotal;
+ __be32 pad[123];
+} __packed;
+
+#define AR5523_MAX_RXCMDSZ 1024
+#define AR5523_MAX_TXCMDSZ 1024
+
+struct ar5523_cmd_hdr {
+ __be32 len;
+ __be32 code;
+/* NB: these are defined for rev 1.5 firmware; rev 1.6 is different */
+/* messages from Host -> Target */
+#define WDCMSG_HOST_AVAILABLE 0x01
+#define WDCMSG_BIND 0x02
+#define WDCMSG_TARGET_RESET 0x03
+#define WDCMSG_TARGET_GET_CAPABILITY 0x04
+#define WDCMSG_TARGET_SET_CONFIG 0x05
+#define WDCMSG_TARGET_GET_STATUS 0x06
+#define WDCMSG_TARGET_GET_STATS 0x07
+#define WDCMSG_TARGET_START 0x08
+#define WDCMSG_TARGET_STOP 0x09
+#define WDCMSG_TARGET_ENABLE 0x0a
+#define WDCMSG_TARGET_DISABLE 0x0b
+#define WDCMSG_CREATE_CONNECTION 0x0c
+#define WDCMSG_UPDATE_CONNECT_ATTR 0x0d
+#define WDCMSG_DELETE_CONNECT 0x0e
+#define WDCMSG_SEND 0x0f
+#define WDCMSG_FLUSH 0x10
+/* messages from Target -> Host */
+#define WDCMSG_STATS_UPDATE 0x11
+#define WDCMSG_BMISS 0x12
+#define WDCMSG_DEVICE_AVAIL 0x13
+#define WDCMSG_SEND_COMPLETE 0x14
+#define WDCMSG_DATA_AVAIL 0x15
+#define WDCMSG_SET_PWR_MODE 0x16
+#define WDCMSG_BMISS_ACK 0x17
+#define WDCMSG_SET_LED_STEADY 0x18
+#define WDCMSG_SET_LED_BLINK 0x19
+/* more messages */
+#define WDCMSG_SETUP_BEACON_DESC 0x1a
+#define WDCMSG_BEACON_INIT 0x1b
+#define WDCMSG_RESET_KEY_CACHE 0x1c
+#define WDCMSG_RESET_KEY_CACHE_ENTRY 0x1d
+#define WDCMSG_SET_KEY_CACHE_ENTRY 0x1e
+#define WDCMSG_SET_DECOMP_MASK 0x1f
+#define WDCMSG_SET_REGULATORY_DOMAIN 0x20
+#define WDCMSG_SET_LED_STATE 0x21
+#define WDCMSG_WRITE_ASSOCID 0x22
+#define WDCMSG_SET_STA_BEACON_TIMERS 0x23
+#define WDCMSG_GET_TSF 0x24
+#define WDCMSG_RESET_TSF 0x25
+#define WDCMSG_SET_ADHOC_MODE 0x26
+#define WDCMSG_SET_BASIC_RATE 0x27
+#define WDCMSG_MIB_CONTROL 0x28
+#define WDCMSG_GET_CHANNEL_DATA 0x29
+#define WDCMSG_GET_CUR_RSSI 0x2a
+#define WDCMSG_SET_ANTENNA_SWITCH 0x2b
+#define WDCMSG_USE_SHORT_SLOT_TIME 0x2f
+#define WDCMSG_SET_POWER_MODE 0x30
+#define WDCMSG_SETUP_PSPOLL_DESC 0x31
+#define WDCMSG_SET_RX_MULTICAST_FILTER 0x32
+#define WDCMSG_RX_FILTER 0x33
+#define WDCMSG_PER_CALIBRATION 0x34
+#define WDCMSG_RESET 0x35
+#define WDCMSG_DISABLE 0x36
+#define WDCMSG_PHY_DISABLE 0x37
+#define WDCMSG_SET_TX_POWER_LIMIT 0x38
+#define WDCMSG_SET_TX_QUEUE_PARAMS 0x39
+#define WDCMSG_SETUP_TX_QUEUE 0x3a
+#define WDCMSG_RELEASE_TX_QUEUE 0x3b
+#define WDCMSG_SET_DEFAULT_KEY 0x43
+
+ __u32 priv; /* driver private data,
+				   don't care about endianness */
+ __be32 magic;
+ __be32 reserved2[4];
+};
+
+struct ar5523_cmd_host_available {
+ __be32 sw_ver_major;
+ __be32 sw_ver_minor;
+ __be32 sw_ver_patch;
+ __be32 sw_ver_build;
+} __packed;
+
+#define ATH_SW_VER_MAJOR 1
+#define ATH_SW_VER_MINOR 5
+#define ATH_SW_VER_PATCH 0
+#define ATH_SW_VER_BUILD 9999
+
+struct ar5523_chunk {
+ u8 seqnum; /* sequence number for ordering */
+ u8 flags;
+#define UATH_CFLAGS_FINAL 0x01 /* final chunk of a msg */
+#define UATH_CFLAGS_RXMSG 0x02 /* chunk contains rx completion */
+#define UATH_CFLAGS_DEBUG 0x04 /* for debugging */
+ __be16 length; /* chunk size in bytes */
+ /* chunk data follows */
+} __packed;
+
+/*
+ * Message format for a WDCMSG_DATA_AVAIL message from Target to Host.
+ */
+struct ar5523_rx_desc {
+ __be32 len; /* msg length including header */
+ __be32 code; /* WDCMSG_DATA_AVAIL */
+ __be32 gennum; /* generation number */
+ __be32 status; /* start of RECEIVE_INFO */
+#define UATH_STATUS_OK 0
+#define UATH_STATUS_STOP_IN_PROGRESS 1
+#define UATH_STATUS_CRC_ERR 2
+#define UATH_STATUS_PHY_ERR 3
+#define UATH_STATUS_DECRYPT_CRC_ERR 4
+#define UATH_STATUS_DECRYPT_MIC_ERR 5
+#define UATH_STATUS_DECOMP_ERR 6
+#define UATH_STATUS_KEY_ERR 7
+#define UATH_STATUS_ERR 8
+ __be32 tstamp_low; /* low-order 32-bits of rx timestamp */
+ __be32 tstamp_high; /* high-order 32-bits of rx timestamp */
+ __be32 framelen; /* frame length */
+ __be32 rate; /* rx rate code */
+ __be32 antenna;
+ __be32 rssi;
+ __be32 channel;
+ __be32 phyerror;
+ __be32 connix; /* key table ix for bss traffic */
+ __be32 decrypterror;
+ __be32 keycachemiss;
+ __be32 pad; /* XXX? */
+} __packed;
+
+struct ar5523_tx_desc {
+ __be32 msglen;
+ u32 msgid; /* msg id (supplied by host) */
+	__be32	type;	/* opcode: WDCMSG_SEND or WDCMSG_FLUSH */
+ __be32 txqid; /* tx queue id and flags */
+#define UATH_TXQID_MASK 0x0f
+#define UATH_TXQID_MINRATE 0x10 /* use min tx rate */
+#define UATH_TXQID_FF 0x20 /* content is fast frame */
+ __be32 connid; /* tx connection id */
+#define UATH_ID_INVALID 0xffffffff /* for sending prior to connection */
+ __be32 flags; /* non-zero if response desired */
+#define UATH_TX_NOTIFY (1 << 24) /* f/w will send a UATH_NOTIF_TX */
+ __be32 buflen; /* payload length */
+} __packed;
+
+
+#define AR5523_ID_BSS 2
+#define AR5523_ID_BROADCAST 0xffffffff
+
+/* structure for command UATH_CMD_WRITE_MAC */
+struct ar5523_write_mac {
+ __be32 reg;
+ __be32 len;
+ u8 data[32];
+} __packed;
+
+struct ar5523_cmd_rateset {
+ __u8 length;
+#define AR5523_MAX_NRATES 32
+ __u8 set[AR5523_MAX_NRATES];
+};
+
+struct ar5523_cmd_set_associd { /* AR5523_WRITE_ASSOCID */
+ __be32 defaultrateix;
+ __be32 associd;
+ __be32 timoffset;
+ __be32 turboprime;
+ __u8 bssid[6];
+} __packed;
+
+/* structure for command WDCMSG_RESET */
+struct ar5523_cmd_reset {
+ __be32 flags; /* channel flags */
+#define UATH_CHAN_TURBO 0x0100
+#define UATH_CHAN_CCK 0x0200
+#define UATH_CHAN_OFDM 0x0400
+#define UATH_CHAN_2GHZ 0x1000
+#define UATH_CHAN_5GHZ 0x2000
+ __be32 freq; /* channel frequency */
+ __be32 maxrdpower;
+ __be32 cfgctl;
+ __be32 twiceantennareduction;
+ __be32 channelchange;
+ __be32 keeprccontent;
+} __packed;
+
+/* structure for command WDCMSG_SET_BASIC_RATE */
+struct ar5523_cmd_rates {
+ __be32 connid;
+ __be32 keeprccontent;
+ __be32 size;
+ struct ar5523_cmd_rateset rateset;
+} __packed;
+
+enum {
+ WLAN_MODE_NONE = 0,
+ WLAN_MODE_11b,
+ WLAN_MODE_11a,
+ WLAN_MODE_11g,
+ WLAN_MODE_11a_TURBO,
+ WLAN_MODE_11g_TURBO,
+ WLAN_MODE_11a_TURBO_PRIME,
+ WLAN_MODE_11g_TURBO_PRIME,
+ WLAN_MODE_11a_XR,
+ WLAN_MODE_11g_XR,
+};
+
+struct ar5523_cmd_connection_attr {
+ __be32 longpreambleonly;
+ struct ar5523_cmd_rateset rateset;
+ __be32 wlanmode;
+} __packed;
+
+/* structure for command AR5523_CREATE_CONNECTION */
+struct ar5523_cmd_create_connection {
+ __be32 connid;
+ __be32 bssid;
+ __be32 size;
+ struct ar5523_cmd_connection_attr connattr;
+} __packed;
+
+struct ar5523_cmd_ledsteady { /* WDCMSG_SET_LED_STEADY */
+ __be32 lednum;
+#define UATH_LED_LINK 0
+#define UATH_LED_ACTIVITY 1
+ __be32 ledmode;
+#define UATH_LED_OFF 0
+#define UATH_LED_ON 1
+} __packed;
+
+struct ar5523_cmd_ledblink { /* WDCMSG_SET_LED_BLINK */
+ __be32 lednum;
+ __be32 ledmode;
+ __be32 blinkrate;
+ __be32 slowmode;
+} __packed;
+
+struct ar5523_cmd_ledstate { /* WDCMSG_SET_LED_STATE */
+ __be32 connected;
+} __packed;
+
+struct ar5523_cmd_txq_attr {
+ __be32 priority;
+ __be32 aifs;
+ __be32 logcwmin;
+ __be32 logcwmax;
+ __be32 bursttime;
+ __be32 mode;
+ __be32 qflags;
+} __packed;
+
+struct ar5523_cmd_txq_setup { /* WDCMSG_SETUP_TX_QUEUE */
+ __be32 qid;
+ __be32 len;
+ struct ar5523_cmd_txq_attr attr;
+} __packed;
+
+struct ar5523_cmd_rx_filter { /* WDCMSG_RX_FILTER */
+ __be32 bits;
+#define UATH_FILTER_RX_UCAST 0x00000001
+#define UATH_FILTER_RX_MCAST 0x00000002
+#define UATH_FILTER_RX_BCAST 0x00000004
+#define UATH_FILTER_RX_CONTROL 0x00000008
+#define UATH_FILTER_RX_BEACON 0x00000010 /* beacon frames */
+#define UATH_FILTER_RX_PROM 0x00000020 /* promiscuous mode */
+#define UATH_FILTER_RX_PHY_ERR 0x00000040 /* phy errors */
+#define UATH_FILTER_RX_PHY_RADAR 0x00000080 /* radar phy errors */
+#define UATH_FILTER_RX_XR_POOL 0x00000400 /* XR group polls */
+#define UATH_FILTER_RX_PROBE_REQ 0x00000800
+ __be32 op;
+#define UATH_FILTER_OP_INIT 0x0
+#define UATH_FILTER_OP_SET 0x1
+#define UATH_FILTER_OP_CLEAR 0x2
+#define UATH_FILTER_OP_TEMP 0x3
+#define UATH_FILTER_OP_RESTORE 0x4
+} __packed;
+
+enum {
+	CFG_NONE,		/* Sentinel to indicate "no config" */
+ CFG_REG_DOMAIN, /* Regulatory Domain */
+ CFG_RATE_CONTROL_ENABLE,
+ CFG_DEF_XMIT_DATA_RATE, /* NB: if rate control is not enabled */
+ CFG_HW_TX_RETRIES,
+ CFG_SW_TX_RETRIES,
+ CFG_SLOW_CLOCK_ENABLE,
+ CFG_COMP_PROC,
+ CFG_USER_RTS_THRESHOLD,
+ CFG_XR2NORM_RATE_THRESHOLD,
+ CFG_XRMODE_SWITCH_COUNT,
+ CFG_PROTECTION_TYPE,
+ CFG_BURST_SEQ_THRESHOLD,
+ CFG_ABOLT,
+ CFG_IQ_LOG_COUNT_MAX,
+ CFG_MODE_CTS,
+ CFG_WME_ENABLED,
+ CFG_GPRS_CBR_PERIOD,
+ CFG_SERVICE_TYPE,
+ /* MAC Address to use. Overrides EEPROM */
+ CFG_MAC_ADDR,
+ CFG_DEBUG_EAR,
+ CFG_INIT_REGS,
+ /* An ID for use in error & debug messages */
+ CFG_DEBUG_ID,
+ CFG_COMP_WIN_SZ,
+ CFG_DIVERSITY_CTL,
+ CFG_TP_SCALE,
+ CFG_TPC_HALF_DBM5,
+ CFG_TPC_HALF_DBM2,
+ CFG_OVERRD_TX_POWER,
+ CFG_USE_32KHZ_CLOCK,
+ CFG_GMODE_PROTECTION,
+ CFG_GMODE_PROTECT_RATE_INDEX,
+ CFG_GMODE_NON_ERP_PREAMBLE,
+ CFG_WDC_TRANSPORT_CHUNK_SIZE,
+};
+
+enum {
+	/* Sentinel to indicate "no capability" */
+ CAP_NONE,
+ CAP_ALL, /* ALL capabilities */
+ CAP_TARGET_VERSION,
+ CAP_TARGET_REVISION,
+ CAP_MAC_VERSION,
+ CAP_MAC_REVISION,
+ CAP_PHY_REVISION,
+ CAP_ANALOG_5GHz_REVISION,
+ CAP_ANALOG_2GHz_REVISION,
+ /* Target supports WDC message debug features */
+ CAP_DEBUG_WDCMSG_SUPPORT,
+
+ CAP_REG_DOMAIN,
+ CAP_COUNTRY_CODE,
+ CAP_REG_CAP_BITS,
+
+ CAP_WIRELESS_MODES,
+ CAP_CHAN_SPREAD_SUPPORT,
+ CAP_SLEEP_AFTER_BEACON_BROKEN,
+ CAP_COMPRESS_SUPPORT,
+ CAP_BURST_SUPPORT,
+ CAP_FAST_FRAMES_SUPPORT,
+ CAP_CHAP_TUNING_SUPPORT,
+ CAP_TURBOG_SUPPORT,
+ CAP_TURBO_PRIME_SUPPORT,
+ CAP_DEVICE_TYPE,
+ CAP_XR_SUPPORT,
+ CAP_WME_SUPPORT,
+ CAP_TOTAL_QUEUES,
+ CAP_CONNECTION_ID_MAX, /* Should absorb CAP_KEY_CACHE_SIZE */
+
+ CAP_LOW_5GHZ_CHAN,
+ CAP_HIGH_5GHZ_CHAN,
+ CAP_LOW_2GHZ_CHAN,
+ CAP_HIGH_2GHZ_CHAN,
+
+ CAP_MIC_AES_CCM,
+ CAP_MIC_CKIP,
+ CAP_MIC_TKIP,
+ CAP_MIC_TKIP_WME,
+ CAP_CIPHER_AES_CCM,
+ CAP_CIPHER_CKIP,
+ CAP_CIPHER_TKIP,
+
+ CAP_TWICE_ANTENNAGAIN_5G,
+ CAP_TWICE_ANTENNAGAIN_2G,
+};
+
+enum {
+	ST_NONE,		/* Sentinel to indicate "no status" */
+ ST_ALL,
+ ST_SERVICE_TYPE,
+ ST_WLAN_MODE,
+ ST_FREQ,
+ ST_BAND,
+ ST_LAST_RSSI,
+ ST_PS_FRAMES_DROPPED,
+ ST_CACHED_DEF_ANT,
+ ST_COUNT_OTHER_RX_ANT,
+ ST_USE_FAST_DIVERSITY,
+ ST_MAC_ADDR,
+ ST_RX_GENERATION_NUM,
+ ST_TX_QUEUE_DEPTH,
+ ST_SERIAL_NUMBER,
+ ST_WDC_TRANSPORT_CHUNK_SIZE,
+};
+
+enum {
+ TARGET_DEVICE_AWAKE,
+ TARGET_DEVICE_SLEEP,
+ TARGET_DEVICE_PWRDN,
+ TARGET_DEVICE_PWRSAVE,
+ TARGET_DEVICE_SUSPEND,
+ TARGET_DEVICE_RESUME,
+};
+
+/* this is in net/ieee80211.h, but that conflicts with the mac80211 headers */
+#define IEEE80211_2ADDR_LEN 16
+
+#define AR5523_MIN_RXBUFSZ \
+ (((sizeof(__be32) + IEEE80211_2ADDR_LEN + \
+ sizeof(struct ar5523_rx_desc)) + 3) & ~3)
diff --git a/drivers/net/wireless/ath/ar9170/Kconfig b/drivers/net/wireless/ath/ar9170/Kconfig
deleted file mode 100644
index b99e3263ee6d..000000000000
--- a/drivers/net/wireless/ath/ar9170/Kconfig
+++ /dev/null
@@ -1,18 +0,0 @@
-config AR9170_USB
- tristate "Atheros AR9170 802.11n USB support"
- depends on USB && MAC80211 && WLAN_80211 && EXPERIMENTAL
- select FW_LOADER
- select ATH_COMMON
- help
- This is a driver for the Atheros "otus" 802.11n USB devices.
-
- These devices require additional firmware (2 files).
- For now, these files can be downloaded from here:
- http://wireless.kernel.org/en/users/Drivers/ar9170
-
- If you choose to build a module, it'll be called ar9170usb.
-
-config AR9170_LEDS
- bool
- depends on AR9170_USB && MAC80211_LEDS && (LEDS_CLASS = y || LEDS_CLASS = AR9170_USB)
- default y
diff --git a/drivers/net/wireless/ath/ar9170/Makefile b/drivers/net/wireless/ath/ar9170/Makefile
deleted file mode 100644
index 8d91c7ee3215..000000000000
--- a/drivers/net/wireless/ath/ar9170/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
-ar9170usb-objs := usb.o main.o cmd.o mac.o phy.o led.o
-
-obj-$(CONFIG_AR9170_USB) += ar9170usb.o
diff --git a/drivers/net/wireless/ath/ar9170/ar9170.h b/drivers/net/wireless/ath/ar9170/ar9170.h
deleted file mode 100644
index 17bd3eaf3e03..000000000000
--- a/drivers/net/wireless/ath/ar9170/ar9170.h
+++ /dev/null
@@ -1,239 +0,0 @@
-/*
- * Atheros AR9170 driver
- *
- * Driver specific definitions
- *
- * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; see the file COPYING. If not, see
- * http://www.gnu.org/licenses/.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * Copyright (c) 2007-2008 Atheros Communications, Inc.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-#ifndef __AR9170_H
-#define __AR9170_H
-
-#include <linux/completion.h>
-#include <linux/spinlock.h>
-#include <net/cfg80211.h>
-#include <net/mac80211.h>
-#ifdef CONFIG_AR9170_LEDS
-#include <linux/leds.h>
-#endif /* CONFIG_AR9170_LEDS */
-#include "eeprom.h"
-#include "hw.h"
-
-#include "../regd.h"
-
-#define PAYLOAD_MAX (AR9170_MAX_CMD_LEN/4 - 1)
-
-enum ar9170_bw {
- AR9170_BW_20,
- AR9170_BW_40_BELOW,
- AR9170_BW_40_ABOVE,
-
- __AR9170_NUM_BW,
-};
-
-static inline enum ar9170_bw nl80211_to_ar9170(enum nl80211_channel_type type)
-{
- switch (type) {
- case NL80211_CHAN_NO_HT:
- case NL80211_CHAN_HT20:
- return AR9170_BW_20;
- case NL80211_CHAN_HT40MINUS:
- return AR9170_BW_40_BELOW;
- case NL80211_CHAN_HT40PLUS:
- return AR9170_BW_40_ABOVE;
- default:
- BUG();
- }
-}
-
-enum ar9170_rf_init_mode {
- AR9170_RFI_NONE,
- AR9170_RFI_WARM,
- AR9170_RFI_COLD,
-};
-
-#define AR9170_MAX_RX_BUFFER_SIZE 8192
-
-#ifdef CONFIG_AR9170_LEDS
-struct ar9170;
-
-struct ar9170_led {
- struct ar9170 *ar;
- struct led_classdev l;
- char name[32];
- unsigned int toggled;
- bool registered;
-};
-
-#endif /* CONFIG_AR9170_LEDS */
-
-enum ar9170_device_state {
- AR9170_UNKNOWN_STATE,
- AR9170_STOPPED,
- AR9170_IDLE,
- AR9170_STARTED,
- AR9170_ASSOCIATED,
-};
-
-struct ar9170_rxstream_mpdu_merge {
- struct ar9170_rx_head plcp;
- bool has_plcp;
-};
-
-struct ar9170 {
- struct ieee80211_hw *hw;
- struct mutex mutex;
- enum ar9170_device_state state;
- unsigned long bad_hw_nagger;
-
- int (*open)(struct ar9170 *);
- void (*stop)(struct ar9170 *);
- int (*tx)(struct ar9170 *, struct sk_buff *, bool, unsigned int);
- int (*exec_cmd)(struct ar9170 *, enum ar9170_cmd, u32,
- void *, u32, void *);
- void (*callback_cmd)(struct ar9170 *, u32, void *);
-
- /* interface mode settings */
- struct ieee80211_vif *vif;
- u8 mac_addr[ETH_ALEN];
- u8 bssid[ETH_ALEN];
-
- /* beaconing */
- struct sk_buff *beacon;
- struct work_struct beacon_work;
-
- /* cryptographic engine */
- u64 usedkeys;
- bool rx_software_decryption;
- bool disable_offload;
-
- /* filter settings */
- struct work_struct filter_config_work;
- u64 cur_mc_hash, want_mc_hash;
- u32 cur_filter, want_filter;
- unsigned int filter_changed;
- unsigned int filter_state;
- bool sniffer_enabled;
-
- /* PHY */
- struct ieee80211_channel *channel;
- int noise[4];
-
- /* power calibration data */
- u8 power_5G_leg[4];
- u8 power_2G_cck[4];
- u8 power_2G_ofdm[4];
- u8 power_5G_ht20[8];
- u8 power_5G_ht40[8];
- u8 power_2G_ht20[8];
- u8 power_2G_ht40[8];
-
-#ifdef CONFIG_AR9170_LEDS
- struct delayed_work led_work;
- struct ar9170_led leds[AR9170_NUM_LEDS];
-#endif /* CONFIG_AR9170_LEDS */
-
- /* qos queue settings */
- spinlock_t tx_stats_lock;
- struct ieee80211_tx_queue_stats tx_stats[5];
- struct ieee80211_tx_queue_params edcf[5];
-
- spinlock_t cmdlock;
- __le32 cmdbuf[PAYLOAD_MAX + 1];
-
- /* MAC statistics */
- struct ieee80211_low_level_stats stats;
-
- /* EEPROM */
- struct ar9170_eeprom eeprom;
- struct ath_regulatory regulatory;
-
- /* global tx status for unregistered Stations. */
- struct sk_buff_head global_tx_status;
- struct sk_buff_head global_tx_status_waste;
- struct delayed_work tx_status_janitor;
-
- /* rxstream mpdu merge */
- struct ar9170_rxstream_mpdu_merge rx_mpdu;
- struct sk_buff *rx_failover;
- int rx_failover_missing;
-};
-
-struct ar9170_sta_info {
- struct sk_buff_head tx_status[__AR9170_NUM_TXQ];
-};
-
-#define IS_STARTED(a) (a->state >= AR9170_STARTED)
-#define IS_ACCEPTING_CMD(a) (a->state >= AR9170_IDLE)
-
-#define AR9170_FILTER_CHANGED_PROMISC BIT(0)
-#define AR9170_FILTER_CHANGED_MULTICAST BIT(1)
-#define AR9170_FILTER_CHANGED_FRAMEFILTER BIT(2)
-
-/* exported interface */
-void *ar9170_alloc(size_t priv_size);
-int ar9170_register(struct ar9170 *ar, struct device *pdev);
-void ar9170_rx(struct ar9170 *ar, struct sk_buff *skb);
-void ar9170_unregister(struct ar9170 *ar);
-void ar9170_handle_tx_status(struct ar9170 *ar, struct sk_buff *skb,
- bool update_statistics, u16 tx_status);
-
-/* MAC */
-int ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
-int ar9170_init_mac(struct ar9170 *ar);
-int ar9170_set_qos(struct ar9170 *ar);
-int ar9170_update_multicast(struct ar9170 *ar);
-int ar9170_update_frame_filter(struct ar9170 *ar);
-int ar9170_set_operating_mode(struct ar9170 *ar);
-int ar9170_set_beacon_timers(struct ar9170 *ar);
-int ar9170_set_hwretry_limit(struct ar9170 *ar, u32 max_retry);
-int ar9170_update_beacon(struct ar9170 *ar);
-void ar9170_new_beacon(struct work_struct *work);
-int ar9170_upload_key(struct ar9170 *ar, u8 id, const u8 *mac, u8 ktype,
- u8 keyidx, u8 *keydata, int keylen);
-int ar9170_disable_key(struct ar9170 *ar, u8 id);
-
-/* LEDs */
-#ifdef CONFIG_AR9170_LEDS
-int ar9170_register_leds(struct ar9170 *ar);
-void ar9170_unregister_leds(struct ar9170 *ar);
-#endif /* CONFIG_AR9170_LEDS */
-int ar9170_init_leds(struct ar9170 *ar);
-int ar9170_set_leds_state(struct ar9170 *ar, u32 led_state);
-
-/* PHY / RF */
-int ar9170_init_phy(struct ar9170 *ar, enum ieee80211_band band);
-int ar9170_init_rf(struct ar9170 *ar);
-int ar9170_set_channel(struct ar9170 *ar, struct ieee80211_channel *channel,
- enum ar9170_rf_init_mode rfi, enum ar9170_bw bw);
-
-#endif /* __AR9170_H */
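Note how struct ar9170 decouples the MAC/PHY core from the transport: the core only ever reaches the device through the open/stop/tx/exec_cmd/callback_cmd function pointers, which the USB layer fills in. A hypothetical sketch of that binding; the ar9170_usb_* names are illustrative stand-ins for what usb.c provides, only the pointer signatures come from the header above:

/* Hypothetical sketch: how a transport backend wires itself into
 * struct ar9170. The ar9170_usb_* functions are illustrative, not
 * verbatim driver code. */
static int ar9170_usb_open(struct ar9170 *ar);
static void ar9170_usb_stop(struct ar9170 *ar);
static int ar9170_usb_tx(struct ar9170 *ar, struct sk_buff *skb,
			 bool txstatus, unsigned int extra);
static int ar9170_usb_exec_cmd(struct ar9170 *ar, enum ar9170_cmd cmd,
			       u32 plen, void *payload,
			       u32 outlen, void *out);
static void ar9170_usb_callback_cmd(struct ar9170 *ar, u32 len, void *buf);

static void ar9170_usb_bind(struct ar9170 *ar)
{
	ar->open = ar9170_usb_open;
	ar->stop = ar9170_usb_stop;
	ar->tx = ar9170_usb_tx;
	ar->exec_cmd = ar9170_usb_exec_cmd;
	ar->callback_cmd = ar9170_usb_callback_cmd;
}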
diff --git a/drivers/net/wireless/ath/ar9170/cmd.c b/drivers/net/wireless/ath/ar9170/cmd.c
deleted file mode 100644
index f57a6200167b..000000000000
--- a/drivers/net/wireless/ath/ar9170/cmd.c
+++ /dev/null
@@ -1,129 +0,0 @@
-/*
- * Atheros AR9170 driver
- *
- * Basic HW register/memory/command access functions
- *
- * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; see the file COPYING. If not, see
- * http://www.gnu.org/licenses/.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * Copyright (c) 2007-2008 Atheros Communications, Inc.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#include "ar9170.h"
-#include "cmd.h"
-
-int ar9170_write_mem(struct ar9170 *ar, const __le32 *data, size_t len)
-{
- int err;
-
- if (unlikely(!IS_ACCEPTING_CMD(ar)))
- return 0;
-
- err = ar->exec_cmd(ar, AR9170_CMD_WMEM, len, (u8 *) data, 0, NULL);
- if (err)
- printk(KERN_DEBUG "%s: writing memory failed\n",
- wiphy_name(ar->hw->wiphy));
- return err;
-}
-
-int ar9170_write_reg(struct ar9170 *ar, const u32 reg, const u32 val)
-{
- __le32 buf[2] = {
- cpu_to_le32(reg),
- cpu_to_le32(val),
- };
- int err;
-
- if (unlikely(!IS_ACCEPTING_CMD(ar)))
- return 0;
-
- err = ar->exec_cmd(ar, AR9170_CMD_WREG, sizeof(buf),
- (u8 *) buf, 0, NULL);
- if (err)
- printk(KERN_DEBUG "%s: writing reg %#x (val %#x) failed\n",
- wiphy_name(ar->hw->wiphy), reg, val);
- return err;
-}
-
-static int ar9170_read_mreg(struct ar9170 *ar, int nregs,
- const u32 *regs, u32 *out)
-{
- int i, err;
- __le32 *offs, *res;
-
- if (unlikely(!IS_ACCEPTING_CMD(ar)))
- return 0;
-
- /* abuse "out" for the register offsets, must be same length */
- offs = (__le32 *)out;
- for (i = 0; i < nregs; i++)
- offs[i] = cpu_to_le32(regs[i]);
-
- /* also use the same buffer for the input */
- res = (__le32 *)out;
-
- err = ar->exec_cmd(ar, AR9170_CMD_RREG,
- 4 * nregs, (u8 *)offs,
- 4 * nregs, (u8 *)res);
- if (err)
- return err;
-
- /* convert result to cpu endian */
- for (i = 0; i < nregs; i++)
- out[i] = le32_to_cpu(res[i]);
-
- return 0;
-}
-
-int ar9170_read_reg(struct ar9170 *ar, u32 reg, u32 *val)
-{
- return ar9170_read_mreg(ar, 1, &reg, val);
-}
-
-int ar9170_echo_test(struct ar9170 *ar, u32 v)
-{
- __le32 echobuf = cpu_to_le32(v);
- __le32 echores;
- int err;
-
- if (unlikely(!IS_ACCEPTING_CMD(ar)))
- return -ENODEV;
-
- err = ar->exec_cmd(ar, AR9170_CMD_ECHO,
- 4, (u8 *)&echobuf,
- 4, (u8 *)&echores);
- if (err)
- return err;
-
- if (echobuf != echores)
- return -EINVAL;
-
- return 0;
-}
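Worth noting in ar9170_read_mreg() above: the caller's output array doubles as the request buffer for the little-endian register offsets, so a multi-register read needs no temporary allocation, as long as request and response are the same length. A hypothetical call site inside cmd.c; the helper name and register choice are illustrative:

/* Hypothetical helper: fetch two RX counters in one round trip,
 * relying on ar9170_read_mreg() staging the offsets in the output
 * array. */
static int ar9170_read_rx_counters(struct ar9170 *ar, u32 *total, u32 *crc)
{
	const u32 regs[2] = { AR9170_MAC_REG_RX_TOTAL,
			      AR9170_MAC_REG_RX_CRC32 };
	u32 vals[2];
	int err;

	err = ar9170_read_mreg(ar, 2, regs, vals);
	if (err)
		return err;

	*total = vals[0];
	*crc = vals[1];
	return 0;
}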
diff --git a/drivers/net/wireless/ath/ar9170/cmd.h b/drivers/net/wireless/ath/ar9170/cmd.h
deleted file mode 100644
index a4f0e50e52b4..000000000000
--- a/drivers/net/wireless/ath/ar9170/cmd.h
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Atheros AR9170 driver
- *
- * Basic HW register/memory/command access functions
- *
- * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; see the file COPYING. If not, see
- * http://www.gnu.org/licenses/.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * Copyright (c) 2007-2008 Atheros Communications, Inc.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-#ifndef __CMD_H
-#define __CMD_H
-
-#include "ar9170.h"
-
-/* basic HW access */
-int ar9170_write_mem(struct ar9170 *ar, const __le32 *data, size_t len);
-int ar9170_write_reg(struct ar9170 *ar, const u32 reg, const u32 val);
-int ar9170_read_reg(struct ar9170 *ar, u32 reg, u32 *val);
-int ar9170_echo_test(struct ar9170 *ar, u32 v);
-
-/*
- * Macros to facilitate writing multiple registers in a single
- * write-combining USB command. Note that when the first group
- * fails the whole thing will fail without any others attempted,
- * but you won't know which write in the group failed.
- */
-#define ar9170_regwrite_begin(ar) \
-do { \
- int __nreg = 0, __err = 0; \
- struct ar9170 *__ar = ar;
-
-#define ar9170_regwrite(r, v) do { \
- __ar->cmdbuf[2 * __nreg + 1] = cpu_to_le32(r); \
- __ar->cmdbuf[2 * __nreg + 2] = cpu_to_le32(v); \
- __nreg++; \
- if ((__nreg >= PAYLOAD_MAX/2)) { \
- if (IS_ACCEPTING_CMD(__ar)) \
- __err = ar->exec_cmd(__ar, AR9170_CMD_WREG, \
- 8 * __nreg, \
- (u8 *) &__ar->cmdbuf[1], \
- 0, NULL); \
- __nreg = 0; \
- if (__err) \
- goto __regwrite_out; \
- } \
-} while (0)
-
-#define ar9170_regwrite_finish() \
-__regwrite_out : \
- if (__nreg) { \
- if (IS_ACCEPTING_CMD(__ar)) \
- __err = ar->exec_cmd(__ar, AR9170_CMD_WREG, \
- 8 * __nreg, \
- (u8 *) &__ar->cmdbuf[1], \
- 0, NULL); \
- __nreg = 0; \
- }
-
-#define ar9170_regwrite_result() \
- __err; \
-} while (0);
-
-#endif /* __CMD_H */
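Because ar9170_regwrite_begin() opens a statement block that ar9170_regwrite_result() closes, the four macros must always appear together, in that order. A usage sketch mirroring the callers in mac.c below; the function name is illustrative:

/* Usage sketch for the write-combining macros. Writes are queued
 * in ar->cmdbuf and flushed in batches of PAYLOAD_MAX/2
 * register/value pairs. */
static int ar9170_example_regwrites(struct ar9170 *ar)
{
	ar9170_regwrite_begin(ar);

	ar9170_regwrite(AR9170_MAC_REG_RETRY_MAX, 0);
	ar9170_regwrite(AR9170_MAC_REG_RX_TIMEOUT, 0);

	ar9170_regwrite_finish();
	return ar9170_regwrite_result();
}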
diff --git a/drivers/net/wireless/ath/ar9170/hw.h b/drivers/net/wireless/ath/ar9170/hw.h
deleted file mode 100644
index 3293e0fb24fb..000000000000
--- a/drivers/net/wireless/ath/ar9170/hw.h
+++ /dev/null
@@ -1,422 +0,0 @@
-/*
- * Atheros AR9170 driver
- *
- * Hardware-specific definitions
- *
- * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; see the file COPYING. If not, see
- * http://www.gnu.org/licenses/.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * Copyright (c) 2007-2008 Atheros Communications, Inc.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-#ifndef __AR9170_HW_H
-#define __AR9170_HW_H
-
-#define AR9170_MAX_CMD_LEN 64
-
-enum ar9170_cmd {
- AR9170_CMD_RREG = 0x00,
- AR9170_CMD_WREG = 0x01,
- AR9170_CMD_RMEM = 0x02,
- AR9170_CMD_WMEM = 0x03,
- AR9170_CMD_BITAND = 0x04,
- AR9170_CMD_BITOR = 0x05,
- AR9170_CMD_EKEY = 0x28,
- AR9170_CMD_DKEY = 0x29,
- AR9170_CMD_FREQUENCY = 0x30,
- AR9170_CMD_RF_INIT = 0x31,
- AR9170_CMD_SYNTH = 0x32,
- AR9170_CMD_FREQ_START = 0x33,
- AR9170_CMD_ECHO = 0x80,
- AR9170_CMD_TALLY = 0x81,
- AR9170_CMD_TALLY_APD = 0x82,
- AR9170_CMD_CONFIG = 0x83,
- AR9170_CMD_RESET = 0x90,
- AR9170_CMD_DKRESET = 0x91,
- AR9170_CMD_DKTX_STATUS = 0x92,
- AR9170_CMD_FDC = 0xA0,
- AR9170_CMD_WREEPROM = 0xB0,
- AR9170_CMD_WFLASH = 0xB0,
- AR9170_CMD_FLASH_ERASE = 0xB1,
- AR9170_CMD_FLASH_PROG = 0xB2,
- AR9170_CMD_FLASH_CHKSUM = 0xB3,
- AR9170_CMD_FLASH_READ = 0xB4,
- AR9170_CMD_FW_DL_INIT = 0xB5,
- AR9170_CMD_MEM_WREEPROM = 0xBB,
-};
-
-/* endpoints */
-#define AR9170_EP_TX 1
-#define AR9170_EP_RX 2
-#define AR9170_EP_IRQ 3
-#define AR9170_EP_CMD 4
-
-#define AR9170_EEPROM_START 0x1600
-
-#define AR9170_GPIO_REG_BASE 0x1d0100
-#define AR9170_GPIO_REG_PORT_TYPE AR9170_GPIO_REG_BASE
-#define AR9170_GPIO_REG_DATA (AR9170_GPIO_REG_BASE + 4)
-#define AR9170_NUM_LEDS 2
-
-
-#define AR9170_USB_REG_BASE 0x1e1000
-#define AR9170_USB_REG_DMA_CTL (AR9170_USB_REG_BASE + 0x108)
-#define AR9170_DMA_CTL_ENABLE_TO_DEVICE 0x1
-#define AR9170_DMA_CTL_ENABLE_FROM_DEVICE 0x2
-#define AR9170_DMA_CTL_HIGH_SPEED 0x4
-#define AR9170_DMA_CTL_PACKET_MODE 0x8
-
-#define AR9170_USB_REG_MAX_AGG_UPLOAD (AR9170_USB_REG_BASE + 0x110)
-#define AR9170_USB_REG_UPLOAD_TIME_CTL (AR9170_USB_REG_BASE + 0x114)
-
-
-
-#define AR9170_MAC_REG_BASE 0x1c3000
-
-#define AR9170_MAC_REG_TSF_L (AR9170_MAC_REG_BASE + 0x514)
-#define AR9170_MAC_REG_TSF_H (AR9170_MAC_REG_BASE + 0x518)
-
-#define AR9170_MAC_REG_ATIM_WINDOW (AR9170_MAC_REG_BASE + 0x51C)
-#define AR9170_MAC_REG_BCN_PERIOD (AR9170_MAC_REG_BASE + 0x520)
-#define AR9170_MAC_REG_PRETBTT (AR9170_MAC_REG_BASE + 0x524)
-
-#define AR9170_MAC_REG_MAC_ADDR_L (AR9170_MAC_REG_BASE + 0x610)
-#define AR9170_MAC_REG_MAC_ADDR_H (AR9170_MAC_REG_BASE + 0x614)
-#define AR9170_MAC_REG_BSSID_L (AR9170_MAC_REG_BASE + 0x618)
-#define AR9170_MAC_REG_BSSID_H (AR9170_MAC_REG_BASE + 0x61c)
-
-#define AR9170_MAC_REG_GROUP_HASH_TBL_L (AR9170_MAC_REG_BASE + 0x624)
-#define AR9170_MAC_REG_GROUP_HASH_TBL_H (AR9170_MAC_REG_BASE + 0x628)
-
-#define AR9170_MAC_REG_RX_TIMEOUT (AR9170_MAC_REG_BASE + 0x62C)
-
-#define AR9170_MAC_REG_BASIC_RATE (AR9170_MAC_REG_BASE + 0x630)
-#define AR9170_MAC_REG_MANDATORY_RATE (AR9170_MAC_REG_BASE + 0x634)
-#define AR9170_MAC_REG_RTS_CTS_RATE (AR9170_MAC_REG_BASE + 0x638)
-#define AR9170_MAC_REG_BACKOFF_PROTECT (AR9170_MAC_REG_BASE + 0x63c)
-#define AR9170_MAC_REG_RX_THRESHOLD (AR9170_MAC_REG_BASE + 0x640)
-#define AR9170_MAC_REG_RX_PE_DELAY (AR9170_MAC_REG_BASE + 0x64C)
-
-#define AR9170_MAC_REG_DYNAMIC_SIFS_ACK (AR9170_MAC_REG_BASE + 0x658)
-#define AR9170_MAC_REG_SNIFFER (AR9170_MAC_REG_BASE + 0x674)
-#define AR9170_MAC_REG_SNIFFER_ENABLE_PROMISC BIT(0)
-#define AR9170_MAC_REG_SNIFFER_DEFAULTS 0x02000000
-#define AR9170_MAC_REG_ENCRYPTION (AR9170_MAC_REG_BASE + 0x678)
-#define AR9170_MAC_REG_ENCRYPTION_RX_SOFTWARE BIT(3)
-#define AR9170_MAC_REG_ENCRYPTION_DEFAULTS 0x70
-
-#define AR9170_MAC_REG_MISC_680 (AR9170_MAC_REG_BASE + 0x680)
-#define AR9170_MAC_REG_TX_UNDERRUN (AR9170_MAC_REG_BASE + 0x688)
-
-#define AR9170_MAC_REG_FRAMETYPE_FILTER (AR9170_MAC_REG_BASE + 0x68c)
-#define AR9170_MAC_REG_FTF_ASSOC_REQ BIT(0)
-#define AR9170_MAC_REG_FTF_ASSOC_RESP BIT(1)
-#define AR9170_MAC_REG_FTF_REASSOC_REQ BIT(2)
-#define AR9170_MAC_REG_FTF_REASSOC_RESP BIT(3)
-#define AR9170_MAC_REG_FTF_PRB_REQ BIT(4)
-#define AR9170_MAC_REG_FTF_PRB_RESP BIT(5)
-#define AR9170_MAC_REG_FTF_BIT6 BIT(6)
-#define AR9170_MAC_REG_FTF_BIT7 BIT(7)
-#define AR9170_MAC_REG_FTF_BEACON BIT(8)
-#define AR9170_MAC_REG_FTF_ATIM BIT(9)
-#define AR9170_MAC_REG_FTF_DEASSOC BIT(10)
-#define AR9170_MAC_REG_FTF_AUTH BIT(11)
-#define AR9170_MAC_REG_FTF_DEAUTH BIT(12)
-#define AR9170_MAC_REG_FTF_BIT13 BIT(13)
-#define AR9170_MAC_REG_FTF_BIT14 BIT(14)
-#define AR9170_MAC_REG_FTF_BIT15 BIT(15)
-#define AR9170_MAC_REG_FTF_BAR BIT(24)
-#define AR9170_MAC_REG_FTF_BIT25 BIT(25)
-#define AR9170_MAC_REG_FTF_PSPOLL BIT(26)
-#define AR9170_MAC_REG_FTF_RTS BIT(27)
-#define AR9170_MAC_REG_FTF_CTS BIT(28)
-#define AR9170_MAC_REG_FTF_ACK BIT(29)
-#define AR9170_MAC_REG_FTF_CFE BIT(30)
-#define AR9170_MAC_REG_FTF_CFE_ACK BIT(31)
-#define AR9170_MAC_REG_FTF_DEFAULTS 0x0500ffff
-#define AR9170_MAC_REG_FTF_MONITOR 0xfd00ffff
-
-#define AR9170_MAC_REG_RX_TOTAL (AR9170_MAC_REG_BASE + 0x6A0)
-#define AR9170_MAC_REG_RX_CRC32 (AR9170_MAC_REG_BASE + 0x6A4)
-#define AR9170_MAC_REG_RX_CRC16 (AR9170_MAC_REG_BASE + 0x6A8)
-#define AR9170_MAC_REG_RX_ERR_DECRYPTION_UNI (AR9170_MAC_REG_BASE + 0x6AC)
-#define AR9170_MAC_REG_RX_OVERRUN (AR9170_MAC_REG_BASE + 0x6B0)
-#define AR9170_MAC_REG_RX_ERR_DECRYPTION_MUL (AR9170_MAC_REG_BASE + 0x6BC)
-#define AR9170_MAC_REG_TX_RETRY (AR9170_MAC_REG_BASE + 0x6CC)
-#define AR9170_MAC_REG_TX_TOTAL (AR9170_MAC_REG_BASE + 0x6F4)
-
-
-#define AR9170_MAC_REG_ACK_EXTENSION (AR9170_MAC_REG_BASE + 0x690)
-#define AR9170_MAC_REG_EIFS_AND_SIFS (AR9170_MAC_REG_BASE + 0x698)
-
-#define AR9170_MAC_REG_SLOT_TIME (AR9170_MAC_REG_BASE + 0x6F0)
-
-#define AR9170_MAC_REG_POWERMANAGEMENT (AR9170_MAC_REG_BASE + 0x700)
-#define AR9170_MAC_REG_POWERMGT_IBSS 0xe0
-#define AR9170_MAC_REG_POWERMGT_AP 0xa1
-#define AR9170_MAC_REG_POWERMGT_STA 0x2
-#define AR9170_MAC_REG_POWERMGT_AP_WDS 0x3
-#define AR9170_MAC_REG_POWERMGT_DEFAULTS (0xf << 24)
-
-#define AR9170_MAC_REG_ROLL_CALL_TBL_L (AR9170_MAC_REG_BASE + 0x704)
-#define AR9170_MAC_REG_ROLL_CALL_TBL_H (AR9170_MAC_REG_BASE + 0x708)
-
-#define AR9170_MAC_REG_AC0_CW (AR9170_MAC_REG_BASE + 0xB00)
-#define AR9170_MAC_REG_AC1_CW (AR9170_MAC_REG_BASE + 0xB04)
-#define AR9170_MAC_REG_AC2_CW (AR9170_MAC_REG_BASE + 0xB08)
-#define AR9170_MAC_REG_AC3_CW (AR9170_MAC_REG_BASE + 0xB0C)
-#define AR9170_MAC_REG_AC4_CW (AR9170_MAC_REG_BASE + 0xB10)
-#define AR9170_MAC_REG_AC1_AC0_AIFS (AR9170_MAC_REG_BASE + 0xB14)
-#define AR9170_MAC_REG_AC3_AC2_AIFS (AR9170_MAC_REG_BASE + 0xB18)
-
-#define AR9170_MAC_REG_RETRY_MAX (AR9170_MAC_REG_BASE + 0xB28)
-
-#define AR9170_MAC_REG_FCS_SELECT (AR9170_MAC_REG_BASE + 0xBB0)
-#define AR9170_MAC_FCS_SWFCS 0x1
-#define AR9170_MAC_FCS_FIFO_PROT 0x4
-
-
-#define AR9170_MAC_REG_TXOP_NOT_ENOUGH_IND (AR9170_MAC_REG_BASE + 0xB30)
-
-#define AR9170_MAC_REG_AC1_AC0_TXOP (AR9170_MAC_REG_BASE + 0xB44)
-#define AR9170_MAC_REG_AC3_AC2_TXOP (AR9170_MAC_REG_BASE + 0xB48)
-
-#define AR9170_MAC_REG_AMPDU_SET (AR9170_MAC_REG_BASE + 0xba0)
-
-#define AR9170_MAC_REG_ACK_TABLE (AR9170_MAC_REG_BASE + 0xC00)
-#define AR9170_MAC_REG_AMPDU_RX_THRESH (AR9170_MAC_REG_BASE + 0xC50)
-
-#define AR9170_MAC_REG_TXRX_MPI (AR9170_MAC_REG_BASE + 0xD7C)
-#define AR9170_MAC_TXRX_MPI_TX_MPI_MASK 0x0000000f
-#define AR9170_MAC_TXRX_MPI_TX_TO_MASK 0x0000fff0
-#define AR9170_MAC_TXRX_MPI_RX_MPI_MASK 0x000f0000
-#define AR9170_MAC_TXRX_MPI_RX_TO_MASK 0xfff00000
-
-#define AR9170_MAC_REG_BCN_ADDR (AR9170_MAC_REG_BASE + 0xD84)
-#define AR9170_MAC_REG_BCN_LENGTH (AR9170_MAC_REG_BASE + 0xD88)
-#define AR9170_MAC_REG_BCN_PLCP (AR9170_MAC_REG_BASE + 0xD90)
-#define AR9170_MAC_REG_BCN_CTRL (AR9170_MAC_REG_BASE + 0xD94)
-#define AR9170_MAC_REG_BCN_HT1 (AR9170_MAC_REG_BASE + 0xDA0)
-#define AR9170_MAC_REG_BCN_HT2 (AR9170_MAC_REG_BASE + 0xDA4)
-
-
-#define AR9170_PWR_REG_BASE 0x1D4000
-
-#define AR9170_PWR_REG_CLOCK_SEL (AR9170_PWR_REG_BASE + 0x008)
-#define AR9170_PWR_CLK_AHB_40MHZ 0
-#define AR9170_PWR_CLK_AHB_20_22MHZ 1
-#define AR9170_PWR_CLK_AHB_40_44MHZ 2
-#define AR9170_PWR_CLK_AHB_80_88MHZ 3
-#define AR9170_PWR_CLK_DAC_160_INV_DLY 0x70
-
-
-/* put beacon here in memory */
-#define AR9170_BEACON_BUFFER_ADDRESS 0x117900
-
-
-struct ar9170_tx_control {
- __le16 length;
- __le16 mac_control;
- __le32 phy_control;
- u8 frame_data[0];
-} __packed;
-
-/* these are either-or */
-#define AR9170_TX_MAC_PROT_RTS 0x0001
-#define AR9170_TX_MAC_PROT_CTS 0x0002
-
-#define AR9170_TX_MAC_NO_ACK 0x0004
-/* if unset, MAC will only do SIFS space before frame */
-#define AR9170_TX_MAC_BACKOFF 0x0008
-#define AR9170_TX_MAC_BURST 0x0010
-#define AR9170_TX_MAC_AGGR 0x0020
-
-/* encryption is a two-bit field */
-#define AR9170_TX_MAC_ENCR_NONE 0x0000
-#define AR9170_TX_MAC_ENCR_RC4 0x0040
-#define AR9170_TX_MAC_ENCR_CENC 0x0080
-#define AR9170_TX_MAC_ENCR_AES 0x00c0
-
-#define AR9170_TX_MAC_MMIC 0x0100
-#define AR9170_TX_MAC_HW_DURATION 0x0200
-#define AR9170_TX_MAC_QOS_SHIFT 10
-#define AR9170_TX_MAC_QOS_MASK (3 << AR9170_TX_MAC_QOS_SHIFT)
-#define AR9170_TX_MAC_AGGR_QOS_BIT1 0x0400
-#define AR9170_TX_MAC_AGGR_QOS_BIT2 0x0800
-#define AR9170_TX_MAC_DISABLE_TXOP 0x1000
-#define AR9170_TX_MAC_TXOP_RIFS 0x2000
-#define AR9170_TX_MAC_IMM_AMPDU 0x4000
-#define AR9170_TX_MAC_RATE_PROBE 0x8000
-
-/* either-or */
-#define AR9170_TX_PHY_MOD_CCK 0x00000000
-#define AR9170_TX_PHY_MOD_OFDM 0x00000001
-#define AR9170_TX_PHY_MOD_HT 0x00000002
-
-/* depends on modulation */
-#define AR9170_TX_PHY_SHORT_PREAMBLE 0x00000004
-#define AR9170_TX_PHY_GREENFIELD 0x00000004
-
-#define AR9170_TX_PHY_BW_SHIFT 3
-#define AR9170_TX_PHY_BW_MASK (3 << AR9170_TX_PHY_BW_SHIFT)
-#define AR9170_TX_PHY_BW_20MHZ 0
-#define AR9170_TX_PHY_BW_40MHZ 2
-#define AR9170_TX_PHY_BW_40MHZ_DUP 3
-
-#define AR9170_TX_PHY_TX_HEAVY_CLIP_SHIFT 6
-#define AR9170_TX_PHY_TX_HEAVY_CLIP_MASK (7 << AR9170_TX_PHY_TX_HEAVY_CLIP_SHIFT)
-
-#define AR9170_TX_PHY_TX_PWR_SHIFT 9
-#define AR9170_TX_PHY_TX_PWR_MASK (0x3f << AR9170_TX_PHY_TX_PWR_SHIFT)
-
-/* not part of the hw-spec */
-#define AR9170_TX_PHY_QOS_SHIFT 25
-#define AR9170_TX_PHY_QOS_MASK (3 << AR9170_TX_PHY_QOS_SHIFT)
-
-#define AR9170_TX_PHY_TXCHAIN_SHIFT 15
-#define AR9170_TX_PHY_TXCHAIN_MASK (7 << AR9170_TX_PHY_TXCHAIN_SHIFT)
-#define AR9170_TX_PHY_TXCHAIN_1 1
-/* use for cck, ofdm 6/9/12/18/24 and HT if capable */
-#define AR9170_TX_PHY_TXCHAIN_2 5
-
-#define AR9170_TX_PHY_MCS_SHIFT 18
-#define AR9170_TX_PHY_MCS_MASK (0x7f << AR9170_TX_PHY_MCS_SHIFT)
-
-#define AR9170_TX_PHY_SHORT_GI 0x80000000
-
-struct ar9170_rx_head {
- u8 plcp[12];
-} __packed;
-
-struct ar9170_rx_phystatus {
- union {
- struct {
- u8 rssi_ant0, rssi_ant1, rssi_ant2,
- rssi_ant0x, rssi_ant1x, rssi_ant2x,
- rssi_combined;
- } __packed;
- u8 rssi[7];
- } __packed;
-
- u8 evm_stream0[6], evm_stream1[6];
- u8 phy_err;
-} __packed;
-
-struct ar9170_rx_macstatus {
- u8 SAidx, DAidx;
- u8 error;
- u8 status;
-} __packed;
-
-#define AR9170_ENC_ALG_NONE 0x0
-#define AR9170_ENC_ALG_WEP64 0x1
-#define AR9170_ENC_ALG_TKIP 0x2
-#define AR9170_ENC_ALG_AESCCMP 0x4
-#define AR9170_ENC_ALG_WEP128 0x5
-#define AR9170_ENC_ALG_WEP256 0x6
-#define AR9170_ENC_ALG_CENC 0x7
-
-#define AR9170_RX_ENC_SOFTWARE 0x8
-
-static inline u8 ar9170_get_decrypt_type(struct ar9170_rx_macstatus *t)
-{
- return (t->SAidx & 0xc0) >> 4 |
- (t->DAidx & 0xc0) >> 6;
-}
-
-#define AR9170_RX_STATUS_MODULATION_MASK 0x03
-#define AR9170_RX_STATUS_MODULATION_CCK 0x00
-#define AR9170_RX_STATUS_MODULATION_OFDM 0x01
-#define AR9170_RX_STATUS_MODULATION_HT 0x02
-#define AR9170_RX_STATUS_MODULATION_DUPOFDM 0x03
-
-/* depends on modulation */
-#define AR9170_RX_STATUS_SHORT_PREAMBLE 0x08
-#define AR9170_RX_STATUS_GREENFIELD 0x08
-
-#define AR9170_RX_STATUS_MPDU_MASK 0x30
-#define AR9170_RX_STATUS_MPDU_SINGLE 0x00
-#define AR9170_RX_STATUS_MPDU_FIRST 0x20
-#define AR9170_RX_STATUS_MPDU_MIDDLE 0x30
-#define AR9170_RX_STATUS_MPDU_LAST 0x10
-
-#define AR9170_RX_ERROR_RXTO 0x01
-#define AR9170_RX_ERROR_OVERRUN 0x02
-#define AR9170_RX_ERROR_DECRYPT 0x04
-#define AR9170_RX_ERROR_FCS 0x08
-#define AR9170_RX_ERROR_WRONG_RA 0x10
-#define AR9170_RX_ERROR_PLCP 0x20
-#define AR9170_RX_ERROR_MMIC 0x40
-#define AR9170_RX_ERROR_FATAL 0x80
-
-struct ar9170_cmd_tx_status {
- __le16 unkn;
- u8 dst[ETH_ALEN];
- __le32 rate;
- __le16 status;
-} __packed;
-
-#define AR9170_TX_STATUS_COMPLETE 0x00
-#define AR9170_TX_STATUS_RETRY 0x01
-#define AR9170_TX_STATUS_FAILED 0x02
-
-struct ar9170_cmd_ba_failed_count {
- __le16 failed;
- __le16 rate;
-} __packed;
-
-struct ar9170_cmd_response {
- u8 flag;
- u8 type;
-
- union {
- struct ar9170_cmd_tx_status tx_status;
- struct ar9170_cmd_ba_failed_count ba_fail_cnt;
- u8 data[0];
- };
-} __packed;
-
-/* QoS */
-
-/* mac80211 queue to HW/FW map */
-static const u8 ar9170_qos_hwmap[4] = { 3, 2, 0, 1 };
-
-/* HW/FW queue to mac80211 map */
-static const u8 ar9170_qos_mac80211map[4] = { 2, 3, 1, 0 };
-
-enum ar9170_txq {
- AR9170_TXQ_BE,
- AR9170_TXQ_BK,
- AR9170_TXQ_VI,
- AR9170_TXQ_VO,
-
- __AR9170_NUM_TXQ,
-};
-
-#endif /* __AR9170_HW_H */
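The TX descriptor's phy_control word packs modulation, bandwidth, TX chain mask and MCS into a single __le32 using the shift/mask constants defined above. A sketch of composing one by hand; the rate choice is an arbitrary example, not taken from the driver:

/* Illustrative sketch: build a phy_control word for an HT MCS 7
 * frame at 40 MHz, short GI, on two chains, using the constants
 * above. Values are examples only. */
static __le32 ar9170_example_phy_control(void)
{
	u32 phy = AR9170_TX_PHY_MOD_HT;

	phy |= AR9170_TX_PHY_BW_40MHZ << AR9170_TX_PHY_BW_SHIFT;
	phy |= AR9170_TX_PHY_TXCHAIN_2 << AR9170_TX_PHY_TXCHAIN_SHIFT;
	phy |= (7 << AR9170_TX_PHY_MCS_SHIFT) & AR9170_TX_PHY_MCS_MASK;
	phy |= AR9170_TX_PHY_SHORT_GI;

	return cpu_to_le32(phy);
}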
diff --git a/drivers/net/wireless/ath/ar9170/mac.c b/drivers/net/wireless/ath/ar9170/mac.c
deleted file mode 100644
index 43aeb69685d3..000000000000
--- a/drivers/net/wireless/ath/ar9170/mac.c
+++ /dev/null
@@ -1,475 +0,0 @@
-/*
- * Atheros AR9170 driver
- *
- * MAC programming
- *
- * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; see the file COPYING. If not, see
- * http://www.gnu.org/licenses/.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * Copyright (c) 2007-2008 Atheros Communications, Inc.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-#include "ar9170.h"
-#include "cmd.h"
-
-int ar9170_set_qos(struct ar9170 *ar)
-{
- ar9170_regwrite_begin(ar);
-
- ar9170_regwrite(AR9170_MAC_REG_AC0_CW, ar->edcf[0].cw_min |
- (ar->edcf[0].cw_max << 16));
- ar9170_regwrite(AR9170_MAC_REG_AC1_CW, ar->edcf[1].cw_min |
- (ar->edcf[1].cw_max << 16));
- ar9170_regwrite(AR9170_MAC_REG_AC2_CW, ar->edcf[2].cw_min |
- (ar->edcf[2].cw_max << 16));
- ar9170_regwrite(AR9170_MAC_REG_AC3_CW, ar->edcf[3].cw_min |
- (ar->edcf[3].cw_max << 16));
- ar9170_regwrite(AR9170_MAC_REG_AC4_CW, ar->edcf[4].cw_min |
- (ar->edcf[4].cw_max << 16));
-
- ar9170_regwrite(AR9170_MAC_REG_AC1_AC0_AIFS,
- ((ar->edcf[0].aifs * 9 + 10)) |
- ((ar->edcf[1].aifs * 9 + 10) << 12) |
- ((ar->edcf[2].aifs * 9 + 10) << 24));
- ar9170_regwrite(AR9170_MAC_REG_AC3_AC2_AIFS,
- ((ar->edcf[2].aifs * 9 + 10) >> 8) |
- ((ar->edcf[3].aifs * 9 + 10) << 4) |
- ((ar->edcf[4].aifs * 9 + 10) << 16));
-
- ar9170_regwrite(AR9170_MAC_REG_AC1_AC0_TXOP,
- ar->edcf[0].txop | ar->edcf[1].txop << 16);
- ar9170_regwrite(AR9170_MAC_REG_AC3_AC2_TXOP,
- ar->edcf[1].txop | ar->edcf[3].txop << 16);
-
- ar9170_regwrite_finish();
-
- return ar9170_regwrite_result();
-}
-
-static int ar9170_set_ampdu_density(struct ar9170 *ar, u8 mpdudensity)
-{
- u32 val;
-
- /* don't allow AMPDU density > 8us */
- if (mpdudensity > 6)
- return -EINVAL;
-
- /* Watch out! Otus uses slightly different density values. */
- val = 0x140a00 | (mpdudensity ? (mpdudensity + 1) : 0);
-
- ar9170_regwrite_begin(ar);
- ar9170_regwrite(AR9170_MAC_REG_AMPDU_SET, val);
- ar9170_regwrite_finish();
-
- return ar9170_regwrite_result();
-}
-
-int ar9170_init_mac(struct ar9170 *ar)
-{
- ar9170_regwrite_begin(ar);
-
- ar9170_regwrite(AR9170_MAC_REG_ACK_EXTENSION, 0x40);
-
- ar9170_regwrite(AR9170_MAC_REG_RETRY_MAX, 0);
-
- /* enable MMIC */
- ar9170_regwrite(AR9170_MAC_REG_SNIFFER,
- AR9170_MAC_REG_SNIFFER_DEFAULTS);
-
- ar9170_regwrite(AR9170_MAC_REG_RX_THRESHOLD, 0xc1f80);
-
- ar9170_regwrite(AR9170_MAC_REG_RX_PE_DELAY, 0x70);
- ar9170_regwrite(AR9170_MAC_REG_EIFS_AND_SIFS, 0xa144000);
- ar9170_regwrite(AR9170_MAC_REG_SLOT_TIME, 9 << 10);
-
- /* CF-END mode */
- ar9170_regwrite(0x1c3b2c, 0x19000000);
-
- /* NAV protects ACK only (in TXOP) */
- ar9170_regwrite(0x1c3b38, 0x201);
-
- /* Set Beacon PHY CTRL's TPC to 0x7, TA1=1 */
- /* OTUS set AM to 0x1 */
- ar9170_regwrite(AR9170_MAC_REG_BCN_HT1, 0x8000170);
-
- ar9170_regwrite(AR9170_MAC_REG_BACKOFF_PROTECT, 0x105);
-
- /* AGG test code*/
- /* Aggregation MAX number and timeout */
- ar9170_regwrite(0x1c3b9c, 0x10000a);
-
- ar9170_regwrite(AR9170_MAC_REG_FRAMETYPE_FILTER,
- AR9170_MAC_REG_FTF_DEFAULTS);
-
- /* Enable deaggregator, response in sniffer mode */
- ar9170_regwrite(0x1c3c40, 0x1 | 1<<30);
-
- /* rate sets */
- ar9170_regwrite(AR9170_MAC_REG_BASIC_RATE, 0x150f);
- ar9170_regwrite(AR9170_MAC_REG_MANDATORY_RATE, 0x150f);
- ar9170_regwrite(AR9170_MAC_REG_RTS_CTS_RATE, 0x10b01bb);
-
- /* MIMO response control */
- ar9170_regwrite(0x1c3694, 0x4003C1E);/* bit 26~28 otus-AM */
-
- /* switch MAC to OTUS interface */
- ar9170_regwrite(0x1c3600, 0x3);
-
- ar9170_regwrite(AR9170_MAC_REG_AMPDU_RX_THRESH, 0xffff);
-
- /* set PHY register read timeout (??) */
- ar9170_regwrite(AR9170_MAC_REG_MISC_680, 0xf00008);
-
- /* Disable Rx TimeOut, workaround for BB. */
- ar9170_regwrite(AR9170_MAC_REG_RX_TIMEOUT, 0x0);
-
- /* Set CPU clock frequency to 88/80MHz */
- ar9170_regwrite(AR9170_PWR_REG_CLOCK_SEL,
- AR9170_PWR_CLK_AHB_80_88MHZ |
- AR9170_PWR_CLK_DAC_160_INV_DLY);
-
- /* Set WLAN DMA interrupt mode: generate int per packet */
- ar9170_regwrite(AR9170_MAC_REG_TXRX_MPI, 0x110011);
-
- ar9170_regwrite(AR9170_MAC_REG_FCS_SELECT,
- AR9170_MAC_FCS_FIFO_PROT);
-
- /* Disables the CF_END frame, undocumented register */
- ar9170_regwrite(AR9170_MAC_REG_TXOP_NOT_ENOUGH_IND,
- 0x141E0F48);
-
- ar9170_regwrite_finish();
-
- return ar9170_regwrite_result();
-}
-
-static int ar9170_set_mac_reg(struct ar9170 *ar, const u32 reg, const u8 *mac)
-{
- static const u8 zero[ETH_ALEN] = { 0 };
-
- if (!mac)
- mac = zero;
-
- ar9170_regwrite_begin(ar);
-
- ar9170_regwrite(reg,
- (mac[3] << 24) | (mac[2] << 16) |
- (mac[1] << 8) | mac[0]);
-
- ar9170_regwrite(reg + 4, (mac[5] << 8) | mac[4]);
-
- ar9170_regwrite_finish();
-
- return ar9170_regwrite_result();
-}
-
-int ar9170_update_multicast(struct ar9170 *ar)
-{
- int err;
-
- ar9170_regwrite_begin(ar);
- ar9170_regwrite(AR9170_MAC_REG_GROUP_HASH_TBL_H,
- ar->want_mc_hash >> 32);
- ar9170_regwrite(AR9170_MAC_REG_GROUP_HASH_TBL_L,
- ar->want_mc_hash);
-
- ar9170_regwrite_finish();
- err = ar9170_regwrite_result();
-
- if (err)
- return err;
-
- ar->cur_mc_hash = ar->want_mc_hash;
-
- return 0;
-}
-
-int ar9170_update_frame_filter(struct ar9170 *ar)
-{
- int err;
-
- err = ar9170_write_reg(ar, AR9170_MAC_REG_FRAMETYPE_FILTER,
- ar->want_filter);
-
- if (err)
- return err;
-
- ar->cur_filter = ar->want_filter;
-
- return 0;
-}
-
-static int ar9170_set_promiscouous(struct ar9170 *ar)
-{
- u32 encr_mode, sniffer;
- int err;
-
- err = ar9170_read_reg(ar, AR9170_MAC_REG_SNIFFER, &sniffer);
- if (err)
- return err;
-
- err = ar9170_read_reg(ar, AR9170_MAC_REG_ENCRYPTION, &encr_mode);
- if (err)
- return err;
-
- if (ar->sniffer_enabled) {
- sniffer |= AR9170_MAC_REG_SNIFFER_ENABLE_PROMISC;
-
- /*
- * Rx decryption works in place.
- *
- * If we don't disable it, the hardware will render all
- * encrypted frames which are encrypted with an unknown
- * key useless.
- */
-
- encr_mode |= AR9170_MAC_REG_ENCRYPTION_RX_SOFTWARE;
- ar->sniffer_enabled = true;
- } else {
- sniffer &= ~AR9170_MAC_REG_SNIFFER_ENABLE_PROMISC;
-
- if (ar->rx_software_decryption)
- encr_mode |= AR9170_MAC_REG_ENCRYPTION_RX_SOFTWARE;
- else
- encr_mode &= ~AR9170_MAC_REG_ENCRYPTION_RX_SOFTWARE;
- }
-
- ar9170_regwrite_begin(ar);
- ar9170_regwrite(AR9170_MAC_REG_ENCRYPTION, encr_mode);
- ar9170_regwrite(AR9170_MAC_REG_SNIFFER, sniffer);
- ar9170_regwrite_finish();
-
- return ar9170_regwrite_result();
-}
-
-int ar9170_set_operating_mode(struct ar9170 *ar)
-{
- u32 pm_mode = AR9170_MAC_REG_POWERMGT_DEFAULTS;
- u8 *mac_addr, *bssid;
- int err;
-
- if (ar->vif) {
- mac_addr = ar->mac_addr;
- bssid = ar->bssid;
-
- switch (ar->vif->type) {
- case NL80211_IFTYPE_MESH_POINT:
- case NL80211_IFTYPE_ADHOC:
- pm_mode |= AR9170_MAC_REG_POWERMGT_IBSS;
- break;
- case NL80211_IFTYPE_AP:
- pm_mode |= AR9170_MAC_REG_POWERMGT_AP;
- break;
- case NL80211_IFTYPE_WDS:
- pm_mode |= AR9170_MAC_REG_POWERMGT_AP_WDS;
- break;
- case NL80211_IFTYPE_MONITOR:
- ar->sniffer_enabled = true;
- ar->rx_software_decryption = true;
- break;
- default:
- pm_mode |= AR9170_MAC_REG_POWERMGT_STA;
- break;
- }
- } else {
- mac_addr = NULL;
- bssid = NULL;
- }
-
- err = ar9170_set_mac_reg(ar, AR9170_MAC_REG_MAC_ADDR_L, mac_addr);
- if (err)
- return err;
-
- err = ar9170_set_mac_reg(ar, AR9170_MAC_REG_BSSID_L, bssid);
- if (err)
- return err;
-
- err = ar9170_set_promiscouous(ar);
- if (err)
- return err;
-
- /* set AMPDU density to 8us. */
- err = ar9170_set_ampdu_density(ar, 6);
- if (err)
- return err;
-
- ar9170_regwrite_begin(ar);
-
- ar9170_regwrite(AR9170_MAC_REG_POWERMANAGEMENT, pm_mode);
- ar9170_regwrite_finish();
-
- return ar9170_regwrite_result();
-}
-
-int ar9170_set_hwretry_limit(struct ar9170 *ar, unsigned int max_retry)
-{
- u32 tmp = min_t(u32, 0x33333, max_retry * 0x11111);
-
- return ar9170_write_reg(ar, AR9170_MAC_REG_RETRY_MAX, tmp);
-}
-
-int ar9170_set_beacon_timers(struct ar9170 *ar)
-{
- u32 v = 0;
- u32 pretbtt = 0;
-
- if (ar->vif) {
- v |= ar->vif->bss_conf.beacon_int;
-
- switch (ar->vif->type) {
- case NL80211_IFTYPE_MESH_POINT:
- case NL80211_IFTYPE_ADHOC:
- v |= BIT(25);
- break;
- case NL80211_IFTYPE_AP:
- v |= BIT(24);
- pretbtt = (ar->vif->bss_conf.beacon_int - 6) << 16;
- break;
- default:
- break;
- }
-
- v |= ar->vif->bss_conf.dtim_period << 16;
- }
-
- ar9170_regwrite_begin(ar);
-
- ar9170_regwrite(AR9170_MAC_REG_PRETBTT, pretbtt);
- ar9170_regwrite(AR9170_MAC_REG_BCN_PERIOD, v);
- ar9170_regwrite_finish();
- return ar9170_regwrite_result();
-}
-
-int ar9170_update_beacon(struct ar9170 *ar)
-{
- struct sk_buff *skb;
- __le32 *data, *old = NULL;
- u32 word;
- int i;
-
- skb = ieee80211_beacon_get(ar->hw, ar->vif);
- if (!skb)
- return -ENOMEM;
-
- data = (__le32 *)skb->data;
- if (ar->beacon)
- old = (__le32 *)ar->beacon->data;
-
- ar9170_regwrite_begin(ar);
- for (i = 0; i < DIV_ROUND_UP(skb->len, 4); i++) {
- /*
- * XXX: This accesses beyond skb data for up
- * to the last 3 bytes!!
- */
-
- if (old && (data[i] == old[i]))
- continue;
-
- word = le32_to_cpu(data[i]);
- ar9170_regwrite(AR9170_BEACON_BUFFER_ADDRESS + 4 * i, word);
- }
-
- /* XXX: use skb->cb info */
- if (ar->hw->conf.channel->band == IEEE80211_BAND_2GHZ)
- ar9170_regwrite(AR9170_MAC_REG_BCN_PLCP,
- ((skb->len + 4) << (3+16)) + 0x0400);
- else
- ar9170_regwrite(AR9170_MAC_REG_BCN_PLCP,
- ((skb->len + 4) << (3+16)) + 0x0400);
-
- ar9170_regwrite(AR9170_MAC_REG_BCN_LENGTH, skb->len + 4);
- ar9170_regwrite(AR9170_MAC_REG_BCN_ADDR, AR9170_BEACON_BUFFER_ADDRESS);
- ar9170_regwrite(AR9170_MAC_REG_BCN_CTRL, 1);
-
- ar9170_regwrite_finish();
-
- dev_kfree_skb(ar->beacon);
- ar->beacon = skb;
-
- return ar9170_regwrite_result();
-}
-
-void ar9170_new_beacon(struct work_struct *work)
-{
- struct ar9170 *ar = container_of(work, struct ar9170,
- beacon_work);
- struct sk_buff *skb;
-
- if (unlikely(!IS_STARTED(ar)))
- return ;
-
- mutex_lock(&ar->mutex);
-
- if (!ar->vif)
- goto out;
-
- ar9170_update_beacon(ar);
-
- rcu_read_lock();
- while ((skb = ieee80211_get_buffered_bc(ar->hw, ar->vif)))
- ar9170_op_tx(ar->hw, skb);
-
- rcu_read_unlock();
-
- out:
- mutex_unlock(&ar->mutex);
-}
-
-int ar9170_upload_key(struct ar9170 *ar, u8 id, const u8 *mac, u8 ktype,
- u8 keyidx, u8 *keydata, int keylen)
-{
- __le32 vals[7];
- static const u8 bcast[ETH_ALEN] =
- { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
- u8 dummy;
-
- mac = mac ? : bcast;
-
- vals[0] = cpu_to_le32((keyidx << 16) + id);
- vals[1] = cpu_to_le32(mac[1] << 24 | mac[0] << 16 | ktype);
- vals[2] = cpu_to_le32(mac[5] << 24 | mac[4] << 16 |
- mac[3] << 8 | mac[2]);
- memset(&vals[3], 0, 16);
- if (keydata)
- memcpy(&vals[3], keydata, keylen);
-
- return ar->exec_cmd(ar, AR9170_CMD_EKEY,
- sizeof(vals), (u8 *)vals,
- 1, &dummy);
-}
-
-int ar9170_disable_key(struct ar9170 *ar, u8 id)
-{
- __le32 val = cpu_to_le32(id);
- u8 dummy;
-
- return ar->exec_cmd(ar, AR9170_CMD_EKEY,
- sizeof(val), (u8 *)&val,
- 1, &dummy);
-}
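One detail worth calling out in ar9170_set_qos() above: the hardware's AIFS registers hold microseconds, so the driver converts each AIFSN with "aifs * 9 + 10", i.e. AIFSN slots of 9 us on top of a 10 us SIFS (AIFS = AIFSN * aSlotTime + aSIFSTime). A hypothetical helper making that conversion explicit:

/* Hypothetical helper spelling out the AIFS conversion used in
 * ar9170_set_qos(): a 9 us slot time plus a 10 us SIFS. */
static u32 ar9170_aifsn_to_usec(u32 aifsn)
{
	return aifsn * 9 + 10;	/* e.g. AIFSN 2 -> 28 us */
}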
diff --git a/drivers/net/wireless/ath/ar9170/main.c b/drivers/net/wireless/ath/ar9170/main.c
deleted file mode 100644
index 99df9ddae9cb..000000000000
--- a/drivers/net/wireless/ath/ar9170/main.c
+++ /dev/null
@@ -1,1984 +0,0 @@
-/*
- * Atheros AR9170 driver
- *
- * mac80211 interaction code
- *
- * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
- * Copyright 2009, Christian Lamparter <chunkeey@web.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; see the file COPYING. If not, see
- * http://www.gnu.org/licenses/.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * Copyright (c) 2007-2008 Atheros Communications, Inc.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/etherdevice.h>
-#include <net/mac80211.h>
-#include "ar9170.h"
-#include "hw.h"
-#include "cmd.h"
-
-static int modparam_nohwcrypt;
-module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
-MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
-
-#define RATE(_bitrate, _hw_rate, _txpidx, _flags) { \
- .bitrate = (_bitrate), \
- .flags = (_flags), \
- .hw_value = (_hw_rate) | (_txpidx) << 4, \
-}
-
-static struct ieee80211_rate __ar9170_ratetable[] = {
- RATE(10, 0, 0, 0),
- RATE(20, 1, 1, IEEE80211_RATE_SHORT_PREAMBLE),
- RATE(55, 2, 2, IEEE80211_RATE_SHORT_PREAMBLE),
- RATE(110, 3, 3, IEEE80211_RATE_SHORT_PREAMBLE),
- RATE(60, 0xb, 0, 0),
- RATE(90, 0xf, 0, 0),
- RATE(120, 0xa, 0, 0),
- RATE(180, 0xe, 0, 0),
- RATE(240, 0x9, 0, 0),
- RATE(360, 0xd, 1, 0),
- RATE(480, 0x8, 2, 0),
- RATE(540, 0xc, 3, 0),
-};
-#undef RATE
-
-#define ar9170_g_ratetable (__ar9170_ratetable + 0)
-#define ar9170_g_ratetable_size 12
-#define ar9170_a_ratetable (__ar9170_ratetable + 4)
-#define ar9170_a_ratetable_size 8
-
-/*
- * NB: The hw_value is used as an index into the ar9170_phy_freq_params
- * array in phy.c so that we don't have to do frequency lookups!
- */
-#define CHAN(_freq, _idx) { \
- .center_freq = (_freq), \
- .hw_value = (_idx), \
- .max_power = 18, /* XXX */ \
-}
-
-static struct ieee80211_channel ar9170_2ghz_chantable[] = {
- CHAN(2412, 0),
- CHAN(2417, 1),
- CHAN(2422, 2),
- CHAN(2427, 3),
- CHAN(2432, 4),
- CHAN(2437, 5),
- CHAN(2442, 6),
- CHAN(2447, 7),
- CHAN(2452, 8),
- CHAN(2457, 9),
- CHAN(2462, 10),
- CHAN(2467, 11),
- CHAN(2472, 12),
- CHAN(2484, 13),
-};
-
-static struct ieee80211_channel ar9170_5ghz_chantable[] = {
- CHAN(4920, 14),
- CHAN(4940, 15),
- CHAN(4960, 16),
- CHAN(4980, 17),
- CHAN(5040, 18),
- CHAN(5060, 19),
- CHAN(5080, 20),
- CHAN(5180, 21),
- CHAN(5200, 22),
- CHAN(5220, 23),
- CHAN(5240, 24),
- CHAN(5260, 25),
- CHAN(5280, 26),
- CHAN(5300, 27),
- CHAN(5320, 28),
- CHAN(5500, 29),
- CHAN(5520, 30),
- CHAN(5540, 31),
- CHAN(5560, 32),
- CHAN(5580, 33),
- CHAN(5600, 34),
- CHAN(5620, 35),
- CHAN(5640, 36),
- CHAN(5660, 37),
- CHAN(5680, 38),
- CHAN(5700, 39),
- CHAN(5745, 40),
- CHAN(5765, 41),
- CHAN(5785, 42),
- CHAN(5805, 43),
- CHAN(5825, 44),
- CHAN(5170, 45),
- CHAN(5190, 46),
- CHAN(5210, 47),
- CHAN(5230, 48),
-};
-#undef CHAN
-
-#define AR9170_HT_CAP \
-{ \
- .ht_supported = true, \
- .cap = IEEE80211_HT_CAP_MAX_AMSDU | \
- IEEE80211_HT_CAP_SM_PS | \
- IEEE80211_HT_CAP_SUP_WIDTH_20_40 | \
- IEEE80211_HT_CAP_SGI_40 | \
- IEEE80211_HT_CAP_DSSSCCK40 | \
- IEEE80211_HT_CAP_SM_PS, \
- .ampdu_factor = 3, \
- .ampdu_density = 6, \
- .mcs = { \
- .rx_mask = { 0xFF, 0xFF, 0, 0, 0, 0, 0, 0, 0, 0, }, \
- }, \
-}
-
-static struct ieee80211_supported_band ar9170_band_2GHz = {
- .channels = ar9170_2ghz_chantable,
- .n_channels = ARRAY_SIZE(ar9170_2ghz_chantable),
- .bitrates = ar9170_g_ratetable,
- .n_bitrates = ar9170_g_ratetable_size,
- .ht_cap = AR9170_HT_CAP,
-};
-
-static struct ieee80211_supported_band ar9170_band_5GHz = {
- .channels = ar9170_5ghz_chantable,
- .n_channels = ARRAY_SIZE(ar9170_5ghz_chantable),
- .bitrates = ar9170_a_ratetable,
- .n_bitrates = ar9170_a_ratetable_size,
- .ht_cap = AR9170_HT_CAP,
-};
-
-#ifdef AR9170_QUEUE_DEBUG
-/*
- * In case someone wants to work with AR9170's crazy tx_status queueing
- * techniques, this rather useful probing function might come in handy.
- *
- * NOTE: caller must hold the queue's spinlock!
- */
-
-static void ar9170_print_txheader(struct ar9170 *ar, struct sk_buff *skb)
-{
- struct ar9170_tx_control *txc = (void *) skb->data;
- struct ieee80211_hdr *hdr = (void *)txc->frame_data;
-
- printk(KERN_DEBUG "%s: => FRAME [skb:%p, queue:%d, DA:[%pM] "
- "mac_control:%04x, phy_control:%08x]\n",
- wiphy_name(ar->hw->wiphy), skb, skb_get_queue_mapping(skb),
- ieee80211_get_DA(hdr), le16_to_cpu(txc->mac_control),
- le32_to_cpu(txc->phy_control));
-}
-
-static void ar9170_dump_station_tx_status_queue(struct ar9170 *ar,
- struct sk_buff_head *queue)
-{
- struct sk_buff *skb;
- int i = 0;
-
- printk(KERN_DEBUG "---[ cut here ]---\n");
- printk(KERN_DEBUG "%s: %d entries in tx_status queue.\n",
- wiphy_name(ar->hw->wiphy), skb_queue_len(queue));
-
- skb_queue_walk(queue, skb) {
- struct ar9170_tx_control *txc = (void *) skb->data;
- struct ieee80211_hdr *hdr = (void *)txc->frame_data;
-
- printk(KERN_DEBUG "index:%d => \n", i);
- ar9170_print_txheader(ar, skb);
- }
- printk(KERN_DEBUG "---[ end ]---\n");
-}
-#endif /* AR9170_QUEUE_DEBUG */
-
-void ar9170_handle_tx_status(struct ar9170 *ar, struct sk_buff *skb,
- bool valid_status, u16 tx_status)
-{
- struct ieee80211_tx_info *txinfo;
- unsigned int retries = 0, queue = skb_get_queue_mapping(skb);
- unsigned long flags;
-
- spin_lock_irqsave(&ar->tx_stats_lock, flags);
- ar->tx_stats[queue].len--;
- if (ieee80211_queue_stopped(ar->hw, queue))
- ieee80211_wake_queue(ar->hw, queue);
- spin_unlock_irqrestore(&ar->tx_stats_lock, flags);
-
- txinfo = IEEE80211_SKB_CB(skb);
- ieee80211_tx_info_clear_status(txinfo);
-
- switch (tx_status) {
- case AR9170_TX_STATUS_RETRY:
- retries = 2;
- case AR9170_TX_STATUS_COMPLETE:
- txinfo->flags |= IEEE80211_TX_STAT_ACK;
- break;
-
- case AR9170_TX_STATUS_FAILED:
- retries = ar->hw->conf.long_frame_max_tx_count;
- break;
-
- default:
- printk(KERN_ERR "%s: invalid tx_status response (%x).\n",
- wiphy_name(ar->hw->wiphy), tx_status);
- break;
- }
-
- if (valid_status)
- txinfo->status.rates[0].count = retries + 1;
-
- skb_pull(skb, sizeof(struct ar9170_tx_control));
- ieee80211_tx_status_irqsafe(ar->hw, skb);
-}
-
-static struct sk_buff *ar9170_find_skb_in_queue(struct ar9170 *ar,
- const u8 *mac,
- const u32 queue,
- struct sk_buff_head *q)
-{
- unsigned long flags;
- struct sk_buff *skb;
-
- spin_lock_irqsave(&q->lock, flags);
- skb_queue_walk(q, skb) {
- struct ar9170_tx_control *txc = (void *) skb->data;
- struct ieee80211_hdr *hdr = (void *) txc->frame_data;
- u32 txc_queue = (le32_to_cpu(txc->phy_control) &
- AR9170_TX_PHY_QOS_MASK) >>
- AR9170_TX_PHY_QOS_SHIFT;
-
- if ((queue != txc_queue) ||
- (compare_ether_addr(ieee80211_get_DA(hdr), mac)))
- continue;
-
- __skb_unlink(skb, q);
- spin_unlock_irqrestore(&q->lock, flags);
- return skb;
- }
- spin_unlock_irqrestore(&q->lock, flags);
- return NULL;
-}
-
-static struct sk_buff *ar9170_find_queued_skb(struct ar9170 *ar, const u8 *mac,
- const u32 queue)
-{
- struct ieee80211_sta *sta;
- struct sk_buff *skb;
-
- /*
- * Unfortunately, the firmware does not tell us which (queued) frame
- * this transmission status report belongs to.
- *
- * So we have to make risky guesses - with the scarce information
- * the firmware provided (-> destination MAC, and phy_control) -
- * and hope that we picked the right one...
- */
- rcu_read_lock();
- sta = ieee80211_find_sta(ar->hw, mac);
-
- if (likely(sta)) {
- struct ar9170_sta_info *sta_priv = (void *) sta->drv_priv;
- skb = skb_dequeue(&sta_priv->tx_status[queue]);
- rcu_read_unlock();
- if (likely(skb))
- return skb;
- } else
- rcu_read_unlock();
-
- /* scan the waste queue for candidates */
- skb = ar9170_find_skb_in_queue(ar, mac, queue,
- &ar->global_tx_status_waste);
- if (!skb) {
- /* so it still _must_ be in the global list. */
- skb = ar9170_find_skb_in_queue(ar, mac, queue,
- &ar->global_tx_status);
- }
-
-#ifdef AR9170_QUEUE_DEBUG
- if (unlikely((!skb) && net_ratelimit())) {
- printk(KERN_ERR "%s: ESS:[%pM] does not have any "
- "outstanding frames in this queue (%d).\n",
- wiphy_name(ar->hw->wiphy), mac, queue);
- }
-#endif /* AR9170_QUEUE_DEBUG */
- return skb;
-}
-
-/*
- * This worker tries to keep the global tx_status queue empty.
- * So we can guarantee that incoming tx_status reports for
- * unregistered stations are always synced with the actual
- * frame which - we think - they belong to.
- */
-
-static void ar9170_tx_status_janitor(struct work_struct *work)
-{
- struct ar9170 *ar = container_of(work, struct ar9170,
- tx_status_janitor.work);
- struct sk_buff *skb;
-
- if (unlikely(!IS_STARTED(ar)))
- return ;
-
- mutex_lock(&ar->mutex);
- /* recycle the garbage back to mac80211... one by one. */
- while ((skb = skb_dequeue(&ar->global_tx_status_waste))) {
-#ifdef AR9170_QUEUE_DEBUG
- printk(KERN_DEBUG "%s: dispose queued frame =>\n",
- wiphy_name(ar->hw->wiphy));
- ar9170_print_txheader(ar, skb);
-#endif /* AR9170_QUEUE_DEBUG */
- ar9170_handle_tx_status(ar, skb, false,
- AR9170_TX_STATUS_FAILED);
- }
-
- while ((skb = skb_dequeue(&ar->global_tx_status))) {
-#ifdef AR9170_QUEUE_DEBUG
- printk(KERN_DEBUG "%s: moving frame into waste queue =>\n",
- wiphy_name(ar->hw->wiphy));
-
- ar9170_print_txheader(ar, skb);
-#endif /* AR9170_QUEUE_DEBUG */
- skb_queue_tail(&ar->global_tx_status_waste, skb);
- }
-
- /* recall the janitor in 100ms - if there's garbage in the can. */
- if (skb_queue_len(&ar->global_tx_status_waste) > 0)
- queue_delayed_work(ar->hw->workqueue, &ar->tx_status_janitor,
- msecs_to_jiffies(100));
-
- mutex_unlock(&ar->mutex);
-}
-
-static void ar9170_handle_command_response(struct ar9170 *ar,
- void *buf, u32 len)
-{
- struct ar9170_cmd_response *cmd = (void *) buf;
-
- if ((cmd->type & 0xc0) != 0xc0) {
- ar->callback_cmd(ar, len, buf);
- return;
- }
-
- /* hardware event handlers */
- switch (cmd->type) {
- case 0xc1: {
- /*
- * TX status notification:
- * bytes: 0c c1 XX YY M1 M2 M3 M4 M5 M6 R4 R3 R2 R1 S2 S1
- *
- * XX always 81
- * YY always 00
- * M1-M6 is the MAC address
- * R1-R4 is the transmit rate
- * S1-S2 is the transmit status
- */
-
- struct sk_buff *skb;
- u32 queue = (le32_to_cpu(cmd->tx_status.rate) &
- AR9170_TX_PHY_QOS_MASK) >> AR9170_TX_PHY_QOS_SHIFT;
-
- skb = ar9170_find_queued_skb(ar, cmd->tx_status.dst, queue);
- if (unlikely(!skb))
- return ;
-
- ar9170_handle_tx_status(ar, skb, true,
- le16_to_cpu(cmd->tx_status.status));
- break;
- }
-
- case 0xc0:
- /*
- * pre-TBTT event
- */
- if (ar->vif && ar->vif->type == NL80211_IFTYPE_AP)
- queue_work(ar->hw->workqueue, &ar->beacon_work);
- break;
-
- case 0xc2:
- /*
- * (IBSS) beacon send notification
- * bytes: 04 c2 XX YY B4 B3 B2 B1
- *
- * XX always 80
- * YY always 00
- * B1-B4 "should" be the number of send out beacons.
- */
- break;
-
- case 0xc3:
- /* End of Atim Window */
- break;
-
- case 0xc4:
- case 0xc5:
- /* BlockACK events */
- break;
-
- case 0xc6:
- /* Watchdog Interrupt */
- break;
-
- case 0xc9:
- /* retransmission issue / SIFS/EIFS collision ?! */
- break;
-
- default:
- printk(KERN_INFO "received unhandled event %x\n", cmd->type);
- print_hex_dump_bytes("dump:", DUMP_PREFIX_NONE, buf, len);
- break;
- }
-}
-
-static void ar9170_rx_reset_rx_mpdu(struct ar9170 *ar)
-{
- memset(&ar->rx_mpdu.plcp, 0, sizeof(struct ar9170_rx_head));
- ar->rx_mpdu.has_plcp = false;
-}
-
-static int ar9170_nag_limiter(struct ar9170 *ar)
-{
- bool print_message;
-
- /*
- * we expect all sorts of errors in promiscuous mode.
- * don't bother with it, it's OK!
- */
- if (ar->sniffer_enabled)
- return false;
-
- /*
- * only go for frequent errors! The hardware tends to
- * do some stupid thing once in a while under load, in
- * noisy environments or just for fun!
- */
- if (time_before(jiffies, ar->bad_hw_nagger) && net_ratelimit())
- print_message = true;
- else
- print_message = false;
-
- /* reset threshold for "once in a while" */
- ar->bad_hw_nagger = jiffies + HZ / 4;
- return print_message;
-}
-
-static int ar9170_rx_mac_status(struct ar9170 *ar,
- struct ar9170_rx_head *head,
- struct ar9170_rx_macstatus *mac,
- struct ieee80211_rx_status *status)
-{
- u8 error, decrypt;
-
- BUILD_BUG_ON(sizeof(struct ar9170_rx_head) != 12);
- BUILD_BUG_ON(sizeof(struct ar9170_rx_macstatus) != 4);
-
- error = mac->error;
- if (error & AR9170_RX_ERROR_MMIC) {
- status->flag |= RX_FLAG_MMIC_ERROR;
- error &= ~AR9170_RX_ERROR_MMIC;
- }
-
- if (error & AR9170_RX_ERROR_PLCP) {
- status->flag |= RX_FLAG_FAILED_PLCP_CRC;
- error &= ~AR9170_RX_ERROR_PLCP;
-
- if (!(ar->filter_state & FIF_PLCPFAIL))
- return -EINVAL;
- }
-
- if (error & AR9170_RX_ERROR_FCS) {
- status->flag |= RX_FLAG_FAILED_FCS_CRC;
- error &= ~AR9170_RX_ERROR_FCS;
-
- if (!(ar->filter_state & FIF_FCSFAIL))
- return -EINVAL;
- }
-
- decrypt = ar9170_get_decrypt_type(mac);
- if (!(decrypt & AR9170_RX_ENC_SOFTWARE) &&
- decrypt != AR9170_ENC_ALG_NONE)
- status->flag |= RX_FLAG_DECRYPTED;
-
- /* ignore wrong RA errors */
- error &= ~AR9170_RX_ERROR_WRONG_RA;
-
- if (error & AR9170_RX_ERROR_DECRYPT) {
- error &= ~AR9170_RX_ERROR_DECRYPT;
- /*
- * Rx decryption is done in place,
- * the original data is lost anyway.
- */
-
- return -EINVAL;
- }
-
- /* drop any other error frames */
- if (unlikely(error)) {
- /* TODO: update netdevice's RX dropped/errors statistics */
-
- if (ar9170_nag_limiter(ar))
- printk(KERN_DEBUG "%s: received frame with "
- "suspicious error code (%#x).\n",
- wiphy_name(ar->hw->wiphy), error);
-
- return -EINVAL;
- }
-
- status->band = ar->channel->band;
- status->freq = ar->channel->center_freq;
-
- switch (mac->status & AR9170_RX_STATUS_MODULATION_MASK) {
- case AR9170_RX_STATUS_MODULATION_CCK:
- if (mac->status & AR9170_RX_STATUS_SHORT_PREAMBLE)
- status->flag |= RX_FLAG_SHORTPRE;
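- /*
- * The CCK PLCP SIGNAL field encodes the rate in 100 kbit/s
- * units: 0x0a = 1, 0x14 = 2, 0x37 = 5.5 and 0x6e = 11 Mbit/s.
- */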
- switch (head->plcp[0]) {
- case 0x0a:
- status->rate_idx = 0;
- break;
- case 0x14:
- status->rate_idx = 1;
- break;
- case 0x37:
- status->rate_idx = 2;
- break;
- case 0x6e:
- status->rate_idx = 3;
- break;
- default:
- if (ar9170_nag_limiter(ar))
- printk(KERN_ERR "%s: invalid plcp cck rate "
- "(%x).\n", wiphy_name(ar->hw->wiphy),
- head->plcp[0]);
- return -EINVAL;
- }
- break;
-
- case AR9170_RX_STATUS_MODULATION_OFDM:
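- /*
- * The low nibble of plcp[0] is the 802.11a/g SIGNAL rate
- * field: 0xb, 0xf, 0xa, 0xe, 0x9, 0xd, 0x8 and 0xc map to
- * 6, 9, 12, 18, 24, 36, 48 and 54 Mbit/s respectively.
- */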
- switch (head->plcp[0] & 0xf) {
- case 0xb:
- status->rate_idx = 0;
- break;
- case 0xf:
- status->rate_idx = 1;
- break;
- case 0xa:
- status->rate_idx = 2;
- break;
- case 0xe:
- status->rate_idx = 3;
- break;
- case 0x9:
- status->rate_idx = 4;
- break;
- case 0xd:
- status->rate_idx = 5;
- break;
- case 0x8:
- status->rate_idx = 6;
- break;
- case 0xc:
- status->rate_idx = 7;
- break;
- default:
- if (ar9170_nag_limiter(ar))
- printk(KERN_ERR "%s: invalid plcp ofdm rate "
- "(%x).\n", wiphy_name(ar->hw->wiphy),
- head->plcp[0]);
- return -EINVAL;
- }
- if (status->band == IEEE80211_BAND_2GHZ)
- status->rate_idx += 4;
- break;
-
- case AR9170_RX_STATUS_MODULATION_HT:
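- /*
- * HT-SIG bits, as used here: plcp[3] bit 7 selects 40 MHz
- * operation, plcp[6] bit 7 the short GI, and the low seven
- * bits of plcp[6] carry the MCS index.
- */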
- if (head->plcp[3] & 0x80)
- status->flag |= RX_FLAG_40MHZ;
- if (head->plcp[6] & 0x80)
- status->flag |= RX_FLAG_SHORT_GI;
-
- status->rate_idx = clamp(head->plcp[6] & 0x7f, 0, 75);
- status->flag |= RX_FLAG_HT;
- break;
-
- case AR9170_RX_STATUS_MODULATION_DUPOFDM:
- /* XXX */
- if (ar9170_nag_limiter(ar))
- printk(KERN_ERR "%s: invalid modulation\n",
- wiphy_name(ar->hw->wiphy));
- return -EINVAL;
- }
-
- return 0;
-}
-
-static void ar9170_rx_phy_status(struct ar9170 *ar,
- struct ar9170_rx_phystatus *phy,
- struct ieee80211_rx_status *status)
-{
- int i;
-
- BUILD_BUG_ON(sizeof(struct ar9170_rx_phystatus) != 20);
-
- for (i = 0; i < 3; i++)
- if (phy->rssi[i] != 0x80)
- status->antenna |= BIT(i);
-
- /* post-process RSSI */
- for (i = 0; i < 7; i++)
- if (phy->rssi[i] & 0x80)
- phy->rssi[i] = ((phy->rssi[i] & 0x7f) + 1) & 0x7f;
-
- /* TODO: we could do something with phy_errors */
- status->signal = ar->noise[0] + phy->rssi_combined;
- status->noise = ar->noise[0];
-}
-
-static struct sk_buff *ar9170_rx_copy_data(u8 *buf, int len)
-{
- struct sk_buff *skb;
- int reserved = 0;
- struct ieee80211_hdr *hdr = (void *) buf;
-
- if (ieee80211_is_data_qos(hdr->frame_control)) {
- u8 *qc = ieee80211_get_qos_ctl(hdr);
- reserved += NET_IP_ALIGN;
-
- if (*qc & IEEE80211_QOS_CONTROL_A_MSDU_PRESENT)
- reserved += NET_IP_ALIGN;
- }
-
- if (ieee80211_has_a4(hdr->frame_control))
- reserved += NET_IP_ALIGN;
-
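- /*
- * reserve 32 bytes of headroom (e.g. for radiotap) and keep
- * only the parity of the accumulated alignment paddings, so
- * the payload behind the 802.11 header ends up IP-aligned.
- */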
- reserved = 32 + (reserved & NET_IP_ALIGN);
-
- skb = dev_alloc_skb(len + reserved);
- if (likely(skb)) {
- skb_reserve(skb, reserved);
- memcpy(skb_put(skb, len), buf, len);
- }
-
- return skb;
-}
-
-/*
- * If the frame alignment is right (or the kernel has
- * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS), and there
- * is only a single MPDU in the USB frame, then we could
- * submit to mac80211 the SKB directly. However, since
- * there may be multiple packets in one SKB in stream
- * mode, and we need to observe the proper ordering,
- * this is non-trivial.
- */
-
-static void ar9170_handle_mpdu(struct ar9170 *ar, u8 *buf, int len)
-{
- struct ar9170_rx_head *head;
- struct ar9170_rx_macstatus *mac;
- struct ar9170_rx_phystatus *phy = NULL;
- struct ieee80211_rx_status status;
- struct sk_buff *skb;
- int mpdu_len;
-
- if (unlikely(!IS_STARTED(ar) || len < (sizeof(*mac))))
- return ;
-
- /* Received MPDU */
- mpdu_len = len - sizeof(*mac);
-
- mac = (void *)(buf + mpdu_len);
- if (unlikely(mac->error & AR9170_RX_ERROR_FATAL)) {
- /* this frame is too damaged and can't be used - drop it */
-
- return ;
- }
-
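- /*
- * A stream subframe carries either a whole MPDU or a segment
- * of one: FIRST segments start with the PLCP header, LAST
- * segments end with the PHY status, SINGLE frames carry both
- * and MIDDLE segments are plain data. In every case the
- * 4 byte MAC status trailer has already been stripped above.
- */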
- switch (mac->status & AR9170_RX_STATUS_MPDU_MASK) {
- case AR9170_RX_STATUS_MPDU_FIRST:
- /* first mpdu packet has the plcp header */
- if (likely(mpdu_len >= sizeof(struct ar9170_rx_head))) {
- head = (void *) buf;
- memcpy(&ar->rx_mpdu.plcp, (void *) buf,
- sizeof(struct ar9170_rx_head));
-
- mpdu_len -= sizeof(struct ar9170_rx_head);
- buf += sizeof(struct ar9170_rx_head);
- ar->rx_mpdu.has_plcp = true;
- } else {
- if (ar9170_nag_limiter(ar))
- printk(KERN_ERR "%s: plcp info is clipped.\n",
- wiphy_name(ar->hw->wiphy));
- return ;
- }
- break;
-
- case AR9170_RX_STATUS_MPDU_LAST:
- /* last mpdu has an extra tail with phy status information */
-
- if (likely(mpdu_len >= sizeof(struct ar9170_rx_phystatus))) {
- mpdu_len -= sizeof(struct ar9170_rx_phystatus);
- phy = (void *)(buf + mpdu_len);
- } else {
- if (ar9170_nag_limiter(ar))
- printk(KERN_ERR "%s: frame tail is clipped.\n",
- wiphy_name(ar->hw->wiphy));
- return ;
- }
-
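- /* fall through - a last segment is handled like a middle one */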
- case AR9170_RX_STATUS_MPDU_MIDDLE:
- /* middle mpdus are just data */
- if (unlikely(!ar->rx_mpdu.has_plcp)) {
- if (!ar9170_nag_limiter(ar))
- return ;
-
- printk(KERN_ERR "%s: rx stream did not start "
- "with a first_mpdu frame tag.\n",
- wiphy_name(ar->hw->wiphy));
-
- return ;
- }
-
- head = &ar->rx_mpdu.plcp;
- break;
-
- case AR9170_RX_STATUS_MPDU_SINGLE:
- /* single mpdu - has plcp (head) and phy status (tail) */
- head = (void *) buf;
-
- mpdu_len -= sizeof(struct ar9170_rx_head);
- mpdu_len -= sizeof(struct ar9170_rx_phystatus);
-
- buf += sizeof(struct ar9170_rx_head);
- phy = (void *)(buf + mpdu_len);
- break;
-
- default:
- BUG_ON(1);
- break;
- }
-
- if (unlikely(mpdu_len < FCS_LEN))
- return ;
-
- memset(&status, 0, sizeof(status));
- if (unlikely(ar9170_rx_mac_status(ar, head, mac, &status)))
- return ;
-
- if (phy)
- ar9170_rx_phy_status(ar, phy, &status);
-
- skb = ar9170_rx_copy_data(buf, mpdu_len);
- if (likely(skb))
- ieee80211_rx_irqsafe(ar->hw, skb, &status);
-}
-
-void ar9170_rx(struct ar9170 *ar, struct sk_buff *skb)
-{
- unsigned int i, tlen, resplen, wlen = 0, clen = 0;
- u8 *tbuf, *respbuf;
-
- tbuf = skb->data;
- tlen = skb->len;
-
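- /*
- * The rx stream consists of subframes, each preceded by a
- * 4 byte header: a little-endian 16-bit payload length and
- * the 16-bit tag 0x4e00. Payloads are padded to a multiple
- * of four bytes.
- */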
- while (tlen >= 4) {
- clen = tbuf[1] << 8 | tbuf[0];
- wlen = ALIGN(clen, 4);
-
- /* check if this stream has a valid tag. */
- if (tbuf[2] != 0 || tbuf[3] != 0x4e) {
- /*
- * TODO: handle the highly unlikely event that the
- * corrupted stream has the TAG at the right position.
- */
-
- /* check if the frame can be repaired. */
- if (!ar->rx_failover_missing) {
- /* this is no "short read". */
- if (ar9170_nag_limiter(ar)) {
- printk(KERN_ERR "%s: missing tag!\n",
- wiphy_name(ar->hw->wiphy));
- goto err_telluser;
- } else
- goto err_silent;
- }
-
- if (ar->rx_failover_missing > tlen) {
- if (ar9170_nag_limiter(ar)) {
- printk(KERN_ERR "%s: possible multi "
- "stream corruption!\n",
- wiphy_name(ar->hw->wiphy));
- goto err_telluser;
- } else
- goto err_silent;
- }
-
- memcpy(skb_put(ar->rx_failover, tlen), tbuf, tlen);
- ar->rx_failover_missing -= tlen;
-
- if (ar->rx_failover_missing <= 0) {
- /*
- * nested ar9170_rx call!
- * termination is guaranteed, even when the
- * combined frame also has an element with
- * a bad tag.
- */
-
- ar->rx_failover_missing = 0;
- ar9170_rx(ar, ar->rx_failover);
-
- skb_reset_tail_pointer(ar->rx_failover);
- skb_trim(ar->rx_failover, 0);
- }
-
- return ;
- }
-
- /* check if stream is clipped */
- if (wlen > tlen - 4) {
- if (ar->rx_failover_missing) {
- /* TODO: handle double stream corruption. */
- if (ar9170_nag_limiter(ar)) {
- printk(KERN_ERR "%s: double rx stream "
- "corruption!\n",
- wiphy_name(ar->hw->wiphy));
- goto err_telluser;
- } else
- goto err_silent;
- }
-
- /*
- * save the incomplete data set.
- * the firmware will resend the missing bits when
- * the rx descriptor comes around again.
- */
-
- memcpy(skb_put(ar->rx_failover, tlen), tbuf, tlen);
- ar->rx_failover_missing = clen - tlen;
- return ;
- }
- resplen = clen;
- respbuf = tbuf + 4;
- tbuf += wlen + 4;
- tlen -= wlen + 4;
-
- i = 0;
-
- /* weird thing, but this is the same in the original driver */
- while (resplen > 2 && i < 12 &&
- respbuf[0] == 0xff && respbuf[1] == 0xff) {
- i += 2;
- resplen -= 2;
- respbuf += 2;
- }
-
- if (resplen < 4)
- continue;
-
- /* found the 6 * 0xffff marker? */
- if (i == 12)
- ar9170_handle_command_response(ar, respbuf, resplen);
- else
- ar9170_handle_mpdu(ar, respbuf, clen);
- }
-
- if (tlen) {
- if (net_ratelimit())
- printk(KERN_ERR "%s: %d bytes of unprocessed "
- "data left in rx stream!\n",
- wiphy_name(ar->hw->wiphy), tlen);
-
- goto err_telluser;
- }
-
- return ;
-
-err_telluser:
- printk(KERN_ERR "%s: damaged RX stream data [want:%d, "
- "data:%d, rx:%d, pending:%d ]\n",
- wiphy_name(ar->hw->wiphy), clen, wlen, tlen,
- ar->rx_failover_missing);
-
- if (ar->rx_failover_missing)
- print_hex_dump_bytes("rxbuf:", DUMP_PREFIX_OFFSET,
- ar->rx_failover->data,
- ar->rx_failover->len);
-
- print_hex_dump_bytes("stream:", DUMP_PREFIX_OFFSET,
- skb->data, skb->len);
-
- printk(KERN_ERR "%s: please check your hardware and cables, if "
- "you see this message frequently.\n",
- wiphy_name(ar->hw->wiphy));
-
-err_silent:
- if (ar->rx_failover_missing) {
- skb_reset_tail_pointer(ar->rx_failover);
- skb_trim(ar->rx_failover, 0);
- ar->rx_failover_missing = 0;
- }
-}
-
-#define AR9170_FILL_QUEUE(queue, ai_fs, cwmin, cwmax, _txop) \
-do { \
- queue.aifs = ai_fs; \
- queue.cw_min = cwmin; \
- queue.cw_max = cwmax; \
- queue.txop = _txop; \
-} while (0)
-
-static int ar9170_op_start(struct ieee80211_hw *hw)
-{
- struct ar9170 *ar = hw->priv;
- int err, i;
-
- mutex_lock(&ar->mutex);
-
- /* reinitialize queues statistics */
- memset(&ar->tx_stats, 0, sizeof(ar->tx_stats));
- for (i = 0; i < ARRAY_SIZE(ar->tx_stats); i++)
- ar->tx_stats[i].limit = 8;
-
- /* reset QoS defaults */
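- /* (802.11 EDCA defaults; txop values are in units of 32 us) */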
- AR9170_FILL_QUEUE(ar->edcf[0], 3, 15, 1023, 0); /* BEST EFFORT*/
- AR9170_FILL_QUEUE(ar->edcf[1], 7, 15, 1023, 0); /* BACKGROUND */
- AR9170_FILL_QUEUE(ar->edcf[2], 2, 7, 15, 94); /* VIDEO */
- AR9170_FILL_QUEUE(ar->edcf[3], 2, 3, 7, 47); /* VOICE */
- AR9170_FILL_QUEUE(ar->edcf[4], 2, 3, 7, 0); /* SPECIAL */
-
- ar->bad_hw_nagger = jiffies;
-
- err = ar->open(ar);
- if (err)
- goto out;
-
- err = ar9170_init_mac(ar);
- if (err)
- goto out;
-
- err = ar9170_set_qos(ar);
- if (err)
- goto out;
-
- err = ar9170_init_phy(ar, IEEE80211_BAND_2GHZ);
- if (err)
- goto out;
-
- err = ar9170_init_rf(ar);
- if (err)
- goto out;
-
- /* start DMA */
- err = ar9170_write_reg(ar, 0x1c3d30, 0x100);
- if (err)
- goto out;
-
- ar->state = AR9170_STARTED;
-
-out:
- mutex_unlock(&ar->mutex);
- return err;
-}
-
-static void ar9170_op_stop(struct ieee80211_hw *hw)
-{
- struct ar9170 *ar = hw->priv;
-
- if (IS_STARTED(ar))
- ar->state = AR9170_IDLE;
-
- flush_workqueue(ar->hw->workqueue);
-
- mutex_lock(&ar->mutex);
- cancel_delayed_work_sync(&ar->tx_status_janitor);
- cancel_work_sync(&ar->filter_config_work);
- cancel_work_sync(&ar->beacon_work);
- skb_queue_purge(&ar->global_tx_status_waste);
- skb_queue_purge(&ar->global_tx_status);
-
- if (IS_ACCEPTING_CMD(ar)) {
- ar9170_set_leds_state(ar, 0);
-
- /* stop DMA */
- ar9170_write_reg(ar, 0x1c3d30, 0);
- ar->stop(ar);
- }
-
- mutex_unlock(&ar->mutex);
-}
-
-int ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
-{
- struct ar9170 *ar = hw->priv;
- struct ieee80211_hdr *hdr;
- struct ar9170_tx_control *txc;
- struct ieee80211_tx_info *info;
- struct ieee80211_rate *rate = NULL;
- struct ieee80211_tx_rate *txrate;
- unsigned int queue = skb_get_queue_mapping(skb);
- unsigned long flags = 0;
- struct ar9170_sta_info *sta_info = NULL;
- u32 power, chains;
- u16 keytype = 0;
- u16 len, icv = 0;
- int err;
- bool tx_status;
-
- if (unlikely(!IS_STARTED(ar)))
- goto err_free;
-
- hdr = (void *)skb->data;
- info = IEEE80211_SKB_CB(skb);
- len = skb->len;
-
- spin_lock_irqsave(&ar->tx_stats_lock, flags);
- if (ar->tx_stats[queue].limit < ar->tx_stats[queue].len) {
- spin_unlock_irqrestore(&ar->tx_stats_lock, flags);
- return NETDEV_TX_OK;
- }
-
- ar->tx_stats[queue].len++;
- ar->tx_stats[queue].count++;
- if (ar->tx_stats[queue].limit == ar->tx_stats[queue].len)
- ieee80211_stop_queue(hw, queue);
-
- spin_unlock_irqrestore(&ar->tx_stats_lock, flags);
-
- txc = (void *)skb_push(skb, sizeof(*txc));
-
- tx_status = (((info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) != 0) ||
- ((info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) != 0));
-
- if (info->control.hw_key) {
- icv = info->control.hw_key->icv_len;
-
- switch (info->control.hw_key->alg) {
- case ALG_WEP:
- keytype = AR9170_TX_MAC_ENCR_RC4;
- break;
- case ALG_TKIP:
- keytype = AR9170_TX_MAC_ENCR_RC4;
- break;
- case ALG_CCMP:
- keytype = AR9170_TX_MAC_ENCR_AES;
- break;
- default:
- WARN_ON(1);
- goto err_dequeue;
- }
- }
-
- /* Length */
- txc->length = cpu_to_le16(len + icv + 4);
-
- txc->mac_control = cpu_to_le16(AR9170_TX_MAC_HW_DURATION |
- AR9170_TX_MAC_BACKOFF);
- txc->mac_control |= cpu_to_le16(ar9170_qos_hwmap[queue] <<
- AR9170_TX_MAC_QOS_SHIFT);
- txc->mac_control |= cpu_to_le16(keytype);
- txc->phy_control = cpu_to_le32(0);
-
- if (info->flags & IEEE80211_TX_CTL_NO_ACK)
- txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_NO_ACK);
-
- if (info->flags & IEEE80211_TX_CTL_AMPDU)
- txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_AGGR);
-
- txrate = &info->control.rates[0];
-
- if (txrate->flags & IEEE80211_TX_RC_USE_CTS_PROTECT)
- txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_PROT_CTS);
- else if (txrate->flags & IEEE80211_TX_RC_USE_RTS_CTS)
- txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_PROT_RTS);
-
- if (txrate->flags & IEEE80211_TX_RC_GREEN_FIELD)
- txc->phy_control |= cpu_to_le32(AR9170_TX_PHY_GREENFIELD);
-
- if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
- txc->phy_control |= cpu_to_le32(AR9170_TX_PHY_SHORT_PREAMBLE);
-
- if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
- txc->phy_control |= cpu_to_le32(AR9170_TX_PHY_BW_40MHZ);
- /* this works because 40 MHz is 2 and dup is 3 */
- if (txrate->flags & IEEE80211_TX_RC_DUP_DATA)
- txc->phy_control |= cpu_to_le32(AR9170_TX_PHY_BW_40MHZ_DUP);
-
- if (txrate->flags & IEEE80211_TX_RC_SHORT_GI)
- txc->phy_control |= cpu_to_le32(AR9170_TX_PHY_SHORT_GI);
-
- if (txrate->flags & IEEE80211_TX_RC_MCS) {
- u32 r = txrate->idx;
- u8 *txpower;
-
- r <<= AR9170_TX_PHY_MCS_SHIFT;
- if (WARN_ON(r & ~AR9170_TX_PHY_MCS_MASK))
- goto err_dequeue;
- txc->phy_control |= cpu_to_le32(r & AR9170_TX_PHY_MCS_MASK);
- txc->phy_control |= cpu_to_le32(AR9170_TX_PHY_MOD_HT);
-
- if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) {
- if (info->band == IEEE80211_BAND_5GHZ)
- txpower = ar->power_5G_ht40;
- else
- txpower = ar->power_2G_ht40;
- } else {
- if (info->band == IEEE80211_BAND_5GHZ)
- txpower = ar->power_5G_ht20;
- else
- txpower = ar->power_2G_ht20;
- }
-
- power = txpower[(txrate->idx) & 7];
- } else {
- u8 *txpower;
- u32 mod;
- u32 phyrate;
- u8 idx = txrate->idx;
-
- if (info->band != IEEE80211_BAND_2GHZ) {
- idx += 4;
- txpower = ar->power_5G_leg;
- mod = AR9170_TX_PHY_MOD_OFDM;
- } else {
- if (idx < 4) {
- txpower = ar->power_2G_cck;
- mod = AR9170_TX_PHY_MOD_CCK;
- } else {
- mod = AR9170_TX_PHY_MOD_OFDM;
- txpower = ar->power_2G_ofdm;
- }
- }
-
- rate = &__ar9170_ratetable[idx];
-
- phyrate = rate->hw_value & 0xF;
- power = txpower[(rate->hw_value & 0x30) >> 4];
- phyrate <<= AR9170_TX_PHY_MCS_SHIFT;
-
- txc->phy_control |= cpu_to_le32(mod);
- txc->phy_control |= cpu_to_le32(phyrate);
- }
-
- power <<= AR9170_TX_PHY_TX_PWR_SHIFT;
- power &= AR9170_TX_PHY_TX_PWR_MASK;
- txc->phy_control |= cpu_to_le32(power);
-
- /* set TX chains */
- if (ar->eeprom.tx_mask == 1) {
- chains = AR9170_TX_PHY_TXCHAIN_1;
- } else {
- chains = AR9170_TX_PHY_TXCHAIN_2;
-
- /* >= 36M legacy OFDM - use only one chain */
- if (rate && rate->bitrate >= 360)
- chains = AR9170_TX_PHY_TXCHAIN_1;
- }
- txc->phy_control |= cpu_to_le32(chains << AR9170_TX_PHY_TXCHAIN_SHIFT);
-
- if (tx_status) {
- txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_RATE_PROBE);
- /*
- * WARNING:
- * Putting the QoS queue bits into an unexplored territory is
- * certainly not elegant.
- *
- * In my defense: This idea provides a reasonable way to
- * smuggle valuable information to the tx_status callback.
- * Also, the idea behind this bit-abuse came straight from
- * the original driver code.
- */
-
- txc->phy_control |=
- cpu_to_le32(queue << AR9170_TX_PHY_QOS_SHIFT);
-
- if (info->control.sta) {
- sta_info = (void *) info->control.sta->drv_priv;
- skb_queue_tail(&sta_info->tx_status[queue], skb);
- } else {
- skb_queue_tail(&ar->global_tx_status, skb);
-
- queue_delayed_work(ar->hw->workqueue,
- &ar->tx_status_janitor,
- msecs_to_jiffies(100));
- }
- }
-
- err = ar->tx(ar, skb, tx_status, 0);
- if (unlikely(tx_status && err)) {
- if (info->control.sta)
- skb_unlink(skb, &sta_info->tx_status[queue]);
- else
- skb_unlink(skb, &ar->global_tx_status);
- }
-
- return NETDEV_TX_OK;
-
-err_dequeue:
- spin_lock_irqsave(&ar->tx_stats_lock, flags);
- ar->tx_stats[queue].len--;
- ar->tx_stats[queue].count--;
- spin_unlock_irqrestore(&ar->tx_stats_lock, flags);
-
-err_free:
- dev_kfree_skb(skb);
- return NETDEV_TX_OK;
-}
-
-static int ar9170_op_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
-{
- struct ar9170 *ar = hw->priv;
- int err = 0;
-
- mutex_lock(&ar->mutex);
-
- if (ar->vif) {
- err = -EBUSY;
- goto unlock;
- }
-
- ar->vif = conf->vif;
- memcpy(ar->mac_addr, conf->mac_addr, ETH_ALEN);
-
- if (modparam_nohwcrypt || (ar->vif->type != NL80211_IFTYPE_STATION)) {
- ar->rx_software_decryption = true;
- ar->disable_offload = true;
- }
-
- ar->cur_filter = 0;
- ar->want_filter = AR9170_MAC_REG_FTF_DEFAULTS;
- err = ar9170_update_frame_filter(ar);
- if (err)
- goto unlock;
-
- err = ar9170_set_operating_mode(ar);
-
-unlock:
- mutex_unlock(&ar->mutex);
- return err;
-}
-
-static void ar9170_op_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
-{
- struct ar9170 *ar = hw->priv;
-
- mutex_lock(&ar->mutex);
- ar->vif = NULL;
- ar->want_filter = 0;
- ar9170_update_frame_filter(ar);
- ar9170_set_beacon_timers(ar);
- dev_kfree_skb(ar->beacon);
- ar->beacon = NULL;
- ar->sniffer_enabled = false;
- ar->rx_software_decryption = false;
- ar9170_set_operating_mode(ar);
- mutex_unlock(&ar->mutex);
-}
-
-static int ar9170_op_config(struct ieee80211_hw *hw, u32 changed)
-{
- struct ar9170 *ar = hw->priv;
- int err = 0;
-
- mutex_lock(&ar->mutex);
-
- if (changed & IEEE80211_CONF_CHANGE_RADIO_ENABLED) {
- /* TODO */
- err = 0;
- }
-
- if (changed & IEEE80211_CONF_CHANGE_LISTEN_INTERVAL) {
- /* TODO */
- err = 0;
- }
-
- if (changed & IEEE80211_CONF_CHANGE_PS) {
- /* TODO */
- err = 0;
- }
-
- if (changed & IEEE80211_CONF_CHANGE_POWER) {
- /* TODO */
- err = 0;
- }
-
- if (changed & IEEE80211_CONF_CHANGE_RETRY_LIMITS) {
- /*
- * is it long_frame_max_tx_count or short_frame_max_tx_count?
- */
-
- err = ar9170_set_hwretry_limit(ar,
- ar->hw->conf.long_frame_max_tx_count);
- if (err)
- goto out;
- }
-
- if (changed & BSS_CHANGED_BEACON_INT) {
- err = ar9170_set_beacon_timers(ar);
- if (err)
- goto out;
- }
-
- if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
- err = ar9170_set_channel(ar, hw->conf.channel,
- AR9170_RFI_NONE,
- nl80211_to_ar9170(hw->conf.channel_type));
- if (err)
- goto out;
- /* adjust slot time for 5 GHz */
- if (hw->conf.channel->band == IEEE80211_BAND_5GHZ)
- err = ar9170_write_reg(ar, AR9170_MAC_REG_SLOT_TIME,
- 9 << 10);
- }
-
-out:
- mutex_unlock(&ar->mutex);
- return err;
-}
-
-static void ar9170_set_filters(struct work_struct *work)
-{
- struct ar9170 *ar = container_of(work, struct ar9170,
- filter_config_work);
- int err;
-
- if (unlikely(!IS_STARTED(ar)))
- return ;
-
- mutex_lock(&ar->mutex);
- if (ar->filter_changed & AR9170_FILTER_CHANGED_PROMISC) {
- err = ar9170_set_operating_mode(ar);
- if (err)
- goto unlock;
- }
-
- if (ar->filter_changed & AR9170_FILTER_CHANGED_MULTICAST) {
- err = ar9170_update_multicast(ar);
- if (err)
- goto unlock;
- }
-
- if (ar->filter_changed & AR9170_FILTER_CHANGED_FRAMEFILTER)
- err = ar9170_update_frame_filter(ar);
-
-unlock:
- mutex_unlock(&ar->mutex);
-}
-
-static void ar9170_op_configure_filter(struct ieee80211_hw *hw,
- unsigned int changed_flags,
- unsigned int *new_flags,
- int mc_count, struct dev_mc_list *mclist)
-{
- struct ar9170 *ar = hw->priv;
-
- /* mask supported flags */
- *new_flags &= FIF_ALLMULTI | FIF_CONTROL | FIF_BCN_PRBRESP_PROMISC |
- FIF_PROMISC_IN_BSS | FIF_FCSFAIL | FIF_PLCPFAIL;
- ar->filter_state = *new_flags;
- /*
- * We can support more by setting the sniffer bit and
- * then checking the error flags, later.
- */
-
- if (changed_flags & FIF_ALLMULTI) {
- if (*new_flags & FIF_ALLMULTI) {
- ar->want_mc_hash = ~0ULL;
- } else {
- u64 mchash;
- int i;
-
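- /*
- * the hardware maintains a 64 bit multicast hash filter
- * which is indexed by the top six bits of the group
- * address' last octet.
- */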
- /* always get broadcast frames */
- mchash = 1ULL << (0xff>>2);
-
- for (i = 0; i < mc_count; i++) {
- if (WARN_ON(!mclist))
- break;
- mchash |= 1ULL << (mclist->dmi_addr[5] >> 2);
- mclist = mclist->next;
- }
- ar->want_mc_hash = mchash;
- }
- ar->filter_changed |= AR9170_FILTER_CHANGED_MULTICAST;
- }
-
- if (changed_flags & FIF_CONTROL) {
- u32 filter = AR9170_MAC_REG_FTF_PSPOLL |
- AR9170_MAC_REG_FTF_RTS |
- AR9170_MAC_REG_FTF_CTS |
- AR9170_MAC_REG_FTF_ACK |
- AR9170_MAC_REG_FTF_CFE |
- AR9170_MAC_REG_FTF_CFE_ACK;
-
- if (*new_flags & FIF_CONTROL)
- ar->want_filter = ar->cur_filter | filter;
- else
- ar->want_filter = ar->cur_filter & ~filter;
-
- ar->filter_changed |= AR9170_FILTER_CHANGED_FRAMEFILTER;
- }
-
- if (changed_flags & FIF_PROMISC_IN_BSS) {
- ar->sniffer_enabled = ((*new_flags) & FIF_PROMISC_IN_BSS) != 0;
- ar->filter_changed |= AR9170_FILTER_CHANGED_PROMISC;
- }
-
- if (likely(IS_STARTED(ar)))
- queue_work(ar->hw->workqueue, &ar->filter_config_work);
-}
-
-static void ar9170_op_bss_info_changed(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_bss_conf *bss_conf,
- u32 changed)
-{
- struct ar9170 *ar = hw->priv;
- int err = 0;
-
- mutex_lock(&ar->mutex);
-
- if (changed & BSS_CHANGED_BSSID) {
- memcpy(ar->bssid, bss_conf->bssid, ETH_ALEN);
- err = ar9170_set_operating_mode(ar);
- }
-
- if (changed & (BSS_CHANGED_BEACON | BSS_CHANGED_BEACON_ENABLED)) {
- err = ar9170_update_beacon(ar);
- if (!err)
- ar9170_set_beacon_timers(ar);
- }
-
- ar9170_regwrite_begin(ar);
-
- if (changed & BSS_CHANGED_ASSOC) {
- ar->state = bss_conf->assoc ? AR9170_ASSOCIATED : ar->state;
-
-#ifndef CONFIG_AR9170_LEDS
- /* enable assoc LED. */
- err = ar9170_set_leds_state(ar, bss_conf->assoc ? 2 : 0);
-#endif /* CONFIG_AR9170_LEDS */
- }
-
- if (changed & BSS_CHANGED_BEACON_INT)
- err = ar9170_set_beacon_timers(ar);
-
- if (changed & BSS_CHANGED_HT) {
- /* TODO */
- err = 0;
- }
-
- if (changed & BSS_CHANGED_ERP_SLOT) {
- u32 slottime = 20;
-
- if (bss_conf->use_short_slot)
- slottime = 9;
-
- ar9170_regwrite(AR9170_MAC_REG_SLOT_TIME, slottime << 10);
- }
-
- if (changed & BSS_CHANGED_BASIC_RATES) {
- u32 cck, ofdm;
-
- if (hw->conf.channel->band == IEEE80211_BAND_5GHZ) {
- ofdm = bss_conf->basic_rates;
- cck = 0;
- } else {
- /* four cck rates */
- cck = bss_conf->basic_rates & 0xf;
- ofdm = bss_conf->basic_rates >> 4;
- }
- ar9170_regwrite(AR9170_MAC_REG_BASIC_RATE,
- ofdm << 8 | cck);
- }
-
- ar9170_regwrite_finish();
- err = ar9170_regwrite_result();
- mutex_unlock(&ar->mutex);
-}
-
-static u64 ar9170_op_get_tsf(struct ieee80211_hw *hw)
-{
- struct ar9170 *ar = hw->priv;
- int err;
- u32 tsf_low;
- u32 tsf_high;
- u64 tsf;
-
- mutex_lock(&ar->mutex);
- err = ar9170_read_reg(ar, AR9170_MAC_REG_TSF_L, &tsf_low);
- if (!err)
- err = ar9170_read_reg(ar, AR9170_MAC_REG_TSF_H, &tsf_high);
- mutex_unlock(&ar->mutex);
-
- if (WARN_ON(err))
- return 0;
-
- tsf = tsf_high;
- tsf = (tsf << 32) | tsf_low;
- return tsf;
-}
-
-static int ar9170_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
- struct ieee80211_vif *vif, struct ieee80211_sta *sta,
- struct ieee80211_key_conf *key)
-{
- struct ar9170 *ar = hw->priv;
- int err = 0, i;
- u8 ktype;
-
- if ((!ar->vif) || (ar->disable_offload))
- return -EOPNOTSUPP;
-
- switch (key->alg) {
- case ALG_WEP:
- if (key->keylen == WLAN_KEY_LEN_WEP40)
- ktype = AR9170_ENC_ALG_WEP64;
- else
- ktype = AR9170_ENC_ALG_WEP128;
- break;
- case ALG_TKIP:
- ktype = AR9170_ENC_ALG_TKIP;
- break;
- case ALG_CCMP:
- ktype = AR9170_ENC_ALG_AESCCMP;
- break;
- default:
- return -EOPNOTSUPP;
- }
-
- mutex_lock(&ar->mutex);
- if (cmd == SET_KEY) {
- if (unlikely(!IS_STARTED(ar))) {
- err = -EOPNOTSUPP;
- goto out;
- }
-
- /* group keys need all-zeroes address */
- if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
- sta = NULL;
-
- if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
- for (i = 0; i < 64; i++)
- if (!(ar->usedkeys & BIT(i)))
- break;
- if (i == 64) {
- ar->rx_software_decryption = true;
- ar9170_set_operating_mode(ar);
- err = -ENOSPC;
- goto out;
- }
- } else {
- i = 64 + key->keyidx;
- }
-
- key->hw_key_idx = i;
-
- err = ar9170_upload_key(ar, i, sta ? sta->addr : NULL, ktype, 0,
- key->key, min_t(u8, 16, key->keylen));
- if (err)
- goto out;
-
- if (key->alg == ALG_TKIP) {
- err = ar9170_upload_key(ar, i, sta ? sta->addr : NULL,
- ktype, 1, key->key + 16, 16);
- if (err)
- goto out;
-
- /*
- * hardware is not capable of generating the MMIC
- * for fragmented frames!
- */
- key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
- }
-
- if (i < 64)
- ar->usedkeys |= BIT(i);
-
- key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
- } else {
- if (unlikely(!IS_STARTED(ar))) {
- /* The device is gone... together with the key ;-) */
- err = 0;
- goto out;
- }
-
- err = ar9170_disable_key(ar, key->hw_key_idx);
- if (err)
- goto out;
-
- if (key->hw_key_idx < 64) {
- ar->usedkeys &= ~BIT(key->hw_key_idx);
- } else {
- err = ar9170_upload_key(ar, key->hw_key_idx, NULL,
- AR9170_ENC_ALG_NONE, 0,
- NULL, 0);
- if (err)
- goto out;
-
- if (key->alg == ALG_TKIP) {
- err = ar9170_upload_key(ar, key->hw_key_idx,
- NULL,
- AR9170_ENC_ALG_NONE, 1,
- NULL, 0);
- if (err)
- goto out;
- }
-
- }
- }
-
- ar9170_regwrite_begin(ar);
- ar9170_regwrite(AR9170_MAC_REG_ROLL_CALL_TBL_L, ar->usedkeys);
- ar9170_regwrite(AR9170_MAC_REG_ROLL_CALL_TBL_H, ar->usedkeys >> 32);
- ar9170_regwrite_finish();
- err = ar9170_regwrite_result();
-
-out:
- mutex_unlock(&ar->mutex);
-
- return err;
-}
-
-static void ar9170_sta_notify(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- enum sta_notify_cmd cmd,
- struct ieee80211_sta *sta)
-{
- struct ar9170 *ar = hw->priv;
- struct ar9170_sta_info *info = (void *) sta->drv_priv;
- struct sk_buff *skb;
- unsigned int i;
-
- switch (cmd) {
- case STA_NOTIFY_ADD:
- for (i = 0; i < ar->hw->queues; i++)
- skb_queue_head_init(&info->tx_status[i]);
- break;
-
- case STA_NOTIFY_REMOVE:
-
- /*
- * transfer all outstanding frames that need a tx_status
- * report to the global tx_status queue
- */
-
- for (i = 0; i < ar->hw->queues; i++) {
- while ((skb = skb_dequeue(&info->tx_status[i]))) {
-#ifdef AR9170_QUEUE_DEBUG
- printk(KERN_DEBUG "%s: queueing frame in "
- "global tx_status queue =>\n",
- wiphy_name(ar->hw->wiphy));
-
- ar9170_print_txheader(ar, skb);
-#endif /* AR9170_QUEUE_DEBUG */
- skb_queue_tail(&ar->global_tx_status, skb);
- }
- }
- queue_delayed_work(ar->hw->workqueue, &ar->tx_status_janitor,
- msecs_to_jiffies(100));
- break;
-
- default:
- break;
- }
-}
-
-static int ar9170_get_stats(struct ieee80211_hw *hw,
- struct ieee80211_low_level_stats *stats)
-{
- struct ar9170 *ar = hw->priv;
- u32 val;
- int err;
-
- mutex_lock(&ar->mutex);
- err = ar9170_read_reg(ar, AR9170_MAC_REG_TX_RETRY, &val);
- ar->stats.dot11ACKFailureCount += val;
-
- memcpy(stats, &ar->stats, sizeof(*stats));
- mutex_unlock(&ar->mutex);
-
- return 0;
-}
-
-static int ar9170_get_tx_stats(struct ieee80211_hw *hw,
- struct ieee80211_tx_queue_stats *tx_stats)
-{
- struct ar9170 *ar = hw->priv;
-
- spin_lock_bh(&ar->tx_stats_lock);
- memcpy(tx_stats, ar->tx_stats, sizeof(tx_stats[0]) * hw->queues);
- spin_unlock_bh(&ar->tx_stats_lock);
-
- return 0;
-}
-
-static int ar9170_conf_tx(struct ieee80211_hw *hw, u16 queue,
- const struct ieee80211_tx_queue_params *param)
-{
- struct ar9170 *ar = hw->priv;
- int ret;
-
- mutex_lock(&ar->mutex);
- if (param && queue < ar->hw->queues) {
- memcpy(&ar->edcf[ar9170_qos_hwmap[queue]],
- param, sizeof(*param));
-
- ret = ar9170_set_qos(ar);
- } else
- ret = -EINVAL;
-
- mutex_unlock(&ar->mutex);
- return ret;
-}
-
-static int ar9170_ampdu_action(struct ieee80211_hw *hw,
- enum ieee80211_ampdu_mlme_action action,
- struct ieee80211_sta *sta, u16 tid, u16 *ssn)
-{
- switch (action) {
- case IEEE80211_AMPDU_RX_START:
- case IEEE80211_AMPDU_RX_STOP:
- /*
- * Something goes wrong -- RX locks up
- * after a while of receiving aggregated
- * frames -- not enabling for now.
- */
- return -EOPNOTSUPP;
- default:
- return -EOPNOTSUPP;
- }
-}
-
-static const struct ieee80211_ops ar9170_ops = {
- .start = ar9170_op_start,
- .stop = ar9170_op_stop,
- .tx = ar9170_op_tx,
- .add_interface = ar9170_op_add_interface,
- .remove_interface = ar9170_op_remove_interface,
- .config = ar9170_op_config,
- .configure_filter = ar9170_op_configure_filter,
- .conf_tx = ar9170_conf_tx,
- .bss_info_changed = ar9170_op_bss_info_changed,
- .get_tsf = ar9170_op_get_tsf,
- .set_key = ar9170_set_key,
- .sta_notify = ar9170_sta_notify,
- .get_stats = ar9170_get_stats,
- .get_tx_stats = ar9170_get_tx_stats,
- .ampdu_action = ar9170_ampdu_action,
-};
-
-void *ar9170_alloc(size_t priv_size)
-{
- struct ieee80211_hw *hw;
- struct ar9170 *ar;
- struct sk_buff *skb;
- int i;
-
- /*
- * this buffer is used for rx stream reconstruction.
- * Under heavy load this device (or the transport layer?)
- * tends to split the streams into separate rx descriptors.
- */
-
- skb = __dev_alloc_skb(AR9170_MAX_RX_BUFFER_SIZE, GFP_KERNEL);
- if (!skb)
- goto err_nomem;
-
- hw = ieee80211_alloc_hw(priv_size, &ar9170_ops);
- if (!hw)
- goto err_nomem;
-
- ar = hw->priv;
- ar->hw = hw;
- ar->rx_failover = skb;
-
- mutex_init(&ar->mutex);
- spin_lock_init(&ar->cmdlock);
- spin_lock_init(&ar->tx_stats_lock);
- skb_queue_head_init(&ar->global_tx_status);
- skb_queue_head_init(&ar->global_tx_status_waste);
- ar9170_rx_reset_rx_mpdu(ar);
- INIT_WORK(&ar->filter_config_work, ar9170_set_filters);
- INIT_WORK(&ar->beacon_work, ar9170_new_beacon);
- INIT_DELAYED_WORK(&ar->tx_status_janitor, ar9170_tx_status_janitor);
-
- /* all hw supports 2.4 GHz, so set channel to 1 by default */
- ar->channel = &ar9170_2ghz_chantable[0];
-
- /* first part of wiphy init */
- ar->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
- BIT(NL80211_IFTYPE_WDS) |
- BIT(NL80211_IFTYPE_ADHOC);
- ar->hw->flags |= IEEE80211_HW_RX_INCLUDES_FCS |
- IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
- IEEE80211_HW_SIGNAL_DBM |
- IEEE80211_HW_NOISE_DBM;
-
- ar->hw->queues = __AR9170_NUM_TXQ;
- ar->hw->extra_tx_headroom = 8;
- ar->hw->sta_data_size = sizeof(struct ar9170_sta_info);
-
- ar->hw->max_rates = 1;
- ar->hw->max_rate_tries = 3;
-
- for (i = 0; i < ARRAY_SIZE(ar->noise); i++)
- ar->noise[i] = -95; /* ATH_DEFAULT_NOISE_FLOOR */
-
- return ar;
-
-err_nomem:
- kfree_skb(skb);
- return ERR_PTR(-ENOMEM);
-}
-
-static int ar9170_read_eeprom(struct ar9170 *ar)
-{
-#define RW 8 /* number of words to read at once */
-#define RB (sizeof(u32) * RW)
- DECLARE_MAC_BUF(mbuf);
- u8 *eeprom = (void *)&ar->eeprom;
- u8 *addr = ar->eeprom.mac_address;
- __le32 offsets[RW];
- int i, j, err, bands = 0;
-
- BUILD_BUG_ON(sizeof(ar->eeprom) & 3);
-
- BUILD_BUG_ON(RB > AR9170_MAX_CMD_LEN - 4);
-#ifndef __CHECKER__
- /* don't want to handle trailing remains */
- BUILD_BUG_ON(sizeof(ar->eeprom) % RB);
-#endif
-
- for (i = 0; i < sizeof(ar->eeprom)/RB; i++) {
- for (j = 0; j < RW; j++)
- offsets[j] = cpu_to_le32(AR9170_EEPROM_START +
- RB * i + 4 * j);
-
- err = ar->exec_cmd(ar, AR9170_CMD_RREG,
- RB, (u8 *) &offsets,
- RB, eeprom + RB * i);
- if (err)
- return err;
- }
-
-#undef RW
-#undef RB
-
- if (ar->eeprom.length == cpu_to_le16(0xFFFF))
- return -ENODATA;
-
- if (ar->eeprom.operating_flags & AR9170_OPFLAG_2GHZ) {
- ar->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &ar9170_band_2GHz;
- bands++;
- }
- if (ar->eeprom.operating_flags & AR9170_OPFLAG_5GHZ) {
- ar->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &ar9170_band_5GHz;
- bands++;
- }
- /*
- * I measured this, a bandswitch takes roughly
- * 135 ms and a frequency switch about 80.
- *
- * FIXME: measure these values again once EEPROM settings
- * are used, that will influence them!
- */
- if (bands == 2)
- ar->hw->channel_change_time = 135 * 1000;
- else
- ar->hw->channel_change_time = 80 * 1000;
-
- ar->regulatory.current_rd = le16_to_cpu(ar->eeprom.reg_domain[0]);
- ar->regulatory.current_rd_ext = le16_to_cpu(ar->eeprom.reg_domain[1]);
-
- /* second part of wiphy init */
- SET_IEEE80211_PERM_ADDR(ar->hw, addr);
-
- return bands ? 0 : -EINVAL;
-}
-
-static int ar9170_reg_notifier(struct wiphy *wiphy,
- struct regulatory_request *request)
-{
- struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
- struct ar9170 *ar = hw->priv;
-
- return ath_reg_notifier_apply(wiphy, request, &ar->regulatory);
-}
-
-int ar9170_register(struct ar9170 *ar, struct device *pdev)
-{
- int err;
-
- /* try to read EEPROM, init MAC addr */
- err = ar9170_read_eeprom(ar);
- if (err)
- goto err_out;
-
- err = ath_regd_init(&ar->regulatory, ar->hw->wiphy,
- ar9170_reg_notifier);
- if (err)
- goto err_out;
-
- err = ieee80211_register_hw(ar->hw);
- if (err)
- goto err_out;
-
- if (!ath_is_world_regd(&ar->regulatory))
- regulatory_hint(ar->hw->wiphy, ar->regulatory.alpha2);
-
- err = ar9170_init_leds(ar);
- if (err)
- goto err_unreg;
-
-#ifdef CONFIG_AR9170_LEDS
- err = ar9170_register_leds(ar);
- if (err)
- goto err_unreg;
-#endif /* CONFIG_AR9170_LEDS */
-
- dev_info(pdev, "Atheros AR9170 is registered as '%s'\n",
- wiphy_name(ar->hw->wiphy));
-
- return err;
-
-err_unreg:
- ieee80211_unregister_hw(ar->hw);
-
-err_out:
- return err;
-}
-
-void ar9170_unregister(struct ar9170 *ar)
-{
-#ifdef CONFIG_AR9170_LEDS
- ar9170_unregister_leds(ar);
-#endif /* CONFIG_AR9170_LEDS */
-
- kfree_skb(ar->rx_failover);
- ieee80211_unregister_hw(ar->hw);
- mutex_destroy(&ar->mutex);
-}
diff --git a/drivers/net/wireless/ath/ar9170/usb.c b/drivers/net/wireless/ath/ar9170/usb.c
deleted file mode 100644
index d7c13c0177ca..000000000000
--- a/drivers/net/wireless/ath/ar9170/usb.c
+++ /dev/null
@@ -1,826 +0,0 @@
-/*
- * Atheros AR9170 driver
- *
- * USB - frontend
- *
- * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
- * Copyright 2009, Christian Lamparter <chunkeey@web.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; see the file COPYING. If not, see
- * http://www.gnu.org/licenses/.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * Copyright (c) 2007-2008 Atheros Communications, Inc.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#include <linux/module.h>
-#include <linux/usb.h>
-#include <linux/firmware.h>
-#include <linux/etherdevice.h>
-#include <net/mac80211.h>
-#include "ar9170.h"
-#include "cmd.h"
-#include "hw.h"
-#include "usb.h"
-
-MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>");
-MODULE_AUTHOR("Christian Lamparter <chunkeey@web.de>");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Atheros AR9170 802.11n USB wireless");
-MODULE_FIRMWARE("ar9170-1.fw");
-MODULE_FIRMWARE("ar9170-2.fw");
-
-static struct usb_device_id ar9170_usb_ids[] = {
- /* Atheros 9170 */
- { USB_DEVICE(0x0cf3, 0x9170) },
- /* Atheros TG121N */
- { USB_DEVICE(0x0cf3, 0x1001) },
- /* Cace Airpcap NX */
- { USB_DEVICE(0xcace, 0x0300) },
- /* D-Link DWA 160A */
- { USB_DEVICE(0x07d1, 0x3c10) },
- /* Netgear WNDA3100 */
- { USB_DEVICE(0x0846, 0x9010) },
- /* Netgear WN111 v2 */
- { USB_DEVICE(0x0846, 0x9001) },
- /* Zydas ZD1221 */
- { USB_DEVICE(0x0ace, 0x1221) },
- /* ZyXEL NWD271N */
- { USB_DEVICE(0x0586, 0x3417) },
- /* Z-Com UB81 BG */
- { USB_DEVICE(0x0cde, 0x0023) },
- /* Z-Com UB82 ABG */
- { USB_DEVICE(0x0cde, 0x0026) },
- /* Arcadyan WN7512 */
- { USB_DEVICE(0x083a, 0xf522) },
- /* Planex GWUS300 */
- { USB_DEVICE(0x2019, 0x5304) },
- /* IO-Data WNGDNUS2 */
- { USB_DEVICE(0x04bb, 0x093f) },
-
- /* terminate */
- {}
-};
-MODULE_DEVICE_TABLE(usb, ar9170_usb_ids);
-
-static void ar9170_usb_tx_urb_complete_free(struct urb *urb)
-{
- struct sk_buff *skb = urb->context;
- struct ar9170_usb *aru = (struct ar9170_usb *)
- usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0));
-
- if (!aru) {
- dev_kfree_skb_irq(skb);
- return ;
- }
-
- ar9170_handle_tx_status(&aru->common, skb, false,
- AR9170_TX_STATUS_COMPLETE);
-}
-
-static void ar9170_usb_tx_urb_complete(struct urb *urb)
-{
-}
-
-static void ar9170_usb_irq_completed(struct urb *urb)
-{
- struct ar9170_usb *aru = urb->context;
-
- switch (urb->status) {
- /* everything is fine */
- case 0:
- break;
-
- /* disconnect */
- case -ENOENT:
- case -ECONNRESET:
- case -ENODEV:
- case -ESHUTDOWN:
- goto free;
-
- default:
- goto resubmit;
- }
-
- print_hex_dump_bytes("ar9170 irq: ", DUMP_PREFIX_OFFSET,
- urb->transfer_buffer, urb->actual_length);
-
-resubmit:
- usb_anchor_urb(urb, &aru->rx_submitted);
- if (usb_submit_urb(urb, GFP_ATOMIC)) {
- usb_unanchor_urb(urb);
- goto free;
- }
-
- return;
-
-free:
- usb_buffer_free(aru->udev, 64, urb->transfer_buffer, urb->transfer_dma);
-}
-
-static void ar9170_usb_rx_completed(struct urb *urb)
-{
- struct sk_buff *skb = urb->context;
- struct ar9170_usb *aru = (struct ar9170_usb *)
- usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0));
- int err;
-
- if (!aru)
- goto free;
-
- switch (urb->status) {
- /* everything is fine */
- case 0:
- break;
-
- /* disconnect */
- case -ENOENT:
- case -ECONNRESET:
- case -ENODEV:
- case -ESHUTDOWN:
- goto free;
-
- default:
- goto resubmit;
- }
-
- skb_put(skb, urb->actual_length);
- ar9170_rx(&aru->common, skb);
-
-resubmit:
- skb_reset_tail_pointer(skb);
- skb_trim(skb, 0);
-
- usb_anchor_urb(urb, &aru->rx_submitted);
- err = usb_submit_urb(urb, GFP_ATOMIC);
- if (err) {
- usb_unanchor_urb(urb);
- dev_kfree_skb_irq(skb);
- }
-
- return ;
-
-free:
- dev_kfree_skb_irq(skb);
- return;
-}
-
-static int ar9170_usb_prep_rx_urb(struct ar9170_usb *aru,
- struct urb *urb, gfp_t gfp)
-{
- struct sk_buff *skb;
-
- skb = __dev_alloc_skb(AR9170_MAX_RX_BUFFER_SIZE + 32, gfp);
- if (!skb)
- return -ENOMEM;
-
- /* reserve some space for mac80211's radiotap */
- skb_reserve(skb, 32);
-
- usb_fill_bulk_urb(urb, aru->udev,
- usb_rcvbulkpipe(aru->udev, AR9170_EP_RX),
- skb->data, min(skb_tailroom(skb),
- AR9170_MAX_RX_BUFFER_SIZE),
- ar9170_usb_rx_completed, skb);
-
- return 0;
-}
-
-static int ar9170_usb_alloc_rx_irq_urb(struct ar9170_usb *aru)
-{
- struct urb *urb = NULL;
- void *ibuf;
- int err = -ENOMEM;
-
- /* initialize interrupt endpoint */
- urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!urb)
- goto out;
-
- ibuf = usb_buffer_alloc(aru->udev, 64, GFP_KERNEL, &urb->transfer_dma);
- if (!ibuf)
- goto out;
-
- usb_fill_int_urb(urb, aru->udev,
- usb_rcvintpipe(aru->udev, AR9170_EP_IRQ), ibuf,
- 64, ar9170_usb_irq_completed, aru, 1);
- urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
-
- usb_anchor_urb(urb, &aru->rx_submitted);
- err = usb_submit_urb(urb, GFP_KERNEL);
- if (err) {
- usb_unanchor_urb(urb);
- usb_buffer_free(aru->udev, 64, urb->transfer_buffer,
- urb->transfer_dma);
- }
-
-out:
- usb_free_urb(urb);
- return err;
-}
-
-static int ar9170_usb_alloc_rx_bulk_urbs(struct ar9170_usb *aru)
-{
- struct urb *urb;
- int i;
- int err = -EINVAL;
-
- for (i = 0; i < AR9170_NUM_RX_URBS; i++) {
- err = -ENOMEM;
- urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!urb)
- goto err_out;
-
- err = ar9170_usb_prep_rx_urb(aru, urb, GFP_KERNEL);
- if (err) {
- usb_free_urb(urb);
- goto err_out;
- }
-
- usb_anchor_urb(urb, &aru->rx_submitted);
- err = usb_submit_urb(urb, GFP_KERNEL);
- if (err) {
- usb_unanchor_urb(urb);
- dev_kfree_skb_any(urb->context);
- usb_free_urb(urb);
- goto err_out;
- }
- usb_free_urb(urb);
- }
-
- /* the device is now waiting for firmware. */
- aru->common.state = AR9170_IDLE;
- return 0;
-
-err_out:
-
- usb_kill_anchored_urbs(&aru->rx_submitted);
- return err;
-}
-
-static void ar9170_usb_cancel_urbs(struct ar9170_usb *aru)
-{
- int ret;
-
- aru->common.state = AR9170_UNKNOWN_STATE;
-
- usb_unlink_anchored_urbs(&aru->tx_submitted);
-
- /* give the LED OFF command and the deauth frame a chance to air. */
- ret = usb_wait_anchor_empty_timeout(&aru->tx_submitted,
- msecs_to_jiffies(100));
- if (ret == 0)
- dev_err(&aru->udev->dev, "kill pending tx urbs.\n");
- usb_poison_anchored_urbs(&aru->tx_submitted);
-
- usb_poison_anchored_urbs(&aru->rx_submitted);
-}
-
-static int ar9170_usb_exec_cmd(struct ar9170 *ar, enum ar9170_cmd cmd,
- unsigned int plen, void *payload,
- unsigned int outlen, void *out)
-{
- struct ar9170_usb *aru = (void *) ar;
- struct urb *urb = NULL;
- unsigned long flags;
- int err = -ENOMEM;
-
- if (unlikely(!IS_ACCEPTING_CMD(ar)))
- return -EPERM;
-
- if (WARN_ON(plen > AR9170_MAX_CMD_LEN - 4))
- return -EINVAL;
-
- urb = usb_alloc_urb(0, GFP_ATOMIC);
- if (unlikely(!urb))
- goto err_free;
-
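- /*
- * the first word of the command buffer carries the payload
- * length in its low byte and the command id in the second
- * byte; the payload itself starts at cmdbuf[1].
- */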
- ar->cmdbuf[0] = cpu_to_le32(plen);
- ar->cmdbuf[0] |= cpu_to_le32(cmd << 8);
- /* writing multiple regs fills this buffer already */
- if (plen && payload != (u8 *)(&ar->cmdbuf[1]))
- memcpy(&ar->cmdbuf[1], payload, plen);
-
- spin_lock_irqsave(&aru->common.cmdlock, flags);
- aru->readbuf = (u8 *)out;
- aru->readlen = outlen;
- spin_unlock_irqrestore(&aru->common.cmdlock, flags);
-
- usb_fill_int_urb(urb, aru->udev,
- usb_sndbulkpipe(aru->udev, AR9170_EP_CMD),
- aru->common.cmdbuf, plen + 4,
- ar9170_usb_tx_urb_complete, NULL, 1);
-
- usb_anchor_urb(urb, &aru->tx_submitted);
- err = usb_submit_urb(urb, GFP_ATOMIC);
- if (err) {
- usb_unanchor_urb(urb);
- usb_free_urb(urb);
- goto err_unbuf;
- }
- usb_free_urb(urb);
-
- err = wait_for_completion_timeout(&aru->cmd_wait, HZ);
- if (err == 0) {
- err = -ETIMEDOUT;
- goto err_unbuf;
- }
-
- if (aru->readlen != outlen) {
- err = -EMSGSIZE;
- goto err_unbuf;
- }
-
- return 0;
-
-err_unbuf:
- /* Maybe the device was removed while we were waiting? */
- if (IS_STARTED(ar)) {
- dev_err(&aru->udev->dev, "no command feedback "
- "received (%d).\n", err);
-
- /* provide some maybe useful debug information */
- print_hex_dump_bytes("ar9170 cmd: ", DUMP_PREFIX_NONE,
- aru->common.cmdbuf, plen + 4);
- dump_stack();
- }
-
- /* invalidate to avoid completing the next command prematurely */
- spin_lock_irqsave(&aru->common.cmdlock, flags);
- aru->readbuf = NULL;
- aru->readlen = 0;
- spin_unlock_irqrestore(&aru->common.cmdlock, flags);
-
-err_free:
-
- return err;
-}
-
-static int ar9170_usb_tx(struct ar9170 *ar, struct sk_buff *skb,
- bool txstatus_needed, unsigned int extra_len)
-{
- struct ar9170_usb *aru = (struct ar9170_usb *) ar;
- struct urb *urb;
- int err;
-
- if (unlikely(!IS_STARTED(ar))) {
- /* Seriously, what were you drink... err... thinking!? */
- return -EPERM;
- }
-
- urb = usb_alloc_urb(0, GFP_ATOMIC);
- if (unlikely(!urb))
- return -ENOMEM;
-
- usb_fill_bulk_urb(urb, aru->udev,
- usb_sndbulkpipe(aru->udev, AR9170_EP_TX),
- skb->data, skb->len + extra_len, (txstatus_needed ?
- ar9170_usb_tx_urb_complete :
- ar9170_usb_tx_urb_complete_free), skb);
- urb->transfer_flags |= URB_ZERO_PACKET;
-
- usb_anchor_urb(urb, &aru->tx_submitted);
- err = usb_submit_urb(urb, GFP_ATOMIC);
- if (unlikely(err))
- usb_unanchor_urb(urb);
-
- usb_free_urb(urb);
- return err;
-}
-
-static void ar9170_usb_callback_cmd(struct ar9170 *ar, u32 len, void *buffer)
-{
- struct ar9170_usb *aru = (void *) ar;
- unsigned long flags;
- u32 in, out;
-
- if (!buffer)
- return ;
-
- in = le32_to_cpup((__le32 *)buffer);
- out = le32_to_cpu(ar->cmdbuf[0]);
-
- /* mask off length byte */
- out &= ~0xFF;
-
- if (aru->readlen >= 0) {
- /* add expected length */
- out |= aru->readlen;
- } else {
- /* add obtained length */
- out |= in & 0xFF;
- }
-
- /*
- * Some commands (e.g: AR9170_CMD_FREQUENCY) have a variable response
- * length and we cannot predict the correct length in advance.
- * So we only check if we provided enough space for the data.
- */
- if (unlikely(out < in)) {
- dev_warn(&aru->udev->dev, "received invalid command response: "
- "got %d bytes, instead of %d bytes, "
- "while the resp length is %d bytes\n",
- in, out, len);
- print_hex_dump_bytes("ar9170 invalid resp: ",
- DUMP_PREFIX_OFFSET, buffer, len);
- /*
- * Do not complete, then the command times out,
- * and we get a stack trace from there.
- */
- return ;
- }
-
- spin_lock_irqsave(&aru->common.cmdlock, flags);
- if (aru->readbuf && len > 0) {
- memcpy(aru->readbuf, buffer + 4, len - 4);
- aru->readbuf = NULL;
- }
- complete(&aru->cmd_wait);
- spin_unlock_irqrestore(&aru->common.cmdlock, flags);
-}
-
-static int ar9170_usb_upload(struct ar9170_usb *aru, const void *data,
- size_t len, u32 addr, bool complete)
-{
- int transfer, err;
- u8 *buf = kmalloc(4096, GFP_KERNEL);
-
- if (!buf)
- return -ENOMEM;
-
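- /*
- * upload the image in 4 KiB chunks via the vendor "FW DL"
- * control request (0x30); the target address is passed in
- * the wValue field, shifted right by eight bits.
- */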
- while (len) {
- transfer = min_t(int, len, 4096);
- memcpy(buf, data, transfer);
-
- err = usb_control_msg(aru->udev, usb_sndctrlpipe(aru->udev, 0),
- 0x30 /* FW DL */, 0x40 | USB_DIR_OUT,
- addr >> 8, 0, buf, transfer, 1000);
-
- if (err < 0) {
- kfree(buf);
- return err;
- }
-
- len -= transfer;
- data += transfer;
- addr += transfer;
- }
- kfree(buf);
-
- if (complete) {
- err = usb_control_msg(aru->udev, usb_sndctrlpipe(aru->udev, 0),
- 0x31 /* FW DL COMPLETE */,
- 0x40 | USB_DIR_OUT, 0, 0, NULL, 0, 5000);
- }
-
- return 0;
-}
-
-static int ar9170_usb_request_firmware(struct ar9170_usb *aru)
-{
- int err = 0;
-
- err = request_firmware(&aru->init_values, "ar9170-1.fw",
- &aru->udev->dev);
- if (err) {
- dev_err(&aru->udev->dev, "file with init values not found.\n");
- return err;
- }
-
- err = request_firmware(&aru->firmware, "ar9170-2.fw", &aru->udev->dev);
- if (err) {
- release_firmware(aru->init_values);
- dev_err(&aru->udev->dev, "firmware file not found.\n");
- return err;
- }
-
- return err;
-}
-
-static int ar9170_usb_reset(struct ar9170_usb *aru)
-{
- int ret, lock = (aru->intf->condition != USB_INTERFACE_BINDING);
-
- if (lock) {
- ret = usb_lock_device_for_reset(aru->udev, aru->intf);
- if (ret < 0) {
- dev_err(&aru->udev->dev, "unable to lock device "
- "for reset (%d).\n", ret);
- return ret;
- }
- }
-
- ret = usb_reset_device(aru->udev);
- if (lock)
- usb_unlock_device(aru->udev);
-
- /* let it rest - for a second - */
- msleep(1000);
-
- return ret;
-}
-
-static int ar9170_usb_upload_firmware(struct ar9170_usb *aru)
-{
- int err;
-
- /* First, upload initial values to device RAM */
- err = ar9170_usb_upload(aru, aru->init_values->data,
- aru->init_values->size, 0x102800, false);
- if (err) {
- dev_err(&aru->udev->dev, "firmware part 1 "
- "upload failed (%d).\n", err);
- return err;
- }
-
- /* Then, upload the firmware itself and start it */
- return ar9170_usb_upload(aru, aru->firmware->data, aru->firmware->size,
- 0x200000, true);
-}
-
-static int ar9170_usb_init_transport(struct ar9170_usb *aru)
-{
- struct ar9170 *ar = (void *) &aru->common;
- int err;
-
- ar9170_regwrite_begin(ar);
-
- /* Set USB Rx stream mode MAX packet number to 2 */
- ar9170_regwrite(AR9170_USB_REG_MAX_AGG_UPLOAD, 0x4);
-
- /* Set USB Rx stream mode timeout to 10us */
- ar9170_regwrite(AR9170_USB_REG_UPLOAD_TIME_CTL, 0x80);
-
- ar9170_regwrite_finish();
-
- err = ar9170_regwrite_result();
- if (err)
- dev_err(&aru->udev->dev, "USB setup failed (%d).\n", err);
-
- return err;
-}
-
-static void ar9170_usb_stop(struct ar9170 *ar)
-{
- struct ar9170_usb *aru = (void *) ar;
- int ret;
-
- if (IS_ACCEPTING_CMD(ar))
- aru->common.state = AR9170_STOPPED;
-
- /* let's wait a while until the tx queues have drained */
- ret = usb_wait_anchor_empty_timeout(&aru->tx_submitted,
- msecs_to_jiffies(1000));
- if (ret == 0)
- dev_err(&aru->udev->dev, "kill pending tx urbs.\n");
-
- usb_poison_anchored_urbs(&aru->tx_submitted);
-
- /*
- * Note:
- * So far we freed all tx urbs, but we won't dare to touch any rx urbs.
- * Else we would end up with an unresponsive device...
- */
-}
-
-static int ar9170_usb_open(struct ar9170 *ar)
-{
- struct ar9170_usb *aru = (void *) ar;
- int err;
-
- usb_unpoison_anchored_urbs(&aru->tx_submitted);
- err = ar9170_usb_init_transport(aru);
- if (err) {
- usb_poison_anchored_urbs(&aru->tx_submitted);
- return err;
- }
-
- aru->common.state = AR9170_IDLE;
- return 0;
-}
-
-static int ar9170_usb_init_device(struct ar9170_usb *aru)
-{
- int err;
-
- err = ar9170_usb_alloc_rx_irq_urb(aru);
- if (err)
- goto err_out;
-
- err = ar9170_usb_alloc_rx_bulk_urbs(aru);
- if (err)
- goto err_unrx;
-
- err = ar9170_usb_upload_firmware(aru);
- if (err) {
- err = ar9170_echo_test(&aru->common, 0x60d43110);
- if (err) {
- /* force user intervention by disabling the device */
- err = usb_driver_set_configuration(aru->udev, -1);
- dev_err(&aru->udev->dev, "device is in a bad state. "
- "please reconnect it!\n");
- goto err_unrx;
- }
- }
-
- return 0;
-
-err_unrx:
- ar9170_usb_cancel_urbs(aru);
-
-err_out:
- return err;
-}
-
-static int ar9170_usb_probe(struct usb_interface *intf,
- const struct usb_device_id *id)
-{
- struct ar9170_usb *aru;
- struct ar9170 *ar;
- struct usb_device *udev;
- int err;
-
- aru = ar9170_alloc(sizeof(*aru));
- if (IS_ERR(aru)) {
- err = PTR_ERR(aru);
- goto out;
- }
-
- udev = interface_to_usbdev(intf);
- usb_get_dev(udev);
- aru->udev = udev;
- aru->intf = intf;
- ar = &aru->common;
-
- usb_set_intfdata(intf, aru);
- SET_IEEE80211_DEV(ar->hw, &udev->dev);
-
- init_usb_anchor(&aru->rx_submitted);
- init_usb_anchor(&aru->tx_submitted);
- init_completion(&aru->cmd_wait);
-
- aru->common.stop = ar9170_usb_stop;
- aru->common.open = ar9170_usb_open;
- aru->common.tx = ar9170_usb_tx;
- aru->common.exec_cmd = ar9170_usb_exec_cmd;
- aru->common.callback_cmd = ar9170_usb_callback_cmd;
-
-#ifdef CONFIG_PM
- udev->reset_resume = 1;
-#endif
- err = ar9170_usb_reset(aru);
- if (err)
- goto err_freehw;
-
- err = ar9170_usb_request_firmware(aru);
- if (err)
- goto err_freehw;
-
- err = ar9170_usb_init_device(aru);
- if (err)
- goto err_freefw;
-
- err = ar9170_usb_open(ar);
- if (err)
- goto err_unrx;
-
- err = ar9170_register(ar, &udev->dev);
-
- ar9170_usb_stop(ar);
- if (err)
- goto err_unrx;
-
- return 0;
-
-err_unrx:
- ar9170_usb_cancel_urbs(aru);
-
-err_freefw:
- release_firmware(aru->init_values);
- release_firmware(aru->firmware);
-
-err_freehw:
- usb_set_intfdata(intf, NULL);
- usb_put_dev(udev);
- ieee80211_free_hw(ar->hw);
-out:
- return err;
-}
-
-static void ar9170_usb_disconnect(struct usb_interface *intf)
-{
- struct ar9170_usb *aru = usb_get_intfdata(intf);
-
- if (!aru)
- return;
-
- aru->common.state = AR9170_IDLE;
- ar9170_unregister(&aru->common);
- ar9170_usb_cancel_urbs(aru);
-
- release_firmware(aru->init_values);
- release_firmware(aru->firmware);
-
- usb_put_dev(aru->udev);
- usb_set_intfdata(intf, NULL);
- ieee80211_free_hw(aru->common.hw);
-}
-
-#ifdef CONFIG_PM
-static int ar9170_suspend(struct usb_interface *intf,
- pm_message_t message)
-{
- struct ar9170_usb *aru = usb_get_intfdata(intf);
-
- if (!aru)
- return -ENODEV;
-
- aru->common.state = AR9170_IDLE;
- ar9170_usb_cancel_urbs(aru);
-
- return 0;
-}
-
-static int ar9170_resume(struct usb_interface *intf)
-{
- struct ar9170_usb *aru = usb_get_intfdata(intf);
- int err;
-
- if (!aru)
- return -ENODEV;
-
- usb_unpoison_anchored_urbs(&aru->rx_submitted);
- usb_unpoison_anchored_urbs(&aru->tx_submitted);
-
- /*
- * FIXME: firmware upload will fail on resume.
- * but this is better than a hang!
- */
-
- err = ar9170_usb_init_device(aru);
- if (err)
- goto err_unrx;
-
- err = ar9170_usb_open(&aru->common);
- if (err)
- goto err_unrx;
-
- return 0;
-
-err_unrx:
- aru->common.state = AR9170_IDLE;
- ar9170_usb_cancel_urbs(aru);
-
- return err;
-}
-#endif /* CONFIG_PM */
-
-static struct usb_driver ar9170_driver = {
- .name = "ar9170usb",
- .probe = ar9170_usb_probe,
- .disconnect = ar9170_usb_disconnect,
- .id_table = ar9170_usb_ids,
- .soft_unbind = 1,
-#ifdef CONFIG_PM
- .suspend = ar9170_suspend,
- .resume = ar9170_resume,
- .reset_resume = ar9170_resume,
-#endif /* CONFIG_PM */
-};
-
-static int __init ar9170_init(void)
-{
- return usb_register(&ar9170_driver);
-}
-
-static void __exit ar9170_exit(void)
-{
- usb_deregister(&ar9170_driver);
-}
-
-module_init(ar9170_init);
-module_exit(ar9170_exit);
diff --git a/drivers/net/wireless/ath/ar9170/usb.h b/drivers/net/wireless/ath/ar9170/usb.h
deleted file mode 100644
index ac42586495d8..000000000000
--- a/drivers/net/wireless/ath/ar9170/usb.h
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * Atheros AR9170 USB driver
- *
- * Driver specific definitions
- *
- * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
- * Copyright 2009, Christian Lamparter <chunkeey@web.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; see the file COPYING. If not, see
- * http://www.gnu.org/licenses/.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * Copyright (c) 2007-2008 Atheros Communications, Inc.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-#ifndef __USB_H
-#define __USB_H
-
-#include <linux/usb.h>
-#include <linux/completion.h>
-#include <linux/spinlock.h>
-#include <linux/leds.h>
-#include <net/cfg80211.h>
-#include <net/mac80211.h>
-#include <linux/firmware.h>
-#include "eeprom.h"
-#include "hw.h"
-#include "ar9170.h"
-
-#define AR9170_NUM_RX_URBS 16
-
-struct firmware;
-
-struct ar9170_usb {
- struct ar9170 common;
- struct usb_device *udev;
- struct usb_interface *intf;
-
- struct usb_anchor rx_submitted;
- struct usb_anchor tx_submitted;
-
- spinlock_t cmdlock;
- struct completion cmd_wait;
- int readlen;
- u8 *readbuf;
-
- const struct firmware *init_values;
- const struct firmware *firmware;
-};
-
-#endif /* __USB_H */
diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
new file mode 100644
index 000000000000..34654f710d8a
--- /dev/null
+++ b/drivers/net/wireless/ath/ath.h
@@ -0,0 +1,339 @@
+/*
+ * Copyright (c) 2008-2009 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef ATH_H
+#define ATH_H
+
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/if_ether.h>
+#include <linux/spinlock.h>
+#include <net/mac80211.h>
+
+/*
+ * The key cache is used for h/w cipher state and also for
+ * tracking station state such as the current tx antenna.
+ * We also set up a mapping table between key cache slot indices
+ * and station state to short-circuit node lookups on rx.
+ * Different parts have different size key caches. We handle
+ * up to ATH_KEYMAX entries (could dynamically allocate state).
+ */
+#define ATH_KEYMAX 128 /* max key cache size we handle */
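+
+/*
+ * Example (illustrative only, not part of this patch): a driver can claim
+ * a free key cache slot by scanning the keymap bitmap declared in
+ * struct ath_common below:
+ *
+ *	slot = find_first_zero_bit(common->keymap, common->keymax);
+ *	if (slot < common->keymax)
+ *		set_bit(slot, common->keymap);
+ */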
+
+struct ath_ani {
+ bool caldone;
+ unsigned int longcal_timer;
+ unsigned int shortcal_timer;
+ unsigned int resetcal_timer;
+ unsigned int checkani_timer;
+ struct timer_list timer;
+};
+
+struct ath_cycle_counters {
+ u32 cycles;
+ u32 rx_busy;
+ u32 rx_frame;
+ u32 tx_frame;
+};
+
+enum ath_device_state {
+ ATH_HW_UNAVAILABLE,
+ ATH_HW_INITIALIZED,
+};
+
+enum ath_op_flags {
+ ATH_OP_INVALID,
+ ATH_OP_BEACONS,
+ ATH_OP_ANI_RUN,
+ ATH_OP_PRIM_STA_VIF,
+ ATH_OP_HW_RESET,
+ ATH_OP_SCANNING,
+ ATH_OP_MULTI_CHANNEL,
+ ATH_OP_WOW_ENABLED,
+};
+
+enum ath_bus_type {
+ ATH_PCI,
+ ATH_AHB,
+ ATH_USB,
+};
+
+struct reg_dmn_pair_mapping {
+ u16 reg_domain;
+ u16 reg_5ghz_ctl;
+ u16 reg_2ghz_ctl;
+};
+
+struct ath_regulatory {
+ char alpha2[2];
+ enum nl80211_dfs_regions region;
+ u16 country_code;
+ u16 max_power_level;
+ u16 current_rd;
+ int16_t power_limit;
+ struct reg_dmn_pair_mapping *regpair;
+};
+
+enum ath_crypt_caps {
+ ATH_CRYPT_CAP_CIPHER_AESCCM = BIT(0),
+ ATH_CRYPT_CAP_MIC_COMBINED = BIT(1),
+};
+
+struct ath_keyval {
+ u8 kv_type;
+ u8 kv_pad;
+ u16 kv_len;
+ struct_group(kv_values,
+ u8 kv_val[16]; /* TK */
+ u8 kv_mic[8]; /* Michael MIC key */
+ u8 kv_txmic[8]; /* Michael MIC TX key (used only if the hardware
+ * supports both MIC keys in the same key cache entry;
+ * in that case, kv_mic is the RX key) */
+ );
+};
+
+enum ath_cipher {
+ ATH_CIPHER_WEP = 0,
+ ATH_CIPHER_AES_OCB = 1,
+ ATH_CIPHER_AES_CCM = 2,
+ ATH_CIPHER_CKIP = 3,
+ ATH_CIPHER_TKIP = 4,
+ ATH_CIPHER_CLR = 5,
+ ATH_CIPHER_MIC = 127
+};
+
+/**
+ * struct ath_ops - Register read/write operations
+ *
+ * @read: Register read
+ * @multi_read: Multiple register read
+ * @write: Register write
+ * @enable_write_buffer: Enable multiple register writes
+ * @write_flush: Flush buffered register writes and disable buffering
+ * @rmw: Read-modify-write: clear @clr bits and set @set bits in a register
+ * @enable_rmw_buffer: Enable buffering of read-modify-write operations
+ * @rmw_flush: Flush buffered read-modify-write operations and disable
+ *	buffering
+ */
+struct ath_ops {
+ unsigned int (*read)(void *, u32 reg_offset);
+ void (*multi_read)(void *, u32 *addr, u32 *val, u16 count);
+ void (*write)(void *, u32 val, u32 reg_offset);
+ void (*enable_write_buffer)(void *);
+ void (*write_flush)(void *);
+ u32 (*rmw)(void *, u32 reg_offset, u32 set, u32 clr);
+ void (*enable_rmw_buffer)(void *);
+ void (*rmw_flush)(void *);
+};
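+
+/*
+ * Illustrative usage (a sketch, not part of this patch): the bus glue
+ * supplies these callbacks and drivers access registers through them, e.g.:
+ *
+ *	val = common->ops->read(common->ah, reg);
+ *	common->ops->write(common->ah, val | BIT(3), reg);
+ */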
+
+struct ath_common;
+struct ath_bus_ops;
+
+struct ath_ps_ops {
+ void (*wakeup)(struct ath_common *common);
+ void (*restore)(struct ath_common *common);
+};
+
+struct ath_common {
+ void *ah;
+ void *priv;
+ struct ieee80211_hw *hw;
+ int debug_mask;
+ enum ath_device_state state;
+ unsigned long op_flags;
+
+ struct ath_ani ani;
+
+ u16 cachelsz;
+ u16 curaid;
+ u8 macaddr[ETH_ALEN];
+ u8 curbssid[ETH_ALEN] __aligned(2);
+ u8 bssidmask[ETH_ALEN];
+
+ u32 rx_bufsize;
+
+ u32 keymax;
+ DECLARE_BITMAP(keymap, ATH_KEYMAX);
+ DECLARE_BITMAP(tkip_keymap, ATH_KEYMAX);
+ DECLARE_BITMAP(ccmp_keymap, ATH_KEYMAX);
+ enum ath_crypt_caps crypt_caps;
+
+ unsigned int clockrate;
+
+ spinlock_t cc_lock;
+ struct_group(cc,
+ struct ath_cycle_counters cc_ani;
+ struct ath_cycle_counters cc_survey;
+ );
+
+ struct ath_regulatory regulatory;
+ struct ath_regulatory reg_world_copy;
+ const struct ath_ops *ops;
+ const struct ath_bus_ops *bus_ops;
+ const struct ath_ps_ops *ps_ops;
+
+ bool btcoex_enabled;
+ bool disable_ani;
+ bool bt_ant_diversity;
+
+ int last_rssi;
+ struct ieee80211_supported_band sbands[NUM_NL80211_BANDS];
+};
+
+static inline const struct ath_ps_ops *ath_ps_ops(struct ath_common *common)
+{
+ return common->ps_ops;
+}
+
+struct sk_buff *ath_rxbuf_alloc(struct ath_common *common,
+ u32 len,
+ gfp_t gfp_mask);
+bool ath_is_mybeacon(struct ath_common *common, struct ieee80211_hdr *hdr);
+
+void ath_hw_setbssidmask(struct ath_common *common);
+void ath_key_delete(struct ath_common *common, u8 hw_key_idx);
+int ath_key_config(struct ath_common *common,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key);
+bool ath_hw_keyreset(struct ath_common *common, u16 entry);
+bool ath_hw_keysetmac(struct ath_common *common, u16 entry, const u8 *mac);
+void ath_hw_cycle_counters_update(struct ath_common *common);
+int32_t ath_hw_get_listen_time(struct ath_common *common);
+
+__printf(3, 4)
+void ath_printk(const char *level, const struct ath_common *common,
+ const char *fmt, ...);
+
+#define ath_emerg(common, fmt, ...) \
+ ath_printk(KERN_EMERG, common, fmt, ##__VA_ARGS__)
+#define ath_alert(common, fmt, ...) \
+ ath_printk(KERN_ALERT, common, fmt, ##__VA_ARGS__)
+#define ath_crit(common, fmt, ...) \
+ ath_printk(KERN_CRIT, common, fmt, ##__VA_ARGS__)
+#define ath_err(common, fmt, ...) \
+ ath_printk(KERN_ERR, common, fmt, ##__VA_ARGS__)
+#define ath_warn(common, fmt, ...) \
+ ath_printk(KERN_WARNING, common, fmt, ##__VA_ARGS__)
+#define ath_notice(common, fmt, ...) \
+ ath_printk(KERN_NOTICE, common, fmt, ##__VA_ARGS__)
+#define ath_info(common, fmt, ...) \
+ ath_printk(KERN_INFO, common, fmt, ##__VA_ARGS__)
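+
+/*
+ * Example (illustrative): these wrappers take a format string just like
+ * printk(), e.g.:
+ *
+ *	ath_err(common, "calibration failed: %d\n", ret);
+ */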
+
+/**
+ * enum ath_debug_level - atheros wireless debug level
+ *
+ * @ATH_DBG_RESET: reset processing
+ * @ATH_DBG_QUEUE: hardware queue management
+ * @ATH_DBG_EEPROM: eeprom processing
+ * @ATH_DBG_CALIBRATE: periodic calibration
+ * @ATH_DBG_INTERRUPT: interrupt processing
+ * @ATH_DBG_REGULATORY: regulatory processing
+ * @ATH_DBG_ANI: adaptive noise immunity processing
+ * @ATH_DBG_XMIT: basic xmit operation
+ * @ATH_DBG_BEACON: beacon handling
+ * @ATH_DBG_CONFIG: configuration of the hardware
+ * @ATH_DBG_FATAL: fatal errors, this is the default, DBG_DEFAULT
+ * @ATH_DBG_PS: power save processing
+ * @ATH_DBG_BTCOEX: bluetooth coexistence
+ * @ATH_DBG_WMI: WMI message handling
+ * @ATH_DBG_BSTUCK: stuck beacons
+ * @ATH_DBG_MCI: Message Coexistence Interface, a private protocol
+ * used exclusively for WLAN-BT coexistence starting from
+ * AR9462.
+ * @ATH_DBG_DFS: radar detection
+ * @ATH_DBG_WOW: Wake on Wireless
+ * @ATH_DBG_CHAN_CTX: channel context handling
+ * @ATH_DBG_DYNACK: dynack handling
+ * @ATH_DBG_SPECTRAL_SCAN: FFT spectral scan
+ * @ATH_DBG_ANY: enable all debugging
+ *
+ * The debug level is used to control the amount and type of debugging output
+ * we want to see. Each driver has its own method for enabling debugging and
+ * modifying debug level states -- but this is typically done through a
+ * module parameter 'debug' along with a respective 'debug' debugfs file
+ * entry.
+ */
+enum ATH_DEBUG {
+ ATH_DBG_RESET = 0x00000001,
+ ATH_DBG_QUEUE = 0x00000002,
+ ATH_DBG_EEPROM = 0x00000004,
+ ATH_DBG_CALIBRATE = 0x00000008,
+ ATH_DBG_INTERRUPT = 0x00000010,
+ ATH_DBG_REGULATORY = 0x00000020,
+ ATH_DBG_ANI = 0x00000040,
+ ATH_DBG_XMIT = 0x00000080,
+ ATH_DBG_BEACON = 0x00000100,
+ ATH_DBG_CONFIG = 0x00000200,
+ ATH_DBG_FATAL = 0x00000400,
+ ATH_DBG_PS = 0x00000800,
+ ATH_DBG_BTCOEX = 0x00001000,
+ ATH_DBG_WMI = 0x00002000,
+ ATH_DBG_BSTUCK = 0x00004000,
+ ATH_DBG_MCI = 0x00008000,
+ ATH_DBG_DFS = 0x00010000,
+ ATH_DBG_WOW = 0x00020000,
+ ATH_DBG_CHAN_CTX = 0x00040000,
+ ATH_DBG_DYNACK = 0x00080000,
+ ATH_DBG_SPECTRAL_SCAN = 0x00100000,
+ ATH_DBG_ANY = 0xffffffff
+};
+
+#define ATH_DBG_DEFAULT (ATH_DBG_FATAL)
+#define ATH_DBG_MAX_LEN 512
+
+#ifdef CONFIG_ATH_DEBUG
+
+#define ath_dbg(common, dbg_mask, fmt, ...) \
+do { \
+ if ((common)->debug_mask & ATH_DBG_##dbg_mask) \
+ ath_printk(KERN_DEBUG, common, fmt, ##__VA_ARGS__); \
+} while (0)
+
+#define ATH_DBG_WARN(foo, arg...) WARN(foo, arg)
+#define ATH_DBG_WARN_ON_ONCE(foo) WARN_ON_ONCE(foo)
+
+#else
+
+static inline __attribute__ ((format (printf, 3, 4)))
+void _ath_dbg(struct ath_common *common, enum ATH_DEBUG dbg_mask,
+ const char *fmt, ...)
+{
+}
+#define ath_dbg(common, dbg_mask, fmt, ...) \
+ _ath_dbg(common, ATH_DBG_##dbg_mask, fmt, ##__VA_ARGS__)
+
+#define ATH_DBG_WARN(foo, arg...) do {} while (0)
+#define ATH_DBG_WARN_ON_ONCE(foo) ({ \
+ int __ret_warn_once = !!(foo); \
+ unlikely(__ret_warn_once); \
+})
+
+#endif /* CONFIG_ATH_DEBUG */
+
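+/*
+ * Example (illustrative): debug_mask is a bitwise OR of ATH_DBG_* values,
+ * usually taken from a driver's 'debug' module parameter:
+ *
+ *	common->debug_mask = ATH_DBG_FATAL | ATH_DBG_RESET;
+ *	ath_dbg(common, RESET, "chip reset, cause %d\n", cause);
+ */
+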
+/** Returns string describing opmode, or NULL if unknown mode. */
+#ifdef CONFIG_ATH_DEBUG
+const char *ath_opmode_to_string(enum nl80211_iftype opmode);
+#else
+static inline const char *ath_opmode_to_string(enum nl80211_iftype opmode)
+{
+ return "UNKNOWN";
+}
+#endif
+
+extern const char *ath_bus_type_strings[];
+static inline const char *ath_bus_type_to_string(enum ath_bus_type bustype)
+{
+ return ath_bus_type_strings[bustype];
+}
+
+#endif /* ATH_H */
diff --git a/drivers/net/wireless/ath/ath10k/Kconfig b/drivers/net/wireless/ath/ath10k/Kconfig
new file mode 100644
index 000000000000..876aed765833
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/Kconfig
@@ -0,0 +1,98 @@
+# SPDX-License-Identifier: ISC
+config ATH10K
+ tristate "Atheros 802.11ac wireless cards support"
+ depends on MAC80211 && HAS_DMA
+ select ATH_COMMON
+ select CRC32
+ select WANT_DEV_COREDUMP
+ select ATH10K_CE
+ help
+ This module adds support for wireless adapters based on the
+ Atheros IEEE 802.11ac family of chipsets.
+
+ If you choose to build a module, it'll be called ath10k.
+
+config ATH10K_CE
+ bool
+
+config ATH10K_PCI
+ tristate "Atheros ath10k PCI support"
+ depends on ATH10K && PCI
+ help
+ This module adds support for the PCIe bus.
+
+config ATH10K_AHB
+ bool "Atheros ath10k AHB support"
+ depends on ATH10K_PCI && OF && RESET_CONTROLLER
+ help
+ This module adds support for the AHB bus.
+
+config ATH10K_SDIO
+ tristate "Atheros ath10k SDIO support"
+ depends on ATH10K && MMC
+ help
+ This module adds support for the SDIO/MMC bus.
+
+config ATH10K_USB
+ tristate "Atheros ath10k USB support (EXPERIMENTAL)"
+ depends on ATH10K && USB
+ help
+ This module adds experimental support for the USB bus. It is
+ currently a work in progress and does not fully work yet.
+
+config ATH10K_SNOC
+ tristate "Qualcomm ath10k SNOC support"
+ depends on ATH10K
+ depends on ARCH_QCOM || COMPILE_TEST
+ depends on QCOM_SMEM
+ depends on QCOM_RPROC_COMMON || QCOM_RPROC_COMMON=n
+ select QCOM_SCM
+ select QCOM_QMI_HELPERS
+ help
+ This module adds support for the integrated WCN3990 chip connected
+ to the system NOC (SNOC).
+
+config ATH10K_DEBUG
+ bool "Atheros ath10k debugging"
+ depends on ATH10K
+ help
+ Enables debug support
+
+ If unsure, say Y to make it easier to debug problems.
+
+config ATH10K_DEBUGFS
+ bool "Atheros ath10k debugfs support"
+ depends on ATH10K && DEBUG_FS
+ help
+ Enables debugfs support
+
+ If unsure, say Y to make it easier to debug problems.
+
+config ATH10K_LEDS
+ bool
+ depends on ATH10K
+ depends on LEDS_CLASS=y || LEDS_CLASS=MAC80211
+ default y
+
+config ATH10K_SPECTRAL
+ bool "Atheros ath10k spectral scan support"
+ depends on ATH10K_DEBUGFS
+ select RELAY
+ default n
+ help
+ Say Y to enable access to the FFT/spectral data via debugfs.
+
+config ATH10K_TRACING
+ bool "Atheros ath10k tracing support"
+ depends on ATH10K
+ depends on EVENT_TRACING
+ help
+ Select this to make ath10k use the tracing infrastructure.
+
+config ATH10K_DFS_CERTIFIED
+ bool "Atheros DFS support for certified platforms"
+ depends on ATH10K && CFG80211_CERTIFICATION_ONUS
+ default n
+ help
+ This option enables DFS support for initiating radiation on
+ ath10k.
diff --git a/drivers/net/wireless/ath/ath10k/Makefile b/drivers/net/wireless/ath/ath10k/Makefile
new file mode 100644
index 000000000000..02bf9b629038
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/Makefile
@@ -0,0 +1,45 @@
+# SPDX-License-Identifier: ISC
+obj-$(CONFIG_ATH10K) += ath10k_core.o
+ath10k_core-y += mac.o \
+ debug.o \
+ core.o \
+ htc.o \
+ htt.o \
+ htt_rx.o \
+ htt_tx.o \
+ txrx.o \
+ wmi.o \
+ wmi-tlv.o \
+ bmi.o \
+ hw.o \
+ p2p.o \
+ swap.o
+
+ath10k_core-$(CONFIG_ATH10K_SPECTRAL) += spectral.o
+ath10k_core-$(CONFIG_NL80211_TESTMODE) += testmode.o
+ath10k_core-$(CONFIG_ATH10K_TRACING) += trace.o
+ath10k_core-$(CONFIG_THERMAL) += thermal.o
+ath10k_core-$(CONFIG_ATH10K_LEDS) += leds.o
+ath10k_core-$(CONFIG_MAC80211_DEBUGFS) += debugfs_sta.o
+ath10k_core-$(CONFIG_PM) += wow.o
+ath10k_core-$(CONFIG_DEV_COREDUMP) += coredump.o
+ath10k_core-$(CONFIG_ATH10K_CE) += ce.o
+
+obj-$(CONFIG_ATH10K_PCI) += ath10k_pci.o
+ath10k_pci-y += pci.o
+
+ath10k_pci-$(CONFIG_ATH10K_AHB) += ahb.o
+
+obj-$(CONFIG_ATH10K_SDIO) += ath10k_sdio.o
+ath10k_sdio-y += sdio.o
+
+obj-$(CONFIG_ATH10K_USB) += ath10k_usb.o
+ath10k_usb-y += usb.o
+
+obj-$(CONFIG_ATH10K_SNOC) += ath10k_snoc.o
+ath10k_snoc-y += qmi.o \
+ qmi_wlfw_v01.o \
+ snoc.o
+
+# for tracing framework to find trace.h
+CFLAGS_trace.o := -I$(src)
diff --git a/drivers/net/wireless/ath/ath10k/ahb.c b/drivers/net/wireless/ath/ath10k/ahb.c
new file mode 100644
index 000000000000..eb8b35b6224d
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/ahb.c
@@ -0,0 +1,862 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (c) 2016-2017 Qualcomm Atheros, Inc. All rights reserved.
+ * Copyright (c) 2015 The Linux Foundation. All rights reserved.
+ */
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/reset.h>
+#include "core.h"
+#include "debug.h"
+#include "pci.h"
+#include "ahb.h"
+
+static const struct of_device_id ath10k_ahb_of_match[] = {
+ { .compatible = "qcom,ipq4019-wifi",
+ .data = (void *)ATH10K_HW_QCA4019
+ },
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, ath10k_ahb_of_match);
+
+#define QCA4019_SRAM_ADDR 0x000C0000
+#define QCA4019_SRAM_LEN 0x00040000 /* 256 KiB */
+
+static inline struct ath10k_ahb *ath10k_ahb_priv(struct ath10k *ar)
+{
+ return &ath10k_pci_priv(ar)->ahb[0];
+}
+
+static void ath10k_ahb_write32(struct ath10k *ar, u32 offset, u32 value)
+{
+ struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
+
+ iowrite32(value, ar_ahb->mem + offset);
+}
+
+static u32 ath10k_ahb_read32(struct ath10k *ar, u32 offset)
+{
+ struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
+
+ return ioread32(ar_ahb->mem + offset);
+}
+
+static u32 ath10k_ahb_gcc_read32(struct ath10k *ar, u32 offset)
+{
+ struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
+
+ return ioread32(ar_ahb->gcc_mem + offset);
+}
+
+static void ath10k_ahb_tcsr_write32(struct ath10k *ar, u32 offset, u32 value)
+{
+ struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
+
+ iowrite32(value, ar_ahb->tcsr_mem + offset);
+}
+
+static u32 ath10k_ahb_tcsr_read32(struct ath10k *ar, u32 offset)
+{
+ struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
+
+ return ioread32(ar_ahb->tcsr_mem + offset);
+}
+
+static u32 ath10k_ahb_soc_read32(struct ath10k *ar, u32 addr)
+{
+ return ath10k_ahb_read32(ar, RTC_SOC_BASE_ADDRESS + addr);
+}
+
+static int ath10k_ahb_get_num_banks(struct ath10k *ar)
+{
+ if (ar->hw_rev == ATH10K_HW_QCA4019)
+ return 1;
+
+ ath10k_warn(ar, "unknown number of banks, assuming 1\n");
+ return 1;
+}
+
+static int ath10k_ahb_clock_init(struct ath10k *ar)
+{
+ struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
+ struct device *dev;
+
+ dev = &ar_ahb->pdev->dev;
+
+ ar_ahb->cmd_clk = devm_clk_get(dev, "wifi_wcss_cmd");
+ if (IS_ERR_OR_NULL(ar_ahb->cmd_clk)) {
+ ath10k_err(ar, "failed to get cmd clk: %ld\n",
+ PTR_ERR(ar_ahb->cmd_clk));
+ return ar_ahb->cmd_clk ? PTR_ERR(ar_ahb->cmd_clk) : -ENODEV;
+ }
+
+ ar_ahb->ref_clk = devm_clk_get(dev, "wifi_wcss_ref");
+ if (IS_ERR_OR_NULL(ar_ahb->ref_clk)) {
+ ath10k_err(ar, "failed to get ref clk: %ld\n",
+ PTR_ERR(ar_ahb->ref_clk));
+ return ar_ahb->ref_clk ? PTR_ERR(ar_ahb->ref_clk) : -ENODEV;
+ }
+
+ ar_ahb->rtc_clk = devm_clk_get(dev, "wifi_wcss_rtc");
+ if (IS_ERR_OR_NULL(ar_ahb->rtc_clk)) {
+ ath10k_err(ar, "failed to get rtc clk: %ld\n",
+ PTR_ERR(ar_ahb->rtc_clk));
+ return ar_ahb->rtc_clk ? PTR_ERR(ar_ahb->rtc_clk) : -ENODEV;
+ }
+
+ return 0;
+}
+
+static void ath10k_ahb_clock_deinit(struct ath10k *ar)
+{
+ struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
+
+ ar_ahb->cmd_clk = NULL;
+ ar_ahb->ref_clk = NULL;
+ ar_ahb->rtc_clk = NULL;
+}
+
+static int ath10k_ahb_clock_enable(struct ath10k *ar)
+{
+ struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
+ int ret;
+
+ if (IS_ERR_OR_NULL(ar_ahb->cmd_clk) ||
+ IS_ERR_OR_NULL(ar_ahb->ref_clk) ||
+ IS_ERR_OR_NULL(ar_ahb->rtc_clk)) {
+ ath10k_err(ar, "clock(s) is/are not initialized\n");
+ ret = -EIO;
+ goto out;
+ }
+
+ ret = clk_prepare_enable(ar_ahb->cmd_clk);
+ if (ret) {
+ ath10k_err(ar, "failed to enable cmd clk: %d\n", ret);
+ goto out;
+ }
+
+ ret = clk_prepare_enable(ar_ahb->ref_clk);
+ if (ret) {
+ ath10k_err(ar, "failed to enable ref clk: %d\n", ret);
+ goto err_cmd_clk_disable;
+ }
+
+ ret = clk_prepare_enable(ar_ahb->rtc_clk);
+ if (ret) {
+ ath10k_err(ar, "failed to enable rtc clk: %d\n", ret);
+ goto err_ref_clk_disable;
+ }
+
+ return 0;
+
+err_ref_clk_disable:
+ clk_disable_unprepare(ar_ahb->ref_clk);
+
+err_cmd_clk_disable:
+ clk_disable_unprepare(ar_ahb->cmd_clk);
+
+out:
+ return ret;
+}
+
+static void ath10k_ahb_clock_disable(struct ath10k *ar)
+{
+ struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
+
+ clk_disable_unprepare(ar_ahb->cmd_clk);
+
+ clk_disable_unprepare(ar_ahb->ref_clk);
+
+ clk_disable_unprepare(ar_ahb->rtc_clk);
+}
+
+static int ath10k_ahb_rst_ctrl_init(struct ath10k *ar)
+{
+ struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
+ struct device *dev;
+
+ dev = &ar_ahb->pdev->dev;
+
+ ar_ahb->core_cold_rst = devm_reset_control_get_exclusive(dev,
+ "wifi_core_cold");
+ if (IS_ERR(ar_ahb->core_cold_rst)) {
+ ath10k_err(ar, "failed to get core cold rst ctrl: %ld\n",
+ PTR_ERR(ar_ahb->core_cold_rst));
+ return PTR_ERR(ar_ahb->core_cold_rst);
+ }
+
+ ar_ahb->radio_cold_rst = devm_reset_control_get_exclusive(dev,
+ "wifi_radio_cold");
+ if (IS_ERR(ar_ahb->radio_cold_rst)) {
+ ath10k_err(ar, "failed to get radio cold rst ctrl: %ld\n",
+ PTR_ERR(ar_ahb->radio_cold_rst));
+ return PTR_ERR(ar_ahb->radio_cold_rst);
+ }
+
+ ar_ahb->radio_warm_rst = devm_reset_control_get_exclusive(dev,
+ "wifi_radio_warm");
+ if (IS_ERR(ar_ahb->radio_warm_rst)) {
+ ath10k_err(ar, "failed to get radio warm rst ctrl: %ld\n",
+ PTR_ERR(ar_ahb->radio_warm_rst));
+ return PTR_ERR(ar_ahb->radio_warm_rst);
+ }
+
+ ar_ahb->radio_srif_rst = devm_reset_control_get_exclusive(dev,
+ "wifi_radio_srif");
+ if (IS_ERR(ar_ahb->radio_srif_rst)) {
+ ath10k_err(ar, "failed to get radio srif rst ctrl: %ld\n",
+ PTR_ERR(ar_ahb->radio_srif_rst));
+ return PTR_ERR(ar_ahb->radio_srif_rst);
+ }
+
+ ar_ahb->cpu_init_rst = devm_reset_control_get_exclusive(dev,
+ "wifi_cpu_init");
+ if (IS_ERR(ar_ahb->cpu_init_rst)) {
+ ath10k_err(ar, "failed to get cpu init rst ctrl: %ld\n",
+ PTR_ERR(ar_ahb->cpu_init_rst));
+ return PTR_ERR(ar_ahb->cpu_init_rst);
+ }
+
+ return 0;
+}
+
+static void ath10k_ahb_rst_ctrl_deinit(struct ath10k *ar)
+{
+ struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
+
+ ar_ahb->core_cold_rst = NULL;
+ ar_ahb->radio_cold_rst = NULL;
+ ar_ahb->radio_warm_rst = NULL;
+ ar_ahb->radio_srif_rst = NULL;
+ ar_ahb->cpu_init_rst = NULL;
+}
+
+static int ath10k_ahb_release_reset(struct ath10k *ar)
+{
+ struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
+ int ret;
+
+ if (IS_ERR_OR_NULL(ar_ahb->radio_cold_rst) ||
+ IS_ERR_OR_NULL(ar_ahb->radio_warm_rst) ||
+ IS_ERR_OR_NULL(ar_ahb->radio_srif_rst) ||
+ IS_ERR_OR_NULL(ar_ahb->cpu_init_rst)) {
+ ath10k_err(ar, "rst ctrl(s) is/are not initialized\n");
+ return -EINVAL;
+ }
+
+ ret = reset_control_deassert(ar_ahb->radio_cold_rst);
+ if (ret) {
+ ath10k_err(ar, "failed to deassert radio cold rst: %d\n", ret);
+ return ret;
+ }
+
+ ret = reset_control_deassert(ar_ahb->radio_warm_rst);
+ if (ret) {
+ ath10k_err(ar, "failed to deassert radio warm rst: %d\n", ret);
+ return ret;
+ }
+
+ ret = reset_control_deassert(ar_ahb->radio_srif_rst);
+ if (ret) {
+ ath10k_err(ar, "failed to deassert radio srif rst: %d\n", ret);
+ return ret;
+ }
+
+ ret = reset_control_deassert(ar_ahb->cpu_init_rst);
+ if (ret) {
+ ath10k_err(ar, "failed to deassert cpu init rst: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void ath10k_ahb_halt_axi_bus(struct ath10k *ar, u32 haltreq_reg,
+ u32 haltack_reg)
+{
+ unsigned long timeout;
+ u32 val;
+
+ /* Issue halt axi bus request */
+ val = ath10k_ahb_tcsr_read32(ar, haltreq_reg);
+ val |= AHB_AXI_BUS_HALT_REQ;
+ ath10k_ahb_tcsr_write32(ar, haltreq_reg, val);
+
+ /* Wait for axi bus halted ack */
+ timeout = jiffies + msecs_to_jiffies(ATH10K_AHB_AXI_BUS_HALT_TIMEOUT);
+ do {
+ val = ath10k_ahb_tcsr_read32(ar, haltack_reg);
+ if (val & AHB_AXI_BUS_HALT_ACK)
+ break;
+
+ mdelay(1);
+ } while (time_before(jiffies, timeout));
+
+ if (!(val & AHB_AXI_BUS_HALT_ACK)) {
+ ath10k_err(ar, "failed to halt axi bus: %d\n", val);
+ return;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_AHB, "axi bus halted\n");
+}
+
+static void ath10k_ahb_halt_chip(struct ath10k *ar)
+{
+ struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
+ u32 core_id, glb_cfg_reg, haltreq_reg, haltack_reg;
+ u32 val;
+ int ret;
+
+ if (IS_ERR_OR_NULL(ar_ahb->core_cold_rst) ||
+ IS_ERR_OR_NULL(ar_ahb->radio_cold_rst) ||
+ IS_ERR_OR_NULL(ar_ahb->radio_warm_rst) ||
+ IS_ERR_OR_NULL(ar_ahb->radio_srif_rst) ||
+ IS_ERR_OR_NULL(ar_ahb->cpu_init_rst)) {
+ ath10k_err(ar, "rst ctrl(s) is/are not initialized\n");
+ return;
+ }
+
+ core_id = ath10k_ahb_read32(ar, ATH10K_AHB_WLAN_CORE_ID_REG);
+
+ switch (core_id) {
+ case 0:
+ glb_cfg_reg = ATH10K_AHB_TCSR_WIFI0_GLB_CFG;
+ haltreq_reg = ATH10K_AHB_TCSR_WCSS0_HALTREQ;
+ haltack_reg = ATH10K_AHB_TCSR_WCSS0_HALTACK;
+ break;
+ case 1:
+ glb_cfg_reg = ATH10K_AHB_TCSR_WIFI1_GLB_CFG;
+ haltreq_reg = ATH10K_AHB_TCSR_WCSS1_HALTREQ;
+ haltack_reg = ATH10K_AHB_TCSR_WCSS1_HALTACK;
+ break;
+ default:
+ ath10k_err(ar, "invalid core id %d found, skipping reset sequence\n",
+ core_id);
+ return;
+ }
+
+ ath10k_ahb_halt_axi_bus(ar, haltreq_reg, haltack_reg);
+
+ val = ath10k_ahb_tcsr_read32(ar, glb_cfg_reg);
+ val |= TCSR_WIFIX_GLB_CFG_DISABLE_CORE_CLK;
+ ath10k_ahb_tcsr_write32(ar, glb_cfg_reg, val);
+
+ ret = reset_control_assert(ar_ahb->core_cold_rst);
+ if (ret)
+ ath10k_err(ar, "failed to assert core cold rst: %d\n", ret);
+ msleep(1);
+
+ ret = reset_control_assert(ar_ahb->radio_cold_rst);
+ if (ret)
+ ath10k_err(ar, "failed to assert radio cold rst: %d\n", ret);
+ msleep(1);
+
+ ret = reset_control_assert(ar_ahb->radio_warm_rst);
+ if (ret)
+ ath10k_err(ar, "failed to assert radio warm rst: %d\n", ret);
+ msleep(1);
+
+ ret = reset_control_assert(ar_ahb->radio_srif_rst);
+ if (ret)
+ ath10k_err(ar, "failed to assert radio srif rst: %d\n", ret);
+ msleep(1);
+
+ ret = reset_control_assert(ar_ahb->cpu_init_rst);
+ if (ret)
+ ath10k_err(ar, "failed to assert cpu init rst: %d\n", ret);
+ msleep(10);
+
+ /* Clear halt req and core clock disable req before
+ * deasserting wifi core reset.
+ */
+ val = ath10k_ahb_tcsr_read32(ar, haltreq_reg);
+ val &= ~AHB_AXI_BUS_HALT_REQ;
+ ath10k_ahb_tcsr_write32(ar, haltreq_reg, val);
+
+ val = ath10k_ahb_tcsr_read32(ar, glb_cfg_reg);
+ val &= ~TCSR_WIFIX_GLB_CFG_DISABLE_CORE_CLK;
+ ath10k_ahb_tcsr_write32(ar, glb_cfg_reg, val);
+
+ ret = reset_control_deassert(ar_ahb->core_cold_rst);
+ if (ret)
+ ath10k_err(ar, "failed to deassert core cold rst: %d\n", ret);
+
+ ath10k_dbg(ar, ATH10K_DBG_AHB, "core %d reset done\n", core_id);
+}
+
+static irqreturn_t ath10k_ahb_interrupt_handler(int irq, void *arg)
+{
+ struct ath10k *ar = arg;
+
+ if (!ath10k_pci_irq_pending(ar))
+ return IRQ_NONE;
+
+ ath10k_pci_disable_and_clear_intx_irq(ar);
+ ath10k_pci_irq_msi_fw_mask(ar);
+ napi_schedule(&ar->napi);
+
+ return IRQ_HANDLED;
+}
+
+static int ath10k_ahb_request_irq_intx(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
+ int ret;
+
+ ret = request_irq(ar_ahb->irq,
+ ath10k_ahb_interrupt_handler,
+ IRQF_SHARED, "ath10k_ahb", ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to request legacy irq %d: %d\n",
+ ar_ahb->irq, ret);
+ return ret;
+ }
+ ar_pci->oper_irq_mode = ATH10K_PCI_IRQ_INTX;
+
+ return 0;
+}
+
+static void ath10k_ahb_release_irq_intx(struct ath10k *ar)
+{
+ struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
+
+ free_irq(ar_ahb->irq, ar);
+}
+
+static void ath10k_ahb_irq_disable(struct ath10k *ar)
+{
+ ath10k_ce_disable_interrupts(ar);
+ ath10k_pci_disable_and_clear_intx_irq(ar);
+}
+
+static int ath10k_ahb_resource_init(struct ath10k *ar)
+{
+ struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
+ struct platform_device *pdev;
+ struct resource *res;
+ int ret;
+
+ pdev = ar_ahb->pdev;
+
+ ar_ahb->mem = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(ar_ahb->mem)) {
+ ath10k_err(ar, "mem ioremap error\n");
+ ret = PTR_ERR(ar_ahb->mem);
+ goto out;
+ }
+
+ ar_ahb->mem_len = resource_size(res);
+
+ ar_ahb->gcc_mem = ioremap(ATH10K_GCC_REG_BASE,
+ ATH10K_GCC_REG_SIZE);
+ if (!ar_ahb->gcc_mem) {
+ ath10k_err(ar, "gcc mem ioremap error\n");
+ ret = -ENOMEM;
+ goto err_mem_unmap;
+ }
+
+ ar_ahb->tcsr_mem = ioremap(ATH10K_TCSR_REG_BASE,
+ ATH10K_TCSR_REG_SIZE);
+ if (!ar_ahb->tcsr_mem) {
+ ath10k_err(ar, "tcsr mem ioremap error\n");
+ ret = -ENOMEM;
+ goto err_gcc_mem_unmap;
+ }
+
+ ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret) {
+ ath10k_err(ar, "failed to set 32-bit dma mask: %d\n", ret);
+ goto err_tcsr_mem_unmap;
+ }
+
+ ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret) {
+ ath10k_err(ar, "failed to set 32-bit consistent dma: %d\n",
+ ret);
+ goto err_tcsr_mem_unmap;
+ }
+
+ ret = ath10k_ahb_clock_init(ar);
+ if (ret)
+ goto err_tcsr_mem_unmap;
+
+ ret = ath10k_ahb_rst_ctrl_init(ar);
+ if (ret)
+ goto err_clock_deinit;
+
+ ar_ahb->irq = platform_get_irq_byname(pdev, "legacy");
+ if (ar_ahb->irq < 0) {
+ ath10k_err(ar, "failed to get irq number: %d\n", ar_ahb->irq);
+ ret = ar_ahb->irq;
+ goto err_clock_deinit;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "irq: %d\n", ar_ahb->irq);
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "mem: 0x%p mem_len: %lu gcc mem: 0x%p tcsr_mem: 0x%p\n",
+ ar_ahb->mem, ar_ahb->mem_len,
+ ar_ahb->gcc_mem, ar_ahb->tcsr_mem);
+ return 0;
+
+err_clock_deinit:
+ ath10k_ahb_clock_deinit(ar);
+
+err_tcsr_mem_unmap:
+ iounmap(ar_ahb->tcsr_mem);
+
+err_gcc_mem_unmap:
+ ar_ahb->tcsr_mem = NULL;
+ iounmap(ar_ahb->gcc_mem);
+
+err_mem_unmap:
+ ar_ahb->gcc_mem = NULL;
+ devm_iounmap(&pdev->dev, ar_ahb->mem);
+
+out:
+ ar_ahb->mem = NULL;
+ return ret;
+}
+
+static void ath10k_ahb_resource_deinit(struct ath10k *ar)
+{
+ struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
+ struct device *dev;
+
+ dev = &ar_ahb->pdev->dev;
+
+ if (ar_ahb->mem)
+ devm_iounmap(dev, ar_ahb->mem);
+
+ if (ar_ahb->gcc_mem)
+ iounmap(ar_ahb->gcc_mem);
+
+ if (ar_ahb->tcsr_mem)
+ iounmap(ar_ahb->tcsr_mem);
+
+ ar_ahb->mem = NULL;
+ ar_ahb->gcc_mem = NULL;
+ ar_ahb->tcsr_mem = NULL;
+
+ ath10k_ahb_clock_deinit(ar);
+ ath10k_ahb_rst_ctrl_deinit(ar);
+}
+
+static int ath10k_ahb_prepare_device(struct ath10k *ar)
+{
+ u32 val;
+ int ret;
+
+ ret = ath10k_ahb_clock_enable(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to enable clocks\n");
+ return ret;
+ }
+
+ /* The target's clock is supplied from outside the target (i.e. by an
+ * external clock module controlled by the host). The target needs to
+ * know what frequency its CPU is configured to run at, for its own
+ * internal use. Read the target CPU frequency info from the gcc
+ * register and write it into the target's scratch register, where
+ * the target expects this information.
+ */
+ val = ath10k_ahb_gcc_read32(ar, ATH10K_AHB_GCC_FEPLL_PLL_DIV);
+ ath10k_ahb_write32(ar, ATH10K_AHB_WIFI_SCRATCH_5_REG, val);
+
+ ret = ath10k_ahb_release_reset(ar);
+ if (ret)
+ goto err_clk_disable;
+
+ ath10k_ahb_irq_disable(ar);
+
+ ath10k_ahb_write32(ar, FW_INDICATOR_ADDRESS, FW_IND_HOST_READY);
+
+ ret = ath10k_pci_wait_for_target_init(ar);
+ if (ret)
+ goto err_halt_chip;
+
+ return 0;
+
+err_halt_chip:
+ ath10k_ahb_halt_chip(ar);
+
+err_clk_disable:
+ ath10k_ahb_clock_disable(ar);
+
+ return ret;
+}
+
+static int ath10k_ahb_chip_reset(struct ath10k *ar)
+{
+ int ret;
+
+ ath10k_ahb_halt_chip(ar);
+ ath10k_ahb_clock_disable(ar);
+
+ ret = ath10k_ahb_prepare_device(ar);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int ath10k_ahb_wake_target_cpu(struct ath10k *ar)
+{
+ u32 addr, val;
+
+ addr = SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS;
+ val = ath10k_ahb_read32(ar, addr);
+ val |= ATH10K_AHB_CORE_CTRL_CPU_INTR_MASK;
+ ath10k_ahb_write32(ar, addr, val);
+
+ return 0;
+}
+
+static int ath10k_ahb_hif_start(struct ath10k *ar)
+{
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot ahb hif start\n");
+
+ ath10k_core_napi_enable(ar);
+ ath10k_ce_enable_interrupts(ar);
+ ath10k_pci_enable_intx_irq(ar);
+
+ ath10k_pci_rx_post(ar);
+
+ return 0;
+}
+
+static void ath10k_ahb_hif_stop(struct ath10k *ar)
+{
+ struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot ahb hif stop\n");
+
+ ath10k_ahb_irq_disable(ar);
+ synchronize_irq(ar_ahb->irq);
+
+ ath10k_core_napi_sync_disable(ar);
+
+ ath10k_pci_flush(ar);
+}
+
+static int ath10k_ahb_hif_power_up(struct ath10k *ar,
+ enum ath10k_firmware_mode fw_mode)
+{
+ int ret;
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot ahb hif power up\n");
+
+ ret = ath10k_ahb_chip_reset(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to reset chip: %d\n", ret);
+ goto out;
+ }
+
+ ret = ath10k_pci_init_pipes(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to initialize CE: %d\n", ret);
+ goto out;
+ }
+
+ ret = ath10k_pci_init_config(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to setup init config: %d\n", ret);
+ goto err_ce_deinit;
+ }
+
+ ret = ath10k_ahb_wake_target_cpu(ar);
+ if (ret) {
+ ath10k_err(ar, "could not wake up target CPU: %d\n", ret);
+ goto err_ce_deinit;
+ }
+
+ return 0;
+
+err_ce_deinit:
+ ath10k_pci_ce_deinit(ar);
+out:
+ return ret;
+}
+
+static u32 ath10k_ahb_qca4019_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
+{
+ u32 val = 0, region = addr & 0xfffff;
+
+ val = ath10k_pci_read32(ar, PCIE_BAR_REG_ADDRESS);
+
+ if (region >= QCA4019_SRAM_ADDR && region <=
+ (QCA4019_SRAM_ADDR + QCA4019_SRAM_LEN)) {
+ /* SRAM contents for QCA4019 can be directly accessed and
+ * no conversions are required
+ */
+ val |= region;
+ } else {
+ val |= 0x100000 | region;
+ }
+
+ return val;
+}
+
+static const struct ath10k_hif_ops ath10k_ahb_hif_ops = {
+ .tx_sg = ath10k_pci_hif_tx_sg,
+ .diag_read = ath10k_pci_hif_diag_read,
+ .diag_write = ath10k_pci_diag_write_mem,
+ .exchange_bmi_msg = ath10k_pci_hif_exchange_bmi_msg,
+ .start = ath10k_ahb_hif_start,
+ .stop = ath10k_ahb_hif_stop,
+ .map_service_to_pipe = ath10k_pci_hif_map_service_to_pipe,
+ .get_default_pipe = ath10k_pci_hif_get_default_pipe,
+ .send_complete_check = ath10k_pci_hif_send_complete_check,
+ .get_free_queue_number = ath10k_pci_hif_get_free_queue_number,
+ .power_up = ath10k_ahb_hif_power_up,
+ .power_down = ath10k_pci_hif_power_down,
+ .read32 = ath10k_ahb_read32,
+ .write32 = ath10k_ahb_write32,
+};
+
+static const struct ath10k_bus_ops ath10k_ahb_bus_ops = {
+ .read32 = ath10k_ahb_read32,
+ .write32 = ath10k_ahb_write32,
+ .get_num_banks = ath10k_ahb_get_num_banks,
+};
+
+static int ath10k_ahb_probe(struct platform_device *pdev)
+{
+ struct ath10k *ar;
+ struct ath10k_ahb *ar_ahb;
+ struct ath10k_pci *ar_pci;
+ enum ath10k_hw_rev hw_rev;
+ size_t size;
+ int ret;
+ struct ath10k_bus_params bus_params = {};
+
+ hw_rev = (uintptr_t)of_device_get_match_data(&pdev->dev);
+ if (!hw_rev) {
+ dev_err(&pdev->dev, "OF data missing\n");
+ return -EINVAL;
+ }
+
+ size = sizeof(*ar_pci) + sizeof(*ar_ahb);
+ ar = ath10k_core_create(size, &pdev->dev, ATH10K_BUS_AHB,
+ hw_rev, &ath10k_ahb_hif_ops);
+ if (!ar) {
+ dev_err(&pdev->dev, "failed to allocate core\n");
+ return -ENOMEM;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "ahb probe\n");
+
+ ar_pci = ath10k_pci_priv(ar);
+ ar_ahb = ath10k_ahb_priv(ar);
+
+ ar_ahb->pdev = pdev;
+ platform_set_drvdata(pdev, ar);
+
+ ret = ath10k_ahb_resource_init(ar);
+ if (ret)
+ goto err_core_destroy;
+
+ ar->dev_id = 0;
+ ar_pci->mem = ar_ahb->mem;
+ ar_pci->mem_len = ar_ahb->mem_len;
+ ar_pci->ar = ar;
+ ar_pci->ce.bus_ops = &ath10k_ahb_bus_ops;
+ ar_pci->targ_cpu_to_ce_addr = ath10k_ahb_qca4019_targ_cpu_to_ce_addr;
+ ar->ce_priv = &ar_pci->ce;
+
+ ret = ath10k_pci_setup_resource(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to setup resource: %d\n", ret);
+ goto err_resource_deinit;
+ }
+
+ ath10k_pci_init_napi(ar);
+
+ ret = ath10k_ahb_request_irq_intx(ar);
+ if (ret)
+ goto err_free_pipes;
+
+ ret = ath10k_ahb_prepare_device(ar);
+ if (ret)
+ goto err_free_irq;
+
+ ath10k_pci_ce_deinit(ar);
+
+ bus_params.dev_type = ATH10K_DEV_TYPE_LL;
+ bus_params.chip_id = ath10k_ahb_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
+ if (bus_params.chip_id == 0xffffffff) {
+ ath10k_err(ar, "failed to get chip id\n");
+ ret = -ENODEV;
+ goto err_halt_device;
+ }
+
+ ret = ath10k_core_register(ar, &bus_params);
+ if (ret) {
+ ath10k_err(ar, "failed to register driver core: %d\n", ret);
+ goto err_halt_device;
+ }
+
+ return 0;
+
+err_halt_device:
+ ath10k_ahb_halt_chip(ar);
+ ath10k_ahb_clock_disable(ar);
+
+err_free_irq:
+ ath10k_ahb_release_irq_intx(ar);
+
+err_free_pipes:
+ ath10k_pci_release_resource(ar);
+
+err_resource_deinit:
+ ath10k_ahb_resource_deinit(ar);
+
+err_core_destroy:
+ ath10k_core_destroy(ar);
+
+ return ret;
+}
+
+static void ath10k_ahb_remove(struct platform_device *pdev)
+{
+ struct ath10k *ar = platform_get_drvdata(pdev);
+
+ ath10k_dbg(ar, ATH10K_DBG_AHB, "ahb remove\n");
+
+ ath10k_core_unregister(ar);
+ ath10k_ahb_irq_disable(ar);
+ ath10k_ahb_release_irq_intx(ar);
+ ath10k_pci_release_resource(ar);
+ ath10k_ahb_halt_chip(ar);
+ ath10k_ahb_clock_disable(ar);
+ ath10k_ahb_resource_deinit(ar);
+ ath10k_core_destroy(ar);
+}
+
+static struct platform_driver ath10k_ahb_driver = {
+ .driver = {
+ .name = "ath10k_ahb",
+ .of_match_table = ath10k_ahb_of_match,
+ },
+ .probe = ath10k_ahb_probe,
+ .remove = ath10k_ahb_remove,
+};
+
+int ath10k_ahb_init(void)
+{
+ int ret;
+
+ ret = platform_driver_register(&ath10k_ahb_driver);
+ if (ret)
+ printk(KERN_ERR "failed to register ath10k ahb driver: %d\n",
+ ret);
+ return ret;
+}
+
+void ath10k_ahb_exit(void)
+{
+ platform_driver_unregister(&ath10k_ahb_driver);
+}
diff --git a/drivers/net/wireless/ath/ath10k/ahb.h b/drivers/net/wireless/ath/ath10k/ahb.h
new file mode 100644
index 000000000000..cee11a3ae2a5
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/ahb.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: ISC */
+/*
+ * Copyright (c) 2016 Qualcomm Atheros, Inc. All rights reserved.
+ * Copyright (c) 2015 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _AHB_H_
+#define _AHB_H_
+
+#include <linux/platform_device.h>
+
+struct ath10k_ahb {
+ struct platform_device *pdev;
+ void __iomem *mem;
+ unsigned long mem_len;
+ void __iomem *gcc_mem;
+ void __iomem *tcsr_mem;
+
+ int irq;
+
+ struct clk *cmd_clk;
+ struct clk *ref_clk;
+ struct clk *rtc_clk;
+
+ struct reset_control *core_cold_rst;
+ struct reset_control *radio_cold_rst;
+ struct reset_control *radio_warm_rst;
+ struct reset_control *radio_srif_rst;
+ struct reset_control *cpu_init_rst;
+};
+
+#ifdef CONFIG_ATH10K_AHB
+
+#define ATH10K_GCC_REG_BASE 0x1800000
+#define ATH10K_GCC_REG_SIZE 0x60000
+
+#define ATH10K_TCSR_REG_BASE 0x1900000
+#define ATH10K_TCSR_REG_SIZE 0x80000
+
+#define ATH10K_AHB_GCC_FEPLL_PLL_DIV 0x2f020
+#define ATH10K_AHB_WIFI_SCRATCH_5_REG 0x4f014
+
+#define ATH10K_AHB_WLAN_CORE_ID_REG 0x82030
+
+#define ATH10K_AHB_TCSR_WIFI0_GLB_CFG 0x49000
+#define ATH10K_AHB_TCSR_WIFI1_GLB_CFG 0x49004
+#define TCSR_WIFIX_GLB_CFG_DISABLE_CORE_CLK BIT(25)
+
+#define ATH10K_AHB_TCSR_WCSS0_HALTREQ 0x52000
+#define ATH10K_AHB_TCSR_WCSS1_HALTREQ 0x52010
+#define ATH10K_AHB_TCSR_WCSS0_HALTACK 0x52004
+#define ATH10K_AHB_TCSR_WCSS1_HALTACK 0x52014
+
+#define ATH10K_AHB_AXI_BUS_HALT_TIMEOUT 10 /* msec */
+#define AHB_AXI_BUS_HALT_REQ 1
+#define AHB_AXI_BUS_HALT_ACK 1
+
+#define ATH10K_AHB_CORE_CTRL_CPU_INTR_MASK 1
+
+int ath10k_ahb_init(void);
+void ath10k_ahb_exit(void);
+
+#else /* CONFIG_ATH10K_AHB */
+
+static inline int ath10k_ahb_init(void)
+{
+ return 0;
+}
+
+static inline void ath10k_ahb_exit(void)
+{
+}
+
+#endif /* CONFIG_ATH10K_AHB */
+
+#endif /* _AHB_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/bmi.c b/drivers/net/wireless/ath/ath10k/bmi.c
new file mode 100644
index 000000000000..52118867ecde
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/bmi.c
@@ -0,0 +1,521 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2014,2016-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#include <linux/export.h>
+#include "bmi.h"
+#include "hif.h"
+#include "debug.h"
+#include "htc.h"
+#include "hw.h"
+
+void ath10k_bmi_start(struct ath10k *ar)
+{
+ ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi start\n");
+
+ ar->bmi.done_sent = false;
+}
+EXPORT_SYMBOL(ath10k_bmi_start);
+
+int ath10k_bmi_done(struct ath10k *ar)
+{
+ struct bmi_cmd cmd;
+ u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.done);
+ int ret;
+
+ ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi done\n");
+
+ if (ar->bmi.done_sent) {
+ ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi skipped\n");
+ return 0;
+ }
+
+ ar->bmi.done_sent = true;
+ cmd.id = __cpu_to_le32(BMI_DONE);
+
+ ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL);
+ if (ret) {
+ ath10k_warn(ar, "unable to write to the device: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+int ath10k_bmi_get_target_info(struct ath10k *ar,
+ struct bmi_target_info *target_info)
+{
+ struct bmi_cmd cmd;
+ union bmi_resp resp;
+ u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.get_target_info);
+ u32 resplen = sizeof(resp.get_target_info);
+ int ret;
+
+ ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi get target info\n");
+
+ if (ar->bmi.done_sent) {
+ ath10k_warn(ar, "BMI Get Target Info Command disallowed\n");
+ return -EBUSY;
+ }
+
+ cmd.id = __cpu_to_le32(BMI_GET_TARGET_INFO);
+
+ ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen);
+ if (ret) {
+ ath10k_warn(ar, "unable to get target info from device\n");
+ return ret;
+ }
+
+ if (resplen < sizeof(resp.get_target_info)) {
+ ath10k_warn(ar, "invalid get_target_info response length (%d)\n",
+ resplen);
+ return -EIO;
+ }
+
+ target_info->version = __le32_to_cpu(resp.get_target_info.version);
+ target_info->type = __le32_to_cpu(resp.get_target_info.type);
+
+ return 0;
+}
+
+#define TARGET_VERSION_SENTINAL 0xffffffffu
+
+int ath10k_bmi_get_target_info_sdio(struct ath10k *ar,
+ struct bmi_target_info *target_info)
+{
+ struct bmi_cmd cmd;
+ union bmi_resp resp;
+ u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.get_target_info);
+ u32 resplen, ver_len;
+ __le32 tmp;
+ int ret;
+
+ ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi get target info SDIO\n");
+
+ if (ar->bmi.done_sent) {
+ ath10k_warn(ar, "BMI Get Target Info Command disallowed\n");
+ return -EBUSY;
+ }
+
+ cmd.id = __cpu_to_le32(BMI_GET_TARGET_INFO);
+
+ /* Step 1: Read 4 bytes of the target info and check if it is
+ * the special sentinel version word or the first word in the
+ * version response.
+ */
+ resplen = sizeof(u32);
+ ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &tmp, &resplen);
+ if (ret) {
+ ath10k_warn(ar, "unable to read from device\n");
+ return ret;
+ }
+
+ /* Some SDIO boards have a special sentinel word before the real
+ * version response.
+ */
+ if (__le32_to_cpu(tmp) == TARGET_VERSION_SENTINAL) {
+ /* Step 1b: Read the version length */
+ resplen = sizeof(u32);
+ ret = ath10k_hif_exchange_bmi_msg(ar, NULL, 0, &tmp,
+ &resplen);
+ if (ret) {
+ ath10k_warn(ar, "unable to read from device\n");
+ return ret;
+ }
+ }
+
+ ver_len = __le32_to_cpu(tmp);
+
+ /* Step 2: Check the target info length */
+ if (ver_len != sizeof(resp.get_target_info)) {
+ ath10k_warn(ar, "Unexpected target info len: %u. Expected: %zu\n",
+ ver_len, sizeof(resp.get_target_info));
+ return -EINVAL;
+ }
+
+ /* Step 3: Read the rest of the version response */
+ resplen = sizeof(resp.get_target_info) - sizeof(u32);
+ ret = ath10k_hif_exchange_bmi_msg(ar, NULL, 0,
+ &resp.get_target_info.version,
+ &resplen);
+ if (ret) {
+ ath10k_warn(ar, "unable to read from device\n");
+ return ret;
+ }
+
+ target_info->version = __le32_to_cpu(resp.get_target_info.version);
+ target_info->type = __le32_to_cpu(resp.get_target_info.type);
+
+ return 0;
+}
+
+int ath10k_bmi_read_memory(struct ath10k *ar,
+ u32 address, void *buffer, u32 length)
+{
+ struct bmi_cmd cmd;
+ union bmi_resp resp;
+ u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.read_mem);
+ u32 rxlen;
+ int ret;
+
+ ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi read address 0x%x length %d\n",
+ address, length);
+
+ if (ar->bmi.done_sent) {
+ ath10k_warn(ar, "command disallowed\n");
+ return -EBUSY;
+ }
+
+ while (length) {
+ rxlen = min_t(u32, length, BMI_MAX_DATA_SIZE);
+
+ cmd.id = __cpu_to_le32(BMI_READ_MEMORY);
+ cmd.read_mem.addr = __cpu_to_le32(address);
+ cmd.read_mem.len = __cpu_to_le32(rxlen);
+
+ ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen,
+ &resp, &rxlen);
+ if (ret) {
+ ath10k_warn(ar, "unable to read from the device (%d)\n",
+ ret);
+ return ret;
+ }
+
+ memcpy(buffer, resp.read_mem.payload, rxlen);
+ address += rxlen;
+ buffer += rxlen;
+ length -= rxlen;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(ath10k_bmi_read_memory);
+
+int ath10k_bmi_write_soc_reg(struct ath10k *ar, u32 address, u32 reg_val)
+{
+ struct bmi_cmd cmd;
+ u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.write_soc_reg);
+ int ret;
+
+ ath10k_dbg(ar, ATH10K_DBG_BMI,
+ "bmi write soc register 0x%08x val 0x%08x\n",
+ address, reg_val);
+
+ if (ar->bmi.done_sent) {
+ ath10k_warn(ar, "bmi write soc register command in progress\n");
+ return -EBUSY;
+ }
+
+ cmd.id = __cpu_to_le32(BMI_WRITE_SOC_REGISTER);
+ cmd.write_soc_reg.addr = __cpu_to_le32(address);
+ cmd.write_soc_reg.value = __cpu_to_le32(reg_val);
+
+ ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL);
+ if (ret) {
+ ath10k_warn(ar, "Unable to write soc register to device: %d\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+int ath10k_bmi_read_soc_reg(struct ath10k *ar, u32 address, u32 *reg_val)
+{
+ struct bmi_cmd cmd;
+ union bmi_resp resp;
+ u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.read_soc_reg);
+ u32 resplen = sizeof(resp.read_soc_reg);
+ int ret;
+
+ ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi read soc register 0x%08x\n",
+ address);
+
+ if (ar->bmi.done_sent) {
+ ath10k_warn(ar, "bmi read soc register command in progress\n");
+ return -EBUSY;
+ }
+
+ cmd.id = __cpu_to_le32(BMI_READ_SOC_REGISTER);
+ cmd.read_soc_reg.addr = __cpu_to_le32(address);
+
+ ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen);
+ if (ret) {
+ ath10k_warn(ar, "Unable to read soc register from device: %d\n",
+ ret);
+ return ret;
+ }
+
+ *reg_val = __le32_to_cpu(resp.read_soc_reg.value);
+
+ ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi read soc register value 0x%08x\n",
+ *reg_val);
+
+ return 0;
+}
+
+int ath10k_bmi_write_memory(struct ath10k *ar,
+ u32 address, const void *buffer, u32 length)
+{
+ struct bmi_cmd cmd;
+ u32 hdrlen = sizeof(cmd.id) + sizeof(cmd.write_mem);
+ u32 txlen;
+ int ret;
+
+ ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi write address 0x%x length %d\n",
+ address, length);
+
+ if (ar->bmi.done_sent) {
+ ath10k_warn(ar, "command disallowed\n");
+ return -EBUSY;
+ }
+
+ while (length) {
+ txlen = min(length, BMI_MAX_DATA_SIZE - hdrlen);
+
+ /* copy before rounding up to avoid reading beyond the buffer */
+ memcpy(cmd.write_mem.payload, buffer, txlen);
+ txlen = roundup(txlen, 4);
+
+ cmd.id = __cpu_to_le32(BMI_WRITE_MEMORY);
+ cmd.write_mem.addr = __cpu_to_le32(address);
+ cmd.write_mem.len = __cpu_to_le32(txlen);
+
+ ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, hdrlen + txlen,
+ NULL, NULL);
+ if (ret) {
+ ath10k_warn(ar, "unable to write to the device (%d)\n",
+ ret);
+ return ret;
+ }
+
+ /* clamp txlen back down so `length` reaches zero on the last chunk */
+ txlen = min(txlen, length);
+
+ address += txlen;
+ buffer += txlen;
+ length -= txlen;
+ }
+
+ return 0;
+}
+
+int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 param, u32 *result)
+{
+ struct bmi_cmd cmd;
+ union bmi_resp resp;
+ u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.execute);
+ u32 resplen = sizeof(resp.execute);
+ int ret;
+
+ ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi execute address 0x%x param 0x%x\n",
+ address, param);
+
+ if (ar->bmi.done_sent) {
+ ath10k_warn(ar, "command disallowed\n");
+ return -EBUSY;
+ }
+
+ cmd.id = __cpu_to_le32(BMI_EXECUTE);
+ cmd.execute.addr = __cpu_to_le32(address);
+ cmd.execute.param = __cpu_to_le32(param);
+
+ ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen);
+ if (ret) {
+ ath10k_warn(ar, "unable to read from the device\n");
+ return ret;
+ }
+
+ if (resplen < sizeof(resp.execute)) {
+ ath10k_warn(ar, "invalid execute response length (%d)\n",
+ resplen);
+ return -EIO;
+ }
+
+ *result = __le32_to_cpu(resp.execute.result);
+
+ ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi execute result 0x%x\n", *result);
+
+ return 0;
+}
+
+static int ath10k_bmi_lz_data_large(struct ath10k *ar, const void *buffer, u32 length)
+{
+ struct bmi_cmd *cmd;
+ u32 hdrlen = sizeof(cmd->id) + sizeof(cmd->lz_data);
+ u32 txlen;
+ int ret;
+ size_t buf_len;
+
+ ath10k_dbg(ar, ATH10K_DBG_BMI, "large bmi lz data buffer 0x%p length %d\n",
+ buffer, length);
+
+ if (ar->bmi.done_sent) {
+ ath10k_warn(ar, "command disallowed\n");
+ return -EBUSY;
+ }
+
+ buf_len = sizeof(*cmd) + BMI_MAX_LARGE_DATA_SIZE - BMI_MAX_DATA_SIZE;
+ cmd = kzalloc(buf_len, GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ while (length) {
+ txlen = min(length, BMI_MAX_LARGE_DATA_SIZE - hdrlen);
+
+ WARN_ON_ONCE(txlen & 3);
+
+ cmd->id = __cpu_to_le32(BMI_LZ_DATA);
+ cmd->lz_data.len = __cpu_to_le32(txlen);
+ memcpy(cmd->lz_data.payload, buffer, txlen);
+
+ ret = ath10k_hif_exchange_bmi_msg(ar, cmd, hdrlen + txlen,
+ NULL, NULL);
+ if (ret) {
+ ath10k_warn(ar, "unable to write to the device\n");
+ kfree(cmd);
+ return ret;
+ }
+
+ buffer += txlen;
+ length -= txlen;
+ }
+
+ kfree(cmd);
+
+ return 0;
+}
+
+int ath10k_bmi_lz_data(struct ath10k *ar, const void *buffer, u32 length)
+{
+ struct bmi_cmd cmd;
+ u32 hdrlen = sizeof(cmd.id) + sizeof(cmd.lz_data);
+ u32 txlen;
+ int ret;
+
+ ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi lz data buffer 0x%p length %d\n",
+ buffer, length);
+
+ if (ar->bmi.done_sent) {
+ ath10k_warn(ar, "command disallowed\n");
+ return -EBUSY;
+ }
+
+ while (length) {
+ txlen = min(length, BMI_MAX_DATA_SIZE - hdrlen);
+
+ WARN_ON_ONCE(txlen & 3);
+
+ cmd.id = __cpu_to_le32(BMI_LZ_DATA);
+ cmd.lz_data.len = __cpu_to_le32(txlen);
+ memcpy(cmd.lz_data.payload, buffer, txlen);
+
+ ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, hdrlen + txlen,
+ NULL, NULL);
+ if (ret) {
+ ath10k_warn(ar, "unable to write to the device\n");
+ return ret;
+ }
+
+ buffer += txlen;
+ length -= txlen;
+ }
+
+ return 0;
+}
+
+int ath10k_bmi_lz_stream_start(struct ath10k *ar, u32 address)
+{
+ struct bmi_cmd cmd;
+ u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.lz_start);
+ int ret;
+
+ ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi lz stream start address 0x%x\n",
+ address);
+
+ if (ar->bmi.done_sent) {
+ ath10k_warn(ar, "command disallowed\n");
+ return -EBUSY;
+ }
+
+ cmd.id = __cpu_to_le32(BMI_LZ_STREAM_START);
+ cmd.lz_start.addr = __cpu_to_le32(address);
+
+ ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL);
+ if (ret) {
+ ath10k_warn(ar, "unable to Start LZ Stream to the device\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+int ath10k_bmi_fast_download(struct ath10k *ar,
+ u32 address, const void *buffer, u32 length)
+{
+ u8 trailer[4] = {};
+ u32 head_len = rounddown(length, 4);
+ u32 trailer_len = length - head_len;
+ int ret;
+
+ ath10k_dbg(ar, ATH10K_DBG_BMI,
+ "bmi fast download address 0x%x buffer 0x%p length %d\n",
+ address, buffer, length);
+
+ ret = ath10k_bmi_lz_stream_start(ar, address);
+ if (ret)
+ return ret;
+
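+ /* The LZ download body must be a multiple of 4 bytes, so split off
+  * the unaligned tail; e.g. length = 10 gives head_len = 8 and
+  * trailer_len = 2, with trailer = { buffer[8], buffer[9], 0, 0 }.
+  */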
+ /* copy the last word into a zero padded buffer */
+ if (trailer_len > 0)
+ memcpy(trailer, buffer + head_len, trailer_len);
+
+ if (ar->hw_params.bmi_large_size_download)
+ ret = ath10k_bmi_lz_data_large(ar, buffer, head_len);
+ else
+ ret = ath10k_bmi_lz_data(ar, buffer, head_len);
+
+ if (ret)
+ return ret;
+
+ if (trailer_len > 0)
+ ret = ath10k_bmi_lz_data(ar, trailer, 4);
+
+ if (ret != 0)
+ return ret;
+
+ /*
+ * Close compressed stream and open a new (fake) one.
+ * This serves mainly to flush Target caches.
+ */
+ ret = ath10k_bmi_lz_stream_start(ar, 0x00);
+
+ return ret;
+}
+
+int ath10k_bmi_set_start(struct ath10k *ar, u32 address)
+{
+ struct bmi_cmd cmd;
+ u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.set_app_start);
+ int ret;
+
+ if (ar->bmi.done_sent) {
+ ath10k_warn(ar, "bmi set start command disallowed\n");
+ return -EBUSY;
+ }
+
+ cmd.id = __cpu_to_le32(BMI_SET_APP_START);
+ cmd.set_app_start.addr = __cpu_to_le32(address);
+
+ ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL);
+ if (ret) {
+ ath10k_warn(ar, "unable to set start to the device:%d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/wireless/ath/ath10k/bmi.h b/drivers/net/wireless/ath/ath10k/bmi.h
new file mode 100644
index 000000000000..0685c0d2d4ea
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/bmi.h
@@ -0,0 +1,277 @@
+/* SPDX-License-Identifier: ISC */
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2015,2017 Qualcomm Atheros, Inc.
+ */
+
+#ifndef _BMI_H_
+#define _BMI_H_
+
+#include "core.h"
+
+/*
+ * Bootloader Messaging Interface (BMI)
+ *
+ * BMI is a very simple messaging interface used during initialization
+ * to read memory, write memory, execute code, and to define an
+ * application entry PC.
+ *
+ * It is used to download an application to QCA988x, to provide
+ * patches to code that is already resident on QCA988x, and generally
+ * to examine and modify state. The Host has an opportunity to use
+ * BMI only once during bootup. Once the Host issues a BMI_DONE
+ * command, this opportunity ends.
+ *
+ * The Host writes BMI requests to mailbox0, and reads BMI responses
+ * from mailbox0. BMI requests all begin with a command
+ * (see below for specific commands), and are followed by
+ * command-specific data.
+ *
+ * Flow control:
+ * The Host can only issue a command once the Target gives it a
+ * "BMI Command Credit", using AR8K Counter #4. As soon as the
+ * Target has completed a command, it issues another BMI Command
+ * Credit (so the Host can issue the next command).
+ *
+ * BMI handles all required Target-side cache flushing.
+ */
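+
+/*
+ * Illustrative host-side bootstrap flow (a sketch only; fw_addr, fw_data
+ * and fw_len are hypothetical, and error handling is omitted):
+ *
+ *	struct bmi_target_info ti;
+ *
+ *	ath10k_bmi_start(ar);
+ *	ath10k_bmi_get_target_info(ar, &ti);
+ *	ath10k_bmi_write_memory(ar, fw_addr, fw_data, fw_len);
+ *	ath10k_bmi_set_start(ar, fw_addr);
+ *	ath10k_bmi_done(ar);
+ */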
+
+/* Maximum data size used for BMI transfers */
+#define BMI_MAX_DATA_SIZE 256
+
+/* len = cmd + addr + length */
+#define BMI_MAX_CMDBUF_SIZE (BMI_MAX_DATA_SIZE + \
+ sizeof(u32) + \
+ sizeof(u32) + \
+ sizeof(u32))
+
+/* Maximum data size used for large BMI transfers */
+#define BMI_MAX_LARGE_DATA_SIZE 2048
+
+/* len = cmd + addr + length */
+#define BMI_MAX_LARGE_CMDBUF_SIZE (BMI_MAX_LARGE_DATA_SIZE + \
+ sizeof(u32) + \
+ sizeof(u32) + \
+ sizeof(u32))
+
+/* BMI Commands */
+
+enum bmi_cmd_id {
+ BMI_NO_COMMAND = 0,
+ BMI_DONE = 1,
+ BMI_READ_MEMORY = 2,
+ BMI_WRITE_MEMORY = 3,
+ BMI_EXECUTE = 4,
+ BMI_SET_APP_START = 5,
+ BMI_READ_SOC_REGISTER = 6,
+ BMI_READ_SOC_WORD = 6,
+ BMI_WRITE_SOC_REGISTER = 7,
+ BMI_WRITE_SOC_WORD = 7,
+ BMI_GET_TARGET_ID = 8,
+ BMI_GET_TARGET_INFO = 8,
+ BMI_ROMPATCH_INSTALL = 9,
+ BMI_ROMPATCH_UNINSTALL = 10,
+ BMI_ROMPATCH_ACTIVATE = 11,
+ BMI_ROMPATCH_DEACTIVATE = 12,
+ BMI_LZ_STREAM_START = 13, /* should be followed by LZ_DATA */
+ BMI_LZ_DATA = 14,
+ BMI_NVRAM_PROCESS = 15,
+};
+
+#define BMI_NVRAM_SEG_NAME_SZ 16
+
+#define BMI_PARAM_GET_EEPROM_BOARD_ID 0x10
+#define BMI_PARAM_GET_FLASH_BOARD_ID 0x8000
+#define BMI_PARAM_FLASH_SECTION_ALL 0x10000
+
+/* Dual-band Extended Board ID */
+#define BMI_PARAM_GET_EXT_BOARD_ID 0x40000
+#define ATH10K_BMI_EXT_BOARD_ID_SUPPORT 0x40000
+
+#define ATH10K_BMI_BOARD_ID_FROM_OTP_MASK 0x7c00
+#define ATH10K_BMI_BOARD_ID_FROM_OTP_LSB 10
+
+#define ATH10K_BMI_CHIP_ID_FROM_OTP_MASK 0x18000
+#define ATH10K_BMI_CHIP_ID_FROM_OTP_LSB 15
+
+#define ATH10K_BMI_BOARD_ID_STATUS_MASK 0xff
+#define ATH10K_BMI_EBOARD_ID_STATUS_MASK 0xff
+
+struct bmi_cmd {
+ __le32 id; /* enum bmi_cmd_id */
+ union {
+ struct {
+ } done;
+ struct {
+ __le32 addr;
+ __le32 len;
+ } read_mem;
+ struct {
+ __le32 addr;
+ __le32 len;
+ u8 payload[];
+ } write_mem;
+ struct {
+ __le32 addr;
+ __le32 param;
+ } execute;
+ struct {
+ __le32 addr;
+ } set_app_start;
+ struct {
+ __le32 addr;
+ } read_soc_reg;
+ struct {
+ __le32 addr;
+ __le32 value;
+ } write_soc_reg;
+ struct {
+ } get_target_info;
+ struct {
+ __le32 rom_addr;
+ __le32 ram_addr; /* or value */
+ __le32 size;
+ __le32 activate; /* 0 = install, but don't activate */
+ } rompatch_install;
+ struct {
+ __le32 patch_id;
+ } rompatch_uninstall;
+ struct {
+ __le32 count;
+ __le32 patch_ids[]; /* length of @count */
+ } rompatch_activate;
+ struct {
+ __le32 count;
+ __le32 patch_ids[]; /* length of @count */
+ } rompatch_deactivate;
+ struct {
+ __le32 addr;
+ } lz_start;
+ struct {
+ __le32 len; /* max BMI_MAX_DATA_SIZE */
+ u8 payload[]; /* length of @len */
+ } lz_data;
+ struct {
+ u8 name[BMI_NVRAM_SEG_NAME_SZ];
+ } nvram_process;
+ u8 payload[BMI_MAX_CMDBUF_SIZE];
+ };
+} __packed;
+
+union bmi_resp {
+ struct {
+ DECLARE_FLEX_ARRAY(u8, payload);
+ } read_mem;
+ struct {
+ __le32 result;
+ } execute;
+ struct {
+ __le32 value;
+ } read_soc_reg;
+ struct {
+ __le32 len;
+ __le32 version;
+ __le32 type;
+ } get_target_info;
+ struct {
+ __le32 patch_id;
+ } rompatch_install;
+ struct {
+ __le32 patch_id;
+ } rompatch_uninstall;
+ struct {
+ /* 0 = nothing executed
+ * otherwise = NVRAM segment return value
+ */
+ __le32 result;
+ } nvram_process;
+ u8 payload[BMI_MAX_CMDBUF_SIZE];
+} __packed;
+
+struct bmi_target_info {
+ u32 version;
+ u32 type;
+};
+
+struct bmi_segmented_file_header {
+ __le32 magic_num;
+ __le32 file_flags;
+ u8 data[];
+};
+
+struct bmi_segmented_metadata {
+ __le32 addr;
+ __le32 length;
+ u8 data[];
+};
+
+#define BMI_SGMTFILE_MAGIC_NUM 0x544d4753 /* "SGMT" */
+#define BMI_SGMTFILE_FLAG_COMPRESS 1
+
+/* Special values for bmi_segmented_metadata.length (all have high bit set) */
+
+/* end of segmented data */
+#define BMI_SGMTFILE_DONE 0xffffffff
+
+/* Board Data segment */
+#define BMI_SGMTFILE_BDDATA 0xfffffffe
+
+/* set beginning address */
+#define BMI_SGMTFILE_BEGINADDR 0xfffffffd
+
+/* immediate function execution */
+#define BMI_SGMTFILE_EXEC 0xfffffffc
+
+/* in jiffies */
+#define BMI_COMMUNICATION_TIMEOUT_HZ (3 * HZ)
+
+#define BMI_CE_NUM_TO_TARG 0
+#define BMI_CE_NUM_TO_HOST 1
+
+void ath10k_bmi_start(struct ath10k *ar);
+int ath10k_bmi_done(struct ath10k *ar);
+int ath10k_bmi_get_target_info(struct ath10k *ar,
+ struct bmi_target_info *target_info);
+int ath10k_bmi_get_target_info_sdio(struct ath10k *ar,
+ struct bmi_target_info *target_info);
+int ath10k_bmi_read_memory(struct ath10k *ar, u32 address,
+ void *buffer, u32 length);
+int ath10k_bmi_write_memory(struct ath10k *ar, u32 address,
+ const void *buffer, u32 length);
+
+#define ath10k_bmi_read32(ar, item, val) \
+ ({ \
+ int ret; \
+ u32 addr; \
+ __le32 tmp; \
+ \
+ addr = host_interest_item_address(HI_ITEM(item)); \
+ ret = ath10k_bmi_read_memory(ar, addr, (u8 *)&tmp, 4); \
+ if (!ret) \
+ *val = __le32_to_cpu(tmp); \
+ ret; \
+ })
+
+#define ath10k_bmi_write32(ar, item, val) \
+ ({ \
+ int ret; \
+ u32 address; \
+ __le32 v = __cpu_to_le32(val); \
+ \
+ address = host_interest_item_address(HI_ITEM(item)); \
+ ret = ath10k_bmi_write_memory(ar, address, \
+ (u8 *)&v, sizeof(v)); \
+ ret; \
+ })
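+
+/* Illustrative use of the accessors above (hi_board_data here names a field
+ * in the target's host interest area; treat it as an example):
+ *
+ *	u32 addr;
+ *
+ *	ret = ath10k_bmi_read32(ar, hi_board_data, &addr);
+ */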
+
+int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 param, u32 *result);
+int ath10k_bmi_lz_stream_start(struct ath10k *ar, u32 address);
+int ath10k_bmi_lz_data(struct ath10k *ar, const void *buffer, u32 length);
+
+int ath10k_bmi_fast_download(struct ath10k *ar, u32 address,
+ const void *buffer, u32 length);
+int ath10k_bmi_read_soc_reg(struct ath10k *ar, u32 address, u32 *reg_val);
+int ath10k_bmi_write_soc_reg(struct ath10k *ar, u32 address, u32 reg_val);
+int ath10k_bmi_set_start(struct ath10k *ar, u32 address);
+
+#endif /* _BMI_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
new file mode 100644
index 000000000000..7bbda46cfd93
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -0,0 +1,1971 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#include <linux/export.h>
+#include "hif.h"
+#include "ce.h"
+#include "debug.h"
+
+/*
+ * Support for Copy Engine hardware, which is mainly used for
+ * communication between Host and Target over a PCIe interconnect.
+ */
+
+/*
+ * A single CopyEngine (CE) comprises two "rings":
+ * a source ring
+ * a destination ring
+ *
+ * Each ring consists of a number of descriptors which specify
+ * an address, length, and meta-data.
+ *
+ * Typically, one side of the PCIe/AHB/SNOC interconnect (Host or Target)
+ * controls one ring and the other side controls the other ring.
+ * The source side chooses when to initiate a transfer and it
+ * chooses what to send (buffer address, length). The destination
+ * side keeps a supply of "anonymous receive buffers" available and
+ * it handles incoming data as it arrives (when the destination
+ * receives an interrupt).
+ *
+ * The sender may send a simple buffer (address/length) or it may
+ * send a small list of buffers. When a small list is sent, hardware
+ * "gathers" these and they end up in a single destination buffer
+ * with a single interrupt.
+ *
+ * There are several "contexts" managed by this layer -- more, it
+ * may seem, than should be needed. These are provided mainly for
+ * maximum flexibility and especially to facilitate a simpler HIF
+ * implementation. There are per-CopyEngine recv, send, and watermark
+ * contexts. These are supplied by the caller when a recv, send,
+ * or watermark handler is established and they are echoed back to
+ * the caller when the respective callbacks are invoked. There is
+ * also a per-transfer context supplied by the caller when a buffer
+ * (or sendlist) is sent and when a buffer is enqueued for recv.
+ * These per-transfer contexts are echoed back to the caller when
+ * the buffer is sent/received.
+ */
+
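+/*
+ * Typical host-side flow (sketch only): the per-transfer context
+ * passed to ath10k_ce_send() is echoed back when the transfer
+ * completes, e.g.
+ *
+ *	ath10k_ce_send(pipe, skb, paddr, len, transfer_id, 0);
+ *	...
+ *	while (!ath10k_ce_completed_send_next(pipe, (void **)&skb))
+ *		reclaim the completed skb;
+ */
+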
+static inline u32 shadow_sr_wr_ind_addr(struct ath10k *ar,
+ struct ath10k_ce_pipe *ce_state)
+{
+ u32 ce_id = ce_state->id;
+ u32 addr = 0;
+
+ switch (ce_id) {
+ case 0:
+ addr = 0x00032000;
+ break;
+ case 3:
+ addr = 0x0003200C;
+ break;
+ case 4:
+ addr = 0x00032010;
+ break;
+ case 5:
+ addr = 0x00032014;
+ break;
+ case 7:
+ addr = 0x0003201C;
+ break;
+ default:
+ ath10k_warn(ar, "invalid CE id: %d", ce_id);
+ break;
+ }
+ return addr;
+}
+
+static inline unsigned int
+ath10k_set_ring_byte(unsigned int offset,
+ const struct ath10k_hw_ce_regs_addr_map *addr_map)
+{
+ return ((offset << addr_map->lsb) & addr_map->mask);
+}
+
+static inline u32 ath10k_ce_read32(struct ath10k *ar, u32 offset)
+{
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+
+ return ce->bus_ops->read32(ar, offset);
+}
+
+static inline void ath10k_ce_write32(struct ath10k *ar, u32 offset, u32 value)
+{
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+
+ ce->bus_ops->write32(ar, offset, value);
+}
+
+static inline void ath10k_ce_dest_ring_write_index_set(struct ath10k *ar,
+ u32 ce_ctrl_addr,
+ unsigned int n)
+{
+ ath10k_ce_write32(ar, ce_ctrl_addr +
+ ar->hw_ce_regs->dst_wr_index_addr, n);
+}
+
+static inline u32 ath10k_ce_dest_ring_write_index_get(struct ath10k *ar,
+ u32 ce_ctrl_addr)
+{
+ return ath10k_ce_read32(ar, ce_ctrl_addr +
+ ar->hw_ce_regs->dst_wr_index_addr);
+}
+
+static inline void ath10k_ce_src_ring_write_index_set(struct ath10k *ar,
+ u32 ce_ctrl_addr,
+ unsigned int n)
+{
+ ath10k_ce_write32(ar, ce_ctrl_addr +
+ ar->hw_ce_regs->sr_wr_index_addr, n);
+}
+
+static inline u32 ath10k_ce_src_ring_write_index_get(struct ath10k *ar,
+ u32 ce_ctrl_addr)
+{
+ return ath10k_ce_read32(ar, ce_ctrl_addr +
+ ar->hw_ce_regs->sr_wr_index_addr);
+}
+
+static inline u32 ath10k_ce_src_ring_read_index_from_ddr(struct ath10k *ar,
+ u32 ce_id)
+{
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+
+ return ce->vaddr_rri[ce_id] & CE_DDR_RRI_MASK;
+}
+
+static inline u32 ath10k_ce_src_ring_read_index_get(struct ath10k *ar,
+ u32 ce_ctrl_addr)
+{
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+ u32 ce_id = COPY_ENGINE_ID(ce_ctrl_addr);
+ struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
+ u32 index;
+
+ if (ar->hw_params.rri_on_ddr &&
+ (ce_state->attr_flags & CE_ATTR_DIS_INTR))
+ index = ath10k_ce_src_ring_read_index_from_ddr(ar, ce_id);
+ else
+ index = ath10k_ce_read32(ar, ce_ctrl_addr +
+ ar->hw_ce_regs->current_srri_addr);
+
+ return index;
+}
+
+static inline void
+ath10k_ce_shadow_src_ring_write_index_set(struct ath10k *ar,
+ struct ath10k_ce_pipe *ce_state,
+ unsigned int value)
+{
+ ath10k_ce_write32(ar, shadow_sr_wr_ind_addr(ar, ce_state), value);
+}
+
+static inline void ath10k_ce_src_ring_base_addr_set(struct ath10k *ar,
+ u32 ce_id,
+ u64 addr)
+{
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+ struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
+ u32 ce_ctrl_addr = ath10k_ce_base_address(ar, ce_id);
+ u32 addr_lo = lower_32_bits(addr);
+
+ ath10k_ce_write32(ar, ce_ctrl_addr +
+ ar->hw_ce_regs->sr_base_addr_lo, addr_lo);
+
+ if (ce_state->ops->ce_set_src_ring_base_addr_hi) {
+ ce_state->ops->ce_set_src_ring_base_addr_hi(ar, ce_ctrl_addr,
+ addr);
+ }
+}
+
+static void ath10k_ce_set_src_ring_base_addr_hi(struct ath10k *ar,
+ u32 ce_ctrl_addr,
+ u64 addr)
+{
+ u32 addr_hi = upper_32_bits(addr) & CE_DESC_ADDR_HI_MASK;
+
+ ath10k_ce_write32(ar, ce_ctrl_addr +
+ ar->hw_ce_regs->sr_base_addr_hi, addr_hi);
+}
+
+static inline void ath10k_ce_src_ring_size_set(struct ath10k *ar,
+ u32 ce_ctrl_addr,
+ unsigned int n)
+{
+ ath10k_ce_write32(ar, ce_ctrl_addr +
+ ar->hw_ce_regs->sr_size_addr, n);
+}
+
+static inline void ath10k_ce_src_ring_dmax_set(struct ath10k *ar,
+ u32 ce_ctrl_addr,
+ unsigned int n)
+{
+ const struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs;
+
+ u32 ctrl1_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
+ ctrl_regs->addr);
+
+ ath10k_ce_write32(ar, ce_ctrl_addr + ctrl_regs->addr,
+ (ctrl1_addr & ~(ctrl_regs->dmax->mask)) |
+ ath10k_set_ring_byte(n, ctrl_regs->dmax));
+}
+
+static inline void ath10k_ce_src_ring_byte_swap_set(struct ath10k *ar,
+ u32 ce_ctrl_addr,
+ unsigned int n)
+{
+ const struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs;
+
+ u32 ctrl1_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
+ ctrl_regs->addr);
+
+ ath10k_ce_write32(ar, ce_ctrl_addr + ctrl_regs->addr,
+ (ctrl1_addr & ~(ctrl_regs->src_ring->mask)) |
+ ath10k_set_ring_byte(n, ctrl_regs->src_ring));
+}
+
+static inline void ath10k_ce_dest_ring_byte_swap_set(struct ath10k *ar,
+ u32 ce_ctrl_addr,
+ unsigned int n)
+{
+ const struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs;
+
+ u32 ctrl1_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
+ ctrl_regs->addr);
+
+ ath10k_ce_write32(ar, ce_ctrl_addr + ctrl_regs->addr,
+ (ctrl1_addr & ~(ctrl_regs->dst_ring->mask)) |
+ ath10k_set_ring_byte(n, ctrl_regs->dst_ring));
+}
+
+static inline
+ u32 ath10k_ce_dest_ring_read_index_from_ddr(struct ath10k *ar, u32 ce_id)
+{
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+
+ return (ce->vaddr_rri[ce_id] >> CE_DDR_DRRI_SHIFT) &
+ CE_DDR_RRI_MASK;
+}
+
+static inline u32 ath10k_ce_dest_ring_read_index_get(struct ath10k *ar,
+ u32 ce_ctrl_addr)
+{
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+ u32 ce_id = COPY_ENGINE_ID(ce_ctrl_addr);
+ struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
+ u32 index;
+
+ if (ar->hw_params.rri_on_ddr &&
+ (ce_state->attr_flags & CE_ATTR_DIS_INTR))
+ index = ath10k_ce_dest_ring_read_index_from_ddr(ar, ce_id);
+ else
+ index = ath10k_ce_read32(ar, ce_ctrl_addr +
+ ar->hw_ce_regs->current_drri_addr);
+
+ return index;
+}
+
+static inline void ath10k_ce_dest_ring_base_addr_set(struct ath10k *ar,
+ u32 ce_id,
+ u64 addr)
+{
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+ struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
+ u32 ce_ctrl_addr = ath10k_ce_base_address(ar, ce_id);
+ u32 addr_lo = lower_32_bits(addr);
+
+ ath10k_ce_write32(ar, ce_ctrl_addr +
+ ar->hw_ce_regs->dr_base_addr_lo, addr_lo);
+
+ if (ce_state->ops->ce_set_dest_ring_base_addr_hi) {
+ ce_state->ops->ce_set_dest_ring_base_addr_hi(ar, ce_ctrl_addr,
+ addr);
+ }
+}
+
+static void ath10k_ce_set_dest_ring_base_addr_hi(struct ath10k *ar,
+ u32 ce_ctrl_addr,
+ u64 addr)
+{
+ u32 addr_hi = upper_32_bits(addr) & CE_DESC_ADDR_HI_MASK;
+ u32 reg_value;
+
+ reg_value = ath10k_ce_read32(ar, ce_ctrl_addr +
+ ar->hw_ce_regs->dr_base_addr_hi);
+ reg_value &= ~CE_DESC_ADDR_HI_MASK;
+ reg_value |= addr_hi;
+ ath10k_ce_write32(ar, ce_ctrl_addr +
+ ar->hw_ce_regs->dr_base_addr_hi, reg_value);
+}
+
+static inline void ath10k_ce_dest_ring_size_set(struct ath10k *ar,
+ u32 ce_ctrl_addr,
+ unsigned int n)
+{
+ ath10k_ce_write32(ar, ce_ctrl_addr +
+ ar->hw_ce_regs->dr_size_addr, n);
+}
+
+static inline void ath10k_ce_src_ring_highmark_set(struct ath10k *ar,
+ u32 ce_ctrl_addr,
+ unsigned int n)
+{
+ const struct ath10k_hw_ce_dst_src_wm_regs *srcr_wm = ar->hw_ce_regs->wm_srcr;
+ u32 addr = ath10k_ce_read32(ar, ce_ctrl_addr + srcr_wm->addr);
+
+ ath10k_ce_write32(ar, ce_ctrl_addr + srcr_wm->addr,
+ (addr & ~(srcr_wm->wm_high->mask)) |
+ (ath10k_set_ring_byte(n, srcr_wm->wm_high)));
+}
+
+static inline void ath10k_ce_src_ring_lowmark_set(struct ath10k *ar,
+ u32 ce_ctrl_addr,
+ unsigned int n)
+{
+ const struct ath10k_hw_ce_dst_src_wm_regs *srcr_wm = ar->hw_ce_regs->wm_srcr;
+ u32 addr = ath10k_ce_read32(ar, ce_ctrl_addr + srcr_wm->addr);
+
+ ath10k_ce_write32(ar, ce_ctrl_addr + srcr_wm->addr,
+ (addr & ~(srcr_wm->wm_low->mask)) |
+ (ath10k_set_ring_byte(n, srcr_wm->wm_low)));
+}
+
+static inline void ath10k_ce_dest_ring_highmark_set(struct ath10k *ar,
+ u32 ce_ctrl_addr,
+ unsigned int n)
+{
+ const struct ath10k_hw_ce_dst_src_wm_regs *dstr_wm = ar->hw_ce_regs->wm_dstr;
+ u32 addr = ath10k_ce_read32(ar, ce_ctrl_addr + dstr_wm->addr);
+
+ ath10k_ce_write32(ar, ce_ctrl_addr + dstr_wm->addr,
+ (addr & ~(dstr_wm->wm_high->mask)) |
+ (ath10k_set_ring_byte(n, dstr_wm->wm_high)));
+}
+
+static inline void ath10k_ce_dest_ring_lowmark_set(struct ath10k *ar,
+ u32 ce_ctrl_addr,
+ unsigned int n)
+{
+ const struct ath10k_hw_ce_dst_src_wm_regs *dstr_wm = ar->hw_ce_regs->wm_dstr;
+ u32 addr = ath10k_ce_read32(ar, ce_ctrl_addr + dstr_wm->addr);
+
+ ath10k_ce_write32(ar, ce_ctrl_addr + dstr_wm->addr,
+ (addr & ~(dstr_wm->wm_low->mask)) |
+ (ath10k_set_ring_byte(n, dstr_wm->wm_low)));
+}
+
+static inline void ath10k_ce_copy_complete_intr_enable(struct ath10k *ar,
+ u32 ce_ctrl_addr)
+{
+ const struct ath10k_hw_ce_host_ie *host_ie = ar->hw_ce_regs->host_ie;
+
+ u32 host_ie_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
+ ar->hw_ce_regs->host_ie_addr);
+
+ ath10k_ce_write32(ar, ce_ctrl_addr + ar->hw_ce_regs->host_ie_addr,
+ host_ie_addr | host_ie->copy_complete->mask);
+}
+
+static inline void ath10k_ce_copy_complete_intr_disable(struct ath10k *ar,
+ u32 ce_ctrl_addr)
+{
+ const struct ath10k_hw_ce_host_ie *host_ie = ar->hw_ce_regs->host_ie;
+
+ u32 host_ie_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
+ ar->hw_ce_regs->host_ie_addr);
+
+ ath10k_ce_write32(ar, ce_ctrl_addr + ar->hw_ce_regs->host_ie_addr,
+ host_ie_addr & ~(host_ie->copy_complete->mask));
+}
+
+static inline void ath10k_ce_watermark_intr_disable(struct ath10k *ar,
+ u32 ce_ctrl_addr)
+{
+ const struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs;
+
+ u32 host_ie_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
+ ar->hw_ce_regs->host_ie_addr);
+
+ ath10k_ce_write32(ar, ce_ctrl_addr + ar->hw_ce_regs->host_ie_addr,
+ host_ie_addr & ~(wm_regs->wm_mask));
+}
+
+static inline void ath10k_ce_error_intr_disable(struct ath10k *ar,
+ u32 ce_ctrl_addr)
+{
+ const struct ath10k_hw_ce_misc_regs *misc_regs = ar->hw_ce_regs->misc_regs;
+
+ u32 misc_ie_addr = ath10k_ce_read32(ar,
+ ce_ctrl_addr + ar->hw_ce_regs->misc_ie_addr);
+
+ ath10k_ce_write32(ar,
+ ce_ctrl_addr + ar->hw_ce_regs->misc_ie_addr,
+ misc_ie_addr & ~(misc_regs->err_mask));
+}
+
+static inline void ath10k_ce_engine_int_status_clear(struct ath10k *ar,
+ u32 ce_ctrl_addr,
+ unsigned int mask)
+{
+ const struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs;
+
+ ath10k_ce_write32(ar, ce_ctrl_addr + wm_regs->addr, mask);
+}
+
+/*
+ * Guts of ath10k_ce_send.
+ * The caller takes responsibility for any needed locking.
+ */
+static int _ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
+ void *per_transfer_context,
+ dma_addr_t buffer,
+ unsigned int nbytes,
+ unsigned int transfer_id,
+ unsigned int flags)
+{
+ struct ath10k *ar = ce_state->ar;
+ struct ath10k_ce_ring *src_ring = ce_state->src_ring;
+ struct ce_desc *desc, sdesc;
+ unsigned int nentries_mask = src_ring->nentries_mask;
+ unsigned int sw_index = src_ring->sw_index;
+ unsigned int write_index = src_ring->write_index;
+ u32 ctrl_addr = ce_state->ctrl_addr;
+ u32 desc_flags = 0;
+ int ret = 0;
+
+ if (nbytes > ce_state->src_sz_max)
+ ath10k_warn(ar, "%s: send more we can (nbytes: %d, max: %d)\n",
+ __func__, nbytes, ce_state->src_sz_max);
+
+ if (unlikely(CE_RING_DELTA(nentries_mask,
+ write_index, sw_index - 1) <= 0)) {
+ ret = -ENOSR;
+ goto exit;
+ }
+
+ desc = CE_SRC_RING_TO_DESC(src_ring->base_addr_owner_space,
+ write_index);
+
+ desc_flags |= SM(transfer_id, CE_DESC_FLAGS_META_DATA);
+
+ if (flags & CE_SEND_FLAG_GATHER)
+ desc_flags |= CE_DESC_FLAGS_GATHER;
+ if (flags & CE_SEND_FLAG_BYTE_SWAP)
+ desc_flags |= CE_DESC_FLAGS_BYTE_SWAP;
+
+ sdesc.addr = __cpu_to_le32(buffer);
+ sdesc.nbytes = __cpu_to_le16(nbytes);
+ sdesc.flags = __cpu_to_le16(desc_flags);
+
+ *desc = sdesc;
+
+ src_ring->per_transfer_context[write_index] = per_transfer_context;
+
+ /* Update Source Ring Write Index */
+ write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
+
+ /* WORKAROUND: for gather fragments, defer the hardware write
+ * index update until the final (non-gather) fragment is queued
+ */
+ if (!(flags & CE_SEND_FLAG_GATHER))
+ ath10k_ce_src_ring_write_index_set(ar, ctrl_addr, write_index);
+
+ src_ring->write_index = write_index;
+exit:
+ return ret;
+}
+
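+/*
+ * Worked example for the space check above (assuming CE_RING_DELTA()
+ * yields (toidx - fromidx) modulo the ring size): in an 8-entry ring
+ * with write_index = 5 and sw_index = 3, CE_RING_DELTA(7, 5, 2) =
+ * (2 - 5) & 7 = 5, so five more descriptors can be queued before the
+ * ring is full.
+ */
+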
+static int _ath10k_ce_send_nolock_64(struct ath10k_ce_pipe *ce_state,
+ void *per_transfer_context,
+ dma_addr_t buffer,
+ unsigned int nbytes,
+ unsigned int transfer_id,
+ unsigned int flags)
+{
+ struct ath10k *ar = ce_state->ar;
+ struct ath10k_ce_ring *src_ring = ce_state->src_ring;
+ struct ce_desc_64 *desc, sdesc;
+ unsigned int nentries_mask = src_ring->nentries_mask;
+ unsigned int sw_index;
+ unsigned int write_index = src_ring->write_index;
+ u32 ctrl_addr = ce_state->ctrl_addr;
+ __le32 *addr;
+ u32 desc_flags = 0;
+ int ret = 0;
+
+ if (test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags))
+ return -ESHUTDOWN;
+
+ if (nbytes > ce_state->src_sz_max)
+ ath10k_warn(ar, "%s: send more we can (nbytes: %d, max: %d)\n",
+ __func__, nbytes, ce_state->src_sz_max);
+
+ if (ar->hw_params.rri_on_ddr)
+ sw_index = ath10k_ce_src_ring_read_index_from_ddr(ar, ce_state->id);
+ else
+ sw_index = src_ring->sw_index;
+
+ if (unlikely(CE_RING_DELTA(nentries_mask,
+ write_index, sw_index - 1) <= 0)) {
+ ret = -ENOSR;
+ goto exit;
+ }
+
+ desc = CE_SRC_RING_TO_DESC_64(src_ring->base_addr_owner_space,
+ write_index);
+
+ desc_flags |= SM(transfer_id, CE_DESC_FLAGS_META_DATA);
+
+ if (flags & CE_SEND_FLAG_GATHER)
+ desc_flags |= CE_DESC_FLAGS_GATHER;
+
+ if (flags & CE_SEND_FLAG_BYTE_SWAP)
+ desc_flags |= CE_DESC_FLAGS_BYTE_SWAP;
+
+ addr = (__le32 *)&sdesc.addr;
+
+ flags |= upper_32_bits(buffer) & CE_DESC_ADDR_HI_MASK;
+ addr[0] = __cpu_to_le32(buffer);
+ addr[1] = __cpu_to_le32(flags);
+ if (flags & CE_SEND_FLAG_GATHER)
+ addr[1] |= __cpu_to_le32(CE_WCN3990_DESC_FLAGS_GATHER);
+ else
+ addr[1] &= ~(__cpu_to_le32(CE_WCN3990_DESC_FLAGS_GATHER));
+
+ sdesc.nbytes = __cpu_to_le16(nbytes);
+ sdesc.flags = __cpu_to_le16(desc_flags);
+
+ *desc = sdesc;
+
+ src_ring->per_transfer_context[write_index] = per_transfer_context;
+
+ /* Update Source Ring Write Index */
+ write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
+
+ if (!(flags & CE_SEND_FLAG_GATHER)) {
+ if (ar->hw_params.shadow_reg_support)
+ ath10k_ce_shadow_src_ring_write_index_set(ar, ce_state,
+ write_index);
+ else
+ ath10k_ce_src_ring_write_index_set(ar, ctrl_addr,
+ write_index);
+ }
+
+ src_ring->write_index = write_index;
+exit:
+ return ret;
+}
+
+int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
+ void *per_transfer_context,
+ dma_addr_t buffer,
+ unsigned int nbytes,
+ unsigned int transfer_id,
+ unsigned int flags)
+{
+ return ce_state->ops->ce_send_nolock(ce_state, per_transfer_context,
+ buffer, nbytes, transfer_id, flags);
+}
+EXPORT_SYMBOL(ath10k_ce_send_nolock);
+
+void __ath10k_ce_send_revert(struct ath10k_ce_pipe *pipe)
+{
+ struct ath10k *ar = pipe->ar;
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+ struct ath10k_ce_ring *src_ring = pipe->src_ring;
+ u32 ctrl_addr = pipe->ctrl_addr;
+
+ lockdep_assert_held(&ce->ce_lock);
+
+ /*
+ * This function must be called only if there is an incomplete
+ * scatter-gather transfer (before index register is updated)
+ * that needs to be cleaned up.
+ */
+ if (WARN_ON_ONCE(src_ring->write_index == src_ring->sw_index))
+ return;
+
+ if (WARN_ON_ONCE(src_ring->write_index ==
+ ath10k_ce_src_ring_write_index_get(ar, ctrl_addr)))
+ return;
+
+ src_ring->write_index--;
+ src_ring->write_index &= src_ring->nentries_mask;
+
+ src_ring->per_transfer_context[src_ring->write_index] = NULL;
+}
+EXPORT_SYMBOL(__ath10k_ce_send_revert);
+
+int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
+ void *per_transfer_context,
+ dma_addr_t buffer,
+ unsigned int nbytes,
+ unsigned int transfer_id,
+ unsigned int flags)
+{
+ struct ath10k *ar = ce_state->ar;
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+ int ret;
+
+ spin_lock_bh(&ce->ce_lock);
+ ret = ath10k_ce_send_nolock(ce_state, per_transfer_context,
+ buffer, nbytes, transfer_id, flags);
+ spin_unlock_bh(&ce->ce_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(ath10k_ce_send);
+
+int ath10k_ce_num_free_src_entries(struct ath10k_ce_pipe *pipe)
+{
+ struct ath10k *ar = pipe->ar;
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+ int delta;
+
+ spin_lock_bh(&ce->ce_lock);
+ delta = CE_RING_DELTA(pipe->src_ring->nentries_mask,
+ pipe->src_ring->write_index,
+ pipe->src_ring->sw_index - 1);
+ spin_unlock_bh(&ce->ce_lock);
+
+ return delta;
+}
+EXPORT_SYMBOL(ath10k_ce_num_free_src_entries);
+
+int __ath10k_ce_rx_num_free_bufs(struct ath10k_ce_pipe *pipe)
+{
+ struct ath10k *ar = pipe->ar;
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+ struct ath10k_ce_ring *dest_ring = pipe->dest_ring;
+ unsigned int nentries_mask = dest_ring->nentries_mask;
+ unsigned int write_index = dest_ring->write_index;
+ unsigned int sw_index = dest_ring->sw_index;
+
+ lockdep_assert_held(&ce->ce_lock);
+
+ return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
+}
+EXPORT_SYMBOL(__ath10k_ce_rx_num_free_bufs);
+
+static int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx,
+ dma_addr_t paddr)
+{
+ struct ath10k *ar = pipe->ar;
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+ struct ath10k_ce_ring *dest_ring = pipe->dest_ring;
+ unsigned int nentries_mask = dest_ring->nentries_mask;
+ unsigned int write_index = dest_ring->write_index;
+ unsigned int sw_index = dest_ring->sw_index;
+ struct ce_desc *base = dest_ring->base_addr_owner_space;
+ struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, write_index);
+ u32 ctrl_addr = pipe->ctrl_addr;
+
+ lockdep_assert_held(&ce->ce_lock);
+
+ if ((pipe->id != 5) &&
+ CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) == 0)
+ return -ENOSPC;
+
+ desc->addr = __cpu_to_le32(paddr);
+ desc->nbytes = 0;
+
+ dest_ring->per_transfer_context[write_index] = ctx;
+ write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
+ ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index);
+ dest_ring->write_index = write_index;
+
+ return 0;
+}
+
+static int __ath10k_ce_rx_post_buf_64(struct ath10k_ce_pipe *pipe,
+ void *ctx,
+ dma_addr_t paddr)
+{
+ struct ath10k *ar = pipe->ar;
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+ struct ath10k_ce_ring *dest_ring = pipe->dest_ring;
+ unsigned int nentries_mask = dest_ring->nentries_mask;
+ unsigned int write_index = dest_ring->write_index;
+ unsigned int sw_index = dest_ring->sw_index;
+ struct ce_desc_64 *base = dest_ring->base_addr_owner_space;
+ struct ce_desc_64 *desc =
+ CE_DEST_RING_TO_DESC_64(base, write_index);
+ u32 ctrl_addr = pipe->ctrl_addr;
+
+ lockdep_assert_held(&ce->ce_lock);
+
+ if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) == 0)
+ return -ENOSPC;
+
+ desc->addr = __cpu_to_le64(paddr);
+ desc->addr &= __cpu_to_le64(CE_DESC_ADDR_MASK);
+
+ desc->nbytes = 0;
+
+ dest_ring->per_transfer_context[write_index] = ctx;
+ write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
+ ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index);
+ dest_ring->write_index = write_index;
+
+ return 0;
+}
+
+void ath10k_ce_rx_update_write_idx(struct ath10k_ce_pipe *pipe, u32 nentries)
+{
+ struct ath10k *ar = pipe->ar;
+ struct ath10k_ce_ring *dest_ring = pipe->dest_ring;
+ unsigned int nentries_mask = dest_ring->nentries_mask;
+ unsigned int write_index = dest_ring->write_index;
+ u32 ctrl_addr = pipe->ctrl_addr;
+ u32 cur_write_idx = ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr);
+
+ /* Prevent the CE ring from getting stuck when it is full.
+ * Make sure the write index stays 1 less than the read index.
+ */
+ if (((cur_write_idx + nentries) & nentries_mask) == dest_ring->sw_index)
+ nentries -= 1;
+
+ write_index = CE_RING_IDX_ADD(nentries_mask, write_index, nentries);
+ ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index);
+ dest_ring->write_index = write_index;
+}
+EXPORT_SYMBOL(ath10k_ce_rx_update_write_idx);
+
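+/*
+ * Example of the hold-back in ath10k_ce_rx_update_write_idx() above
+ * (sketch): with an 8-entry ring (nentries_mask = 7), sw_index = 2
+ * and a current write index of 6, posting nentries = 4 would give
+ * (6 + 4) & 7 == 2 == sw_index, a state the hardware cannot tell
+ * apart from an empty ring, so one entry is held back.
+ */
+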
+int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx,
+ dma_addr_t paddr)
+{
+ struct ath10k *ar = pipe->ar;
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+ int ret;
+
+ spin_lock_bh(&ce->ce_lock);
+ ret = pipe->ops->ce_rx_post_buf(pipe, ctx, paddr);
+ spin_unlock_bh(&ce->ce_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(ath10k_ce_rx_post_buf);
+
+/*
+ * Guts of ath10k_ce_completed_recv_next.
+ * The caller takes responsibility for any necessary locking.
+ */
+static int
+ _ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
+ void **per_transfer_contextp,
+ unsigned int *nbytesp)
+{
+ struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
+ unsigned int nentries_mask = dest_ring->nentries_mask;
+ unsigned int sw_index = dest_ring->sw_index;
+
+ struct ce_desc *base = dest_ring->base_addr_owner_space;
+ struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, sw_index);
+ struct ce_desc sdesc;
+ u16 nbytes;
+
+ /* Copy in one go for performance reasons */
+ sdesc = *desc;
+
+ nbytes = __le16_to_cpu(sdesc.nbytes);
+ if (nbytes == 0) {
+ /*
+ * This closes a relatively unusual race where the Host
+ * sees the updated DRRI before the update to the
+ * corresponding descriptor has completed. We treat this
+ * as a descriptor that is not yet done.
+ */
+ return -EIO;
+ }
+
+ desc->nbytes = 0;
+
+ /* Return data from completed destination descriptor */
+ *nbytesp = nbytes;
+
+ if (per_transfer_contextp)
+ *per_transfer_contextp =
+ dest_ring->per_transfer_context[sw_index];
+
+ /* Copy engine 5 (HTT Rx) will reuse the same transfer context.
+ * So update the transfer context for all CEs except CE5.
+ */
+ if (ce_state->id != 5)
+ dest_ring->per_transfer_context[sw_index] = NULL;
+
+ /* Update sw_index */
+ sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
+ dest_ring->sw_index = sw_index;
+
+ return 0;
+}
+
+static int
+_ath10k_ce_completed_recv_next_nolock_64(struct ath10k_ce_pipe *ce_state,
+ void **per_transfer_contextp,
+ unsigned int *nbytesp)
+{
+ struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
+ unsigned int nentries_mask = dest_ring->nentries_mask;
+ unsigned int sw_index = dest_ring->sw_index;
+ struct ce_desc_64 *base = dest_ring->base_addr_owner_space;
+ struct ce_desc_64 *desc =
+ CE_DEST_RING_TO_DESC_64(base, sw_index);
+ struct ce_desc_64 sdesc;
+ u16 nbytes;
+
+ /* Copy in one go for performance reasons */
+ sdesc = *desc;
+
+ nbytes = __le16_to_cpu(sdesc.nbytes);
+ if (nbytes == 0) {
+ /* This closes a relatively unusual race where the Host
+ * sees the updated DRRI before the update to the
+ * corresponding descriptor has completed. We treat this
+ * as a descriptor that is not yet done.
+ */
+ return -EIO;
+ }
+
+ desc->nbytes = 0;
+
+ /* Return data from completed destination descriptor */
+ *nbytesp = nbytes;
+
+ if (per_transfer_contextp)
+ *per_transfer_contextp =
+ dest_ring->per_transfer_context[sw_index];
+
+ /* Copy engine 5 (HTT Rx) will reuse the same transfer context.
+ * So update the transfer context for all CEs except CE5.
+ */
+ if (ce_state->id != 5)
+ dest_ring->per_transfer_context[sw_index] = NULL;
+
+ /* Update sw_index */
+ sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
+ dest_ring->sw_index = sw_index;
+
+ return 0;
+}
+
+int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
+ void **per_transfer_ctx,
+ unsigned int *nbytesp)
+{
+ return ce_state->ops->ce_completed_recv_next_nolock(ce_state,
+ per_transfer_ctx,
+ nbytesp);
+}
+EXPORT_SYMBOL(ath10k_ce_completed_recv_next_nolock);
+
+int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
+ void **per_transfer_contextp,
+ unsigned int *nbytesp)
+{
+ struct ath10k *ar = ce_state->ar;
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+ int ret;
+
+ spin_lock_bh(&ce->ce_lock);
+ ret = ce_state->ops->ce_completed_recv_next_nolock(ce_state,
+ per_transfer_contextp,
+ nbytesp);
+
+ spin_unlock_bh(&ce->ce_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(ath10k_ce_completed_recv_next);
+
+static int _ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
+ void **per_transfer_contextp,
+ dma_addr_t *bufferp)
+{
+ struct ath10k_ce_ring *dest_ring;
+ unsigned int nentries_mask;
+ unsigned int sw_index;
+ unsigned int write_index;
+ int ret;
+ struct ath10k *ar;
+ struct ath10k_ce *ce;
+
+ dest_ring = ce_state->dest_ring;
+
+ if (!dest_ring)
+ return -EIO;
+
+ ar = ce_state->ar;
+ ce = ath10k_ce_priv(ar);
+
+ spin_lock_bh(&ce->ce_lock);
+
+ nentries_mask = dest_ring->nentries_mask;
+ sw_index = dest_ring->sw_index;
+ write_index = dest_ring->write_index;
+ if (write_index != sw_index) {
+ struct ce_desc *base = dest_ring->base_addr_owner_space;
+ struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, sw_index);
+
+ /* Return data from completed destination descriptor */
+ *bufferp = __le32_to_cpu(desc->addr);
+
+ if (per_transfer_contextp)
+ *per_transfer_contextp =
+ dest_ring->per_transfer_context[sw_index];
+
+ /* sanity */
+ dest_ring->per_transfer_context[sw_index] = NULL;
+ desc->nbytes = 0;
+
+ /* Update sw_index */
+ sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
+ dest_ring->sw_index = sw_index;
+ ret = 0;
+ } else {
+ ret = -EIO;
+ }
+
+ spin_unlock_bh(&ce->ce_lock);
+
+ return ret;
+}
+
+static int _ath10k_ce_revoke_recv_next_64(struct ath10k_ce_pipe *ce_state,
+ void **per_transfer_contextp,
+ dma_addr_t *bufferp)
+{
+ struct ath10k_ce_ring *dest_ring;
+ unsigned int nentries_mask;
+ unsigned int sw_index;
+ unsigned int write_index;
+ int ret;
+ struct ath10k *ar;
+ struct ath10k_ce *ce;
+
+ dest_ring = ce_state->dest_ring;
+
+ if (!dest_ring)
+ return -EIO;
+
+ ar = ce_state->ar;
+ ce = ath10k_ce_priv(ar);
+
+ spin_lock_bh(&ce->ce_lock);
+
+ nentries_mask = dest_ring->nentries_mask;
+ sw_index = dest_ring->sw_index;
+ write_index = dest_ring->write_index;
+ if (write_index != sw_index) {
+ struct ce_desc_64 *base = dest_ring->base_addr_owner_space;
+ struct ce_desc_64 *desc =
+ CE_DEST_RING_TO_DESC_64(base, sw_index);
+
+ /* Return data from completed destination descriptor */
+ *bufferp = __le64_to_cpu(desc->addr);
+
+ if (per_transfer_contextp)
+ *per_transfer_contextp =
+ dest_ring->per_transfer_context[sw_index];
+
+ /* sanity */
+ dest_ring->per_transfer_context[sw_index] = NULL;
+ desc->nbytes = 0;
+
+ /* Update sw_index */
+ sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
+ dest_ring->sw_index = sw_index;
+ ret = 0;
+ } else {
+ ret = -EIO;
+ }
+
+ spin_unlock_bh(&ce->ce_lock);
+
+ return ret;
+}
+
+int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
+ void **per_transfer_contextp,
+ dma_addr_t *bufferp)
+{
+ return ce_state->ops->ce_revoke_recv_next(ce_state,
+ per_transfer_contextp,
+ bufferp);
+}
+EXPORT_SYMBOL(ath10k_ce_revoke_recv_next);
+
+/*
+ * Guts of ath10k_ce_completed_send_next.
+ * The caller takes responsibility for any necessary locking.
+ */
+static int _ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
+ void **per_transfer_contextp)
+{
+ struct ath10k_ce_ring *src_ring = ce_state->src_ring;
+ u32 ctrl_addr = ce_state->ctrl_addr;
+ struct ath10k *ar = ce_state->ar;
+ unsigned int nentries_mask = src_ring->nentries_mask;
+ unsigned int sw_index = src_ring->sw_index;
+ unsigned int read_index;
+ struct ce_desc *desc;
+
+ if (src_ring->hw_index == sw_index) {
+ /*
+ * The SW completion index has caught up with the cached
+ * version of the HW completion index.
+ * Update the cached HW completion index to see whether
+ * the SW has really caught up to the HW, or if the cached
+ * value of the HW index has become stale.
+ */
+
+ read_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
+ if (read_index == 0xffffffff)
+ return -ENODEV;
+
+ read_index &= nentries_mask;
+ src_ring->hw_index = read_index;
+ }
+
+ if (ar->hw_params.rri_on_ddr)
+ read_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
+ else
+ read_index = src_ring->hw_index;
+
+ if (read_index == sw_index)
+ return -EIO;
+
+ if (per_transfer_contextp)
+ *per_transfer_contextp =
+ src_ring->per_transfer_context[sw_index];
+
+ /* sanity */
+ src_ring->per_transfer_context[sw_index] = NULL;
+ desc = CE_SRC_RING_TO_DESC(src_ring->base_addr_owner_space,
+ sw_index);
+ desc->nbytes = 0;
+
+ /* Update sw_index */
+ sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
+ src_ring->sw_index = sw_index;
+
+ return 0;
+}
+
+static int _ath10k_ce_completed_send_next_nolock_64(struct ath10k_ce_pipe *ce_state,
+ void **per_transfer_contextp)
+{
+ struct ath10k_ce_ring *src_ring = ce_state->src_ring;
+ u32 ctrl_addr = ce_state->ctrl_addr;
+ struct ath10k *ar = ce_state->ar;
+ unsigned int nentries_mask = src_ring->nentries_mask;
+ unsigned int sw_index = src_ring->sw_index;
+ unsigned int read_index;
+ struct ce_desc_64 *desc;
+
+ if (src_ring->hw_index == sw_index) {
+ /*
+ * The SW completion index has caught up with the cached
+ * version of the HW completion index.
+ * Update the cached HW completion index to see whether
+ * the SW has really caught up to the HW, or if the cached
+ * value of the HW index has become stale.
+ */
+
+ read_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
+ if (read_index == 0xffffffff)
+ return -ENODEV;
+
+ read_index &= nentries_mask;
+ src_ring->hw_index = read_index;
+ }
+
+ if (ar->hw_params.rri_on_ddr)
+ read_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
+ else
+ read_index = src_ring->hw_index;
+
+ if (read_index == sw_index)
+ return -EIO;
+
+ if (per_transfer_contextp)
+ *per_transfer_contextp =
+ src_ring->per_transfer_context[sw_index];
+
+ /* sanity */
+ src_ring->per_transfer_context[sw_index] = NULL;
+ desc = CE_SRC_RING_TO_DESC_64(src_ring->base_addr_owner_space,
+ sw_index);
+ desc->nbytes = 0;
+
+ /* Update sw_index */
+ sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
+ src_ring->sw_index = sw_index;
+
+ return 0;
+}
+
+int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
+ void **per_transfer_contextp)
+{
+ return ce_state->ops->ce_completed_send_next_nolock(ce_state,
+ per_transfer_contextp);
+}
+EXPORT_SYMBOL(ath10k_ce_completed_send_next_nolock);
+
+static void ath10k_ce_extract_desc_data(struct ath10k *ar,
+ struct ath10k_ce_ring *src_ring,
+ u32 sw_index,
+ dma_addr_t *bufferp,
+ u32 *nbytesp,
+ u32 *transfer_idp)
+{
+ struct ce_desc *base = src_ring->base_addr_owner_space;
+ struct ce_desc *desc = CE_SRC_RING_TO_DESC(base, sw_index);
+
+ /* Return data from completed source descriptor */
+ *bufferp = __le32_to_cpu(desc->addr);
+ *nbytesp = __le16_to_cpu(desc->nbytes);
+ *transfer_idp = MS(__le16_to_cpu(desc->flags),
+ CE_DESC_FLAGS_META_DATA);
+}
+
+static void ath10k_ce_extract_desc_data_64(struct ath10k *ar,
+ struct ath10k_ce_ring *src_ring,
+ u32 sw_index,
+ dma_addr_t *bufferp,
+ u32 *nbytesp,
+ u32 *transfer_idp)
+{
+ struct ce_desc_64 *base = src_ring->base_addr_owner_space;
+ struct ce_desc_64 *desc =
+ CE_SRC_RING_TO_DESC_64(base, sw_index);
+
+ /* Return data from completed source descriptor */
+ *bufferp = __le64_to_cpu(desc->addr);
+ *nbytesp = __le16_to_cpu(desc->nbytes);
+ *transfer_idp = MS(__le16_to_cpu(desc->flags),
+ CE_DESC_FLAGS_META_DATA);
+}
+
+/* NB: Modeled after ath10k_ce_completed_send_next */
+int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,
+ void **per_transfer_contextp,
+ dma_addr_t *bufferp,
+ unsigned int *nbytesp,
+ unsigned int *transfer_idp)
+{
+ struct ath10k_ce_ring *src_ring;
+ unsigned int nentries_mask;
+ unsigned int sw_index;
+ unsigned int write_index;
+ int ret;
+ struct ath10k *ar;
+ struct ath10k_ce *ce;
+
+ src_ring = ce_state->src_ring;
+
+ if (!src_ring)
+ return -EIO;
+
+ ar = ce_state->ar;
+ ce = ath10k_ce_priv(ar);
+
+ spin_lock_bh(&ce->ce_lock);
+
+ nentries_mask = src_ring->nentries_mask;
+ sw_index = src_ring->sw_index;
+ write_index = src_ring->write_index;
+
+ if (write_index != sw_index) {
+ ce_state->ops->ce_extract_desc_data(ar, src_ring, sw_index,
+ bufferp, nbytesp,
+ transfer_idp);
+
+ if (per_transfer_contextp)
+ *per_transfer_contextp =
+ src_ring->per_transfer_context[sw_index];
+
+ /* sanity */
+ src_ring->per_transfer_context[sw_index] = NULL;
+
+ /* Update sw_index */
+ sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
+ src_ring->sw_index = sw_index;
+ ret = 0;
+ } else {
+ ret = -EIO;
+ }
+
+ spin_unlock_bh(&ce->ce_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(ath10k_ce_cancel_send_next);
+
+int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,
+ void **per_transfer_contextp)
+{
+ struct ath10k *ar = ce_state->ar;
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+ int ret;
+
+ spin_lock_bh(&ce->ce_lock);
+ ret = ath10k_ce_completed_send_next_nolock(ce_state,
+ per_transfer_contextp);
+ spin_unlock_bh(&ce->ce_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(ath10k_ce_completed_send_next);
+
+/*
+ * Guts of interrupt handler for per-engine interrupts on a particular CE.
+ *
+ * Invokes registered callbacks for recv_complete,
+ * send_complete, and watermarks.
+ */
+void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id)
+{
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+ struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
+ const struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs;
+ u32 ctrl_addr = ce_state->ctrl_addr;
+
+ /*
+ * Clear before handling
+ *
+ * Misc CE interrupts are not being handled, but still need
+ * to be cleared.
+ *
+ * NOTE: When the last copy engine interrupt is cleared the
+ * hardware will go to sleep. Once this happens any access to
+ * the CE registers can cause a hardware fault.
+ */
+ ath10k_ce_engine_int_status_clear(ar, ctrl_addr,
+ wm_regs->cc_mask | wm_regs->wm_mask);
+
+ if (ce_state->recv_cb)
+ ce_state->recv_cb(ce_state);
+
+ if (ce_state->send_cb)
+ ce_state->send_cb(ce_state);
+}
+EXPORT_SYMBOL(ath10k_ce_per_engine_service);
+
+/*
+ * Handler for per-engine interrupts on ALL active CEs.
+ * This is used in cases where the system is sharing a
+ * single interrupt for all CEs.
+ */
+
+void ath10k_ce_per_engine_service_any(struct ath10k *ar)
+{
+ int ce_id;
+ u32 intr_summary;
+
+ intr_summary = ath10k_ce_interrupt_summary(ar);
+
+ for (ce_id = 0; intr_summary && (ce_id < CE_COUNT); ce_id++) {
+ if (intr_summary & (1 << ce_id))
+ intr_summary &= ~(1 << ce_id);
+ else
+ /* no intr pending on this CE */
+ continue;
+
+ ath10k_ce_per_engine_service(ar, ce_id);
+ }
+}
+EXPORT_SYMBOL(ath10k_ce_per_engine_service_any);
+
+/*
+ * Adjust interrupts for the copy complete handler.
+ * If it's needed for either send or recv, then unmask
+ * this interrupt; otherwise, mask it.
+ *
+ * Called with ce_lock held.
+ */
+static void ath10k_ce_per_engine_handler_adjust(struct ath10k_ce_pipe *ce_state)
+{
+ u32 ctrl_addr = ce_state->ctrl_addr;
+ struct ath10k *ar = ce_state->ar;
+ bool disable_copy_compl_intr = ce_state->attr_flags & CE_ATTR_DIS_INTR;
+
+ if ((!disable_copy_compl_intr) &&
+ (ce_state->send_cb || ce_state->recv_cb))
+ ath10k_ce_copy_complete_intr_enable(ar, ctrl_addr);
+ else
+ ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);
+
+ ath10k_ce_watermark_intr_disable(ar, ctrl_addr);
+}
+
+void ath10k_ce_disable_interrupt(struct ath10k *ar, int ce_id)
+{
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+ struct ath10k_ce_pipe *ce_state;
+ u32 ctrl_addr;
+
+ ce_state = &ce->ce_states[ce_id];
+ if (ce_state->attr_flags & CE_ATTR_POLL)
+ return;
+
+ ctrl_addr = ath10k_ce_base_address(ar, ce_id);
+
+ ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);
+ ath10k_ce_error_intr_disable(ar, ctrl_addr);
+ ath10k_ce_watermark_intr_disable(ar, ctrl_addr);
+}
+EXPORT_SYMBOL(ath10k_ce_disable_interrupt);
+
+void ath10k_ce_disable_interrupts(struct ath10k *ar)
+{
+ int ce_id;
+
+ for (ce_id = 0; ce_id < CE_COUNT; ce_id++)
+ ath10k_ce_disable_interrupt(ar, ce_id);
+}
+EXPORT_SYMBOL(ath10k_ce_disable_interrupts);
+
+void ath10k_ce_enable_interrupt(struct ath10k *ar, int ce_id)
+{
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+ struct ath10k_ce_pipe *ce_state;
+
+ ce_state = &ce->ce_states[ce_id];
+ if (ce_state->attr_flags & CE_ATTR_POLL)
+ return;
+
+ ath10k_ce_per_engine_handler_adjust(ce_state);
+}
+EXPORT_SYMBOL(ath10k_ce_enable_interrupt);
+
+void ath10k_ce_enable_interrupts(struct ath10k *ar)
+{
+ int ce_id;
+
+ /* Enable interrupts for copy engines that
+ * are not using polling mode.
+ */
+ for (ce_id = 0; ce_id < CE_COUNT; ce_id++)
+ ath10k_ce_enable_interrupt(ar, ce_id);
+}
+EXPORT_SYMBOL(ath10k_ce_enable_interrupts);
+
+static int ath10k_ce_init_src_ring(struct ath10k *ar,
+ unsigned int ce_id,
+ const struct ce_attr *attr)
+{
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+ struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
+ struct ath10k_ce_ring *src_ring = ce_state->src_ring;
+ u32 nentries, ctrl_addr = ath10k_ce_base_address(ar, ce_id);
+
+ nentries = roundup_pow_of_two(attr->src_nentries);
+
+ if (ar->hw_params.target_64bit)
+ memset(src_ring->base_addr_owner_space, 0,
+ nentries * sizeof(struct ce_desc_64));
+ else
+ memset(src_ring->base_addr_owner_space, 0,
+ nentries * sizeof(struct ce_desc));
+
+ src_ring->sw_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
+ src_ring->sw_index &= src_ring->nentries_mask;
+ src_ring->hw_index = src_ring->sw_index;
+
+ src_ring->write_index =
+ ath10k_ce_src_ring_write_index_get(ar, ctrl_addr);
+ src_ring->write_index &= src_ring->nentries_mask;
+
+ ath10k_ce_src_ring_base_addr_set(ar, ce_id,
+ src_ring->base_addr_ce_space);
+ ath10k_ce_src_ring_size_set(ar, ctrl_addr, nentries);
+ ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, attr->src_sz_max);
+ ath10k_ce_src_ring_byte_swap_set(ar, ctrl_addr, 0);
+ ath10k_ce_src_ring_lowmark_set(ar, ctrl_addr, 0);
+ ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, nentries);
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "boot init ce src ring id %d entries %d base_addr %p\n",
+ ce_id, nentries, src_ring->base_addr_owner_space);
+
+ return 0;
+}
+
+static int ath10k_ce_init_dest_ring(struct ath10k *ar,
+ unsigned int ce_id,
+ const struct ce_attr *attr)
+{
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+ struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
+ struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
+ u32 nentries, ctrl_addr = ath10k_ce_base_address(ar, ce_id);
+
+ nentries = roundup_pow_of_two(attr->dest_nentries);
+
+ if (ar->hw_params.target_64bit)
+ memset(dest_ring->base_addr_owner_space, 0,
+ nentries * sizeof(struct ce_desc_64));
+ else
+ memset(dest_ring->base_addr_owner_space, 0,
+ nentries * sizeof(struct ce_desc));
+
+ dest_ring->sw_index = ath10k_ce_dest_ring_read_index_get(ar, ctrl_addr);
+ dest_ring->sw_index &= dest_ring->nentries_mask;
+ dest_ring->write_index =
+ ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr);
+ dest_ring->write_index &= dest_ring->nentries_mask;
+
+ ath10k_ce_dest_ring_base_addr_set(ar, ce_id,
+ dest_ring->base_addr_ce_space);
+ ath10k_ce_dest_ring_size_set(ar, ctrl_addr, nentries);
+ ath10k_ce_dest_ring_byte_swap_set(ar, ctrl_addr, 0);
+ ath10k_ce_dest_ring_lowmark_set(ar, ctrl_addr, 0);
+ ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, nentries);
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "boot ce dest ring id %d entries %d base_addr %p\n",
+ ce_id, nentries, dest_ring->base_addr_owner_space);
+
+ return 0;
+}
+
+static int ath10k_ce_alloc_shadow_base(struct ath10k *ar,
+ struct ath10k_ce_ring *src_ring,
+ u32 nentries)
+{
+ src_ring->shadow_base_unaligned = kcalloc(nentries,
+ sizeof(struct ce_desc_64),
+ GFP_KERNEL);
+ if (!src_ring->shadow_base_unaligned)
+ return -ENOMEM;
+
+ src_ring->shadow_base = (struct ce_desc_64 *)
+ PTR_ALIGN(src_ring->shadow_base_unaligned,
+ CE_DESC_RING_ALIGN);
+ return 0;
+}
+
+static struct ath10k_ce_ring *
+ath10k_ce_alloc_src_ring(struct ath10k *ar, unsigned int ce_id,
+ const struct ce_attr *attr)
+{
+ struct ath10k_ce_ring *src_ring;
+ u32 nentries = attr->src_nentries;
+ dma_addr_t base_addr;
+ int ret;
+
+ nentries = roundup_pow_of_two(nentries);
+
+ src_ring = kzalloc(struct_size(src_ring, per_transfer_context,
+ nentries), GFP_KERNEL);
+ if (src_ring == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ src_ring->nentries = nentries;
+ src_ring->nentries_mask = nentries - 1;
+
+ /*
+ * Legacy platforms that do not support cache
+ * coherent DMA are unsupported
+ */
+ src_ring->base_addr_owner_space_unaligned =
+ dma_alloc_coherent(ar->dev,
+ (nentries * sizeof(struct ce_desc) +
+ CE_DESC_RING_ALIGN),
+ &base_addr, GFP_KERNEL);
+ if (!src_ring->base_addr_owner_space_unaligned) {
+ kfree(src_ring);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ src_ring->base_addr_ce_space_unaligned = base_addr;
+
+ src_ring->base_addr_owner_space =
+ PTR_ALIGN(src_ring->base_addr_owner_space_unaligned,
+ CE_DESC_RING_ALIGN);
+ src_ring->base_addr_ce_space =
+ ALIGN(src_ring->base_addr_ce_space_unaligned,
+ CE_DESC_RING_ALIGN);
+
+ if (ar->hw_params.shadow_reg_support) {
+ ret = ath10k_ce_alloc_shadow_base(ar, src_ring, nentries);
+ if (ret) {
+ dma_free_coherent(ar->dev,
+ (nentries * sizeof(struct ce_desc) +
+ CE_DESC_RING_ALIGN),
+ src_ring->base_addr_owner_space_unaligned,
+ base_addr);
+ kfree(src_ring);
+ return ERR_PTR(ret);
+ }
+ }
+
+ return src_ring;
+}
+
+static struct ath10k_ce_ring *
+ath10k_ce_alloc_src_ring_64(struct ath10k *ar, unsigned int ce_id,
+ const struct ce_attr *attr)
+{
+ struct ath10k_ce_ring *src_ring;
+ u32 nentries = attr->src_nentries;
+ dma_addr_t base_addr;
+ int ret;
+
+ nentries = roundup_pow_of_two(nentries);
+
+ src_ring = kzalloc(struct_size(src_ring, per_transfer_context,
+ nentries), GFP_KERNEL);
+ if (!src_ring)
+ return ERR_PTR(-ENOMEM);
+
+ src_ring->nentries = nentries;
+ src_ring->nentries_mask = nentries - 1;
+
+ /* Legacy platforms that do not support cache
+ * coherent DMA are unsupported
+ */
+ src_ring->base_addr_owner_space_unaligned =
+ dma_alloc_coherent(ar->dev,
+ (nentries * sizeof(struct ce_desc_64) +
+ CE_DESC_RING_ALIGN),
+ &base_addr, GFP_KERNEL);
+ if (!src_ring->base_addr_owner_space_unaligned) {
+ kfree(src_ring);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ src_ring->base_addr_ce_space_unaligned = base_addr;
+
+ src_ring->base_addr_owner_space =
+ PTR_ALIGN(src_ring->base_addr_owner_space_unaligned,
+ CE_DESC_RING_ALIGN);
+ src_ring->base_addr_ce_space =
+ ALIGN(src_ring->base_addr_ce_space_unaligned,
+ CE_DESC_RING_ALIGN);
+
+ if (ar->hw_params.shadow_reg_support) {
+ ret = ath10k_ce_alloc_shadow_base(ar, src_ring, nentries);
+ if (ret) {
+ dma_free_coherent(ar->dev,
+ (nentries * sizeof(struct ce_desc_64) +
+ CE_DESC_RING_ALIGN),
+ src_ring->base_addr_owner_space_unaligned,
+ base_addr);
+ kfree(src_ring);
+ return ERR_PTR(ret);
+ }
+ }
+
+ return src_ring;
+}
+
+static struct ath10k_ce_ring *
+ath10k_ce_alloc_dest_ring(struct ath10k *ar, unsigned int ce_id,
+ const struct ce_attr *attr)
+{
+ struct ath10k_ce_ring *dest_ring;
+ u32 nentries;
+ dma_addr_t base_addr;
+
+ nentries = roundup_pow_of_two(attr->dest_nentries);
+
+ dest_ring = kzalloc(struct_size(dest_ring, per_transfer_context,
+ nentries), GFP_KERNEL);
+ if (dest_ring == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ dest_ring->nentries = nentries;
+ dest_ring->nentries_mask = nentries - 1;
+
+ /*
+ * Legacy platforms that do not support cache
+ * coherent DMA are unsupported
+ */
+ dest_ring->base_addr_owner_space_unaligned =
+ dma_alloc_coherent(ar->dev,
+ (nentries * sizeof(struct ce_desc) +
+ CE_DESC_RING_ALIGN),
+ &base_addr, GFP_KERNEL);
+ if (!dest_ring->base_addr_owner_space_unaligned) {
+ kfree(dest_ring);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ dest_ring->base_addr_ce_space_unaligned = base_addr;
+
+ dest_ring->base_addr_owner_space =
+ PTR_ALIGN(dest_ring->base_addr_owner_space_unaligned,
+ CE_DESC_RING_ALIGN);
+ dest_ring->base_addr_ce_space =
+ ALIGN(dest_ring->base_addr_ce_space_unaligned,
+ CE_DESC_RING_ALIGN);
+
+ return dest_ring;
+}
+
+static struct ath10k_ce_ring *
+ath10k_ce_alloc_dest_ring_64(struct ath10k *ar, unsigned int ce_id,
+ const struct ce_attr *attr)
+{
+ struct ath10k_ce_ring *dest_ring;
+ u32 nentries;
+ dma_addr_t base_addr;
+
+ nentries = roundup_pow_of_two(attr->dest_nentries);
+
+ dest_ring = kzalloc(struct_size(dest_ring, per_transfer_context,
+ nentries), GFP_KERNEL);
+ if (!dest_ring)
+ return ERR_PTR(-ENOMEM);
+
+ dest_ring->nentries = nentries;
+ dest_ring->nentries_mask = nentries - 1;
+
+ /* Legacy platforms that do not support cache
+ * coherent DMA are unsupported
+ */
+ dest_ring->base_addr_owner_space_unaligned =
+ dma_alloc_coherent(ar->dev,
+ (nentries * sizeof(struct ce_desc_64) +
+ CE_DESC_RING_ALIGN),
+ &base_addr, GFP_KERNEL);
+ if (!dest_ring->base_addr_owner_space_unaligned) {
+ kfree(dest_ring);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ dest_ring->base_addr_ce_space_unaligned = base_addr;
+
+ /* Initialize the memory to 0 to prevent garbage data
+ * from crashing the system during firmware download
+ */
+ dest_ring->base_addr_owner_space =
+ PTR_ALIGN(dest_ring->base_addr_owner_space_unaligned,
+ CE_DESC_RING_ALIGN);
+ dest_ring->base_addr_ce_space =
+ ALIGN(dest_ring->base_addr_ce_space_unaligned,
+ CE_DESC_RING_ALIGN);
+
+ return dest_ring;
+}
+
+/*
+ * Initialize a Copy Engine based on caller-supplied attributes.
+ * This may be called once to initialize both source and destination
+ * rings or it may be called twice for separate source and destination
+ * initialization. It may be that only one side or the other is
+ * initialized by software/firmware.
+ */
+int ath10k_ce_init_pipe(struct ath10k *ar, unsigned int ce_id,
+ const struct ce_attr *attr)
+{
+ int ret;
+
+ if (attr->src_nentries) {
+ ret = ath10k_ce_init_src_ring(ar, ce_id, attr);
+ if (ret) {
+ ath10k_err(ar, "Failed to initialize CE src ring for ID: %d (%d)\n",
+ ce_id, ret);
+ return ret;
+ }
+ }
+
+ if (attr->dest_nentries) {
+ ret = ath10k_ce_init_dest_ring(ar, ce_id, attr);
+ if (ret) {
+ ath10k_err(ar, "Failed to initialize CE dest ring for ID: %d (%d)\n",
+ ce_id, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(ath10k_ce_init_pipe);
+
+static void ath10k_ce_deinit_src_ring(struct ath10k *ar, unsigned int ce_id)
+{
+ u32 ctrl_addr = ath10k_ce_base_address(ar, ce_id);
+
+ ath10k_ce_src_ring_base_addr_set(ar, ce_id, 0);
+ ath10k_ce_src_ring_size_set(ar, ctrl_addr, 0);
+ ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, 0);
+ ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, 0);
+}
+
+static void ath10k_ce_deinit_dest_ring(struct ath10k *ar, unsigned int ce_id)
+{
+ u32 ctrl_addr = ath10k_ce_base_address(ar, ce_id);
+
+ ath10k_ce_dest_ring_base_addr_set(ar, ce_id, 0);
+ ath10k_ce_dest_ring_size_set(ar, ctrl_addr, 0);
+ ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, 0);
+}
+
+void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id)
+{
+ ath10k_ce_deinit_src_ring(ar, ce_id);
+ ath10k_ce_deinit_dest_ring(ar, ce_id);
+}
+EXPORT_SYMBOL(ath10k_ce_deinit_pipe);
+
+static void _ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
+{
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+ struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
+
+ if (ce_state->src_ring) {
+ if (ar->hw_params.shadow_reg_support)
+ kfree(ce_state->src_ring->shadow_base_unaligned);
+ dma_free_coherent(ar->dev,
+ (ce_state->src_ring->nentries *
+ sizeof(struct ce_desc) +
+ CE_DESC_RING_ALIGN),
+ ce_state->src_ring->base_addr_owner_space,
+ ce_state->src_ring->base_addr_ce_space);
+ kfree(ce_state->src_ring);
+ }
+
+ if (ce_state->dest_ring) {
+ dma_free_coherent(ar->dev,
+ (ce_state->dest_ring->nentries *
+ sizeof(struct ce_desc) +
+ CE_DESC_RING_ALIGN),
+ ce_state->dest_ring->base_addr_owner_space,
+ ce_state->dest_ring->base_addr_ce_space);
+ kfree(ce_state->dest_ring);
+ }
+
+ ce_state->src_ring = NULL;
+ ce_state->dest_ring = NULL;
+}
+
+static void _ath10k_ce_free_pipe_64(struct ath10k *ar, int ce_id)
+{
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+ struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
+
+ if (ce_state->src_ring) {
+ if (ar->hw_params.shadow_reg_support)
+ kfree(ce_state->src_ring->shadow_base_unaligned);
+ dma_free_coherent(ar->dev,
+ (ce_state->src_ring->nentries *
+ sizeof(struct ce_desc_64) +
+ CE_DESC_RING_ALIGN),
+ ce_state->src_ring->base_addr_owner_space,
+ ce_state->src_ring->base_addr_ce_space);
+ kfree(ce_state->src_ring);
+ }
+
+ if (ce_state->dest_ring) {
+ dma_free_coherent(ar->dev,
+ (ce_state->dest_ring->nentries *
+ sizeof(struct ce_desc_64) +
+ CE_DESC_RING_ALIGN),
+ ce_state->dest_ring->base_addr_owner_space,
+ ce_state->dest_ring->base_addr_ce_space);
+ kfree(ce_state->dest_ring);
+ }
+
+ ce_state->src_ring = NULL;
+ ce_state->dest_ring = NULL;
+}
+
+void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
+{
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+ struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
+
+ ce_state->ops->ce_free_pipe(ar, ce_id);
+}
+EXPORT_SYMBOL(ath10k_ce_free_pipe);
+
+void ath10k_ce_dump_registers(struct ath10k *ar,
+ struct ath10k_fw_crash_data *crash_data)
+{
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+ struct ath10k_ce_crash_data ce_data;
+ u32 addr, id;
+
+ lockdep_assert_held(&ar->dump_mutex);
+
+ ath10k_err(ar, "Copy Engine register dump:\n");
+
+ spin_lock_bh(&ce->ce_lock);
+ for (id = 0; id < CE_COUNT; id++) {
+ addr = ath10k_ce_base_address(ar, id);
+ ce_data.base_addr = cpu_to_le32(addr);
+
+ ce_data.src_wr_idx =
+ cpu_to_le32(ath10k_ce_src_ring_write_index_get(ar, addr));
+ ce_data.src_r_idx =
+ cpu_to_le32(ath10k_ce_src_ring_read_index_get(ar, addr));
+ ce_data.dst_wr_idx =
+ cpu_to_le32(ath10k_ce_dest_ring_write_index_get(ar, addr));
+ ce_data.dst_r_idx =
+ cpu_to_le32(ath10k_ce_dest_ring_read_index_get(ar, addr));
+
+ if (crash_data)
+ crash_data->ce_crash_data[id] = ce_data;
+
+ ath10k_err(ar, "[%02d]: 0x%08x %3u %3u %3u %3u", id,
+ le32_to_cpu(ce_data.base_addr),
+ le32_to_cpu(ce_data.src_wr_idx),
+ le32_to_cpu(ce_data.src_r_idx),
+ le32_to_cpu(ce_data.dst_wr_idx),
+ le32_to_cpu(ce_data.dst_r_idx));
+ }
+
+ spin_unlock_bh(&ce->ce_lock);
+}
+EXPORT_SYMBOL(ath10k_ce_dump_registers);
+
+static const struct ath10k_ce_ops ce_ops = {
+ .ce_alloc_src_ring = ath10k_ce_alloc_src_ring,
+ .ce_alloc_dst_ring = ath10k_ce_alloc_dest_ring,
+ .ce_rx_post_buf = __ath10k_ce_rx_post_buf,
+ .ce_completed_recv_next_nolock = _ath10k_ce_completed_recv_next_nolock,
+ .ce_revoke_recv_next = _ath10k_ce_revoke_recv_next,
+ .ce_extract_desc_data = ath10k_ce_extract_desc_data,
+ .ce_free_pipe = _ath10k_ce_free_pipe,
+ .ce_send_nolock = _ath10k_ce_send_nolock,
+ .ce_set_src_ring_base_addr_hi = NULL,
+ .ce_set_dest_ring_base_addr_hi = NULL,
+ .ce_completed_send_next_nolock = _ath10k_ce_completed_send_next_nolock,
+};
+
+static const struct ath10k_ce_ops ce_64_ops = {
+ .ce_alloc_src_ring = ath10k_ce_alloc_src_ring_64,
+ .ce_alloc_dst_ring = ath10k_ce_alloc_dest_ring_64,
+ .ce_rx_post_buf = __ath10k_ce_rx_post_buf_64,
+ .ce_completed_recv_next_nolock =
+ _ath10k_ce_completed_recv_next_nolock_64,
+ .ce_revoke_recv_next = _ath10k_ce_revoke_recv_next_64,
+ .ce_extract_desc_data = ath10k_ce_extract_desc_data_64,
+ .ce_free_pipe = _ath10k_ce_free_pipe_64,
+ .ce_send_nolock = _ath10k_ce_send_nolock_64,
+ .ce_set_src_ring_base_addr_hi = ath10k_ce_set_src_ring_base_addr_hi,
+ .ce_set_dest_ring_base_addr_hi = ath10k_ce_set_dest_ring_base_addr_hi,
+ .ce_completed_send_next_nolock = _ath10k_ce_completed_send_next_nolock_64,
+};
+
+static void ath10k_ce_set_ops(struct ath10k *ar,
+ struct ath10k_ce_pipe *ce_state)
+{
+ switch (ar->hw_rev) {
+ case ATH10K_HW_WCN3990:
+ ce_state->ops = &ce_64_ops;
+ break;
+ default:
+ ce_state->ops = &ce_ops;
+ break;
+ }
+}
+
+int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
+ const struct ce_attr *attr)
+{
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+ struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
+ int ret;
+
+ ath10k_ce_set_ops(ar, ce_state);
+ /* Make sure there's enough CE ringbuffer entries for HTT TX to avoid
+ * additional TX locking checks.
+ *
+ * For lack of a better place, do the check here.
+ */
+ BUILD_BUG_ON(2 * TARGET_NUM_MSDU_DESC >
+ (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
+ BUILD_BUG_ON(2 * TARGET_10_4_NUM_MSDU_DESC_PFC >
+ (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
+ BUILD_BUG_ON(2 * TARGET_TLV_NUM_MSDU_DESC >
+ (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
+
+ ce_state->ar = ar;
+ ce_state->id = ce_id;
+ ce_state->ctrl_addr = ath10k_ce_base_address(ar, ce_id);
+ ce_state->attr_flags = attr->flags;
+ ce_state->src_sz_max = attr->src_sz_max;
+
+ if (attr->src_nentries)
+ ce_state->send_cb = attr->send_cb;
+
+ if (attr->dest_nentries)
+ ce_state->recv_cb = attr->recv_cb;
+
+ if (attr->src_nentries) {
+ ce_state->src_ring =
+ ce_state->ops->ce_alloc_src_ring(ar, ce_id, attr);
+ if (IS_ERR(ce_state->src_ring)) {
+ ret = PTR_ERR(ce_state->src_ring);
+ ath10k_err(ar, "failed to alloc CE src ring %d: %d\n",
+ ce_id, ret);
+ ce_state->src_ring = NULL;
+ return ret;
+ }
+ }
+
+ if (attr->dest_nentries) {
+ ce_state->dest_ring = ce_state->ops->ce_alloc_dst_ring(ar,
+ ce_id,
+ attr);
+ if (IS_ERR(ce_state->dest_ring)) {
+ ret = PTR_ERR(ce_state->dest_ring);
+ ath10k_err(ar, "failed to alloc CE dest ring %d: %d\n",
+ ce_id, ret);
+ ce_state->dest_ring = NULL;
+ return ret;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(ath10k_ce_alloc_pipe);
+
+void ath10k_ce_alloc_rri(struct ath10k *ar)
+{
+ int i;
+ u32 value;
+ u32 ctrl1_regs;
+ u32 ce_base_addr;
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+
+ ce->vaddr_rri = dma_alloc_coherent(ar->dev,
+ (CE_COUNT * sizeof(u32)),
+ &ce->paddr_rri, GFP_KERNEL);
+
+ if (!ce->vaddr_rri)
+ return;
+
+ ath10k_ce_write32(ar, ar->hw_ce_regs->ce_rri_low,
+ lower_32_bits(ce->paddr_rri));
+ ath10k_ce_write32(ar, ar->hw_ce_regs->ce_rri_high,
+ (upper_32_bits(ce->paddr_rri) &
+ CE_DESC_ADDR_HI_MASK));
+
+ for (i = 0; i < CE_COUNT; i++) {
+ ctrl1_regs = ar->hw_ce_regs->ctrl1_regs->addr;
+ ce_base_addr = ath10k_ce_base_address(ar, i);
+ value = ath10k_ce_read32(ar, ce_base_addr + ctrl1_regs);
+ value |= ar->hw_ce_regs->upd->mask;
+ ath10k_ce_write32(ar, ce_base_addr + ctrl1_regs, value);
+ }
+}
+EXPORT_SYMBOL(ath10k_ce_alloc_rri);
+
+void ath10k_ce_free_rri(struct ath10k *ar)
+{
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+
+ dma_free_coherent(ar->dev, (CE_COUNT * sizeof(u32)),
+ ce->vaddr_rri,
+ ce->paddr_rri);
+}
+EXPORT_SYMBOL(ath10k_ce_free_rri);
diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h
new file mode 100644
index 000000000000..27367bd64e95
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/ce.h
@@ -0,0 +1,427 @@
+/* SPDX-License-Identifier: ISC */
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CE_H_
+#define _CE_H_
+
+#include "hif.h"
+
+#define CE_HTT_H2T_MSG_SRC_NENTRIES 8192
+
+/* Descriptor rings must be aligned to this boundary */
+#define CE_DESC_RING_ALIGN 8
+#define CE_SEND_FLAG_GATHER 0x00010000
+
+/*
+ * Copy Engine support: low-level Target-side Copy Engine API.
+ * This is a hardware access layer used by code that understands
+ * how to use copy engines.
+ */
+
+struct ath10k_ce_pipe;
+
+#define CE_DESC_FLAGS_GATHER (1 << 0)
+#define CE_DESC_FLAGS_BYTE_SWAP (1 << 1)
+#define CE_WCN3990_DESC_FLAGS_GATHER BIT(31)
+
+#define CE_DESC_ADDR_MASK GENMASK_ULL(34, 0)
+#define CE_DESC_ADDR_HI_MASK GENMASK(4, 0)
+
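+/*
+ * Worked example (illustrative): a 35-bit CE address such as
+ * 0x5_8000_0000 is programmed as addr_lo = 0x80000000 (the low
+ * 32 bits) and addr_hi = upper_32_bits(addr) & CE_DESC_ADDR_HI_MASK
+ * = 0x5.
+ */
+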
+/* Following desc flags are used in QCA99X0 */
+#define CE_DESC_FLAGS_HOST_INT_DIS (1 << 2)
+#define CE_DESC_FLAGS_TGT_INT_DIS (1 << 3)
+
+#define CE_DESC_FLAGS_META_DATA_MASK ar->hw_values->ce_desc_meta_data_mask
+#define CE_DESC_FLAGS_META_DATA_LSB ar->hw_values->ce_desc_meta_data_lsb
+
+#define CE_DDR_RRI_MASK GENMASK(15, 0)
+#define CE_DDR_DRRI_SHIFT 16
+
+struct ce_desc {
+ __le32 addr;
+ __le16 nbytes;
+ __le16 flags; /* %CE_DESC_FLAGS_ */
+};
+
+struct ce_desc_64 {
+ __le64 addr;
+ __le16 nbytes; /* length in register map */
+ __le16 flags; /* fw_metadata_high */
+ __le32 toeplitz_hash_result;
+};
+
+#define CE_DESC_SIZE sizeof(struct ce_desc)
+#define CE_DESC_SIZE_64 sizeof(struct ce_desc_64)
+
+struct ath10k_ce_ring {
+ /* Number of entries in this ring; must be power of 2 */
+ unsigned int nentries;
+ unsigned int nentries_mask;
+
+ /*
+ * For dest ring, this is the next index to be processed
+ * by software after it was/is received into.
+ *
+ * For src ring, this is the last descriptor that was sent
+ * and completion processed by software.
+ *
+ * Regardless of src or dest ring, this is an invariant
+ * (modulo ring size):
+ * write index >= read index >= sw_index
+ */
+ unsigned int sw_index;
+ /* cached copy */
+ unsigned int write_index;
+ /*
+ * For src ring, this is the next index not yet processed by HW.
+ * This is a cached copy of the real HW index (read index), used
+ * for avoiding reading the HW index register more often than
+ * necessary.
+ * This extends the invariant:
+ * write index >= read index >= hw_index >= sw_index
+ *
+ * For dest ring, this is currently unused.
+ */
+ /* cached copy */
+ unsigned int hw_index;
+
+ /* Start of DMA-coherent area reserved for descriptors */
+ /* Host address space */
+ void *base_addr_owner_space_unaligned;
+ /* CE address space */
+ dma_addr_t base_addr_ce_space_unaligned;
+
+ /*
+ * Actual start of descriptors.
+ * Aligned to descriptor-size boundary.
+ * Points into reserved DMA-coherent area, above.
+ */
+ /* Host address space */
+ void *base_addr_owner_space;
+
+ /* CE address space */
+ dma_addr_t base_addr_ce_space;
+
+ char *shadow_base_unaligned;
+ struct ce_desc_64 *shadow_base;
+
+ /* keep last */
+ void *per_transfer_context[] __counted_by(nentries);
+};
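+
+/*
+ * Index lifecycle illustration (not normative): on a source ring, software
+ * bumps write_index as descriptors are queued, hardware advances its read
+ * index (cached here as hw_index) as it consumes them, and software bumps
+ * sw_index as send completions are processed, so the three indices chase
+ * each other around the ring, modulo nentries.
+ */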
+
+struct ath10k_ce_pipe {
+ struct ath10k *ar;
+ unsigned int id;
+
+ unsigned int attr_flags;
+
+ u32 ctrl_addr;
+
+ void (*send_cb)(struct ath10k_ce_pipe *);
+ void (*recv_cb)(struct ath10k_ce_pipe *);
+
+ unsigned int src_sz_max;
+ struct ath10k_ce_ring *src_ring;
+ struct ath10k_ce_ring *dest_ring;
+ const struct ath10k_ce_ops *ops;
+};
+
+/* Copy Engine settable attributes */
+struct ce_attr;
+
+struct ath10k_bus_ops {
+ u32 (*read32)(struct ath10k *ar, u32 offset);
+ void (*write32)(struct ath10k *ar, u32 offset, u32 value);
+ int (*get_num_banks)(struct ath10k *ar);
+};
+
+static inline struct ath10k_ce *ath10k_ce_priv(struct ath10k *ar)
+{
+ return (struct ath10k_ce *)ar->ce_priv;
+}
+
+struct ath10k_ce {
+ /* protects CE info */
+ spinlock_t ce_lock;
+ const struct ath10k_bus_ops *bus_ops;
+ struct ath10k_ce_pipe ce_states[CE_COUNT_MAX];
+ u32 *vaddr_rri;
+ dma_addr_t paddr_rri;
+};
+
+/*==================Send====================*/
+
+/* ath10k_ce_send flags */
+#define CE_SEND_FLAG_BYTE_SWAP 1
+
+/*
+ * Queue a source buffer to be sent to an anonymous destination buffer.
+ * ce_state - which copy engine to use
+ * per_transfer_send_context - per-transfer cookie returned on completion
+ * buffer - address of buffer
+ * nbytes - number of bytes to send
+ * transfer_id - arbitrary ID; reflected to destination
+ * flags - CE_SEND_FLAG_* values
+ * Returns 0 on success; otherwise an error status.
+ *
+ * Note: If no flags are specified, use CE's default data swap mode.
+ *
+ * Implementation note: pushes 1 buffer to Source ring
+ */
+int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
+ void *per_transfer_send_context,
+ dma_addr_t buffer,
+ unsigned int nbytes,
+ /* 14 bits */
+ unsigned int transfer_id,
+ unsigned int flags);
+
+int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
+ void *per_transfer_context,
+ dma_addr_t buffer,
+ unsigned int nbytes,
+ unsigned int transfer_id,
+ unsigned int flags);
+
+void __ath10k_ce_send_revert(struct ath10k_ce_pipe *pipe);
+
+int ath10k_ce_num_free_src_entries(struct ath10k_ce_pipe *pipe);
+
+/*==================Recv=======================*/
+
+int __ath10k_ce_rx_num_free_bufs(struct ath10k_ce_pipe *pipe);
+int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx,
+ dma_addr_t paddr);
+void ath10k_ce_rx_update_write_idx(struct ath10k_ce_pipe *pipe, u32 nentries);
+
+/* recv flags */
+/* Data is byte-swapped */
+#define CE_RECV_FLAG_SWAPPED 1
+
+/*
+ * Supply data for the next completed unprocessed receive descriptor.
+ * Pops buffer from Dest ring.
+ */
+int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
+ void **per_transfer_contextp,
+ unsigned int *nbytesp);
+/*
+ * Supply data for the next completed unprocessed send descriptor.
+ * Pops 1 completed send buffer from Source ring.
+ */
+int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,
+ void **per_transfer_contextp);
+
+int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
+ void **per_transfer_contextp);
+
+/*==================CE Engine Initialization=======================*/
+
+int ath10k_ce_init_pipe(struct ath10k *ar, unsigned int ce_id,
+ const struct ce_attr *attr);
+void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id);
+int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
+ const struct ce_attr *attr);
+void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id);
+
+/*==================CE Engine Shutdown=======================*/
+/*
+ * Support clean shutdown by allowing the caller to revoke
+ * receive buffers. Target DMA must be stopped before using
+ * this API.
+ */
+int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
+ void **per_transfer_contextp,
+ dma_addr_t *bufferp);
+
+int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
+ void **per_transfer_contextp,
+ unsigned int *nbytesp);
+
+/*
+ * Support clean shutdown by allowing the caller to cancel
+ * pending sends. Target DMA must be stopped before using
+ * this API.
+ */
+int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,
+ void **per_transfer_contextp,
+ dma_addr_t *bufferp,
+ unsigned int *nbytesp,
+ unsigned int *transfer_idp);
+
+/*==================CE Interrupt Handlers====================*/
+void ath10k_ce_per_engine_service_any(struct ath10k *ar);
+void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id);
+void ath10k_ce_disable_interrupt(struct ath10k *ar, int ce_id);
+void ath10k_ce_disable_interrupts(struct ath10k *ar);
+void ath10k_ce_enable_interrupt(struct ath10k *ar, int ce_id);
+void ath10k_ce_enable_interrupts(struct ath10k *ar);
+void ath10k_ce_dump_registers(struct ath10k *ar,
+ struct ath10k_fw_crash_data *crash_data);
+
+void ath10k_ce_alloc_rri(struct ath10k *ar);
+void ath10k_ce_free_rri(struct ath10k *ar);
+
+/* ce_attr.flags values */
+/* Use NonSnooping PCIe accesses? */
+#define CE_ATTR_NO_SNOOP BIT(0)
+
+/* Byte swap data words */
+#define CE_ATTR_BYTE_SWAP_DATA BIT(1)
+
+/* Swizzle descriptors? */
+#define CE_ATTR_SWIZZLE_DESCRIPTORS BIT(2)
+
+/* no interrupt on copy completion */
+#define CE_ATTR_DIS_INTR BIT(3)
+
+/* no interrupt, only polling */
+#define CE_ATTR_POLL BIT(4)
+
+/* Attributes of an instance of a Copy Engine */
+struct ce_attr {
+ /* CE_ATTR_* values */
+ unsigned int flags;
+
+ /* #entries in source ring - Must be a power of 2 */
+ unsigned int src_nentries;
+
+ /*
+ * Max source send size for this CE.
+ * This is also the minimum size of a destination buffer.
+ */
+ unsigned int src_sz_max;
+
+ /* #entries in destination ring - Must be a power of 2 */
+ unsigned int dest_nentries;
+
+ void (*send_cb)(struct ath10k_ce_pipe *);
+ void (*recv_cb)(struct ath10k_ce_pipe *);
+};
+
+struct ath10k_ce_ops {
+ struct ath10k_ce_ring *(*ce_alloc_src_ring)(struct ath10k *ar,
+ u32 ce_id,
+ const struct ce_attr *attr);
+ struct ath10k_ce_ring *(*ce_alloc_dst_ring)(struct ath10k *ar,
+ u32 ce_id,
+ const struct ce_attr *attr);
+ int (*ce_rx_post_buf)(struct ath10k_ce_pipe *pipe, void *ctx,
+ dma_addr_t paddr);
+ int (*ce_completed_recv_next_nolock)(struct ath10k_ce_pipe *ce_state,
+ void **per_transfer_contextp,
+ u32 *nbytesp);
+ int (*ce_revoke_recv_next)(struct ath10k_ce_pipe *ce_state,
+ void **per_transfer_contextp,
+ dma_addr_t *bufferp);
+ void (*ce_extract_desc_data)(struct ath10k *ar,
+ struct ath10k_ce_ring *src_ring,
+ u32 sw_index, dma_addr_t *bufferp,
+ u32 *nbytesp, u32 *transfer_idp);
+ void (*ce_free_pipe)(struct ath10k *ar, int ce_id);
+ int (*ce_send_nolock)(struct ath10k_ce_pipe *pipe,
+ void *per_transfer_context,
+ dma_addr_t buffer, u32 nbytes,
+ u32 transfer_id, u32 flags);
+ void (*ce_set_src_ring_base_addr_hi)(struct ath10k *ar,
+ u32 ce_ctrl_addr,
+ u64 addr);
+ void (*ce_set_dest_ring_base_addr_hi)(struct ath10k *ar,
+ u32 ce_ctrl_addr,
+ u64 addr);
+ int (*ce_completed_send_next_nolock)(struct ath10k_ce_pipe *ce_state,
+ void **per_transfer_contextp);
+};
+
+static inline u32 ath10k_ce_base_address(struct ath10k *ar, unsigned int ce_id)
+{
+ return CE0_BASE_ADDRESS + (CE1_BASE_ADDRESS - CE0_BASE_ADDRESS) * ce_id;
+}
+
+#define COPY_ENGINE_ID(COPY_ENGINE_BASE_ADDRESS) (((COPY_ENGINE_BASE_ADDRESS) \
+ - CE0_BASE_ADDRESS) / (CE1_BASE_ADDRESS - CE0_BASE_ADDRESS))
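+
+/*
+ * COPY_ENGINE_ID() is the inverse of ath10k_ce_base_address(): it recovers
+ * a CE index from its wrapper base address, relying on the per-CE register
+ * blocks being laid out at a constant stride.
+ */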
+
+#define CE_SRC_RING_TO_DESC(baddr, idx) \
+ (&(((struct ce_desc *)baddr)[idx]))
+
+#define CE_DEST_RING_TO_DESC(baddr, idx) \
+ (&(((struct ce_desc *)baddr)[idx]))
+
+#define CE_SRC_RING_TO_DESC_64(baddr, idx) \
+ (&(((struct ce_desc_64 *)baddr)[idx]))
+
+#define CE_DEST_RING_TO_DESC_64(baddr, idx) \
+ (&(((struct ce_desc_64 *)baddr)[idx]))
+
+/* Ring arithmetic (modulo the number of ring entries, which is a power of 2). */
+#define CE_RING_DELTA(nentries_mask, fromidx, toidx) \
+ (((int)(toidx) - (int)(fromidx)) & (nentries_mask))
+
+#define CE_RING_IDX_INCR(nentries_mask, idx) (((idx) + 1) & (nentries_mask))
+#define CE_RING_IDX_ADD(nentries_mask, idx, num) \
+ (((idx) + (num)) & (nentries_mask))
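+
+/*
+ * Worked example (illustrative): with nentries = 512 and
+ * nentries_mask = 0x1ff, a ring that wrapped from fromidx = 510 to
+ * toidx = 5 gives
+ *
+ * CE_RING_DELTA(0x1ff, 510, 5) == ((5 - 510) & 0x1ff) == 7
+ *
+ * i.e. the AND with the power-of-2 mask makes the signed subtraction wrap
+ * correctly across the end of the ring.
+ */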
+
+#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB \
+ ar->regs->ce_wrap_intr_sum_host_msi_lsb
+#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK \
+ ar->regs->ce_wrap_intr_sum_host_msi_mask
+#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_GET(x) \
+ (((x) & CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK) >> \
+ CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB)
+#define CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS 0x0000
+
+static inline u32 ath10k_ce_interrupt_summary(struct ath10k *ar)
+{
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+
+ return CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_GET(
+ ce->bus_ops->read32((ar), CE_WRAPPER_BASE_ADDRESS +
+ CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS));
+}
+
+/* Host software's Copy Engine configuration. */
+#define CE_ATTR_FLAGS 0
+
+/*
+ * Configuration information for a Copy Engine pipe.
+ * Passed from Host to Target during startup (one per CE).
+ *
+ * NOTE: Structure is shared between Host software and Target firmware!
+ */
+struct ce_pipe_config {
+ __le32 pipenum;
+ __le32 pipedir;
+ __le32 nentries;
+ __le32 nbytes_max;
+ __le32 flags;
+ __le32 reserved;
+};
+
+/*
+ * Directions for interconnect pipe configuration.
+ * These definitions may be used during configuration and are shared
+ * between Host and Target.
+ *
+ * Pipe Directions are relative to the Host, so PIPEDIR_IN means
+ * "coming IN over air through Target to Host" as with a WiFi Rx operation.
+ * Conversely, PIPEDIR_OUT means "going OUT from Host through Target over air"
+ * as with a WiFi Tx operation. This is somewhat awkward for the "middle-man"
+ * Target since things that are "PIPEDIR_OUT" are coming IN to the Target
+ * over the interconnect.
+ */
+#define PIPEDIR_NONE 0
+#define PIPEDIR_IN 1 /* Target-->Host, WiFi Rx direction */
+#define PIPEDIR_OUT 2 /* Host->Target, WiFi Tx direction */
+#define PIPEDIR_INOUT 3 /* bidirectional */
+
+/* Establish a mapping between a service/direction and a pipe. */
+struct ce_service_to_pipe {
+ __le32 service_id;
+ __le32 pipedir;
+ __le32 pipenum;
+};
+
+#endif /* _CE_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
new file mode 100644
index 000000000000..7c2939cbde5f
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -0,0 +1,3784 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#include <linux/export.h>
+#include <linux/module.h>
+#include <linux/firmware.h>
+#include <linux/of.h>
+#include <linux/property.h>
+#include <linux/dmi.h>
+#include <linux/ctype.h>
+#include <linux/pm_qos.h>
+#include <linux/nvmem-consumer.h>
+#include <asm/byteorder.h>
+
+#include "core.h"
+#include "mac.h"
+#include "htc.h"
+#include "hif.h"
+#include "wmi.h"
+#include "bmi.h"
+#include "debug.h"
+#include "htt.h"
+#include "testmode.h"
+#include "wmi-ops.h"
+#include "coredump.h"
+#include "leds.h"
+
+unsigned int ath10k_debug_mask;
+EXPORT_SYMBOL(ath10k_debug_mask);
+
+static unsigned int ath10k_cryptmode_param;
+static bool uart_print;
+static bool skip_otp;
+static bool fw_diag_log;
+
+/* frame mode values are mapped as per enum ath10k_hw_txrx_mode */
+unsigned int ath10k_frame_mode = ATH10K_HW_TXRX_NATIVE_WIFI;
+
+unsigned long ath10k_coredump_mask = BIT(ATH10K_FW_CRASH_DUMP_REGISTERS) |
+ BIT(ATH10K_FW_CRASH_DUMP_CE_DATA);
+
+/* FIXME: most of these should be readonly */
+module_param_named(debug_mask, ath10k_debug_mask, uint, 0644);
+module_param_named(cryptmode, ath10k_cryptmode_param, uint, 0644);
+module_param(uart_print, bool, 0644);
+module_param(skip_otp, bool, 0644);
+module_param(fw_diag_log, bool, 0644);
+module_param_named(frame_mode, ath10k_frame_mode, uint, 0644);
+module_param_named(coredump_mask, ath10k_coredump_mask, ulong, 0444);
+
+MODULE_PARM_DESC(debug_mask, "Debugging mask");
+MODULE_PARM_DESC(uart_print, "Uart target debugging");
+MODULE_PARM_DESC(skip_otp, "Skip otp failure for calibration in testmode");
+MODULE_PARM_DESC(cryptmode, "Crypto mode: 0-hardware, 1-software");
+MODULE_PARM_DESC(frame_mode,
+ "Datapath frame mode (0: raw, 1: native wifi (default), 2: ethernet)");
+MODULE_PARM_DESC(coredump_mask, "Bitfield of what to include in firmware crash file");
+MODULE_PARM_DESC(fw_diag_log, "Diag based fw log debugging");
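+
+/*
+ * Usage example (illustrative): "modprobe ath10k_core debug_mask=0x2"
+ * enables one debug category at load time; since the parameter is
+ * registered with mode 0644 it can also be changed at runtime through
+ * /sys/module/ath10k_core/parameters/debug_mask. The bit assignments live
+ * in debug.h (enum ath10k_debug_mask).
+ */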
+
+static const struct ath10k_hw_params ath10k_hw_params_list[] = {
+ {
+ .id = QCA988X_HW_2_0_VERSION,
+ .dev_id = QCA988X_2_0_DEVICE_ID,
+ .bus = ATH10K_BUS_PCI,
+ .name = "qca988x hw2.0",
+ .patch_load_addr = QCA988X_HW_2_0_PATCH_LOAD_ADDR,
+ .uart_pin = 7,
+ .led_pin = 1,
+ .cc_wraparound_type = ATH10K_HW_CC_WRAP_SHIFTED_ALL,
+ .otp_exe_param = 0,
+ .channel_counters_freq_hz = 88000,
+ .max_probe_resp_desc_thres = 0,
+ .cal_data_len = 2116,
+ .fw = {
+ .dir = QCA988X_HW_2_0_FW_DIR,
+ .board_size = QCA988X_BOARD_DATA_SZ,
+ .board_ext_size = QCA988X_BOARD_EXT_DATA_SZ,
+ },
+ .rx_desc_ops = &qca988x_rx_desc_ops,
+ .hw_ops = &qca988x_ops,
+ .decap_align_bytes = 4,
+ .spectral_bin_discard = 0,
+ .spectral_bin_offset = 0,
+ .vht160_mcs_rx_highest = 0,
+ .vht160_mcs_tx_highest = 0,
+ .n_cipher_suites = 8,
+ .ast_skid_limit = 0x10,
+ .num_wds_entries = 0x20,
+ .target_64bit = false,
+ .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
+ .shadow_reg_support = false,
+ .rri_on_ddr = false,
+ .hw_filter_reset_required = true,
+ .fw_diag_ce_download = false,
+ .credit_size_workaround = false,
+ .tx_stats_over_pktlog = true,
+ .dynamic_sar_support = false,
+ .hw_restart_disconnect = false,
+ .use_fw_tx_credits = true,
+ .delay_unmap_buffer = false,
+ .mcast_frame_registration = false,
+ },
+ {
+ .id = QCA988X_HW_2_0_VERSION,
+ .dev_id = QCA988X_2_0_DEVICE_ID_UBNT,
+ .name = "qca988x hw2.0 ubiquiti",
+ .patch_load_addr = QCA988X_HW_2_0_PATCH_LOAD_ADDR,
+ .uart_pin = 7,
+ .led_pin = 0,
+ .cc_wraparound_type = ATH10K_HW_CC_WRAP_SHIFTED_ALL,
+ .otp_exe_param = 0,
+ .channel_counters_freq_hz = 88000,
+ .max_probe_resp_desc_thres = 0,
+ .cal_data_len = 2116,
+ .fw = {
+ .dir = QCA988X_HW_2_0_FW_DIR,
+ .board_size = QCA988X_BOARD_DATA_SZ,
+ .board_ext_size = QCA988X_BOARD_EXT_DATA_SZ,
+ },
+ .rx_desc_ops = &qca988x_rx_desc_ops,
+ .hw_ops = &qca988x_ops,
+ .decap_align_bytes = 4,
+ .spectral_bin_discard = 0,
+ .spectral_bin_offset = 0,
+ .vht160_mcs_rx_highest = 0,
+ .vht160_mcs_tx_highest = 0,
+ .n_cipher_suites = 8,
+ .ast_skid_limit = 0x10,
+ .num_wds_entries = 0x20,
+ .target_64bit = false,
+ .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
+ .shadow_reg_support = false,
+ .rri_on_ddr = false,
+ .hw_filter_reset_required = true,
+ .fw_diag_ce_download = false,
+ .credit_size_workaround = false,
+ .tx_stats_over_pktlog = true,
+ .dynamic_sar_support = false,
+ .hw_restart_disconnect = false,
+ .use_fw_tx_credits = true,
+ .delay_unmap_buffer = false,
+ .mcast_frame_registration = false,
+ },
+ {
+ .id = QCA9887_HW_1_0_VERSION,
+ .dev_id = QCA9887_1_0_DEVICE_ID,
+ .bus = ATH10K_BUS_PCI,
+ .name = "qca9887 hw1.0",
+ .patch_load_addr = QCA9887_HW_1_0_PATCH_LOAD_ADDR,
+ .uart_pin = 7,
+ .led_pin = 1,
+ .cc_wraparound_type = ATH10K_HW_CC_WRAP_SHIFTED_ALL,
+ .otp_exe_param = 0,
+ .channel_counters_freq_hz = 88000,
+ .max_probe_resp_desc_thres = 0,
+ .cal_data_len = 2116,
+ .fw = {
+ .dir = QCA9887_HW_1_0_FW_DIR,
+ .board_size = QCA9887_BOARD_DATA_SZ,
+ .board_ext_size = QCA9887_BOARD_EXT_DATA_SZ,
+ },
+ .rx_desc_ops = &qca988x_rx_desc_ops,
+ .hw_ops = &qca988x_ops,
+ .decap_align_bytes = 4,
+ .spectral_bin_discard = 0,
+ .spectral_bin_offset = 0,
+ .vht160_mcs_rx_highest = 0,
+ .vht160_mcs_tx_highest = 0,
+ .n_cipher_suites = 8,
+ .ast_skid_limit = 0x10,
+ .num_wds_entries = 0x20,
+ .target_64bit = false,
+ .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
+ .shadow_reg_support = false,
+ .rri_on_ddr = false,
+ .hw_filter_reset_required = true,
+ .fw_diag_ce_download = false,
+ .credit_size_workaround = false,
+ .tx_stats_over_pktlog = false,
+ .dynamic_sar_support = false,
+ .hw_restart_disconnect = false,
+ .use_fw_tx_credits = true,
+ .delay_unmap_buffer = false,
+ .mcast_frame_registration = false,
+ },
+ {
+ .id = QCA6174_HW_3_2_VERSION,
+ .dev_id = QCA6174_3_2_DEVICE_ID,
+ .bus = ATH10K_BUS_SDIO,
+ .name = "qca6174 hw3.2 sdio",
+ .patch_load_addr = QCA6174_HW_3_0_PATCH_LOAD_ADDR,
+ .uart_pin = 19,
+ .led_pin = 0,
+ .otp_exe_param = 0,
+ .channel_counters_freq_hz = 88000,
+ .max_probe_resp_desc_thres = 0,
+ .cal_data_len = 0,
+ .fw = {
+ .dir = QCA6174_HW_3_0_FW_DIR,
+ .board_size = QCA6174_BOARD_DATA_SZ,
+ .board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
+ },
+ .rx_desc_ops = &qca988x_rx_desc_ops,
+ .hw_ops = &qca6174_sdio_ops,
+ .hw_clk = qca6174_clk,
+ .target_cpu_freq = 176000000,
+ .decap_align_bytes = 4,
+ .n_cipher_suites = 8,
+ .num_peers = 10,
+ .ast_skid_limit = 0x10,
+ .num_wds_entries = 0x20,
+ .uart_pin_workaround = true,
+ .tx_stats_over_pktlog = false,
+ .credit_size_workaround = false,
+ .bmi_large_size_download = true,
+ .supports_peer_stats_info = true,
+ .dynamic_sar_support = true,
+ .hw_restart_disconnect = false,
+ .use_fw_tx_credits = true,
+ .delay_unmap_buffer = false,
+ .mcast_frame_registration = false,
+ },
+ {
+ .id = QCA6174_HW_2_1_VERSION,
+ .dev_id = QCA6164_2_1_DEVICE_ID,
+ .bus = ATH10K_BUS_PCI,
+ .name = "qca6164 hw2.1",
+ .patch_load_addr = QCA6174_HW_2_1_PATCH_LOAD_ADDR,
+ .uart_pin = 6,
+ .led_pin = 0,
+ .otp_exe_param = 0,
+ .channel_counters_freq_hz = 88000,
+ .max_probe_resp_desc_thres = 0,
+ .cal_data_len = 8124,
+ .fw = {
+ .dir = QCA6174_HW_2_1_FW_DIR,
+ .board_size = QCA6174_BOARD_DATA_SZ,
+ .board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
+ },
+ .rx_desc_ops = &qca988x_rx_desc_ops,
+ .hw_ops = &qca988x_ops,
+ .decap_align_bytes = 4,
+ .spectral_bin_discard = 0,
+ .spectral_bin_offset = 0,
+ .vht160_mcs_rx_highest = 0,
+ .vht160_mcs_tx_highest = 0,
+ .n_cipher_suites = 8,
+ .ast_skid_limit = 0x10,
+ .num_wds_entries = 0x20,
+ .target_64bit = false,
+ .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
+ .shadow_reg_support = false,
+ .rri_on_ddr = false,
+ .hw_filter_reset_required = true,
+ .fw_diag_ce_download = false,
+ .credit_size_workaround = false,
+ .tx_stats_over_pktlog = false,
+ .dynamic_sar_support = false,
+ .hw_restart_disconnect = false,
+ .use_fw_tx_credits = true,
+ .delay_unmap_buffer = false,
+ .mcast_frame_registration = false,
+ },
+ {
+ .id = QCA6174_HW_2_1_VERSION,
+ .dev_id = QCA6174_2_1_DEVICE_ID,
+ .bus = ATH10K_BUS_PCI,
+ .name = "qca6174 hw2.1",
+ .patch_load_addr = QCA6174_HW_2_1_PATCH_LOAD_ADDR,
+ .uart_pin = 6,
+ .led_pin = 0,
+ .otp_exe_param = 0,
+ .channel_counters_freq_hz = 88000,
+ .max_probe_resp_desc_thres = 0,
+ .cal_data_len = 8124,
+ .fw = {
+ .dir = QCA6174_HW_2_1_FW_DIR,
+ .board_size = QCA6174_BOARD_DATA_SZ,
+ .board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
+ },
+ .rx_desc_ops = &qca988x_rx_desc_ops,
+ .hw_ops = &qca988x_ops,
+ .decap_align_bytes = 4,
+ .spectral_bin_discard = 0,
+ .spectral_bin_offset = 0,
+ .vht160_mcs_rx_highest = 0,
+ .vht160_mcs_tx_highest = 0,
+ .n_cipher_suites = 8,
+ .ast_skid_limit = 0x10,
+ .num_wds_entries = 0x20,
+ .target_64bit = false,
+ .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
+ .shadow_reg_support = false,
+ .rri_on_ddr = false,
+ .hw_filter_reset_required = true,
+ .fw_diag_ce_download = false,
+ .credit_size_workaround = false,
+ .tx_stats_over_pktlog = false,
+ .dynamic_sar_support = false,
+ .hw_restart_disconnect = false,
+ .use_fw_tx_credits = true,
+ .delay_unmap_buffer = false,
+ .mcast_frame_registration = false,
+ },
+ {
+ .id = QCA6174_HW_3_0_VERSION,
+ .dev_id = QCA6174_2_1_DEVICE_ID,
+ .bus = ATH10K_BUS_PCI,
+ .name = "qca6174 hw3.0",
+ .patch_load_addr = QCA6174_HW_3_0_PATCH_LOAD_ADDR,
+ .uart_pin = 6,
+ .led_pin = 0,
+ .otp_exe_param = 0,
+ .channel_counters_freq_hz = 88000,
+ .max_probe_resp_desc_thres = 0,
+ .cal_data_len = 8124,
+ .fw = {
+ .dir = QCA6174_HW_3_0_FW_DIR,
+ .board_size = QCA6174_BOARD_DATA_SZ,
+ .board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
+ },
+ .rx_desc_ops = &qca988x_rx_desc_ops,
+ .hw_ops = &qca988x_ops,
+ .decap_align_bytes = 4,
+ .spectral_bin_discard = 0,
+ .spectral_bin_offset = 0,
+ .vht160_mcs_rx_highest = 0,
+ .vht160_mcs_tx_highest = 0,
+ .n_cipher_suites = 8,
+ .ast_skid_limit = 0x10,
+ .num_wds_entries = 0x20,
+ .target_64bit = false,
+ .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
+ .shadow_reg_support = false,
+ .rri_on_ddr = false,
+ .hw_filter_reset_required = true,
+ .fw_diag_ce_download = false,
+ .credit_size_workaround = false,
+ .tx_stats_over_pktlog = false,
+ .dynamic_sar_support = false,
+ .hw_restart_disconnect = false,
+ .use_fw_tx_credits = true,
+ .delay_unmap_buffer = false,
+ .mcast_frame_registration = false,
+ },
+ {
+ .id = QCA6174_HW_3_2_VERSION,
+ .dev_id = QCA6174_2_1_DEVICE_ID,
+ .bus = ATH10K_BUS_PCI,
+ .name = "qca6174 hw3.2",
+ .patch_load_addr = QCA6174_HW_3_0_PATCH_LOAD_ADDR,
+ .uart_pin = 6,
+ .led_pin = 0,
+ .otp_exe_param = 0,
+ .channel_counters_freq_hz = 88000,
+ .max_probe_resp_desc_thres = 0,
+ .cal_data_len = 8124,
+ .fw = {
+ /* uses same binaries as hw3.0 */
+ .dir = QCA6174_HW_3_0_FW_DIR,
+ .board_size = QCA6174_BOARD_DATA_SZ,
+ .board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
+ },
+ .rx_desc_ops = &qca988x_rx_desc_ops,
+ .hw_ops = &qca6174_ops,
+ .hw_clk = qca6174_clk,
+ .target_cpu_freq = 176000000,
+ .decap_align_bytes = 4,
+ .spectral_bin_discard = 0,
+ .spectral_bin_offset = 0,
+ .vht160_mcs_rx_highest = 0,
+ .vht160_mcs_tx_highest = 0,
+ .n_cipher_suites = 8,
+ .ast_skid_limit = 0x10,
+ .num_wds_entries = 0x20,
+ .target_64bit = false,
+ .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
+ .shadow_reg_support = false,
+ .rri_on_ddr = false,
+ .hw_filter_reset_required = true,
+ .fw_diag_ce_download = true,
+ .credit_size_workaround = false,
+ .tx_stats_over_pktlog = false,
+ .supports_peer_stats_info = true,
+ .dynamic_sar_support = true,
+ .hw_restart_disconnect = false,
+ .use_fw_tx_credits = true,
+ .delay_unmap_buffer = false,
+ .mcast_frame_registration = true,
+ },
+ {
+ .id = QCA99X0_HW_2_0_DEV_VERSION,
+ .dev_id = QCA99X0_2_0_DEVICE_ID,
+ .bus = ATH10K_BUS_PCI,
+ .name = "qca99x0 hw2.0",
+ .patch_load_addr = QCA99X0_HW_2_0_PATCH_LOAD_ADDR,
+ .uart_pin = 7,
+ .led_pin = 17,
+ .otp_exe_param = 0x00000700,
+ .continuous_frag_desc = true,
+ .cck_rate_map_rev2 = true,
+ .channel_counters_freq_hz = 150000,
+ .max_probe_resp_desc_thres = 24,
+ .tx_chain_mask = 0xf,
+ .rx_chain_mask = 0xf,
+ .max_spatial_stream = 4,
+ .cal_data_len = 12064,
+ .fw = {
+ .dir = QCA99X0_HW_2_0_FW_DIR,
+ .board_size = QCA99X0_BOARD_DATA_SZ,
+ .board_ext_size = QCA99X0_BOARD_EXT_DATA_SZ,
+ },
+ .sw_decrypt_mcast_mgmt = true,
+ .rx_desc_ops = &qca99x0_rx_desc_ops,
+ .hw_ops = &qca99x0_ops,
+ .decap_align_bytes = 1,
+ .spectral_bin_discard = 4,
+ .spectral_bin_offset = 0,
+ .vht160_mcs_rx_highest = 0,
+ .vht160_mcs_tx_highest = 0,
+ .n_cipher_suites = 11,
+ .ast_skid_limit = 0x10,
+ .num_wds_entries = 0x20,
+ .target_64bit = false,
+ .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
+ .shadow_reg_support = false,
+ .rri_on_ddr = false,
+ .hw_filter_reset_required = true,
+ .fw_diag_ce_download = false,
+ .credit_size_workaround = false,
+ .tx_stats_over_pktlog = false,
+ .dynamic_sar_support = false,
+ .hw_restart_disconnect = false,
+ .use_fw_tx_credits = true,
+ .delay_unmap_buffer = false,
+ .mcast_frame_registration = false,
+ },
+ {
+ .id = QCA9984_HW_1_0_DEV_VERSION,
+ .dev_id = QCA9984_1_0_DEVICE_ID,
+ .bus = ATH10K_BUS_PCI,
+ .name = "qca9984/qca9994 hw1.0",
+ .patch_load_addr = QCA9984_HW_1_0_PATCH_LOAD_ADDR,
+ .uart_pin = 7,
+ .led_pin = 17,
+ .cc_wraparound_type = ATH10K_HW_CC_WRAP_SHIFTED_EACH,
+ .otp_exe_param = 0x00000700,
+ .continuous_frag_desc = true,
+ .cck_rate_map_rev2 = true,
+ .channel_counters_freq_hz = 150000,
+ .max_probe_resp_desc_thres = 24,
+ .tx_chain_mask = 0xf,
+ .rx_chain_mask = 0xf,
+ .max_spatial_stream = 4,
+ .cal_data_len = 12064,
+ .fw = {
+ .dir = QCA9984_HW_1_0_FW_DIR,
+ .board_size = QCA99X0_BOARD_DATA_SZ,
+ .board_ext_size = QCA99X0_BOARD_EXT_DATA_SZ,
+ .ext_board_size = QCA99X0_EXT_BOARD_DATA_SZ,
+ },
+ .sw_decrypt_mcast_mgmt = true,
+ .rx_desc_ops = &qca99x0_rx_desc_ops,
+ .hw_ops = &qca99x0_ops,
+ .decap_align_bytes = 1,
+ .spectral_bin_discard = 12,
+ .spectral_bin_offset = 8,
+
+ /* Can do only 2x2 VHT160 or 80+80. 1560 Mbps is 4x4 80 MHz
+ * or 2x2 160 MHz, long-guard-interval.
+ */
+ .vht160_mcs_rx_highest = 1560,
+ .vht160_mcs_tx_highest = 1560,
+ .n_cipher_suites = 11,
+ .ast_skid_limit = 0x10,
+ .num_wds_entries = 0x20,
+ .target_64bit = false,
+ .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
+ .shadow_reg_support = false,
+ .rri_on_ddr = false,
+ .hw_filter_reset_required = true,
+ .fw_diag_ce_download = false,
+ .credit_size_workaround = false,
+ .tx_stats_over_pktlog = false,
+ .dynamic_sar_support = false,
+ .hw_restart_disconnect = false,
+ .use_fw_tx_credits = true,
+ .delay_unmap_buffer = false,
+ .mcast_frame_registration = false,
+ },
+ {
+ .id = QCA9888_HW_2_0_DEV_VERSION,
+ .dev_id = QCA9888_2_0_DEVICE_ID,
+ .bus = ATH10K_BUS_PCI,
+ .name = "qca9888 hw2.0",
+ .patch_load_addr = QCA9888_HW_2_0_PATCH_LOAD_ADDR,
+ .uart_pin = 7,
+ .led_pin = 17,
+ .cc_wraparound_type = ATH10K_HW_CC_WRAP_SHIFTED_EACH,
+ .otp_exe_param = 0x00000700,
+ .continuous_frag_desc = true,
+ .channel_counters_freq_hz = 150000,
+ .max_probe_resp_desc_thres = 24,
+ .tx_chain_mask = 3,
+ .rx_chain_mask = 3,
+ .max_spatial_stream = 2,
+ .cal_data_len = 12064,
+ .fw = {
+ .dir = QCA9888_HW_2_0_FW_DIR,
+ .board_size = QCA99X0_BOARD_DATA_SZ,
+ .board_ext_size = QCA99X0_BOARD_EXT_DATA_SZ,
+ },
+ .sw_decrypt_mcast_mgmt = true,
+ .rx_desc_ops = &qca99x0_rx_desc_ops,
+ .hw_ops = &qca99x0_ops,
+ .decap_align_bytes = 1,
+ .spectral_bin_discard = 12,
+ .spectral_bin_offset = 8,
+
+ /* Can do only 1x1 VHT160 or 80+80. 780 Mbps is 2x2 80 MHz or
+ * 1x1 160 MHz, long-guard-interval.
+ */
+ .vht160_mcs_rx_highest = 780,
+ .vht160_mcs_tx_highest = 780,
+ .n_cipher_suites = 11,
+ .ast_skid_limit = 0x10,
+ .num_wds_entries = 0x20,
+ .target_64bit = false,
+ .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
+ .shadow_reg_support = false,
+ .rri_on_ddr = false,
+ .hw_filter_reset_required = true,
+ .fw_diag_ce_download = false,
+ .credit_size_workaround = false,
+ .tx_stats_over_pktlog = false,
+ .dynamic_sar_support = false,
+ .hw_restart_disconnect = false,
+ .use_fw_tx_credits = true,
+ .delay_unmap_buffer = false,
+ .mcast_frame_registration = false,
+ },
+ {
+ .id = QCA9377_HW_1_0_DEV_VERSION,
+ .dev_id = QCA9377_1_0_DEVICE_ID,
+ .bus = ATH10K_BUS_PCI,
+ .name = "qca9377 hw1.0",
+ .patch_load_addr = QCA9377_HW_1_0_PATCH_LOAD_ADDR,
+ .uart_pin = 6,
+ .led_pin = 0,
+ .otp_exe_param = 0,
+ .channel_counters_freq_hz = 88000,
+ .max_probe_resp_desc_thres = 0,
+ .cal_data_len = 8124,
+ .fw = {
+ .dir = QCA9377_HW_1_0_FW_DIR,
+ .board_size = QCA9377_BOARD_DATA_SZ,
+ .board_ext_size = QCA9377_BOARD_EXT_DATA_SZ,
+ },
+ .rx_desc_ops = &qca988x_rx_desc_ops,
+ .hw_ops = &qca988x_ops,
+ .decap_align_bytes = 4,
+ .spectral_bin_discard = 0,
+ .spectral_bin_offset = 0,
+ .vht160_mcs_rx_highest = 0,
+ .vht160_mcs_tx_highest = 0,
+ .n_cipher_suites = 8,
+ .ast_skid_limit = 0x10,
+ .num_wds_entries = 0x20,
+ .target_64bit = false,
+ .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
+ .shadow_reg_support = false,
+ .rri_on_ddr = false,
+ .hw_filter_reset_required = true,
+ .fw_diag_ce_download = false,
+ .credit_size_workaround = false,
+ .tx_stats_over_pktlog = false,
+ .dynamic_sar_support = false,
+ .hw_restart_disconnect = false,
+ .use_fw_tx_credits = true,
+ .delay_unmap_buffer = false,
+ .mcast_frame_registration = false,
+ },
+ {
+ .id = QCA9377_HW_1_1_DEV_VERSION,
+ .dev_id = QCA9377_1_0_DEVICE_ID,
+ .bus = ATH10K_BUS_PCI,
+ .name = "qca9377 hw1.1",
+ .patch_load_addr = QCA9377_HW_1_0_PATCH_LOAD_ADDR,
+ .uart_pin = 6,
+ .led_pin = 0,
+ .otp_exe_param = 0,
+ .channel_counters_freq_hz = 88000,
+ .max_probe_resp_desc_thres = 0,
+ .cal_data_len = 8124,
+ .fw = {
+ .dir = QCA9377_HW_1_0_FW_DIR,
+ .board_size = QCA9377_BOARD_DATA_SZ,
+ .board_ext_size = QCA9377_BOARD_EXT_DATA_SZ,
+ },
+ .rx_desc_ops = &qca988x_rx_desc_ops,
+ .hw_ops = &qca6174_ops,
+ .hw_clk = qca6174_clk,
+ .target_cpu_freq = 176000000,
+ .decap_align_bytes = 4,
+ .spectral_bin_discard = 0,
+ .spectral_bin_offset = 0,
+ .vht160_mcs_rx_highest = 0,
+ .vht160_mcs_tx_highest = 0,
+ .n_cipher_suites = 8,
+ .ast_skid_limit = 0x10,
+ .num_wds_entries = 0x20,
+ .target_64bit = false,
+ .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
+ .shadow_reg_support = false,
+ .rri_on_ddr = false,
+ .hw_filter_reset_required = true,
+ .fw_diag_ce_download = true,
+ .credit_size_workaround = false,
+ .tx_stats_over_pktlog = false,
+ .dynamic_sar_support = false,
+ .hw_restart_disconnect = false,
+ .use_fw_tx_credits = true,
+ .delay_unmap_buffer = false,
+ .mcast_frame_registration = false,
+ },
+ {
+ .id = QCA9377_HW_1_1_DEV_VERSION,
+ .dev_id = QCA9377_1_0_DEVICE_ID,
+ .bus = ATH10K_BUS_SDIO,
+ .name = "qca9377 hw1.1 sdio",
+ .patch_load_addr = QCA9377_HW_1_0_PATCH_LOAD_ADDR,
+ .uart_pin = 19,
+ .led_pin = 0,
+ .otp_exe_param = 0,
+ .channel_counters_freq_hz = 88000,
+ .max_probe_resp_desc_thres = 0,
+ .cal_data_len = 8124,
+ .fw = {
+ .dir = QCA9377_HW_1_0_FW_DIR,
+ .board_size = QCA9377_BOARD_DATA_SZ,
+ .board_ext_size = QCA9377_BOARD_EXT_DATA_SZ,
+ },
+ .rx_desc_ops = &qca988x_rx_desc_ops,
+ .hw_ops = &qca6174_ops,
+ .hw_clk = qca6174_clk,
+ .target_cpu_freq = 176000000,
+ .decap_align_bytes = 4,
+ .n_cipher_suites = 8,
+ .num_peers = TARGET_QCA9377_HL_NUM_PEERS,
+ .ast_skid_limit = 0x10,
+ .num_wds_entries = 0x20,
+ .uart_pin_workaround = true,
+ .credit_size_workaround = true,
+ .dynamic_sar_support = false,
+ .hw_restart_disconnect = false,
+ .use_fw_tx_credits = true,
+ .delay_unmap_buffer = false,
+ .mcast_frame_registration = false,
+ },
+ {
+ .id = QCA4019_HW_1_0_DEV_VERSION,
+ .dev_id = 0,
+ .bus = ATH10K_BUS_AHB,
+ .name = "qca4019 hw1.0",
+ .patch_load_addr = QCA4019_HW_1_0_PATCH_LOAD_ADDR,
+ .uart_pin = 7,
+ .led_pin = 0,
+ .cc_wraparound_type = ATH10K_HW_CC_WRAP_SHIFTED_EACH,
+ .otp_exe_param = 0x0010000,
+ .continuous_frag_desc = true,
+ .cck_rate_map_rev2 = true,
+ .channel_counters_freq_hz = 125000,
+ .max_probe_resp_desc_thres = 24,
+ .tx_chain_mask = 0x3,
+ .rx_chain_mask = 0x3,
+ .max_spatial_stream = 2,
+ .cal_data_len = 12064,
+ .fw = {
+ .dir = QCA4019_HW_1_0_FW_DIR,
+ .board_size = QCA4019_BOARD_DATA_SZ,
+ .board_ext_size = QCA4019_BOARD_EXT_DATA_SZ,
+ },
+ .sw_decrypt_mcast_mgmt = true,
+ .rx_desc_ops = &qca99x0_rx_desc_ops,
+ .hw_ops = &qca99x0_ops,
+ .decap_align_bytes = 1,
+ .spectral_bin_discard = 4,
+ .spectral_bin_offset = 0,
+ .vht160_mcs_rx_highest = 0,
+ .vht160_mcs_tx_highest = 0,
+ .n_cipher_suites = 11,
+ .ast_skid_limit = 0x10,
+ .num_wds_entries = 0x20,
+ .target_64bit = false,
+ .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
+ .shadow_reg_support = false,
+ .rri_on_ddr = false,
+ .hw_filter_reset_required = true,
+ .fw_diag_ce_download = false,
+ .credit_size_workaround = false,
+ .tx_stats_over_pktlog = false,
+ .dynamic_sar_support = false,
+ .hw_restart_disconnect = false,
+ .use_fw_tx_credits = true,
+ .delay_unmap_buffer = false,
+ .mcast_frame_registration = false,
+ },
+ {
+ .id = WCN3990_HW_1_0_DEV_VERSION,
+ .dev_id = 0,
+ .bus = ATH10K_BUS_SNOC,
+ .name = "wcn3990 hw1.0",
+ .led_pin = 0,
+ .continuous_frag_desc = true,
+ .tx_chain_mask = 0x7,
+ .rx_chain_mask = 0x7,
+ .max_spatial_stream = 4,
+ .fw = {
+ .dir = WCN3990_HW_1_0_FW_DIR,
+ .board_size = WCN3990_BOARD_DATA_SZ,
+ .board_ext_size = WCN3990_BOARD_EXT_DATA_SZ,
+ },
+ .sw_decrypt_mcast_mgmt = true,
+ .rx_desc_ops = &wcn3990_rx_desc_ops,
+ .hw_ops = &wcn3990_ops,
+ .decap_align_bytes = 1,
+ .num_peers = TARGET_HL_TLV_NUM_PEERS,
+ .n_cipher_suites = 11,
+ .ast_skid_limit = TARGET_HL_TLV_AST_SKID_LIMIT,
+ .num_wds_entries = TARGET_HL_TLV_NUM_WDS_ENTRIES,
+ .target_64bit = true,
+ .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL_DUAL_MAC,
+ .shadow_reg_support = true,
+ .rri_on_ddr = true,
+ .hw_filter_reset_required = false,
+ .fw_diag_ce_download = false,
+ .credit_size_workaround = false,
+ .tx_stats_over_pktlog = false,
+ .dynamic_sar_support = true,
+ .hw_restart_disconnect = true,
+ .use_fw_tx_credits = false,
+ .delay_unmap_buffer = true,
+ .mcast_frame_registration = false,
+ },
+};
+
+static const char *const ath10k_core_fw_feature_str[] = {
+ [ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX] = "wmi-mgmt-rx",
+ [ATH10K_FW_FEATURE_WMI_10X] = "wmi-10.x",
+ [ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX] = "has-wmi-mgmt-tx",
+ [ATH10K_FW_FEATURE_NO_P2P] = "no-p2p",
+ [ATH10K_FW_FEATURE_WMI_10_2] = "wmi-10.2",
+ [ATH10K_FW_FEATURE_MULTI_VIF_PS_SUPPORT] = "multi-vif-ps",
+ [ATH10K_FW_FEATURE_WOWLAN_SUPPORT] = "wowlan",
+ [ATH10K_FW_FEATURE_IGNORE_OTP_RESULT] = "ignore-otp",
+ [ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING] = "no-4addr-pad",
+ [ATH10K_FW_FEATURE_SUPPORTS_SKIP_CLOCK_INIT] = "skip-clock-init",
+ [ATH10K_FW_FEATURE_RAW_MODE_SUPPORT] = "raw-mode",
+ [ATH10K_FW_FEATURE_SUPPORTS_ADAPTIVE_CCA] = "adaptive-cca",
+ [ATH10K_FW_FEATURE_MFP_SUPPORT] = "mfp",
+ [ATH10K_FW_FEATURE_PEER_FLOW_CONTROL] = "peer-flow-ctrl",
+ [ATH10K_FW_FEATURE_BTCOEX_PARAM] = "btcoex-param",
+ [ATH10K_FW_FEATURE_SKIP_NULL_FUNC_WAR] = "skip-null-func-war",
+ [ATH10K_FW_FEATURE_ALLOWS_MESH_BCAST] = "allows-mesh-bcast",
+ [ATH10K_FW_FEATURE_NO_PS] = "no-ps",
+ [ATH10K_FW_FEATURE_MGMT_TX_BY_REF] = "mgmt-tx-by-reference",
+ [ATH10K_FW_FEATURE_NON_BMI] = "non-bmi",
+ [ATH10K_FW_FEATURE_SINGLE_CHAN_INFO_PER_CHANNEL] = "single-chan-info-per-channel",
+ [ATH10K_FW_FEATURE_PEER_FIXED_RATE] = "peer-fixed-rate",
+ [ATH10K_FW_FEATURE_IRAM_RECOVERY] = "iram-recovery",
+};
+
+static unsigned int ath10k_core_get_fw_feature_str(char *buf,
+ size_t buf_len,
+ enum ath10k_fw_features feat)
+{
+ /* make sure that ath10k_core_fw_feature_str[] gets updated */
+ BUILD_BUG_ON(ARRAY_SIZE(ath10k_core_fw_feature_str) !=
+ ATH10K_FW_FEATURE_COUNT);
+
+ if (feat >= ARRAY_SIZE(ath10k_core_fw_feature_str) ||
+ WARN_ON(!ath10k_core_fw_feature_str[feat])) {
+ return scnprintf(buf, buf_len, "bit%d", feat);
+ }
+
+ return scnprintf(buf, buf_len, "%s", ath10k_core_fw_feature_str[feat]);
+}
+
+void ath10k_core_get_fw_features_str(struct ath10k *ar,
+ char *buf,
+ size_t buf_len)
+{
+ size_t len = 0;
+ int i;
+
+ for (i = 0; i < ATH10K_FW_FEATURE_COUNT; i++) {
+ if (test_bit(i, ar->normal_mode_fw.fw_file.fw_features)) {
+ if (len > 0)
+ len += scnprintf(buf + len, buf_len - len, ",");
+
+ len += ath10k_core_get_fw_feature_str(buf + len,
+ buf_len - len,
+ i);
+ }
+ }
+}
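+
+/*
+ * Example result (illustrative): a firmware image advertising the
+ * ATH10K_FW_FEATURE_WMI_10X and ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX bits
+ * yields the string "wmi-10.x,has-wmi-mgmt-tx".
+ */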
+
+static void ath10k_send_suspend_complete(struct ath10k *ar)
+{
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot suspend complete\n");
+
+ complete(&ar->target_suspend);
+}
+
+static int ath10k_init_sdio(struct ath10k *ar, enum ath10k_firmware_mode mode)
+{
+ bool mtu_workaround = ar->hw_params.credit_size_workaround;
+ int ret;
+ u32 param = 0;
+
+ ret = ath10k_bmi_write32(ar, hi_mbox_io_block_sz, 256);
+ if (ret)
+ return ret;
+
+ ret = ath10k_bmi_write32(ar, hi_mbox_isr_yield_limit, 99);
+ if (ret)
+ return ret;
+
+ ret = ath10k_bmi_read32(ar, hi_acs_flags, &param);
+ if (ret)
+ return ret;
+
+ param |= HI_ACS_FLAGS_SDIO_REDUCE_TX_COMPL_SET;
+
+ if (mode == ATH10K_FIRMWARE_MODE_NORMAL && !mtu_workaround)
+ param |= HI_ACS_FLAGS_ALT_DATA_CREDIT_SIZE;
+ else
+ param &= ~HI_ACS_FLAGS_ALT_DATA_CREDIT_SIZE;
+
+ if (mode == ATH10K_FIRMWARE_MODE_UTF)
+ param &= ~HI_ACS_FLAGS_SDIO_SWAP_MAILBOX_SET;
+ else
+ param |= HI_ACS_FLAGS_SDIO_SWAP_MAILBOX_SET;
+
+ ret = ath10k_bmi_write32(ar, hi_acs_flags, param);
+ if (ret)
+ return ret;
+
+ ret = ath10k_bmi_read32(ar, hi_option_flag2, &param);
+ if (ret)
+ return ret;
+
+ param |= HI_OPTION_SDIO_CRASH_DUMP_ENHANCEMENT_HOST;
+
+ ret = ath10k_bmi_write32(ar, hi_option_flag2, param);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int ath10k_init_configure_target(struct ath10k *ar)
+{
+ u32 param_host;
+ int ret;
+
+ /* tell the target which HTC version is used */
+ ret = ath10k_bmi_write32(ar, hi_app_host_interest,
+ HTC_PROTOCOL_VERSION);
+ if (ret) {
+ ath10k_err(ar, "settings HTC version failed\n");
+ return ret;
+ }
+
+ /* set the firmware mode to STA/IBSS/AP */
+ ret = ath10k_bmi_read32(ar, hi_option_flag, &param_host);
+ if (ret) {
+ ath10k_err(ar, "setting firmware mode (1/2) failed\n");
+ return ret;
+ }
+
+ /* TODO following parameters need to be re-visited. */
+ /* num_device */
+ param_host |= (1 << HI_OPTION_NUM_DEV_SHIFT);
+ /* Firmware mode */
+ /* FIXME: why FW_MODE_AP? */
+ param_host |= (HI_OPTION_FW_MODE_AP << HI_OPTION_FW_MODE_SHIFT);
+ /* mac_addr_method */
+ param_host |= (1 << HI_OPTION_MAC_ADDR_METHOD_SHIFT);
+ /* firmware_bridge */
+ param_host |= (0 << HI_OPTION_FW_BRIDGE_SHIFT);
+ /* fwsubmode */
+ param_host |= (0 << HI_OPTION_FW_SUBMODE_SHIFT);
+
+ ret = ath10k_bmi_write32(ar, hi_option_flag, param_host);
+ if (ret) {
+ ath10k_err(ar, "setting firmware mode (2/2) failed\n");
+ return ret;
+ }
+
+ /* We do all byte-swapping on the host */
+ ret = ath10k_bmi_write32(ar, hi_be, 0);
+ if (ret) {
+ ath10k_err(ar, "setting host CPU BE mode failed\n");
+ return ret;
+ }
+
+ /* FW descriptor/Data swap flags */
+ ret = ath10k_bmi_write32(ar, hi_fw_swap, 0);
+
+ if (ret) {
+ ath10k_err(ar, "setting FW data/desc swap flags failed\n");
+ return ret;
+ }
+
+ /* Some devices have a special sanity check that verifies the PCI
+ * Device ID is written to this host interest var. It is known to be
+ * required to boot QCA6164.
+ */
+ ret = ath10k_bmi_write32(ar, hi_hci_uart_pwr_mgmt_params_ext,
+ ar->dev_id);
+ if (ret) {
+ ath10k_err(ar, "failed to set pwr_mgmt_params: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct firmware *ath10k_fetch_fw_file(struct ath10k *ar,
+ const char *dir,
+ const char *file)
+{
+ char filename[100];
+ const struct firmware *fw;
+ int ret;
+
+ if (file == NULL)
+ return ERR_PTR(-ENOENT);
+
+ if (dir == NULL)
+ dir = ".";
+
+ if (ar->board_name) {
+ snprintf(filename, sizeof(filename), "%s/%s/%s",
+ dir, ar->board_name, file);
+ ret = firmware_request_nowarn(&fw, filename, ar->dev);
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot fw request '%s': %d\n",
+ filename, ret);
+ if (!ret)
+ return fw;
+ }
+
+ snprintf(filename, sizeof(filename), "%s/%s", dir, file);
+ ret = firmware_request_nowarn(&fw, filename, ar->dev);
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot fw request '%s': %d\n",
+ filename, ret);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return fw;
+}
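+
+/*
+ * Lookup order example (illustrative): with dir "ath10k/QCA988X/hw2.0",
+ * board_name "acme" (hypothetical) and file "board.bin", the helper first
+ * tries "ath10k/QCA988X/hw2.0/acme/board.bin" and then falls back to
+ * "ath10k/QCA988X/hw2.0/board.bin".
+ */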
+
+static int ath10k_push_board_ext_data(struct ath10k *ar, const void *data,
+ size_t data_len)
+{
+ u32 board_data_size = ar->hw_params.fw.board_size;
+ u32 board_ext_data_size = ar->hw_params.fw.board_ext_size;
+ u32 board_ext_data_addr;
+ int ret;
+
+ ret = ath10k_bmi_read32(ar, hi_board_ext_data, &board_ext_data_addr);
+ if (ret) {
+ ath10k_err(ar, "could not read board ext data addr (%d)\n",
+ ret);
+ return ret;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "boot push board extended data addr 0x%x\n",
+ board_ext_data_addr);
+
+ if (board_ext_data_addr == 0)
+ return 0;
+
+ if (data_len != (board_data_size + board_ext_data_size)) {
+ ath10k_err(ar, "invalid board (ext) data sizes %zu != %d+%d\n",
+ data_len, board_data_size, board_ext_data_size);
+ return -EINVAL;
+ }
+
+ ret = ath10k_bmi_write_memory(ar, board_ext_data_addr,
+ data + board_data_size,
+ board_ext_data_size);
+ if (ret) {
+ ath10k_err(ar, "could not write board ext data (%d)\n", ret);
+ return ret;
+ }
+
+ ret = ath10k_bmi_write32(ar, hi_board_ext_data_config,
+ (board_ext_data_size << 16) | 1);
+ if (ret) {
+ ath10k_err(ar, "could not write board ext data bit (%d)\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath10k_core_get_board_id_from_otp(struct ath10k *ar)
+{
+ u32 result, address;
+ u8 board_id, chip_id;
+ bool ext_bid_support;
+ int ret, bmi_board_id_param;
+
+ address = ar->hw_params.patch_load_addr;
+
+ if (!ar->normal_mode_fw.fw_file.otp_data ||
+ !ar->normal_mode_fw.fw_file.otp_len) {
+ ath10k_warn(ar,
+ "failed to retrieve board id because of invalid otp\n");
+ return -ENODATA;
+ }
+
+ if (ar->id.bmi_ids_valid) {
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "boot already acquired valid otp board id,skip download, board_id %d chip_id %d\n",
+ ar->id.bmi_board_id, ar->id.bmi_chip_id);
+ goto skip_otp_download;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "boot upload otp to 0x%x len %zd for board id\n",
+ address, ar->normal_mode_fw.fw_file.otp_len);
+
+ ret = ath10k_bmi_fast_download(ar, address,
+ ar->normal_mode_fw.fw_file.otp_data,
+ ar->normal_mode_fw.fw_file.otp_len);
+ if (ret) {
+ ath10k_err(ar, "could not write otp for board id check: %d\n",
+ ret);
+ return ret;
+ }
+
+ if (ar->cal_mode == ATH10K_PRE_CAL_MODE_DT ||
+ ar->cal_mode == ATH10K_PRE_CAL_MODE_FILE ||
+ ar->cal_mode == ATH10K_PRE_CAL_MODE_NVMEM)
+ bmi_board_id_param = BMI_PARAM_GET_FLASH_BOARD_ID;
+ else
+ bmi_board_id_param = BMI_PARAM_GET_EEPROM_BOARD_ID;
+
+ ret = ath10k_bmi_execute(ar, address, bmi_board_id_param, &result);
+ if (ret) {
+ ath10k_err(ar, "could not execute otp for board id check: %d\n",
+ ret);
+ return ret;
+ }
+
+ board_id = MS(result, ATH10K_BMI_BOARD_ID_FROM_OTP);
+ chip_id = MS(result, ATH10K_BMI_CHIP_ID_FROM_OTP);
+ ext_bid_support = (result & ATH10K_BMI_EXT_BOARD_ID_SUPPORT);
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "boot get otp board id result 0x%08x board_id %d chip_id %d ext_bid_support %d\n",
+ result, board_id, chip_id, ext_bid_support);
+
+ ar->id.ext_bid_supported = ext_bid_support;
+
+ if ((result & ATH10K_BMI_BOARD_ID_STATUS_MASK) != 0 ||
+ (board_id == 0)) {
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "board id does not exist in otp, ignore it\n");
+ return -EOPNOTSUPP;
+ }
+
+ ar->id.bmi_ids_valid = true;
+ ar->id.bmi_board_id = board_id;
+ ar->id.bmi_chip_id = chip_id;
+
+skip_otp_download:
+
+ return 0;
+}
+
+static void ath10k_core_check_bdfext(const struct dmi_header *hdr, void *data)
+{
+ struct ath10k *ar = data;
+ const char *bdf_ext;
+ const char *magic = ATH10K_SMBIOS_BDF_EXT_MAGIC;
+ u8 bdf_enabled;
+ int i;
+
+ if (hdr->type != ATH10K_SMBIOS_BDF_EXT_TYPE)
+ return;
+
+ if (hdr->length != ATH10K_SMBIOS_BDF_EXT_LENGTH) {
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "wrong smbios bdf ext type length (%d).\n",
+ hdr->length);
+ return;
+ }
+
+ bdf_enabled = *((u8 *)hdr + ATH10K_SMBIOS_BDF_EXT_OFFSET);
+ if (!bdf_enabled) {
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "bdf variant name not found.\n");
+ return;
+ }
+
+ /* Only one string exists (per spec) */
+ bdf_ext = (char *)hdr + hdr->length;
+
+ if (memcmp(bdf_ext, magic, strlen(magic)) != 0) {
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "bdf variant magic does not match.\n");
+ return;
+ }
+
+ for (i = 0; i < strlen(bdf_ext); i++) {
+ if (!isascii(bdf_ext[i]) || !isprint(bdf_ext[i])) {
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "bdf variant name contains non ascii chars.\n");
+ return;
+ }
+ }
+
+ /* Copy extension name without magic suffix */
+ if (strscpy(ar->id.bdf_ext, bdf_ext + strlen(magic),
+ sizeof(ar->id.bdf_ext)) < 0) {
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "bdf variant string is longer than the buffer can accommodate (variant: %s)\n",
+ bdf_ext);
+ return;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "found and validated bdf variant smbios_type 0x%x bdf %s\n",
+ ATH10K_SMBIOS_BDF_EXT_TYPE, bdf_ext);
+}
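+
+/*
+ * Example (illustrative): an SMBIOS OEM string "BDF_RT-AC58U" (assuming the
+ * usual "BDF_" magic prefix) yields bdf_ext "RT-AC58U", which later shows
+ * up as ",variant=RT-AC58U" in the generated board name.
+ */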
+
+static int ath10k_core_check_smbios(struct ath10k *ar)
+{
+ ar->id.bdf_ext[0] = '\0';
+ dmi_walk(ath10k_core_check_bdfext, ar);
+
+ if (ar->id.bdf_ext[0] == '\0')
+ return -ENODATA;
+
+ return 0;
+}
+
+int ath10k_core_check_dt(struct ath10k *ar)
+{
+ struct device_node *node;
+ const char *variant = NULL;
+
+ node = ar->dev->of_node;
+ if (!node)
+ return -ENOENT;
+
+ of_property_read_string(node, "qcom,calibration-variant",
+ &variant);
+ if (!variant)
+ of_property_read_string(node, "qcom,ath10k-calibration-variant",
+ &variant);
+ if (!variant)
+ return -ENODATA;
+
+ if (strscpy(ar->id.bdf_ext, variant, sizeof(ar->id.bdf_ext)) < 0)
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "bdf variant string is longer than the buffer can accommodate (variant: %s)\n",
+ variant);
+
+ return 0;
+}
+EXPORT_SYMBOL(ath10k_core_check_dt);
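+
+/*
+ * Device tree sketch (illustrative):
+ *
+ * wifi@a000000 {
+ * compatible = "qcom,ipq4019-wifi";
+ * qcom,calibration-variant = "RT-AC58U";
+ * };
+ *
+ * The older "qcom,ath10k-calibration-variant" spelling is still honoured as
+ * a fallback, as the lookup above shows.
+ */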
+
+static int ath10k_download_fw(struct ath10k *ar)
+{
+ u32 address, data_len;
+ const void *data;
+ int ret;
+ struct pm_qos_request latency_qos = {};
+
+ address = ar->hw_params.patch_load_addr;
+
+ data = ar->running_fw->fw_file.firmware_data;
+ data_len = ar->running_fw->fw_file.firmware_len;
+
+ ret = ath10k_swap_code_seg_configure(ar, &ar->running_fw->fw_file);
+ if (ret) {
+ ath10k_err(ar, "failed to configure fw code swap: %d\n",
+ ret);
+ return ret;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "boot uploading firmware image %p len %d\n",
+ data, data_len);
+
+ /* Check if device supports to download firmware via
+ * diag copy engine. Downloading firmware via diag CE
+ * greatly reduces the time to download firmware.
+ */
+ if (ar->hw_params.fw_diag_ce_download) {
+ ret = ath10k_hw_diag_fast_download(ar, address,
+ data, data_len);
+ if (ret == 0)
+ /* firmware upload via diag ce was successful */
+ return 0;
+
+ ath10k_warn(ar,
+ "failed to upload firmware via diag ce, trying BMI: %d",
+ ret);
+ }
+
+ cpu_latency_qos_add_request(&latency_qos, 0);
+
+ ret = ath10k_bmi_fast_download(ar, address, data, data_len);
+
+ cpu_latency_qos_remove_request(&latency_qos);
+
+ return ret;
+}
+
+void ath10k_core_free_board_files(struct ath10k *ar)
+{
+ if (!IS_ERR(ar->normal_mode_fw.board))
+ release_firmware(ar->normal_mode_fw.board);
+
+ if (!IS_ERR(ar->normal_mode_fw.ext_board))
+ release_firmware(ar->normal_mode_fw.ext_board);
+
+ ar->normal_mode_fw.board = NULL;
+ ar->normal_mode_fw.board_data = NULL;
+ ar->normal_mode_fw.board_len = 0;
+ ar->normal_mode_fw.ext_board = NULL;
+ ar->normal_mode_fw.ext_board_data = NULL;
+ ar->normal_mode_fw.ext_board_len = 0;
+}
+EXPORT_SYMBOL(ath10k_core_free_board_files);
+
+static void ath10k_core_free_firmware_files(struct ath10k *ar)
+{
+ if (!IS_ERR(ar->normal_mode_fw.fw_file.firmware))
+ release_firmware(ar->normal_mode_fw.fw_file.firmware);
+
+ if (!IS_ERR(ar->cal_file))
+ release_firmware(ar->cal_file);
+
+ if (!IS_ERR(ar->pre_cal_file))
+ release_firmware(ar->pre_cal_file);
+
+ ath10k_swap_code_seg_release(ar, &ar->normal_mode_fw.fw_file);
+
+ ar->normal_mode_fw.fw_file.otp_data = NULL;
+ ar->normal_mode_fw.fw_file.otp_len = 0;
+
+ ar->normal_mode_fw.fw_file.firmware = NULL;
+ ar->normal_mode_fw.fw_file.firmware_data = NULL;
+ ar->normal_mode_fw.fw_file.firmware_len = 0;
+
+ ar->cal_file = NULL;
+ ar->pre_cal_file = NULL;
+}
+
+static int ath10k_fetch_cal_file(struct ath10k *ar)
+{
+ char filename[100];
+
+ /* pre-cal-<bus>-<id>.bin */
+ scnprintf(filename, sizeof(filename), "pre-cal-%s-%s.bin",
+ ath10k_bus_str(ar->hif.bus), dev_name(ar->dev));
+
+ ar->pre_cal_file = ath10k_fetch_fw_file(ar, ATH10K_FW_DIR, filename);
+ if (!IS_ERR(ar->pre_cal_file))
+ goto success;
+
+ /* cal-<bus>-<id>.bin */
+ scnprintf(filename, sizeof(filename), "cal-%s-%s.bin",
+ ath10k_bus_str(ar->hif.bus), dev_name(ar->dev));
+
+ ar->cal_file = ath10k_fetch_fw_file(ar, ATH10K_FW_DIR, filename);
+ if (IS_ERR(ar->cal_file))
+ /* calibration file is optional, don't print any warnings */
+ return PTR_ERR(ar->cal_file);
+success:
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "found calibration file %s/%s\n",
+ ATH10K_FW_DIR, filename);
+
+ return 0;
+}
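+
+/*
+ * Example filenames (illustrative): a PCI device might look for
+ * "pre-cal-pci-0000:01:00.0.bin" and then "cal-pci-0000:01:00.0.bin" under
+ * ATH10K_FW_DIR, i.e. <prefix>-<bus>-<dev_name>.bin.
+ */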
+
+static int ath10k_core_fetch_board_data_api_1(struct ath10k *ar, int bd_ie_type)
+{
+ const struct firmware *fw;
+ char boardname[100];
+
+ if (bd_ie_type == ATH10K_BD_IE_BOARD) {
+ scnprintf(boardname, sizeof(boardname), "board-%s-%s.bin",
+ ath10k_bus_str(ar->hif.bus), dev_name(ar->dev));
+
+ ar->normal_mode_fw.board = ath10k_fetch_fw_file(ar,
+ ar->hw_params.fw.dir,
+ boardname);
+ if (IS_ERR(ar->normal_mode_fw.board)) {
+ fw = ath10k_fetch_fw_file(ar,
+ ar->hw_params.fw.dir,
+ ATH10K_BOARD_DATA_FILE);
+ ar->normal_mode_fw.board = fw;
+ }
+
+ if (IS_ERR(ar->normal_mode_fw.board))
+ return PTR_ERR(ar->normal_mode_fw.board);
+
+ ar->normal_mode_fw.board_data = ar->normal_mode_fw.board->data;
+ ar->normal_mode_fw.board_len = ar->normal_mode_fw.board->size;
+ } else if (bd_ie_type == ATH10K_BD_IE_BOARD_EXT) {
+ fw = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir,
+ ATH10K_EBOARD_DATA_FILE);
+ ar->normal_mode_fw.ext_board = fw;
+ if (IS_ERR(ar->normal_mode_fw.ext_board))
+ return PTR_ERR(ar->normal_mode_fw.ext_board);
+
+ ar->normal_mode_fw.ext_board_data = ar->normal_mode_fw.ext_board->data;
+ ar->normal_mode_fw.ext_board_len = ar->normal_mode_fw.ext_board->size;
+ }
+
+ return 0;
+}
+
+static int ath10k_core_parse_bd_ie_board(struct ath10k *ar,
+ const void *buf, size_t buf_len,
+ const char *boardname,
+ int bd_ie_type)
+{
+ const struct ath10k_fw_ie *hdr;
+ bool name_match_found;
+ int ret, board_ie_id;
+ size_t board_ie_len;
+ const void *board_ie_data;
+
+ name_match_found = false;
+
+ /* go through ATH10K_BD_IE_BOARD_* elements */
+ while (buf_len > sizeof(struct ath10k_fw_ie)) {
+ hdr = buf;
+ board_ie_id = le32_to_cpu(hdr->id);
+ board_ie_len = le32_to_cpu(hdr->len);
+ board_ie_data = hdr->data;
+
+ buf_len -= sizeof(*hdr);
+ buf += sizeof(*hdr);
+
+ if (buf_len < ALIGN(board_ie_len, 4)) {
+ ath10k_err(ar, "invalid ATH10K_BD_IE_BOARD length: %zu < %zu\n",
+ buf_len, ALIGN(board_ie_len, 4));
+ ret = -EINVAL;
+ goto out;
+ }
+
+ switch (board_ie_id) {
+ case ATH10K_BD_IE_BOARD_NAME:
+ ath10k_dbg_dump(ar, ATH10K_DBG_BOOT, "board name", "",
+ board_ie_data, board_ie_len);
+
+ if (board_ie_len != strlen(boardname))
+ break;
+
+ ret = memcmp(board_ie_data, boardname, strlen(boardname));
+ if (ret)
+ break;
+
+ name_match_found = true;
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "boot found match for name '%s'",
+ boardname);
+ break;
+ case ATH10K_BD_IE_BOARD_DATA:
+ if (!name_match_found)
+ /* no match found */
+ break;
+
+ if (bd_ie_type == ATH10K_BD_IE_BOARD) {
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "boot found board data for '%s'",
+ boardname);
+
+ ar->normal_mode_fw.board_data = board_ie_data;
+ ar->normal_mode_fw.board_len = board_ie_len;
+ } else if (bd_ie_type == ATH10K_BD_IE_BOARD_EXT) {
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "boot found eboard data for '%s'",
+ boardname);
+
+ ar->normal_mode_fw.ext_board_data = board_ie_data;
+ ar->normal_mode_fw.ext_board_len = board_ie_len;
+ }
+
+ ret = 0;
+ goto out;
+ default:
+ ath10k_warn(ar, "unknown ATH10K_BD_IE_BOARD found: %d\n",
+ board_ie_id);
+ break;
+ }
+
+ /* jump over the padding */
+ board_ie_len = ALIGN(board_ie_len, 4);
+
+ buf_len -= board_ie_len;
+ buf += board_ie_len;
+ }
+
+ /* no match found */
+ ret = -ENOENT;
+
+out:
+ return ret;
+}
+
+static int ath10k_core_search_bd(struct ath10k *ar,
+ const char *boardname,
+ const u8 *data,
+ size_t len)
+{
+ size_t ie_len;
+ struct ath10k_fw_ie *hdr;
+ int ret = -ENOENT, ie_id;
+
+ while (len > sizeof(struct ath10k_fw_ie)) {
+ hdr = (struct ath10k_fw_ie *)data;
+ ie_id = le32_to_cpu(hdr->id);
+ ie_len = le32_to_cpu(hdr->len);
+
+ len -= sizeof(*hdr);
+ data = hdr->data;
+
+ if (len < ALIGN(ie_len, 4)) {
+ ath10k_err(ar, "invalid length for board ie_id %d ie_len %zu len %zu\n",
+ ie_id, ie_len, len);
+ return -EINVAL;
+ }
+
+ switch (ie_id) {
+ case ATH10K_BD_IE_BOARD:
+ ret = ath10k_core_parse_bd_ie_board(ar, data, ie_len,
+ boardname,
+ ATH10K_BD_IE_BOARD);
+ if (ret == -ENOENT)
+ /* no match found, continue */
+ break;
+
+ /* either found or error, so stop searching */
+ goto out;
+ case ATH10K_BD_IE_BOARD_EXT:
+ ret = ath10k_core_parse_bd_ie_board(ar, data, ie_len,
+ boardname,
+ ATH10K_BD_IE_BOARD_EXT);
+ if (ret == -ENOENT)
+ /* no match found, continue */
+ break;
+
+ /* either found or error, so stop searching */
+ goto out;
+ }
+
+ /* jump over the padding */
+ ie_len = ALIGN(ie_len, 4);
+
+ len -= ie_len;
+ data += ie_len;
+ }
+
+out:
+ /* return result of parse_bd_ie_board() or -ENOENT */
+ return ret;
+}
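+
+/*
+ * board-2.bin layout sketch (illustrative): after the file magic, the image
+ * is a sequence of 4-byte-aligned IEs which this function walks:
+ *
+ * ATH10K_BD_IE_BOARD
+ * ATH10K_BD_IE_BOARD_NAME "bus=pci,vendor=...,device=..."
+ * ATH10K_BD_IE_BOARD_DATA <board data blob>
+ * ATH10K_BD_IE_BOARD (next candidate board)
+ *
+ * The first BOARD_DATA following a matching BOARD_NAME wins.
+ */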
+
+static int ath10k_core_fetch_board_data_api_n(struct ath10k *ar,
+ const char *boardname,
+ const char *fallback_boardname1,
+ const char *fallback_boardname2,
+ const char *filename)
+{
+ size_t len, magic_len;
+ const u8 *data;
+ int ret;
+
+ /* Skip if already fetched during board data download */
+ if (!ar->normal_mode_fw.board)
+ ar->normal_mode_fw.board = ath10k_fetch_fw_file(ar,
+ ar->hw_params.fw.dir,
+ filename);
+ if (IS_ERR(ar->normal_mode_fw.board))
+ return PTR_ERR(ar->normal_mode_fw.board);
+
+ data = ar->normal_mode_fw.board->data;
+ len = ar->normal_mode_fw.board->size;
+
+ /* magic has extra null byte padded */
+ magic_len = strlen(ATH10K_BOARD_MAGIC) + 1;
+ if (len < magic_len) {
+ ath10k_err(ar, "failed to find magic value in %s/%s, file too short: %zu\n",
+ ar->hw_params.fw.dir, filename, len);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ if (memcmp(data, ATH10K_BOARD_MAGIC, magic_len)) {
+ ath10k_err(ar, "found invalid board magic\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /* magic is padded to 4 bytes */
+ magic_len = ALIGN(magic_len, 4);
+ if (len < magic_len) {
+ ath10k_err(ar, "failed: %s/%s too small to contain board data, len: %zu\n",
+ ar->hw_params.fw.dir, filename, len);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ data += magic_len;
+ len -= magic_len;
+
+ /* attempt to find boardname in the IE list */
+ ret = ath10k_core_search_bd(ar, boardname, data, len);
+
+ /* if we didn't find it and have a fallback name, try that */
+ if (ret == -ENOENT && fallback_boardname1)
+ ret = ath10k_core_search_bd(ar, fallback_boardname1, data, len);
+
+ if (ret == -ENOENT && fallback_boardname2)
+ ret = ath10k_core_search_bd(ar, fallback_boardname2, data, len);
+
+ if (ret == -ENOENT) {
+ ath10k_err(ar,
+ "failed to fetch board data for %s from %s/%s\n",
+ boardname, ar->hw_params.fw.dir, filename);
+ ret = -ENODATA;
+ }
+
+ if (ret)
+ goto err;
+
+ return 0;
+
+err:
+ ath10k_core_free_board_files(ar);
+ return ret;
+}
+
+static int ath10k_core_create_board_name(struct ath10k *ar, char *name,
+ size_t name_len, bool with_variant,
+ bool with_chip_id)
+{
+ /* strlen(',variant=') + strlen(ar->id.bdf_ext) */
+ char variant[9 + ATH10K_SMBIOS_BDF_EXT_STR_LENGTH] = {};
+
+ if (with_variant && ar->id.bdf_ext[0] != '\0')
+ scnprintf(variant, sizeof(variant), ",variant=%s",
+ ar->id.bdf_ext);
+
+ if (ar->id.bmi_ids_valid) {
+ scnprintf(name, name_len,
+ "bus=%s,bmi-chip-id=%d,bmi-board-id=%d%s",
+ ath10k_bus_str(ar->hif.bus),
+ ar->id.bmi_chip_id,
+ ar->id.bmi_board_id, variant);
+ goto out;
+ }
+
+ if (ar->id.qmi_ids_valid) {
+ if (with_chip_id)
+ scnprintf(name, name_len,
+ "bus=%s,qmi-board-id=%x,qmi-chip-id=%x%s",
+ ath10k_bus_str(ar->hif.bus),
+ ar->id.qmi_board_id, ar->id.qmi_chip_id,
+ variant);
+ else
+ scnprintf(name, name_len,
+ "bus=%s,qmi-board-id=%x",
+ ath10k_bus_str(ar->hif.bus),
+ ar->id.qmi_board_id);
+ goto out;
+ }
+
+ scnprintf(name, name_len,
+ "bus=%s,vendor=%04x,device=%04x,subsystem-vendor=%04x,subsystem-device=%04x%s",
+ ath10k_bus_str(ar->hif.bus),
+ ar->id.vendor, ar->id.device,
+ ar->id.subsystem_vendor, ar->id.subsystem_device, variant);
+out:
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot using board name '%s'\n", name);
+
+ return 0;
+}
+
+static int ath10k_core_create_eboard_name(struct ath10k *ar, char *name,
+ size_t name_len)
+{
+ if (ar->id.bmi_ids_valid) {
+ scnprintf(name, name_len,
+ "bus=%s,bmi-chip-id=%d,bmi-eboard-id=%d",
+ ath10k_bus_str(ar->hif.bus),
+ ar->id.bmi_chip_id,
+ ar->id.bmi_eboard_id);
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot using eboard name '%s'\n", name);
+ return 0;
+ }
+ /* Fallback if returned board id is zero */
+ return -1;
+}
+
+int ath10k_core_fetch_board_file(struct ath10k *ar, int bd_ie_type)
+{
+ char boardname[100], fallback_boardname1[100], fallback_boardname2[100];
+ int ret;
+
+ if (bd_ie_type == ATH10K_BD_IE_BOARD) {
+ /* With variant and chip id */
+ ret = ath10k_core_create_board_name(ar, boardname,
+ sizeof(boardname), true,
+ true);
+ if (ret) {
+ ath10k_err(ar, "failed to create board name: %d", ret);
+ return ret;
+ }
+
+ /* Without the variant but with the chip-id */
+ ret = ath10k_core_create_board_name(ar, fallback_boardname1,
+ sizeof(fallback_boardname1), false,
+ true);
+ if (ret) {
+ ath10k_err(ar, "failed to create 1st fallback board name: %d",
+ ret);
+ return ret;
+ }
+
+ /* Without variant and without chip-id */
+ ret = ath10k_core_create_board_name(ar, fallback_boardname2,
+ sizeof(fallback_boardname2), false,
+ false);
+ if (ret) {
+ ath10k_err(ar, "failed to create 2nd fallback board name: %d",
+ ret);
+ return ret;
+ }
+ } else if (bd_ie_type == ATH10K_BD_IE_BOARD_EXT) {
+ ret = ath10k_core_create_eboard_name(ar, boardname,
+ sizeof(boardname));
+ if (ret) {
+ ath10k_err(ar, "fallback to eboard.bin since board id 0");
+ goto fallback;
+ }
+ }
+
+ ar->bd_api = 2;
+ ret = ath10k_core_fetch_board_data_api_n(ar, boardname,
+ fallback_boardname1,
+ fallback_boardname2,
+ ATH10K_BOARD_API2_FILE);
+ if (!ret)
+ goto success;
+
+fallback:
+ ar->bd_api = 1;
+ ret = ath10k_core_fetch_board_data_api_1(ar, bd_ie_type);
+ if (ret) {
+ ath10k_err(ar, "failed to fetch board-2.bin or board.bin from %s\n",
+ ar->hw_params.fw.dir);
+ return ret;
+ }
+
+success:
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "using board api %d\n", ar->bd_api);
+ return 0;
+}
+EXPORT_SYMBOL(ath10k_core_fetch_board_file);
+
+static int ath10k_core_get_ext_board_id_from_otp(struct ath10k *ar)
+{
+ u32 result, address;
+ u8 ext_board_id;
+ int ret;
+
+ address = ar->hw_params.patch_load_addr;
+
+ if (!ar->normal_mode_fw.fw_file.otp_data ||
+ !ar->normal_mode_fw.fw_file.otp_len) {
+ ath10k_warn(ar,
+ "failed to retrieve extended board id due to otp binary missing\n");
+ return -ENODATA;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "boot upload otp to 0x%x len %zd for ext board id\n",
+ address, ar->normal_mode_fw.fw_file.otp_len);
+
+ ret = ath10k_bmi_fast_download(ar, address,
+ ar->normal_mode_fw.fw_file.otp_data,
+ ar->normal_mode_fw.fw_file.otp_len);
+ if (ret) {
+ ath10k_err(ar, "could not write otp for ext board id check: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = ath10k_bmi_execute(ar, address, BMI_PARAM_GET_EXT_BOARD_ID, &result);
+ if (ret) {
+ ath10k_err(ar, "could not execute otp for ext board id check: %d\n",
+ ret);
+ return ret;
+ }
+
+ if (!result) {
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "ext board id does not exist in otp, ignore it\n");
+ return -EOPNOTSUPP;
+ }
+
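+	/* mask off everything but the board-id bits of the OTP execute result */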
+ ext_board_id = result & ATH10K_BMI_EBOARD_ID_STATUS_MASK;
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "boot get otp ext board id result 0x%08x ext_board_id %d\n",
+ result, ext_board_id);
+
+ ar->id.bmi_eboard_id = ext_board_id;
+
+ return 0;
+}
+
+static int ath10k_download_board_data(struct ath10k *ar, const void *data,
+ size_t data_len)
+{
+ u32 board_data_size = ar->hw_params.fw.board_size;
+ u32 eboard_data_size = ar->hw_params.fw.ext_board_size;
+ u32 board_address;
+ u32 ext_board_address;
+ int ret;
+
+ ret = ath10k_push_board_ext_data(ar, data, data_len);
+ if (ret) {
+ ath10k_err(ar, "could not push board ext data (%d)\n", ret);
+ goto exit;
+ }
+
+ ret = ath10k_bmi_read32(ar, hi_board_data, &board_address);
+ if (ret) {
+ ath10k_err(ar, "could not read board data addr (%d)\n", ret);
+ goto exit;
+ }
+
+ ret = ath10k_bmi_write_memory(ar, board_address, data,
+ min_t(u32, board_data_size,
+ data_len));
+ if (ret) {
+ ath10k_err(ar, "could not write board data (%d)\n", ret);
+ goto exit;
+ }
+
+ ret = ath10k_bmi_write32(ar, hi_board_data_initialized, 1);
+ if (ret) {
+ ath10k_err(ar, "could not write board data bit (%d)\n", ret);
+ goto exit;
+ }
+
+ if (!ar->id.ext_bid_supported)
+ goto exit;
+
+ /* Extended board data download */
+ ret = ath10k_core_get_ext_board_id_from_otp(ar);
+ if (ret == -EOPNOTSUPP) {
+ /* Not fetching ext_board_data if ext board id is 0 */
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "otp returned ext board id 0\n");
+ return 0;
+ } else if (ret) {
+ ath10k_err(ar, "failed to get extended board id: %d\n", ret);
+ goto exit;
+ }
+
+ ret = ath10k_core_fetch_board_file(ar, ATH10K_BD_IE_BOARD_EXT);
+ if (ret)
+ goto exit;
+
+ if (ar->normal_mode_fw.ext_board_data) {
+ ext_board_address = board_address + EXT_BOARD_ADDRESS_OFFSET;
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "boot writing ext board data to addr 0x%x",
+ ext_board_address);
+ ret = ath10k_bmi_write_memory(ar, ext_board_address,
+ ar->normal_mode_fw.ext_board_data,
+ min_t(u32, eboard_data_size, data_len));
+ if (ret)
+ ath10k_err(ar, "failed to write ext board data: %d\n", ret);
+ }
+
+exit:
+ return ret;
+}
+
+static int ath10k_download_and_run_otp(struct ath10k *ar)
+{
+ u32 result, address = ar->hw_params.patch_load_addr;
+ u32 bmi_otp_exe_param = ar->hw_params.otp_exe_param;
+ int ret;
+
+ ret = ath10k_download_board_data(ar,
+ ar->running_fw->board_data,
+ ar->running_fw->board_len);
+ if (ret) {
+ ath10k_err(ar, "failed to download board data: %d\n", ret);
+ return ret;
+ }
+
+ /* OTP is optional */
+
+ if (!ar->running_fw->fw_file.otp_data ||
+ !ar->running_fw->fw_file.otp_len) {
+ ath10k_warn(ar, "Not running otp, calibration will be incorrect (otp-data %p otp_len %zd)!\n",
+ ar->running_fw->fw_file.otp_data,
+ ar->running_fw->fw_file.otp_len);
+ return 0;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot upload otp to 0x%x len %zd\n",
+ address, ar->running_fw->fw_file.otp_len);
+
+ ret = ath10k_bmi_fast_download(ar, address,
+ ar->running_fw->fw_file.otp_data,
+ ar->running_fw->fw_file.otp_len);
+ if (ret) {
+ ath10k_err(ar, "could not write otp (%d)\n", ret);
+ return ret;
+ }
+
+ /* As of now, pre-calibration is only valid for 10.4 firmware variants */
+ if (ar->cal_mode == ATH10K_PRE_CAL_MODE_DT ||
+ ar->cal_mode == ATH10K_PRE_CAL_MODE_FILE ||
+ ar->cal_mode == ATH10K_PRE_CAL_MODE_NVMEM)
+ bmi_otp_exe_param = BMI_PARAM_FLASH_SECTION_ALL;
+
+ ret = ath10k_bmi_execute(ar, address, bmi_otp_exe_param, &result);
+ if (ret) {
+ ath10k_err(ar, "could not execute otp (%d)\n", ret);
+ return ret;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot otp execute result %d\n", result);
+
+ if (!(skip_otp || test_bit(ATH10K_FW_FEATURE_IGNORE_OTP_RESULT,
+ ar->running_fw->fw_file.fw_features)) &&
+ result != 0) {
+ ath10k_err(ar, "otp calibration failed: %d", result);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ath10k_download_cal_file(struct ath10k *ar,
+ const struct firmware *file)
+{
+ int ret;
+
+ if (!file)
+ return -ENOENT;
+
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+
+ ret = ath10k_download_board_data(ar, file->data, file->size);
+ if (ret) {
+ ath10k_err(ar, "failed to download cal_file data: %d\n", ret);
+ return ret;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cal file downloaded\n");
+
+ return 0;
+}
+
+static int ath10k_download_cal_dt(struct ath10k *ar, const char *dt_name)
+{
+ struct device_node *node;
+ int data_len;
+ void *data;
+ int ret;
+
+ node = ar->dev->of_node;
+ if (!node)
+ /* Device Tree is optional, don't print any warnings if
+ * there's no node for ath10k.
+ */
+ return -ENOENT;
+
+ if (!of_get_property(node, dt_name, &data_len)) {
+ /* The calibration data node is optional */
+ return -ENOENT;
+ }
+
+ if (data_len != ar->hw_params.cal_data_len) {
+ ath10k_warn(ar, "invalid calibration data length in DT: %d\n",
+ data_len);
+ ret = -EMSGSIZE;
+ goto out;
+ }
+
+ data = kmalloc(data_len, GFP_KERNEL);
+ if (!data) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = of_property_read_u8_array(node, dt_name, data, data_len);
+ if (ret) {
+ ath10k_warn(ar, "failed to read calibration data from DT: %d\n",
+ ret);
+ goto out_free;
+ }
+
+ ret = ath10k_download_board_data(ar, data, data_len);
+ if (ret) {
+ ath10k_warn(ar, "failed to download calibration data from Device Tree: %d\n",
+ ret);
+ goto out_free;
+ }
+
+ ret = 0;
+
+out_free:
+ kfree(data);
+
+out:
+ return ret;
+}
+
+static int ath10k_download_cal_eeprom(struct ath10k *ar)
+{
+ size_t data_len;
+ void *data = NULL;
+ int ret;
+
+ ret = ath10k_hif_fetch_cal_eeprom(ar, &data, &data_len);
+ if (ret) {
+ if (ret != -EOPNOTSUPP)
+ ath10k_warn(ar, "failed to read calibration data from EEPROM: %d\n",
+ ret);
+ goto out_free;
+ }
+
+ ret = ath10k_download_board_data(ar, data, data_len);
+ if (ret) {
+ ath10k_warn(ar, "failed to download calibration data from EEPROM: %d\n",
+ ret);
+ goto out_free;
+ }
+
+ ret = 0;
+
+out_free:
+ kfree(data);
+
+ return ret;
+}
+
+static int ath10k_download_cal_nvmem(struct ath10k *ar, const char *cell_name)
+{
+ struct nvmem_cell *cell;
+ void *buf;
+ size_t len;
+ int ret;
+
+ cell = devm_nvmem_cell_get(ar->dev, cell_name);
+ if (IS_ERR(cell)) {
+ ret = PTR_ERR(cell);
+ return ret;
+ }
+
+ buf = nvmem_cell_read(cell, &len);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+
+ if (ar->hw_params.cal_data_len != len) {
+ kfree(buf);
+ ath10k_warn(ar, "invalid calibration data length in nvmem-cell '%s': %zu != %u\n",
+ cell_name, len, ar->hw_params.cal_data_len);
+ return -EMSGSIZE;
+ }
+
+ ret = ath10k_download_board_data(ar, buf, len);
+ kfree(buf);
+ if (ret)
+ ath10k_warn(ar, "failed to download calibration data from nvmem-cell '%s': %d\n",
+ cell_name, ret);
+
+ return ret;
+}
+
+int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name,
+ struct ath10k_fw_file *fw_file)
+{
+ size_t magic_len, len, ie_len;
+ int ie_id, i, index, bit, ret;
+ struct ath10k_fw_ie *hdr;
+ const u8 *data;
+ __le32 *timestamp, *version;
+
+ /* first fetch the firmware file (firmware-*.bin) */
+ fw_file->firmware = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir,
+ name);
+ if (IS_ERR(fw_file->firmware))
+ return PTR_ERR(fw_file->firmware);
+
+ data = fw_file->firmware->data;
+ len = fw_file->firmware->size;
+
+ /* magic also includes the null byte, check that as well */
+ magic_len = strlen(ATH10K_FIRMWARE_MAGIC) + 1;
+
+ if (len < magic_len) {
+ ath10k_err(ar, "firmware file '%s/%s' too small to contain magic: %zu\n",
+ ar->hw_params.fw.dir, name, len);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ if (memcmp(data, ATH10K_FIRMWARE_MAGIC, magic_len) != 0) {
+ ath10k_err(ar, "invalid firmware magic\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /* jump over the padding */
+ magic_len = ALIGN(magic_len, 4);
+
+ len -= magic_len;
+ data += magic_len;
+
+ /* loop over the IE list: each element is an ath10k_fw_ie header
+ * followed by a payload padded to 4 bytes
+ */
+ while (len > sizeof(struct ath10k_fw_ie)) {
+ hdr = (struct ath10k_fw_ie *)data;
+
+ ie_id = le32_to_cpu(hdr->id);
+ ie_len = le32_to_cpu(hdr->len);
+
+ len -= sizeof(*hdr);
+ data += sizeof(*hdr);
+
+ if (len < ie_len) {
+ ath10k_err(ar, "invalid length for FW IE %d (%zu < %zu)\n",
+ ie_id, len, ie_len);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ switch (ie_id) {
+ case ATH10K_FW_IE_FW_VERSION:
+ if (ie_len > sizeof(fw_file->fw_version) - 1)
+ break;
+
+ memcpy(fw_file->fw_version, data, ie_len);
+ fw_file->fw_version[ie_len] = '\0';
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "found fw version %s\n",
+ fw_file->fw_version);
+ break;
+ case ATH10K_FW_IE_TIMESTAMP:
+ if (ie_len != sizeof(u32))
+ break;
+
+ timestamp = (__le32 *)data;
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "found fw timestamp %d\n",
+ le32_to_cpup(timestamp));
+ break;
+ case ATH10K_FW_IE_FEATURES:
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "found firmware features ie (%zd B)\n",
+ ie_len);
+
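+			/* feature flags are packed one bit each, LSB-first, into the IE payload */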
+ for (i = 0; i < ATH10K_FW_FEATURE_COUNT; i++) {
+ index = i / 8;
+ bit = i % 8;
+
+ if (index == ie_len)
+ break;
+
+ if (data[index] & (1 << bit)) {
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "Enabling feature bit: %i\n",
+ i);
+ __set_bit(i, fw_file->fw_features);
+ }
+ }
+
+ ath10k_dbg_dump(ar, ATH10K_DBG_BOOT, "features", "",
+ fw_file->fw_features,
+ sizeof(fw_file->fw_features));
+ break;
+ case ATH10K_FW_IE_FW_IMAGE:
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "found fw image ie (%zd B)\n",
+ ie_len);
+
+ fw_file->firmware_data = data;
+ fw_file->firmware_len = ie_len;
+
+ break;
+ case ATH10K_FW_IE_OTP_IMAGE:
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "found otp image ie (%zd B)\n",
+ ie_len);
+
+ fw_file->otp_data = data;
+ fw_file->otp_len = ie_len;
+
+ break;
+ case ATH10K_FW_IE_WMI_OP_VERSION:
+ if (ie_len != sizeof(u32))
+ break;
+
+ version = (__le32 *)data;
+
+ fw_file->wmi_op_version = le32_to_cpup(version);
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "found fw ie wmi op version %d\n",
+ fw_file->wmi_op_version);
+ break;
+ case ATH10K_FW_IE_HTT_OP_VERSION:
+ if (ie_len != sizeof(u32))
+ break;
+
+ version = (__le32 *)data;
+
+ fw_file->htt_op_version = le32_to_cpup(version);
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "found fw ie htt op version %d\n",
+ fw_file->htt_op_version);
+ break;
+ case ATH10K_FW_IE_FW_CODE_SWAP_IMAGE:
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "found fw code swap image ie (%zd B)\n",
+ ie_len);
+ fw_file->codeswap_data = data;
+ fw_file->codeswap_len = ie_len;
+ break;
+ default:
+ ath10k_warn(ar, "Unknown FW IE: %u\n",
+ le32_to_cpu(hdr->id));
+ break;
+ }
+
+ /* jump over the padding */
+ ie_len = ALIGN(ie_len, 4);
+
+ len -= ie_len;
+ data += ie_len;
+ }
+
+ if (!test_bit(ATH10K_FW_FEATURE_NON_BMI, fw_file->fw_features) &&
+ (!fw_file->firmware_data || !fw_file->firmware_len)) {
+ ath10k_warn(ar, "No ATH10K_FW_IE_FW_IMAGE found from '%s/%s', skipping\n",
+ ar->hw_params.fw.dir, name);
+ ret = -ENOMEDIUM;
+ goto err;
+ }
+
+ return 0;
+
+err:
+ ath10k_core_free_firmware_files(ar);
+ return ret;
+}
+
+static void ath10k_core_get_fw_name(struct ath10k *ar, char *fw_name,
+ size_t fw_name_len, int fw_api)
+{
+ switch (ar->hif.bus) {
+ case ATH10K_BUS_SDIO:
+ case ATH10K_BUS_USB:
+ scnprintf(fw_name, fw_name_len, "%s-%s-%d.bin",
+ ATH10K_FW_FILE_BASE, ath10k_bus_str(ar->hif.bus),
+ fw_api);
+ break;
+ case ATH10K_BUS_PCI:
+ case ATH10K_BUS_AHB:
+ case ATH10K_BUS_SNOC:
+ scnprintf(fw_name, fw_name_len, "%s-%d.bin",
+ ATH10K_FW_FILE_BASE, fw_api);
+ break;
+ }
+}
+
+static int ath10k_core_fetch_firmware_files(struct ath10k *ar)
+{
+ int ret, i;
+ char fw_name[100];
+
+ /* calibration file is optional, don't check for any errors */
+ ath10k_fetch_cal_file(ar);
+
+ for (i = ATH10K_FW_API_MAX; i >= ATH10K_FW_API_MIN; i--) {
+ ar->fw_api = i;
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n",
+ ar->fw_api);
+
+ ath10k_core_get_fw_name(ar, fw_name, sizeof(fw_name), ar->fw_api);
+ ret = ath10k_core_fetch_firmware_api_n(ar, fw_name,
+ &ar->normal_mode_fw.fw_file);
+ if (!ret)
+ goto success;
+ }
+
+ /* we end up here if we couldn't fetch any firmware */
+
+ ath10k_err(ar, "Failed to find firmware-N.bin (N between %d and %d) from %s: %d",
+ ATH10K_FW_API_MIN, ATH10K_FW_API_MAX, ar->hw_params.fw.dir,
+ ret);
+
+ return ret;
+
+success:
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "using fw api %d\n", ar->fw_api);
+
+ return 0;
+}
+
+static int ath10k_core_pre_cal_download(struct ath10k *ar)
+{
+ int ret;
+
+ ret = ath10k_download_cal_nvmem(ar, "pre-calibration");
+ if (ret == 0) {
+ ar->cal_mode = ATH10K_PRE_CAL_MODE_NVMEM;
+ goto success;
+ } else if (ret == -EPROBE_DEFER) {
+ return ret;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "boot did not find a pre-calibration nvmem-cell, try file next: %d\n",
+ ret);
+
+ ret = ath10k_download_cal_file(ar, ar->pre_cal_file);
+ if (ret == 0) {
+ ar->cal_mode = ATH10K_PRE_CAL_MODE_FILE;
+ goto success;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "boot did not find a pre calibration file, try DT next: %d\n",
+ ret);
+
+ ret = ath10k_download_cal_dt(ar, "qcom,pre-calibration-data");
+ if (ret == -ENOENT)
+ ret = ath10k_download_cal_dt(ar, "qcom,ath10k-pre-calibration-data");
+ if (ret) {
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "unable to load pre cal data from DT: %d\n", ret);
+ return ret;
+ }
+ ar->cal_mode = ATH10K_PRE_CAL_MODE_DT;
+
+success:
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot using calibration mode %s\n",
+ ath10k_cal_mode_str(ar->cal_mode));
+
+ return 0;
+}
+
+static int ath10k_core_pre_cal_config(struct ath10k *ar)
+{
+ int ret;
+
+ ret = ath10k_core_pre_cal_download(ar);
+ if (ret) {
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "failed to load pre cal data: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath10k_core_get_board_id_from_otp(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to get board id: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath10k_download_and_run_otp(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to run otp: %d\n", ret);
+ return ret;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "pre cal configuration done successfully\n");
+
+ return 0;
+}
+
+static int ath10k_download_cal_data(struct ath10k *ar)
+{
+ int ret;
+
+ ret = ath10k_core_pre_cal_config(ar);
+ if (ret == 0)
+ return 0;
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "pre cal download procedure failed, try cal file: %d\n",
+ ret);
+
+ ret = ath10k_download_cal_nvmem(ar, "calibration");
+ if (ret == 0) {
+ ar->cal_mode = ATH10K_CAL_MODE_NVMEM;
+ goto done;
+ } else if (ret == -EPROBE_DEFER) {
+ return ret;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "boot did not find a calibration nvmem-cell, try file next: %d\n",
+ ret);
+
+ ret = ath10k_download_cal_file(ar, ar->cal_file);
+ if (ret == 0) {
+ ar->cal_mode = ATH10K_CAL_MODE_FILE;
+ goto done;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "boot did not find a calibration file, try DT next: %d\n",
+ ret);
+
+ ret = ath10k_download_cal_dt(ar, "qcom,calibration-data");
+ if (ret == -ENOENT)
+ ret = ath10k_download_cal_dt(ar, "qcom,ath10k-calibration-data");
+ if (ret == 0) {
+ ar->cal_mode = ATH10K_CAL_MODE_DT;
+ goto done;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "boot did not find DT entry, try target EEPROM next: %d\n",
+ ret);
+
+ ret = ath10k_download_cal_eeprom(ar);
+ if (ret == 0) {
+ ar->cal_mode = ATH10K_CAL_MODE_EEPROM;
+ goto done;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "boot did not find target EEPROM entry, try OTP next: %d\n",
+ ret);
+
+ ret = ath10k_download_and_run_otp(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to run otp: %d\n", ret);
+ return ret;
+ }
+
+ ar->cal_mode = ATH10K_CAL_MODE_OTP;
+
+done:
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot using calibration mode %s\n",
+ ath10k_cal_mode_str(ar->cal_mode));
+ return 0;
+}
+
+static void ath10k_core_fetch_btcoex_dt(struct ath10k *ar)
+{
+ struct device_node *node;
+ u8 coex_support = 0;
+ int ret;
+
+ node = ar->dev->of_node;
+ if (!node)
+ goto out;
+
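+	/* default to enabling coex support when the DT property is absent */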
+ ret = of_property_read_u8(node, "qcom,coexist-support", &coex_support);
+ if (ret) {
+ ar->coex_support = true;
+ goto out;
+ }
+
+ if (coex_support) {
+ ar->coex_support = true;
+ } else {
+ ar->coex_support = false;
+ ar->coex_gpio_pin = -1;
+ goto out;
+ }
+
+ ret = of_property_read_u32(node, "qcom,coexist-gpio-pin",
+ &ar->coex_gpio_pin);
+ if (ret)
+ ar->coex_gpio_pin = -1;
+
+out:
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot coex_support %d coex_gpio_pin %d\n",
+ ar->coex_support, ar->coex_gpio_pin);
+}
+
+static int ath10k_init_uart(struct ath10k *ar)
+{
+ int ret;
+
+ /*
+ * Explicitly disable UART prints, as the target turns them on
+ * based on scratch registers.
+ */
+ ret = ath10k_bmi_write32(ar, hi_serial_enable, 0);
+ if (ret) {
+ ath10k_warn(ar, "could not disable UART prints (%d)\n", ret);
+ return ret;
+ }
+
+ if (!uart_print) {
+ if (ar->hw_params.uart_pin_workaround) {
+ ret = ath10k_bmi_write32(ar, hi_dbg_uart_txpin,
+ ar->hw_params.uart_pin);
+ if (ret) {
+ ath10k_warn(ar, "failed to set UART TX pin: %d",
+ ret);
+ return ret;
+ }
+ }
+
+ return 0;
+ }
+
+ ret = ath10k_bmi_write32(ar, hi_dbg_uart_txpin, ar->hw_params.uart_pin);
+ if (ret) {
+ ath10k_warn(ar, "could not enable UART prints (%d)\n", ret);
+ return ret;
+ }
+
+ ret = ath10k_bmi_write32(ar, hi_serial_enable, 1);
+ if (ret) {
+ ath10k_warn(ar, "could not enable UART prints (%d)\n", ret);
+ return ret;
+ }
+
+ /* Set the UART baud rate to 19200. */
+ ret = ath10k_bmi_write32(ar, hi_desired_baud_rate, 19200);
+ if (ret) {
+ ath10k_warn(ar, "could not set the baud rate (%d)\n", ret);
+ return ret;
+ }
+
+ ath10k_info(ar, "UART prints enabled\n");
+ return 0;
+}
+
+static int ath10k_init_hw_params(struct ath10k *ar)
+{
+ const struct ath10k_hw_params *hw_params;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ath10k_hw_params_list); i++) {
+ hw_params = &ath10k_hw_params_list[i];
+
+ if (hw_params->bus == ar->hif.bus &&
+ hw_params->id == ar->target_version &&
+ hw_params->dev_id == ar->dev_id)
+ break;
+ }
+
+ if (i == ARRAY_SIZE(ath10k_hw_params_list)) {
+ ath10k_err(ar, "Unsupported hardware version: 0x%x\n",
+ ar->target_version);
+ return -EINVAL;
+ }
+
+ ar->hw_params = *hw_params;
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "Hardware name %s version 0x%x\n",
+ ar->hw_params.name, ar->target_version);
+
+ return 0;
+}
+
+static void ath10k_core_recovery_check_work(struct work_struct *work)
+{
+ struct ath10k *ar = container_of(work, struct ath10k, recovery_check_work);
+ long time_left;
+
+ /* Sometimes a recovery attempt fails and every subsequent attempt
+ * fails as well, so cap the consecutive failure count to avoid an
+ * infinite recovery loop.
+ */
+ if (atomic_read(&ar->fail_cont_count) >= ATH10K_RECOVERY_MAX_FAIL_COUNT) {
+ ath10k_err(ar, "consecutive fail %d times, will shutdown driver!",
+ atomic_read(&ar->fail_cont_count));
+ ar->state = ATH10K_STATE_WEDGED;
+ return;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "total recovery count: %d", ++ar->recovery_count);
+
+ if (atomic_read(&ar->pending_recovery)) {
+ /* Sometimes another recovery work is queued before the previous one
+ * has completed; the second would then destroy the state of the
+ * first, so wait below to avoid that.
+ */
+ time_left = wait_for_completion_timeout(&ar->driver_recovery,
+ ATH10K_RECOVERY_TIMEOUT_HZ);
+ if (time_left) {
+ ath10k_warn(ar, "previous recovery succeeded, skip this!\n");
+ return;
+ }
+
+ /* Record the continuous recovery fail count when recovery failed. */
+ atomic_inc(&ar->fail_cont_count);
+
+ /* Avoid having multiple recoveries at the same time. */
+ return;
+ }
+
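+	/* mark a recovery as in flight and kick off the actual restart */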
+ atomic_inc(&ar->pending_recovery);
+ queue_work(ar->workqueue, &ar->restart_work);
+}
+
+void ath10k_core_start_recovery(struct ath10k *ar)
+{
+ /* Use workqueue_aux to avoid blocking recovery tracking */
+ queue_work(ar->workqueue_aux, &ar->recovery_check_work);
+}
+EXPORT_SYMBOL(ath10k_core_start_recovery);
+
+void ath10k_core_napi_enable(struct ath10k *ar)
+{
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (test_bit(ATH10K_FLAG_NAPI_ENABLED, &ar->dev_flags))
+ return;
+
+ napi_enable(&ar->napi);
+ set_bit(ATH10K_FLAG_NAPI_ENABLED, &ar->dev_flags);
+}
+EXPORT_SYMBOL(ath10k_core_napi_enable);
+
+void ath10k_core_napi_sync_disable(struct ath10k *ar)
+{
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (!test_bit(ATH10K_FLAG_NAPI_ENABLED, &ar->dev_flags))
+ return;
+
+ napi_synchronize(&ar->napi);
+ napi_disable(&ar->napi);
+ clear_bit(ATH10K_FLAG_NAPI_ENABLED, &ar->dev_flags);
+}
+EXPORT_SYMBOL(ath10k_core_napi_sync_disable);
+
+static void ath10k_core_restart(struct work_struct *work)
+{
+ struct ath10k *ar = container_of(work, struct ath10k, restart_work);
+ int ret;
+
+ reinit_completion(&ar->driver_recovery);
+
+ set_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags);
+
+ /* Place a barrier to make sure the compiler doesn't reorder
+ * CRASH_FLUSH and calling other functions.
+ */
+ barrier();
+
+ ieee80211_stop_queues(ar->hw);
+ ath10k_drain_tx(ar);
+ complete(&ar->scan.started);
+ complete(&ar->scan.completed);
+ complete(&ar->scan.on_channel);
+ complete(&ar->offchan_tx_completed);
+ complete(&ar->install_key_done);
+ complete(&ar->vdev_setup_done);
+ complete(&ar->vdev_delete_done);
+ complete(&ar->thermal.wmi_sync);
+ complete(&ar->bss_survey_done);
+ wake_up(&ar->htt.empty_tx_wq);
+ wake_up(&ar->wmi.tx_credits_wq);
+ wake_up(&ar->peer_mapping_wq);
+
+ /* TODO: The cancelling of coverage_class_work could be unified by
+ * moving it to ath10k_halt(), so that both stop() and restart() would
+ * call it. However, ath10k_halt() takes conf_mutex, and calling
+ * cancel_work_sync() while holding conf_mutex would deadlock.
+ */
+ cancel_work_sync(&ar->set_coverage_class_work);
+
+ mutex_lock(&ar->conf_mutex);
+
+ switch (ar->state) {
+ case ATH10K_STATE_ON:
+ ar->state = ATH10K_STATE_RESTARTING;
+ ath10k_halt(ar);
+ ath10k_scan_finish(ar);
+ ieee80211_restart_hw(ar->hw);
+ break;
+ case ATH10K_STATE_OFF:
+ /* this can happen if driver is being unloaded
+ * or if the crash happens during FW probing
+ */
+ ath10k_warn(ar, "cannot restart a device that hasn't been started\n");
+ break;
+ case ATH10K_STATE_RESTARTING:
+ /* hw restart might be requested from multiple places */
+ break;
+ case ATH10K_STATE_RESTARTED:
+ ar->state = ATH10K_STATE_WEDGED;
+ fallthrough;
+ case ATH10K_STATE_WEDGED:
+ ath10k_warn(ar, "device is wedged, will not restart\n");
+ break;
+ case ATH10K_STATE_UTF:
+ ath10k_warn(ar, "firmware restart in UTF mode not supported\n");
+ break;
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+
+ ret = ath10k_coredump_submit(ar);
+ if (ret)
+ ath10k_warn(ar, "failed to send firmware crash dump via devcoredump: %d",
+ ret);
+}
+
+static void ath10k_core_set_coverage_class_work(struct work_struct *work)
+{
+ struct ath10k *ar = container_of(work, struct ath10k,
+ set_coverage_class_work);
+
+ if (ar->hw_params.hw_ops->set_coverage_class)
+ ar->hw_params.hw_ops->set_coverage_class(ar, -1, -1);
+}
+
+static int ath10k_core_init_firmware_features(struct ath10k *ar)
+{
+ struct ath10k_fw_file *fw_file = &ar->normal_mode_fw.fw_file;
+ int max_num_peers;
+
+ if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, fw_file->fw_features) &&
+ !test_bit(ATH10K_FW_FEATURE_WMI_10X, fw_file->fw_features)) {
+ ath10k_err(ar, "feature bits corrupted: 10.2 feature requires 10.x feature to be set as well");
+ return -EINVAL;
+ }
+
+ if (fw_file->wmi_op_version >= ATH10K_FW_WMI_OP_VERSION_MAX) {
+ ath10k_err(ar, "unsupported WMI OP version (max %d): %d\n",
+ ATH10K_FW_WMI_OP_VERSION_MAX, fw_file->wmi_op_version);
+ return -EINVAL;
+ }
+
+ ar->wmi.rx_decap_mode = ATH10K_HW_TXRX_NATIVE_WIFI;
+ switch (ath10k_cryptmode_param) {
+ case ATH10K_CRYPT_MODE_HW:
+ clear_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags);
+ clear_bit(ATH10K_FLAG_HW_CRYPTO_DISABLED, &ar->dev_flags);
+ break;
+ case ATH10K_CRYPT_MODE_SW:
+ if (!test_bit(ATH10K_FW_FEATURE_RAW_MODE_SUPPORT,
+ fw_file->fw_features)) {
+ ath10k_err(ar, "cryptmode > 0 requires raw mode support from firmware");
+ return -EINVAL;
+ }
+
+ set_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags);
+ set_bit(ATH10K_FLAG_HW_CRYPTO_DISABLED, &ar->dev_flags);
+ break;
+ default:
+ ath10k_info(ar, "invalid cryptmode: %d\n",
+ ath10k_cryptmode_param);
+ return -EINVAL;
+ }
+
+ ar->htt.max_num_amsdu = ATH10K_HTT_MAX_NUM_AMSDU_DEFAULT;
+ ar->htt.max_num_ampdu = ATH10K_HTT_MAX_NUM_AMPDU_DEFAULT;
+
+ if (ath10k_frame_mode == ATH10K_HW_TXRX_RAW) {
+ if (!test_bit(ATH10K_FW_FEATURE_RAW_MODE_SUPPORT,
+ fw_file->fw_features)) {
+ ath10k_err(ar, "rawmode = 1 requires support from firmware");
+ return -EINVAL;
+ }
+ set_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags);
+ }
+
+ if (test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
+ ar->wmi.rx_decap_mode = ATH10K_HW_TXRX_RAW;
+
+ /* Workaround:
+ *
+ * Firmware A-MSDU aggregation breaks with RAW Tx encap mode
+ * and causes enormous performance issues (malformed frames,
+ * etc).
+ *
+ * Disabling A-MSDU makes RAW mode stable with heavy traffic
+ * albeit a bit slower compared to regular operation.
+ */
+ ar->htt.max_num_amsdu = 1;
+ }
+
+ /* Backwards compatibility for firmwares without
+ * ATH10K_FW_IE_WMI_OP_VERSION.
+ */
+ if (fw_file->wmi_op_version == ATH10K_FW_WMI_OP_VERSION_UNSET) {
+ if (test_bit(ATH10K_FW_FEATURE_WMI_10X, fw_file->fw_features)) {
+ if (test_bit(ATH10K_FW_FEATURE_WMI_10_2,
+ fw_file->fw_features))
+ fw_file->wmi_op_version = ATH10K_FW_WMI_OP_VERSION_10_2;
+ else
+ fw_file->wmi_op_version = ATH10K_FW_WMI_OP_VERSION_10_1;
+ } else {
+ fw_file->wmi_op_version = ATH10K_FW_WMI_OP_VERSION_MAIN;
+ }
+ }
+
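+	/* derive the target resource limits from the WMI op version */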
+ switch (fw_file->wmi_op_version) {
+ case ATH10K_FW_WMI_OP_VERSION_MAIN:
+ max_num_peers = TARGET_NUM_PEERS;
+ ar->max_num_stations = TARGET_NUM_STATIONS;
+ ar->max_num_vdevs = TARGET_NUM_VDEVS;
+ ar->htt.max_num_pending_tx = TARGET_NUM_MSDU_DESC;
+ ar->fw_stats_req_mask = WMI_STAT_PDEV | WMI_STAT_VDEV |
+ WMI_STAT_PEER;
+ ar->max_spatial_stream = WMI_MAX_SPATIAL_STREAM;
+ break;
+ case ATH10K_FW_WMI_OP_VERSION_10_1:
+ case ATH10K_FW_WMI_OP_VERSION_10_2:
+ case ATH10K_FW_WMI_OP_VERSION_10_2_4:
+ if (ath10k_peer_stats_enabled(ar)) {
+ max_num_peers = TARGET_10X_TX_STATS_NUM_PEERS;
+ ar->max_num_stations = TARGET_10X_TX_STATS_NUM_STATIONS;
+ } else {
+ max_num_peers = TARGET_10X_NUM_PEERS;
+ ar->max_num_stations = TARGET_10X_NUM_STATIONS;
+ }
+ ar->max_num_vdevs = TARGET_10X_NUM_VDEVS;
+ ar->htt.max_num_pending_tx = TARGET_10X_NUM_MSDU_DESC;
+ ar->fw_stats_req_mask = WMI_STAT_PEER;
+ ar->max_spatial_stream = WMI_MAX_SPATIAL_STREAM;
+ break;
+ case ATH10K_FW_WMI_OP_VERSION_TLV:
+ max_num_peers = TARGET_TLV_NUM_PEERS;
+ ar->max_num_stations = TARGET_TLV_NUM_STATIONS;
+ ar->max_num_vdevs = TARGET_TLV_NUM_VDEVS;
+ ar->max_num_tdls_vdevs = TARGET_TLV_NUM_TDLS_VDEVS;
+ if (ar->hif.bus == ATH10K_BUS_SDIO)
+ ar->htt.max_num_pending_tx =
+ TARGET_TLV_NUM_MSDU_DESC_HL;
+ else
+ ar->htt.max_num_pending_tx = TARGET_TLV_NUM_MSDU_DESC;
+ ar->wow.max_num_patterns = TARGET_TLV_NUM_WOW_PATTERNS;
+ ar->fw_stats_req_mask = WMI_TLV_STAT_PDEV | WMI_TLV_STAT_VDEV |
+ WMI_TLV_STAT_PEER | WMI_TLV_STAT_PEER_EXTD;
+ ar->max_spatial_stream = WMI_MAX_SPATIAL_STREAM;
+ ar->wmi.mgmt_max_num_pending_tx = TARGET_TLV_MGMT_NUM_MSDU_DESC;
+ break;
+ case ATH10K_FW_WMI_OP_VERSION_10_4:
+ max_num_peers = TARGET_10_4_NUM_PEERS;
+ ar->max_num_stations = TARGET_10_4_NUM_STATIONS;
+ ar->num_active_peers = TARGET_10_4_ACTIVE_PEERS;
+ ar->max_num_vdevs = TARGET_10_4_NUM_VDEVS;
+ ar->num_tids = TARGET_10_4_TGT_NUM_TIDS;
+ ar->fw_stats_req_mask = WMI_10_4_STAT_PEER |
+ WMI_10_4_STAT_PEER_EXTD |
+ WMI_10_4_STAT_VDEV_EXTD;
+ ar->max_spatial_stream = ar->hw_params.max_spatial_stream;
+ ar->max_num_tdls_vdevs = TARGET_10_4_NUM_TDLS_VDEVS;
+
+ if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
+ fw_file->fw_features))
+ ar->htt.max_num_pending_tx = TARGET_10_4_NUM_MSDU_DESC_PFC;
+ else
+ ar->htt.max_num_pending_tx = TARGET_10_4_NUM_MSDU_DESC;
+ break;
+ case ATH10K_FW_WMI_OP_VERSION_UNSET:
+ case ATH10K_FW_WMI_OP_VERSION_MAX:
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ if (ar->hw_params.num_peers)
+ ar->max_num_peers = ar->hw_params.num_peers;
+ else
+ ar->max_num_peers = max_num_peers;
+
+ /* Backwards compatibility for firmwares without
+ * ATH10K_FW_IE_HTT_OP_VERSION.
+ */
+ if (fw_file->htt_op_version == ATH10K_FW_HTT_OP_VERSION_UNSET) {
+ switch (fw_file->wmi_op_version) {
+ case ATH10K_FW_WMI_OP_VERSION_MAIN:
+ fw_file->htt_op_version = ATH10K_FW_HTT_OP_VERSION_MAIN;
+ break;
+ case ATH10K_FW_WMI_OP_VERSION_10_1:
+ case ATH10K_FW_WMI_OP_VERSION_10_2:
+ case ATH10K_FW_WMI_OP_VERSION_10_2_4:
+ fw_file->htt_op_version = ATH10K_FW_HTT_OP_VERSION_10_1;
+ break;
+ case ATH10K_FW_WMI_OP_VERSION_TLV:
+ fw_file->htt_op_version = ATH10K_FW_HTT_OP_VERSION_TLV;
+ break;
+ case ATH10K_FW_WMI_OP_VERSION_10_4:
+ case ATH10K_FW_WMI_OP_VERSION_UNSET:
+ case ATH10K_FW_WMI_OP_VERSION_MAX:
+ ath10k_err(ar, "htt op version not found from fw meta data");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int ath10k_core_reset_rx_filter(struct ath10k *ar)
+{
+ int ret;
+ int vdev_id;
+ int vdev_type;
+ int vdev_subtype;
+ const u8 *vdev_addr;
+
+ vdev_id = 0;
+ vdev_type = WMI_VDEV_TYPE_STA;
+ vdev_subtype = ath10k_wmi_get_vdev_subtype(ar, WMI_VDEV_SUBTYPE_NONE);
+ vdev_addr = ar->mac_addr;
+
+ ret = ath10k_wmi_vdev_create(ar, vdev_id, vdev_type, vdev_subtype,
+ vdev_addr);
+ if (ret) {
+ ath10k_err(ar, "failed to create dummy vdev: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath10k_wmi_vdev_delete(ar, vdev_id);
+ if (ret) {
+ ath10k_err(ar, "failed to delete dummy vdev: %d\n", ret);
+ return ret;
+ }
+
+ /* WMI and HTT may use separate HIF pipes and are not guaranteed to be
+ * serialized properly implicitly.
+ *
+ * Moreover, (most) WMI commands have no explicit acknowledgements. It
+ * is possible to infer one implicitly by poking the firmware with an
+ * echo command - getting a reply means all preceding commands have
+ * been (mostly) processed.
+ *
+ * In case of vdev create/delete this is sufficient.
+ *
+ * Without this it's possible to end up with a race when the HTT Rx ring
+ * is started before the vdev create/delete hack is complete, allowing a
+ * short window of opportunity to receive (and Tx ACK) a bunch of frames.
+ */
+ ret = ath10k_wmi_barrier(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to ping firmware: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath10k_core_compat_services(struct ath10k *ar)
+{
+ struct ath10k_fw_file *fw_file = &ar->normal_mode_fw.fw_file;
+
+ /* all 10.x firmware versions support thermal throttling but don't
+ * advertise the support via service flags so we have to hardcode
+ * it here
+ */
+ switch (fw_file->wmi_op_version) {
+ case ATH10K_FW_WMI_OP_VERSION_10_1:
+ case ATH10K_FW_WMI_OP_VERSION_10_2:
+ case ATH10K_FW_WMI_OP_VERSION_10_2_4:
+ case ATH10K_FW_WMI_OP_VERSION_10_4:
+ set_bit(WMI_SERVICE_THERM_THROT, ar->wmi.svc_map);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+#define TGT_IRAM_READ_PER_ITR (8 * 1024)
+
+static int ath10k_core_copy_target_iram(struct ath10k *ar)
+{
+ const struct ath10k_hw_mem_layout *hw_mem;
+ const struct ath10k_mem_region *tmp, *mem_region = NULL;
+ dma_addr_t paddr;
+ void *vaddr = NULL;
+ u8 num_read_itr;
+ int i, ret;
+ u32 len, remaining_len;
+
+ /* the copy-target-iram feature must also work when
+ * ATH10K_FW_CRASH_DUMP_RAM_DATA is disabled, so use
+ * _ath10k_coredump_get_mem_layout() to accomplish that
+ */
+ hw_mem = _ath10k_coredump_get_mem_layout(ar);
+ if (!hw_mem)
+ /* if CONFIG_DEV_COREDUMP is disabled we get NULL, then
+ * just silently disable the feature by doing nothing
+ */
+ return 0;
+
+ for (i = 0; i < hw_mem->region_table.size; i++) {
+ tmp = &hw_mem->region_table.regions[i];
+ if (tmp->type == ATH10K_MEM_REGION_TYPE_REG) {
+ mem_region = tmp;
+ break;
+ }
+ }
+
+ if (!mem_region)
+ return -ENOMEM;
+
+ for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
+ if (ar->wmi.mem_chunks[i].req_id ==
+ WMI_IRAM_RECOVERY_HOST_MEM_REQ_ID) {
+ vaddr = ar->wmi.mem_chunks[i].vaddr;
+ len = ar->wmi.mem_chunks[i].len;
+ break;
+ }
+ }
+
+ if (!vaddr || !len) {
+ ath10k_warn(ar, "No allocated memory for IRAM back up");
+ return -ENOMEM;
+ }
+
+ len = (len < mem_region->len) ? len : mem_region->len;
+ paddr = mem_region->start;
+ num_read_itr = len / TGT_IRAM_READ_PER_ITR;
+ remaining_len = len % TGT_IRAM_READ_PER_ITR;
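+	/* copy the IRAM region in 8 KiB chunks via diag reads, then the tail remainder */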
+ for (i = 0; i < num_read_itr; i++) {
+ ret = ath10k_hif_diag_read(ar, paddr, vaddr,
+ TGT_IRAM_READ_PER_ITR);
+ if (ret) {
+ ath10k_warn(ar, "failed to copy firmware IRAM contents: %d",
+ ret);
+ return ret;
+ }
+
+ paddr += TGT_IRAM_READ_PER_ITR;
+ vaddr += TGT_IRAM_READ_PER_ITR;
+ }
+
+ if (remaining_len) {
+ ret = ath10k_hif_diag_read(ar, paddr, vaddr, remaining_len);
+ if (ret) {
+ ath10k_warn(ar, "failed to copy firmware IRAM contents: %d",
+ ret);
+ return ret;
+ }
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "target IRAM back up completed\n");
+
+ return 0;
+}
+
+int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
+ const struct ath10k_fw_components *fw)
+{
+ int status;
+ u32 val;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ clear_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags);
+
+ ar->running_fw = fw;
+
+ if (!test_bit(ATH10K_FW_FEATURE_NON_BMI,
+ ar->running_fw->fw_file.fw_features)) {
+ ath10k_bmi_start(ar);
+
+ /* Enable hardware clock to speed up firmware download */
+ if (ar->hw_params.hw_ops->enable_pll_clk) {
+ status = ar->hw_params.hw_ops->enable_pll_clk(ar);
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot enable pll ret %d\n",
+ status);
+ }
+
+ if (ath10k_init_configure_target(ar)) {
+ status = -EINVAL;
+ goto err;
+ }
+
+ status = ath10k_download_cal_data(ar);
+ if (status)
+ goto err;
+
+ /* Some QCA988x solutions have a global reset issue during
+ * target initialization. Bypassing the PLL setting before
+ * downloading the firmware and letting the SoC run on REF_CLK
+ * fixes the problem. A corresponding firmware change is also
+ * needed to set the clock source once the target is
+ * initialized.
+ */
+ if (test_bit(ATH10K_FW_FEATURE_SUPPORTS_SKIP_CLOCK_INIT,
+ ar->running_fw->fw_file.fw_features)) {
+ status = ath10k_bmi_write32(ar, hi_skip_clock_init, 1);
+ if (status) {
+ ath10k_err(ar, "could not write to skip_clock_init: %d\n",
+ status);
+ goto err;
+ }
+ }
+
+ status = ath10k_download_fw(ar);
+ if (status)
+ goto err;
+
+ status = ath10k_init_uart(ar);
+ if (status)
+ goto err;
+
+ if (ar->hif.bus == ATH10K_BUS_SDIO) {
+ status = ath10k_init_sdio(ar, mode);
+ if (status) {
+ ath10k_err(ar, "failed to init SDIO: %d\n", status);
+ goto err;
+ }
+ }
+ }
+
+ ar->htc.htc_ops.target_send_suspend_complete =
+ ath10k_send_suspend_complete;
+
+ status = ath10k_htc_init(ar);
+ if (status) {
+ ath10k_err(ar, "could not init HTC (%d)\n", status);
+ goto err;
+ }
+
+ if (!test_bit(ATH10K_FW_FEATURE_NON_BMI,
+ ar->running_fw->fw_file.fw_features)) {
+ status = ath10k_bmi_done(ar);
+ if (status)
+ goto err;
+ }
+
+ status = ath10k_wmi_attach(ar);
+ if (status) {
+ ath10k_err(ar, "WMI attach failed: %d\n", status);
+ goto err;
+ }
+
+ status = ath10k_htt_init(ar);
+ if (status) {
+ ath10k_err(ar, "failed to init htt: %d\n", status);
+ goto err_wmi_detach;
+ }
+
+ status = ath10k_htt_tx_start(&ar->htt);
+ if (status) {
+ ath10k_err(ar, "failed to alloc htt tx: %d\n", status);
+ goto err_wmi_detach;
+ }
+
+ /* If the firmware indicates Full Rx Reorder support, it must be used in a
+ * slightly different manner. Let HTT code know.
+ */
+ ar->htt.rx_ring.in_ord_rx = !!(test_bit(WMI_SERVICE_RX_FULL_REORDER,
+ ar->wmi.svc_map));
+
+ status = ath10k_htt_rx_alloc(&ar->htt);
+ if (status) {
+ ath10k_err(ar, "failed to alloc htt rx: %d\n", status);
+ goto err_htt_tx_detach;
+ }
+
+ status = ath10k_hif_start(ar);
+ if (status) {
+ ath10k_err(ar, "could not start HIF: %d\n", status);
+ goto err_htt_rx_detach;
+ }
+
+ status = ath10k_htc_wait_target(&ar->htc);
+ if (status) {
+ ath10k_err(ar, "failed to connect to HTC: %d\n", status);
+ goto err_hif_stop;
+ }
+
+ status = ath10k_hif_start_post(ar);
+ if (status) {
+ ath10k_err(ar, "failed to swap mailbox: %d\n", status);
+ goto err_hif_stop;
+ }
+
+ if (mode == ATH10K_FIRMWARE_MODE_NORMAL) {
+ status = ath10k_htt_connect(&ar->htt);
+ if (status) {
+ ath10k_err(ar, "failed to connect htt (%d)\n", status);
+ goto err_hif_stop;
+ }
+ }
+
+ status = ath10k_wmi_connect(ar);
+ if (status) {
+ ath10k_err(ar, "could not connect wmi: %d\n", status);
+ goto err_hif_stop;
+ }
+
+ status = ath10k_htc_start(&ar->htc);
+ if (status) {
+ ath10k_err(ar, "failed to start htc: %d\n", status);
+ goto err_hif_stop;
+ }
+
+ if (mode == ATH10K_FIRMWARE_MODE_NORMAL) {
+ status = ath10k_wmi_wait_for_service_ready(ar);
+ if (status) {
+ ath10k_warn(ar, "wmi service ready event not received");
+ goto err_hif_stop;
+ }
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "firmware %s booted\n",
+ ar->hw->wiphy->fw_version);
+
+ if (test_bit(ATH10K_FW_FEATURE_IRAM_RECOVERY,
+ ar->running_fw->fw_file.fw_features)) {
+ status = ath10k_core_copy_target_iram(ar);
+ if (status) {
+ ath10k_warn(ar, "failed to copy target iram contents: %d",
+ status);
+ goto err_hif_stop;
+ }
+ }
+
+ if (test_bit(WMI_SERVICE_EXT_RES_CFG_SUPPORT, ar->wmi.svc_map) &&
+ mode == ATH10K_FIRMWARE_MODE_NORMAL) {
+ val = 0;
+ if (ath10k_peer_stats_enabled(ar))
+ val = WMI_10_4_PEER_STATS;
+
+ /* Enable vdev stats by default */
+ val |= WMI_10_4_VDEV_STATS;
+
+ if (test_bit(WMI_SERVICE_BSS_CHANNEL_INFO_64, ar->wmi.svc_map))
+ val |= WMI_10_4_BSS_CHANNEL_INFO_64;
+
+ ath10k_core_fetch_btcoex_dt(ar);
+
+ /* 10.4 firmware supports BT-Coex without reloading firmware
+ * via pdev param. To support the Bluetooth coexistence pdev
+ * param, WMI_COEX_GPIO_SUPPORT of the extended resource config
+ * should always be enabled.
+ *
+ * We can still enable BTCOEX if the firmware has the support,
+ * even though the btcoex_support value is
+ * ATH10K_DT_BTCOEX_NOT_FOUND.
+ */
+
+ if (test_bit(WMI_SERVICE_COEX_GPIO, ar->wmi.svc_map) &&
+ test_bit(ATH10K_FW_FEATURE_BTCOEX_PARAM,
+ ar->running_fw->fw_file.fw_features) &&
+ ar->coex_support)
+ val |= WMI_10_4_COEX_GPIO_SUPPORT;
+
+ if (test_bit(WMI_SERVICE_TDLS_EXPLICIT_MODE_ONLY,
+ ar->wmi.svc_map))
+ val |= WMI_10_4_TDLS_EXPLICIT_MODE_ONLY;
+
+ if (test_bit(WMI_SERVICE_TDLS_UAPSD_BUFFER_STA,
+ ar->wmi.svc_map))
+ val |= WMI_10_4_TDLS_UAPSD_BUFFER_STA;
+
+ if (test_bit(WMI_SERVICE_TX_DATA_ACK_RSSI,
+ ar->wmi.svc_map))
+ val |= WMI_10_4_TX_DATA_ACK_RSSI;
+
+ if (test_bit(WMI_SERVICE_REPORT_AIRTIME, ar->wmi.svc_map))
+ val |= WMI_10_4_REPORT_AIRTIME;
+
+ if (test_bit(WMI_SERVICE_EXT_PEER_TID_CONFIGS_SUPPORT,
+ ar->wmi.svc_map))
+ val |= WMI_10_4_EXT_PEER_TID_CONFIGS_SUPPORT;
+
+ status = ath10k_mac_ext_resource_config(ar, val);
+ if (status) {
+ ath10k_err(ar,
+ "failed to send ext resource cfg command : %d\n",
+ status);
+ goto err_hif_stop;
+ }
+ }
+
+ status = ath10k_wmi_cmd_init(ar);
+ if (status) {
+ ath10k_err(ar, "could not send WMI init command (%d)\n",
+ status);
+ goto err_hif_stop;
+ }
+
+ status = ath10k_wmi_wait_for_unified_ready(ar);
+ if (status) {
+ ath10k_err(ar, "wmi unified ready event not received\n");
+ goto err_hif_stop;
+ }
+
+ status = ath10k_core_compat_services(ar);
+ if (status) {
+ ath10k_err(ar, "compat services failed: %d\n", status);
+ goto err_hif_stop;
+ }
+
+ status = ath10k_wmi_pdev_set_base_macaddr(ar, ar->mac_addr);
+ if (status && status != -EOPNOTSUPP) {
+ ath10k_err(ar,
+ "failed to set base mac address: %d\n", status);
+ goto err_hif_stop;
+ }
+
+ /* Some firmware revisions do not properly set up hardware rx filter
+ * registers.
+ *
+ * A known example from QCA9880 and 10.2.4 is that MAC_PCU_ADDR1_MASK
+ * is filled with 0s instead of 1s, allowing HW to respond with ACKs
+ * to any frame that matches MAC_PCU_RX_FILTER, which is also
+ * misconfigured to accept anything.
+ *
+ * The ADDR1 is programmed using an internal firmware structure field
+ * and can't be (easily/sanely) reached from the driver explicitly. It
+ * is possible to implicitly make it correct by creating a dummy vdev
+ * and then deleting it.
+ */
+ if (ar->hw_params.hw_filter_reset_required &&
+ mode == ATH10K_FIRMWARE_MODE_NORMAL) {
+ status = ath10k_core_reset_rx_filter(ar);
+ if (status) {
+ ath10k_err(ar,
+ "failed to reset rx filter: %d\n", status);
+ goto err_hif_stop;
+ }
+ }
+
+ status = ath10k_htt_rx_ring_refill(ar);
+ if (status) {
+ ath10k_err(ar, "failed to refill htt rx ring: %d\n", status);
+ goto err_hif_stop;
+ }
+
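+	/* free_vdev_map is a bitmap with one bit per available vdev id */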
+ if (ar->max_num_vdevs >= 64)
+ ar->free_vdev_map = 0xFFFFFFFFFFFFFFFFLL;
+ else
+ ar->free_vdev_map = (1LL << ar->max_num_vdevs) - 1;
+
+ INIT_LIST_HEAD(&ar->arvifs);
+
+ /* we don't care about HTT in UTF mode */
+ if (mode == ATH10K_FIRMWARE_MODE_NORMAL) {
+ status = ath10k_htt_setup(&ar->htt);
+ if (status) {
+ ath10k_err(ar, "failed to setup htt: %d\n", status);
+ goto err_hif_stop;
+ }
+ }
+
+ status = ath10k_debug_start(ar);
+ if (status)
+ goto err_hif_stop;
+
+ status = ath10k_hif_set_target_log_mode(ar, fw_diag_log);
+ if (status && status != -EOPNOTSUPP) {
+ ath10k_warn(ar, "set target log mode failed: %d\n", status);
+ goto err_hif_stop;
+ }
+
+ status = ath10k_leds_start(ar);
+ if (status)
+ goto err_hif_stop;
+
+ return 0;
+
+err_hif_stop:
+ ath10k_hif_stop(ar);
+err_htt_rx_detach:
+ ath10k_htt_rx_free(&ar->htt);
+err_htt_tx_detach:
+ ath10k_htt_tx_free(&ar->htt);
+err_wmi_detach:
+ ath10k_wmi_detach(ar);
+err:
+ return status;
+}
+EXPORT_SYMBOL(ath10k_core_start);
+
+int ath10k_wait_for_suspend(struct ath10k *ar, u32 suspend_opt)
+{
+ int ret;
+ unsigned long time_left;
+
+ reinit_completion(&ar->target_suspend);
+
+ ret = ath10k_wmi_pdev_suspend_target(ar, suspend_opt);
+ if (ret) {
+ ath10k_warn(ar, "could not suspend target (%d)\n", ret);
+ return ret;
+ }
+
+ time_left = wait_for_completion_timeout(&ar->target_suspend, 1 * HZ);
+
+ if (!time_left) {
+ ath10k_warn(ar, "suspend timed out - target pause event never came\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+void ath10k_core_stop(struct ath10k *ar)
+{
+ lockdep_assert_held(&ar->conf_mutex);
+ ath10k_debug_stop(ar);
+
+ /* try to suspend target */
+ if (ar->state != ATH10K_STATE_RESTARTING &&
+ ar->state != ATH10K_STATE_UTF)
+ ath10k_wait_for_suspend(ar, WMI_PDEV_SUSPEND_AND_DISABLE_INTR);
+
+ ath10k_hif_stop(ar);
+ ath10k_htt_tx_stop(&ar->htt);
+ ath10k_htt_rx_free(&ar->htt);
+ ath10k_wmi_detach(ar);
+
+ ar->id.bmi_ids_valid = false;
+}
+EXPORT_SYMBOL(ath10k_core_stop);
+
+/* mac80211 manages fw/hw initialization through start/stop hooks. However, in
+ * order to know what hw capabilities should be advertised to mac80211, it is
+ * necessary to load the firmware (and tear it down immediately, since the
+ * start hook will try to init it again) before registering.
+ */
+static int ath10k_core_probe_fw(struct ath10k *ar)
+{
+ struct bmi_target_info target_info = {};
+ int ret = 0;
+
+ ret = ath10k_hif_power_up(ar, ATH10K_FIRMWARE_MODE_NORMAL);
+ if (ret) {
+ ath10k_err(ar, "could not power on hif bus (%d)\n", ret);
+ return ret;
+ }
+
+ switch (ar->hif.bus) {
+ case ATH10K_BUS_SDIO:
+ ret = ath10k_bmi_get_target_info_sdio(ar, &target_info);
+ if (ret) {
+ ath10k_err(ar, "could not get target info (%d)\n", ret);
+ goto err_power_down;
+ }
+ ar->target_version = target_info.version;
+ ar->hw->wiphy->hw_version = target_info.version;
+ break;
+ case ATH10K_BUS_PCI:
+ case ATH10K_BUS_AHB:
+ case ATH10K_BUS_USB:
+ ret = ath10k_bmi_get_target_info(ar, &target_info);
+ if (ret) {
+ ath10k_err(ar, "could not get target info (%d)\n", ret);
+ goto err_power_down;
+ }
+ ar->target_version = target_info.version;
+ ar->hw->wiphy->hw_version = target_info.version;
+ break;
+ case ATH10K_BUS_SNOC:
+ ret = ath10k_hif_get_target_info(ar, &target_info);
+ if (ret) {
+ ath10k_err(ar, "could not get target info (%d)\n", ret);
+ goto err_power_down;
+ }
+ ar->target_version = target_info.version;
+ ar->hw->wiphy->hw_version = target_info.version;
+ break;
+ default:
+ ath10k_err(ar, "incorrect hif bus type: %d\n", ar->hif.bus);
+ }
+
+ ret = ath10k_init_hw_params(ar);
+ if (ret) {
+ ath10k_err(ar, "could not get hw params (%d)\n", ret);
+ goto err_power_down;
+ }
+
+ ret = ath10k_core_fetch_firmware_files(ar);
+ if (ret) {
+ ath10k_err(ar, "could not fetch firmware files (%d)\n", ret);
+ goto err_power_down;
+ }
+
+ BUILD_BUG_ON(sizeof(ar->hw->wiphy->fw_version) !=
+ sizeof(ar->normal_mode_fw.fw_file.fw_version));
+ memcpy(ar->hw->wiphy->fw_version, ar->normal_mode_fw.fw_file.fw_version,
+ sizeof(ar->hw->wiphy->fw_version));
+
+ ath10k_debug_print_hwfw_info(ar);
+
+ if (!test_bit(ATH10K_FW_FEATURE_NON_BMI,
+ ar->normal_mode_fw.fw_file.fw_features)) {
+ ret = ath10k_core_pre_cal_download(ar);
+ if (ret) {
+ /* pre calibration data download is not necessary
+ * for all the chipsets. Ignore failures and continue.
+ */
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "could not load pre cal data: %d\n", ret);
+ }
+
+ ret = ath10k_core_get_board_id_from_otp(ar);
+ if (ret && ret != -EOPNOTSUPP) {
+ ath10k_err(ar, "failed to get board id from otp: %d\n",
+ ret);
+ goto err_free_firmware_files;
+ }
+
+ ret = ath10k_core_check_smbios(ar);
+ if (ret)
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "SMBIOS bdf variant name not set.\n");
+
+ ret = ath10k_core_check_dt(ar);
+ if (ret)
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "DT bdf variant name not set.\n");
+
+ ret = ath10k_core_fetch_board_file(ar, ATH10K_BD_IE_BOARD);
+ if (ret) {
+ ath10k_err(ar, "failed to fetch board file: %d\n", ret);
+ goto err_free_firmware_files;
+ }
+
+ ath10k_debug_print_board_info(ar);
+ }
+
+ device_get_mac_address(ar->dev, ar->mac_addr);
+
+ ret = ath10k_core_init_firmware_features(ar);
+ if (ret) {
+ ath10k_err(ar, "fatal problem with firmware features: %d\n",
+ ret);
+ goto err_free_firmware_files;
+ }
+
+ if (!test_bit(ATH10K_FW_FEATURE_NON_BMI,
+ ar->normal_mode_fw.fw_file.fw_features)) {
+ ret = ath10k_swap_code_seg_init(ar,
+ &ar->normal_mode_fw.fw_file);
+ if (ret) {
+ ath10k_err(ar, "failed to initialize code swap segment: %d\n",
+ ret);
+ goto err_free_firmware_files;
+ }
+ }
+
+ mutex_lock(&ar->conf_mutex);
+
+ ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_NORMAL,
+ &ar->normal_mode_fw);
+ if (ret) {
+ ath10k_err(ar, "could not init core (%d)\n", ret);
+ goto err_unlock;
+ }
+
+ ath10k_debug_print_boot_info(ar);
+ ath10k_core_stop(ar);
+
+ mutex_unlock(&ar->conf_mutex);
+
+ ath10k_hif_power_down(ar);
+ return 0;
+
+err_unlock:
+ mutex_unlock(&ar->conf_mutex);
+
+err_free_firmware_files:
+ ath10k_core_free_firmware_files(ar);
+
+err_power_down:
+ ath10k_hif_power_down(ar);
+
+ return ret;
+}
+
+static void ath10k_core_register_work(struct work_struct *work)
+{
+ struct ath10k *ar = container_of(work, struct ath10k, register_work);
+ int status;
+
+ /* peer stats are enabled by default */
+ set_bit(ATH10K_FLAG_PEER_STATS, &ar->dev_flags);
+
+ status = ath10k_core_probe_fw(ar);
+ if (status) {
+ ath10k_err(ar, "could not probe fw (%d)\n", status);
+ goto err;
+ }
+
+ status = ath10k_mac_register(ar);
+ if (status) {
+ ath10k_err(ar, "could not register to mac80211 (%d)\n", status);
+ goto err_release_fw;
+ }
+
+ status = ath10k_coredump_register(ar);
+ if (status) {
+ ath10k_err(ar, "unable to register coredump\n");
+ goto err_unregister_mac;
+ }
+
+ status = ath10k_debug_register(ar);
+ if (status) {
+ ath10k_err(ar, "unable to initialize debugfs\n");
+ goto err_unregister_coredump;
+ }
+
+ status = ath10k_spectral_create(ar);
+ if (status) {
+ ath10k_err(ar, "failed to initialize spectral\n");
+ goto err_debug_destroy;
+ }
+
+ status = ath10k_thermal_register(ar);
+ if (status) {
+ ath10k_err(ar, "could not register thermal device: %d\n",
+ status);
+ goto err_spectral_destroy;
+ }
+
+ status = ath10k_leds_register(ar);
+ if (status) {
+ ath10k_err(ar, "could not register leds: %d\n",
+ status);
+ goto err_thermal_unregister;
+ }
+
+ set_bit(ATH10K_FLAG_CORE_REGISTERED, &ar->dev_flags);
+ return;
+
+err_thermal_unregister:
+ ath10k_thermal_unregister(ar);
+err_spectral_destroy:
+ ath10k_spectral_destroy(ar);
+err_debug_destroy:
+ ath10k_debug_destroy(ar);
+err_unregister_coredump:
+ ath10k_coredump_unregister(ar);
+err_unregister_mac:
+ ath10k_mac_unregister(ar);
+err_release_fw:
+ ath10k_core_free_firmware_files(ar);
+err:
+ /* TODO: It's probably a good idea to release device from the driver
+ * but calling device_release_driver() here will cause a deadlock.
+ */
+ return;
+}
+
+int ath10k_core_register(struct ath10k *ar,
+ const struct ath10k_bus_params *bus_params)
+{
+ ar->bus_param = *bus_params;
+
+ queue_work(ar->workqueue, &ar->register_work);
+
+ return 0;
+}
+EXPORT_SYMBOL(ath10k_core_register);
+
+void ath10k_core_unregister(struct ath10k *ar)
+{
+ cancel_work_sync(&ar->register_work);
+
+ if (!test_bit(ATH10K_FLAG_CORE_REGISTERED, &ar->dev_flags))
+ return;
+
+ ath10k_leds_unregister(ar);
+
+ ath10k_thermal_unregister(ar);
+ /* Stop spectral before unregistering from mac80211 to remove the
+ * relayfs debugfs file cleanly. Otherwise the parent debugfs tree
+ * would already have been freed recursively, leading to a double free.
+ */
+ ath10k_spectral_destroy(ar);
+
+ /* We must unregister from mac80211 before we stop HTC and HIF.
+ * Otherwise we will fail to submit commands to FW and mac80211 will be
+ * unhappy about callback failures.
+ */
+ ath10k_mac_unregister(ar);
+
+ ath10k_testmode_destroy(ar);
+
+ ath10k_core_free_firmware_files(ar);
+ ath10k_core_free_board_files(ar);
+
+ ath10k_debug_unregister(ar);
+}
+EXPORT_SYMBOL(ath10k_core_unregister);
+
+struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
+ enum ath10k_bus bus,
+ enum ath10k_hw_rev hw_rev,
+ const struct ath10k_hif_ops *hif_ops)
+{
+ struct ath10k *ar;
+ int ret;
+
+ ar = ath10k_mac_create(priv_size);
+ if (!ar)
+ return NULL;
+
+ ar->ath_common.priv = ar;
+ ar->ath_common.hw = ar->hw;
+ ar->dev = dev;
+ ar->hw_rev = hw_rev;
+ ar->hif.ops = hif_ops;
+ ar->hif.bus = bus;
+
+ switch (hw_rev) {
+ case ATH10K_HW_QCA988X:
+ case ATH10K_HW_QCA9887:
+ ar->regs = &qca988x_regs;
+ ar->hw_ce_regs = &qcax_ce_regs;
+ ar->hw_values = &qca988x_values;
+ break;
+ case ATH10K_HW_QCA6174:
+ case ATH10K_HW_QCA9377:
+ ar->regs = &qca6174_regs;
+ ar->hw_ce_regs = &qcax_ce_regs;
+ ar->hw_values = &qca6174_values;
+ break;
+ case ATH10K_HW_QCA99X0:
+ case ATH10K_HW_QCA9984:
+ ar->regs = &qca99x0_regs;
+ ar->hw_ce_regs = &qcax_ce_regs;
+ ar->hw_values = &qca99x0_values;
+ break;
+ case ATH10K_HW_QCA9888:
+ ar->regs = &qca99x0_regs;
+ ar->hw_ce_regs = &qcax_ce_regs;
+ ar->hw_values = &qca9888_values;
+ break;
+ case ATH10K_HW_QCA4019:
+ ar->regs = &qca4019_regs;
+ ar->hw_ce_regs = &qcax_ce_regs;
+ ar->hw_values = &qca4019_values;
+ break;
+ case ATH10K_HW_WCN3990:
+ ar->regs = &wcn3990_regs;
+ ar->hw_ce_regs = &wcn3990_ce_regs;
+ ar->hw_values = &wcn3990_values;
+ break;
+ default:
+ ath10k_err(ar, "unsupported core hardware revision %d\n",
+ hw_rev);
+ ret = -EOPNOTSUPP;
+ goto err_free_mac;
+ }
+
+ init_completion(&ar->scan.started);
+ init_completion(&ar->scan.completed);
+ init_completion(&ar->scan.on_channel);
+ init_completion(&ar->target_suspend);
+ init_completion(&ar->driver_recovery);
+ init_completion(&ar->wow.wakeup_completed);
+
+ init_completion(&ar->install_key_done);
+ init_completion(&ar->vdev_setup_done);
+ init_completion(&ar->vdev_delete_done);
+ init_completion(&ar->thermal.wmi_sync);
+ init_completion(&ar->bss_survey_done);
+ init_completion(&ar->peer_delete_done);
+ init_completion(&ar->peer_stats_info_complete);
+
+ INIT_DELAYED_WORK(&ar->scan.timeout, ath10k_scan_timeout_work);
+
+ ar->workqueue = create_singlethread_workqueue("ath10k_wq");
+ if (!ar->workqueue)
+ goto err_free_mac;
+
+ ar->workqueue_aux = create_singlethread_workqueue("ath10k_aux_wq");
+ if (!ar->workqueue_aux)
+ goto err_free_wq;
+
+ ar->workqueue_tx_complete =
+ create_singlethread_workqueue("ath10k_tx_complete_wq");
+ if (!ar->workqueue_tx_complete)
+ goto err_free_aux_wq;
+
+ mutex_init(&ar->conf_mutex);
+ mutex_init(&ar->dump_mutex);
+ spin_lock_init(&ar->data_lock);
+
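+	/* one queue lock per WMM access category */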
+ for (int ac = 0; ac < IEEE80211_NUM_ACS; ac++)
+ spin_lock_init(&ar->queue_lock[ac]);
+
+ INIT_LIST_HEAD(&ar->peers);
+ init_waitqueue_head(&ar->peer_mapping_wq);
+ init_waitqueue_head(&ar->htt.empty_tx_wq);
+ init_waitqueue_head(&ar->wmi.tx_credits_wq);
+
+ skb_queue_head_init(&ar->htt.rx_indication_head);
+
+ init_completion(&ar->offchan_tx_completed);
+ INIT_WORK(&ar->offchan_tx_work, ath10k_offchan_tx_work);
+ skb_queue_head_init(&ar->offchan_tx_queue);
+
+ INIT_WORK(&ar->wmi_mgmt_tx_work, ath10k_mgmt_over_wmi_tx_work);
+ skb_queue_head_init(&ar->wmi_mgmt_tx_queue);
+
+ INIT_WORK(&ar->register_work, ath10k_core_register_work);
+ INIT_WORK(&ar->restart_work, ath10k_core_restart);
+ INIT_WORK(&ar->recovery_check_work, ath10k_core_recovery_check_work);
+ INIT_WORK(&ar->set_coverage_class_work,
+ ath10k_core_set_coverage_class_work);
+
+ ar->napi_dev = alloc_netdev_dummy(0);
+ if (!ar->napi_dev)
+ goto err_free_tx_complete;
+
+ ret = ath10k_coredump_create(ar);
+ if (ret)
+ goto err_free_netdev;
+
+ ret = ath10k_debug_create(ar);
+ if (ret)
+ goto err_free_coredump;
+
+ return ar;
+
+err_free_coredump:
+ ath10k_coredump_destroy(ar);
+err_free_netdev:
+ free_netdev(ar->napi_dev);
+err_free_tx_complete:
+ destroy_workqueue(ar->workqueue_tx_complete);
+err_free_aux_wq:
+ destroy_workqueue(ar->workqueue_aux);
+err_free_wq:
+ destroy_workqueue(ar->workqueue);
+err_free_mac:
+ ath10k_mac_destroy(ar);
+
+ return NULL;
+}
+EXPORT_SYMBOL(ath10k_core_create);
+
+void ath10k_core_destroy(struct ath10k *ar)
+{
+ destroy_workqueue(ar->workqueue);
+
+ destroy_workqueue(ar->workqueue_aux);
+
+ destroy_workqueue(ar->workqueue_tx_complete);
+
+ free_netdev(ar->napi_dev);
+ ath10k_debug_destroy(ar);
+ ath10k_coredump_destroy(ar);
+ ath10k_htt_tx_destroy(&ar->htt);
+ ath10k_wmi_free_host_mem(ar);
+ ath10k_mac_destroy(ar);
+}
+EXPORT_SYMBOL(ath10k_core_destroy);
+
+MODULE_AUTHOR("Qualcomm Atheros");
+MODULE_DESCRIPTION("Core module for Qualcomm Atheros 802.11ac wireless LAN cards.");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
new file mode 100644
index 000000000000..73a9db302245
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -0,0 +1,1368 @@
+/* SPDX-License-Identifier: ISC */
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#ifndef _CORE_H_
+#define _CORE_H_
+
+#include <linux/completion.h>
+#include <linux/if_ether.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/uuid.h>
+#include <linux/time.h>
+#include <linux/leds.h>
+
+#include "htt.h"
+#include "htc.h"
+#include "hw.h"
+#include "targaddrs.h"
+#include "wmi.h"
+#include "../ath.h"
+#include "../regd.h"
+#include "../dfs_pattern_detector.h"
+#include "spectral.h"
+#include "thermal.h"
+#include "wow.h"
+#include "swap.h"
+
+#define MS(_v, _f) (((_v) & _f##_MASK) >> _f##_LSB)
+#define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK)
+#define WO(_f) ((_f##_OFFSET) >> 2)
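+
+/*
+ * A usage sketch for the helpers above (the WLAN_FOO_* definitions below
+ * are hypothetical, shown only for illustration):
+ *
+ *   #define WLAN_FOO_LSB   8
+ *   #define WLAN_FOO_MASK  0x0000ff00
+ *
+ *   u32 foo  = MS(reg, WLAN_FOO);     extract bits 8..15 of reg
+ *   u32 word = SM(foo, WLAN_FOO);     shift foo back into field position
+ */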
+
+#define ATH10K_SCAN_ID 0
+#define ATH10K_SCAN_CHANNEL_SWITCH_WMI_EVT_OVERHEAD 10 /* msec */
+#define WMI_READY_TIMEOUT (5 * HZ)
+#define ATH10K_FLUSH_TIMEOUT_HZ (5 * HZ)
+#define ATH10K_CONNECTION_LOSS_HZ (3 * HZ)
+#define ATH10K_NUM_CHANS 41
+#define ATH10K_MAX_5G_CHAN 173
+
+/* Antenna noise floor */
+#define ATH10K_DEFAULT_NOISE_FLOOR -95
+
+#define ATH10K_INVALID_RSSI 128
+
+#define ATH10K_MAX_NUM_MGMT_PENDING 128
+
+/* number of failed packets (20 packets with 16 sw retries each) */
+#define ATH10K_KICKOUT_THRESHOLD (20 * 16)
+
+/*
+ * Use insanely high numbers to make sure that the firmware implementation
+ * won't start, we have the same functionality already in hostapd. Unit
+ * is seconds.
+ */
+#define ATH10K_KEEPALIVE_MIN_IDLE 3747
+#define ATH10K_KEEPALIVE_MAX_IDLE 3895
+#define ATH10K_KEEPALIVE_MAX_UNRESPONSIVE 3900
+
+/* SMBIOS type containing Board Data File Name Extension */
+#define ATH10K_SMBIOS_BDF_EXT_TYPE 0xF8
+
+/* SMBIOS type structure length (excluding strings-set) */
+#define ATH10K_SMBIOS_BDF_EXT_LENGTH 0x9
+
+/* Offset pointing to Board Data File Name Extension */
+#define ATH10K_SMBIOS_BDF_EXT_OFFSET 0x8
+
+/* Board Data File Name Extension string length.
+ * String format: BDF_<Customer ID>_<Extension>\0
+ */
+#define ATH10K_SMBIOS_BDF_EXT_STR_LENGTH 0x20
+
+/* The magic used by QCA spec */
+#define ATH10K_SMBIOS_BDF_EXT_MAGIC "BDF_"
+
+/* Default Airtime weight multiplier (Tuned for multiclient performance) */
+#define ATH10K_AIRTIME_WEIGHT_MULTIPLIER 4
+
+#define ATH10K_MAX_RETRY_COUNT 30
+
+#define ATH10K_ITER_NORMAL_FLAGS (IEEE80211_IFACE_ITER_NORMAL | \
+ IEEE80211_IFACE_SKIP_SDATA_NOT_IN_DRIVER)
+#define ATH10K_ITER_RESUME_FLAGS (IEEE80211_IFACE_ITER_RESUME_ALL |\
+ IEEE80211_IFACE_SKIP_SDATA_NOT_IN_DRIVER)
+#define ATH10K_RECOVERY_TIMEOUT_HZ (5 * HZ)
+#define ATH10K_RECOVERY_MAX_FAIL_COUNT 4
+
+struct ath10k;
+
+static inline const char *ath10k_bus_str(enum ath10k_bus bus)
+{
+ switch (bus) {
+ case ATH10K_BUS_PCI:
+ return "pci";
+ case ATH10K_BUS_AHB:
+ return "ahb";
+ case ATH10K_BUS_SDIO:
+ return "sdio";
+ case ATH10K_BUS_USB:
+ return "usb";
+ case ATH10K_BUS_SNOC:
+ return "snoc";
+ }
+
+ return "unknown";
+}
+
+enum ath10k_skb_flags {
+ ATH10K_SKB_F_NO_HWCRYPT = BIT(0),
+ ATH10K_SKB_F_DTIM_ZERO = BIT(1),
+ ATH10K_SKB_F_DELIVER_CAB = BIT(2),
+ ATH10K_SKB_F_MGMT = BIT(3),
+ ATH10K_SKB_F_QOS = BIT(4),
+ ATH10K_SKB_F_RAW_TX = BIT(5),
+ ATH10K_SKB_F_NOACK_TID = BIT(6),
+};
+
+struct ath10k_skb_cb {
+ dma_addr_t paddr;
+ u8 flags;
+ u8 eid;
+ u16 msdu_id;
+ u16 airtime_est;
+ struct ieee80211_vif *vif;
+ struct ieee80211_txq *txq;
+ u32 ucast_cipher;
+} __packed;
+
+struct ath10k_skb_rxcb {
+ dma_addr_t paddr;
+ struct hlist_node hlist;
+ u8 eid;
+};
+
+static inline struct ath10k_skb_cb *ATH10K_SKB_CB(struct sk_buff *skb)
+{
+ BUILD_BUG_ON(sizeof(struct ath10k_skb_cb) >
+ IEEE80211_TX_INFO_DRIVER_DATA_SIZE);
+ return (struct ath10k_skb_cb *)&IEEE80211_SKB_CB(skb)->driver_data;
+}
+
+static inline struct ath10k_skb_rxcb *ATH10K_SKB_RXCB(struct sk_buff *skb)
+{
+ BUILD_BUG_ON(sizeof(struct ath10k_skb_rxcb) > sizeof(skb->cb));
+ return (struct ath10k_skb_rxcb *)skb->cb;
+}
+
+#define ATH10K_RXCB_SKB(rxcb) \
+ container_of((void *)rxcb, struct sk_buff, cb)
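+
+/*
+ * Round-trip sketch: the rx control block lives inside skb->cb, so
+ * container_of() on the cb member recovers the enclosing sk_buff:
+ *
+ *   struct ath10k_skb_rxcb *rxcb = ATH10K_SKB_RXCB(skb);
+ *   struct sk_buff *same_skb = ATH10K_RXCB_SKB(rxcb);
+ */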
+
+static inline u32 host_interest_item_address(u32 item_offset)
+{
+ return QCA988X_HOST_INTEREST_ADDRESS + item_offset;
+}
+
+enum ath10k_phy_mode {
+ ATH10K_PHY_MODE_LEGACY = 0,
+ ATH10K_PHY_MODE_HT = 1,
+ ATH10K_PHY_MODE_VHT = 2,
+};
+
+/* Data rate 100KBPS based on IE Index */
+struct ath10k_index_ht_data_rate_type {
+ u8 beacon_rate_index;
+ u16 supported_rate[4];
+};
+
+/* Data rate 100KBPS based on IE Index */
+struct ath10k_index_vht_data_rate_type {
+ u8 beacon_rate_index;
+ u16 supported_VHT80_rate[2];
+ u16 supported_VHT40_rate[2];
+ u16 supported_VHT20_rate[2];
+};
+
+struct ath10k_bmi {
+ bool done_sent;
+};
+
+struct ath10k_mem_chunk {
+ void *vaddr;
+ dma_addr_t paddr;
+ u32 len;
+ u32 req_id;
+};
+
+struct ath10k_wmi {
+ enum ath10k_htc_ep_id eid;
+ struct completion service_ready;
+ struct completion unified_ready;
+ struct completion barrier;
+ struct completion radar_confirm;
+ wait_queue_head_t tx_credits_wq;
+ DECLARE_BITMAP(svc_map, WMI_SERVICE_MAX);
+ struct wmi_cmd_map *cmd;
+ struct wmi_vdev_param_map *vdev_param;
+ struct wmi_pdev_param_map *pdev_param;
+ struct wmi_peer_param_map *peer_param;
+ const struct wmi_ops *ops;
+ const struct wmi_peer_flags_map *peer_flags;
+
+ u32 mgmt_max_num_pending_tx;
+
+ /* Protected by data_lock */
+ struct idr mgmt_pending_tx;
+
+ u32 num_mem_chunks;
+ u32 rx_decap_mode;
+ struct ath10k_mem_chunk mem_chunks[WMI_MAX_MEM_REQS];
+};
+
+struct ath10k_fw_stats_peer {
+ struct list_head list;
+
+ u8 peer_macaddr[ETH_ALEN];
+ u32 peer_rssi;
+ u32 peer_tx_rate;
+ u32 peer_rx_rate; /* 10x only */
+ u64 rx_duration;
+};
+
+struct ath10k_fw_extd_stats_peer {
+ struct list_head list;
+
+ u8 peer_macaddr[ETH_ALEN];
+ u64 rx_duration;
+};
+
+struct ath10k_fw_stats_vdev {
+ struct list_head list;
+
+ u32 vdev_id;
+ u32 beacon_snr;
+ u32 data_snr;
+ u32 num_tx_frames[4];
+ u32 num_rx_frames;
+ u32 num_tx_frames_retries[4];
+ u32 num_tx_frames_failures[4];
+ u32 num_rts_fail;
+ u32 num_rts_success;
+ u32 num_rx_err;
+ u32 num_rx_discard;
+ u32 num_tx_not_acked;
+ u32 tx_rate_history[10];
+ u32 beacon_rssi_history[10];
+};
+
+struct ath10k_fw_stats_vdev_extd {
+ struct list_head list;
+
+ u32 vdev_id;
+ u32 ppdu_aggr_cnt;
+ u32 ppdu_noack;
+ u32 mpdu_queued;
+ u32 ppdu_nonaggr_cnt;
+ u32 mpdu_sw_requeued;
+ u32 mpdu_suc_retry;
+ u32 mpdu_suc_multitry;
+ u32 mpdu_fail_retry;
+ u32 tx_ftm_suc;
+ u32 tx_ftm_suc_retry;
+ u32 tx_ftm_fail;
+ u32 rx_ftmr_cnt;
+ u32 rx_ftmr_dup_cnt;
+ u32 rx_iftmr_cnt;
+ u32 rx_iftmr_dup_cnt;
+};
+
+struct ath10k_fw_stats_pdev {
+ struct list_head list;
+
+ /* PDEV stats */
+ s32 ch_noise_floor;
+ u32 tx_frame_count; /* Cycles spent transmitting frames */
+ u32 rx_frame_count; /* Cycles spent receiving frames */
+ u32 rx_clear_count; /* Total channel busy time, evidently */
+ u32 cycle_count; /* Total on-channel time */
+ u32 phy_err_count;
+ u32 chan_tx_power;
+ u32 ack_rx_bad;
+ u32 rts_bad;
+ u32 rts_good;
+ u32 fcs_bad;
+ u32 no_beacons;
+ u32 mib_int_count;
+
+ /* PDEV TX stats */
+ s32 comp_queued;
+ s32 comp_delivered;
+ s32 msdu_enqued;
+ s32 mpdu_enqued;
+ s32 wmm_drop;
+ s32 local_enqued;
+ s32 local_freed;
+ s32 hw_queued;
+ s32 hw_reaped;
+ s32 underrun;
+ u32 hw_paused;
+ s32 tx_abort;
+ s32 mpdus_requeued;
+ u32 tx_ko;
+ u32 data_rc;
+ u32 self_triggers;
+ u32 sw_retry_failure;
+ u32 illgl_rate_phy_err;
+ u32 pdev_cont_xretry;
+ u32 pdev_tx_timeout;
+ u32 pdev_resets;
+ u32 phy_underrun;
+ u32 txop_ovf;
+ u32 seq_posted;
+ u32 seq_failed_queueing;
+ u32 seq_completed;
+ u32 seq_restarted;
+ u32 mu_seq_posted;
+ u32 mpdus_sw_flush;
+ u32 mpdus_hw_filter;
+ u32 mpdus_truncated;
+ u32 mpdus_ack_failed;
+ u32 mpdus_expired;
+
+ /* PDEV RX stats */
+ s32 mid_ppdu_route_change;
+ s32 status_rcvd;
+ s32 r0_frags;
+ s32 r1_frags;
+ s32 r2_frags;
+ s32 r3_frags;
+ s32 htt_msdus;
+ s32 htt_mpdus;
+ s32 loc_msdus;
+ s32 loc_mpdus;
+ s32 oversize_amsdu;
+ s32 phy_errs;
+ s32 phy_err_drop;
+ s32 mpdu_errs;
+ s32 rx_ovfl_errs;
+};
+
+struct ath10k_fw_stats {
+ bool extended;
+ struct list_head pdevs;
+ struct list_head vdevs;
+ struct list_head peers;
+ struct list_head peers_extd;
+};
+
+#define ATH10K_TPC_TABLE_TYPE_FLAG 1
+#define ATH10K_TPC_PREAM_TABLE_END 0xFFFF
+
+struct ath10k_tpc_table {
+ u32 pream_idx[WMI_TPC_RATE_MAX];
+ u8 rate_code[WMI_TPC_RATE_MAX];
+ char tpc_value[WMI_TPC_RATE_MAX][WMI_TPC_TX_N_CHAIN * WMI_TPC_BUF_SIZE];
+};
+
+struct ath10k_tpc_stats {
+ u32 reg_domain;
+ u32 chan_freq;
+ u32 phy_mode;
+ u32 twice_antenna_reduction;
+ u32 twice_max_rd_power;
+ s32 twice_antenna_gain;
+ u32 power_limit;
+ u32 num_tx_chain;
+ u32 ctl;
+ u32 rate_max;
+ u8 flag[WMI_TPC_FLAG];
+ struct ath10k_tpc_table tpc_table[WMI_TPC_FLAG];
+};
+
+struct ath10k_tpc_table_final {
+ u32 pream_idx[WMI_TPC_FINAL_RATE_MAX];
+ u8 rate_code[WMI_TPC_FINAL_RATE_MAX];
+ char tpc_value[WMI_TPC_FINAL_RATE_MAX][WMI_TPC_TX_N_CHAIN * WMI_TPC_BUF_SIZE];
+};
+
+struct ath10k_tpc_stats_final {
+ u32 reg_domain;
+ u32 chan_freq;
+ u32 phy_mode;
+ u32 twice_antenna_reduction;
+ u32 twice_max_rd_power;
+ s32 twice_antenna_gain;
+ u32 power_limit;
+ u32 num_tx_chain;
+ u32 ctl;
+ u32 rate_max;
+ u8 flag[WMI_TPC_FLAG];
+ struct ath10k_tpc_table_final tpc_table_final[WMI_TPC_FLAG];
+};
+
+struct ath10k_dfs_stats {
+ u32 phy_errors;
+ u32 pulses_total;
+ u32 pulses_detected;
+ u32 pulses_discarded;
+ u32 radar_detected;
+};
+
+enum ath10k_radar_confirmation_state {
+ ATH10K_RADAR_CONFIRMATION_IDLE = 0,
+ ATH10K_RADAR_CONFIRMATION_INPROGRESS,
+ ATH10K_RADAR_CONFIRMATION_STOPPED,
+};
+
+struct ath10k_radar_found_info {
+ u32 pri_min;
+ u32 pri_max;
+ u32 width_min;
+ u32 width_max;
+ u32 sidx_min;
+ u32 sidx_max;
+};
+
+#define ATH10K_MAX_NUM_PEER_IDS (1 << 11) /* htt rx_desc limit */
+
+struct ath10k_peer {
+ struct list_head list;
+ struct ieee80211_vif *vif;
+ struct ieee80211_sta *sta;
+
+ bool removed;
+ int vdev_id;
+ u8 addr[ETH_ALEN];
+ DECLARE_BITMAP(peer_ids, ATH10K_MAX_NUM_PEER_IDS);
+
+ /* protected by ar->data_lock */
+ struct ieee80211_key_conf *keys[WMI_MAX_KEY_INDEX + 1];
+ union htt_rx_pn_t tids_last_pn[ATH10K_TXRX_NUM_EXT_TIDS];
+ bool tids_last_pn_valid[ATH10K_TXRX_NUM_EXT_TIDS];
+ union htt_rx_pn_t frag_tids_last_pn[ATH10K_TXRX_NUM_EXT_TIDS];
+ u32 frag_tids_seq[ATH10K_TXRX_NUM_EXT_TIDS];
+ struct {
+ enum htt_security_types sec_type;
+ int pn_len;
+ } rx_pn[ATH10K_HTT_TXRX_PEER_SECURITY_MAX];
+};
+
+struct ath10k_txq {
+ struct list_head list;
+ unsigned long num_fw_queued;
+ unsigned long num_push_allowed;
+};
+
+enum ath10k_pkt_rx_err {
+ ATH10K_PKT_RX_ERR_FCS,
+ ATH10K_PKT_RX_ERR_TKIP,
+ ATH10K_PKT_RX_ERR_CRYPT,
+ ATH10K_PKT_RX_ERR_PEER_IDX_INVAL,
+ ATH10K_PKT_RX_ERR_MAX,
+};
+
+enum ath10k_ampdu_subfrm_num {
+ ATH10K_AMPDU_SUBFRM_NUM_10,
+ ATH10K_AMPDU_SUBFRM_NUM_20,
+ ATH10K_AMPDU_SUBFRM_NUM_30,
+ ATH10K_AMPDU_SUBFRM_NUM_40,
+ ATH10K_AMPDU_SUBFRM_NUM_50,
+ ATH10K_AMPDU_SUBFRM_NUM_60,
+ ATH10K_AMPDU_SUBFRM_NUM_MORE,
+ ATH10K_AMPDU_SUBFRM_NUM_MAX,
+};
+
+enum ath10k_amsdu_subfrm_num {
+ ATH10K_AMSDU_SUBFRM_NUM_1,
+ ATH10K_AMSDU_SUBFRM_NUM_2,
+ ATH10K_AMSDU_SUBFRM_NUM_3,
+ ATH10K_AMSDU_SUBFRM_NUM_4,
+ ATH10K_AMSDU_SUBFRM_NUM_MORE,
+ ATH10K_AMSDU_SUBFRM_NUM_MAX,
+};
+
+struct ath10k_sta_tid_stats {
+ unsigned long rx_pkt_from_fw;
+ unsigned long rx_pkt_unchained;
+ unsigned long rx_pkt_drop_chained;
+ unsigned long rx_pkt_drop_filter;
+ unsigned long rx_pkt_err[ATH10K_PKT_RX_ERR_MAX];
+ unsigned long rx_pkt_queued_for_mac;
+ unsigned long rx_pkt_ampdu[ATH10K_AMPDU_SUBFRM_NUM_MAX];
+ unsigned long rx_pkt_amsdu[ATH10K_AMSDU_SUBFRM_NUM_MAX];
+};
+
+enum ath10k_counter_type {
+ ATH10K_COUNTER_TYPE_BYTES,
+ ATH10K_COUNTER_TYPE_PKTS,
+ ATH10K_COUNTER_TYPE_MAX,
+};
+
+enum ath10k_stats_type {
+ ATH10K_STATS_TYPE_SUCC,
+ ATH10K_STATS_TYPE_FAIL,
+ ATH10K_STATS_TYPE_RETRY,
+ ATH10K_STATS_TYPE_AMPDU,
+ ATH10K_STATS_TYPE_MAX,
+};
+
+struct ath10k_htt_data_stats {
+ u64 legacy[ATH10K_COUNTER_TYPE_MAX][ATH10K_LEGACY_NUM];
+ u64 ht[ATH10K_COUNTER_TYPE_MAX][ATH10K_HT_MCS_NUM];
+ u64 vht[ATH10K_COUNTER_TYPE_MAX][ATH10K_VHT_MCS_NUM];
+ u64 bw[ATH10K_COUNTER_TYPE_MAX][ATH10K_BW_NUM];
+ u64 nss[ATH10K_COUNTER_TYPE_MAX][ATH10K_NSS_NUM];
+ u64 gi[ATH10K_COUNTER_TYPE_MAX][ATH10K_GI_NUM];
+ u64 rate_table[ATH10K_COUNTER_TYPE_MAX][ATH10K_RATE_TABLE_NUM];
+};
+
+struct ath10k_htt_tx_stats {
+ struct ath10k_htt_data_stats stats[ATH10K_STATS_TYPE_MAX];
+ u64 tx_duration;
+ u64 ba_fails;
+ u64 ack_fails;
+};
+
+#define ATH10K_TID_MAX 8
+
+struct ath10k_sta {
+ struct ath10k_vif *arvif;
+
+ /* the following are protected by ar->data_lock */
+ u32 changed; /* IEEE80211_RC_* */
+ u32 bw;
+ u32 nss;
+ u32 smps;
+ u16 peer_id;
+ struct rate_info txrate;
+ struct ieee80211_tx_info tx_info;
+ u32 tx_retries;
+ u32 tx_failed;
+ u32 last_tx_bitrate;
+
+ u32 rx_rate_code;
+ u32 rx_bitrate_kbps;
+ u32 tx_rate_code;
+ u32 tx_bitrate_kbps;
+ struct work_struct update_wk;
+ u64 rx_duration;
+ struct ath10k_htt_tx_stats *tx_stats;
+ u32 ucast_cipher;
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+ /* protected by conf_mutex */
+ bool aggr_mode;
+
+ /* Protected with ar->data_lock */
+ struct ath10k_sta_tid_stats tid_stats[IEEE80211_NUM_TIDS + 1];
+#endif
+ /* Protected with ar->data_lock */
+ u32 peer_ps_state;
+ struct work_struct tid_config_wk;
+ int noack[ATH10K_TID_MAX];
+ int retry_long[ATH10K_TID_MAX];
+ int ampdu[ATH10K_TID_MAX];
+ u8 rate_ctrl[ATH10K_TID_MAX];
+ u32 rate_code[ATH10K_TID_MAX];
+ int rtscts[ATH10K_TID_MAX];
+};
+
+#define ATH10K_VDEV_SETUP_TIMEOUT_HZ (5 * HZ)
+#define ATH10K_VDEV_DELETE_TIMEOUT_HZ (5 * HZ)
+
+enum ath10k_beacon_state {
+ ATH10K_BEACON_SCHEDULED = 0,
+ ATH10K_BEACON_SENDING,
+ ATH10K_BEACON_SENT,
+};
+
+struct ath10k_vif {
+ struct list_head list;
+
+ u32 vdev_id;
+ u16 peer_id;
+ enum wmi_vdev_type vdev_type;
+ enum wmi_vdev_subtype vdev_subtype;
+ u32 beacon_interval;
+ u32 dtim_period;
+ struct sk_buff *beacon;
+ /* protected by data_lock */
+ enum ath10k_beacon_state beacon_state;
+ void *beacon_buf;
+ dma_addr_t beacon_paddr;
+ unsigned long tx_paused; /* arbitrary values defined by target */
+
+ struct ath10k *ar;
+ struct ieee80211_vif *vif;
+
+ bool is_started;
+ bool is_up;
+ bool spectral_enabled;
+ bool ps;
+ u32 aid;
+ u8 bssid[ETH_ALEN];
+
+ struct ieee80211_key_conf *wep_keys[WMI_MAX_KEY_INDEX + 1];
+ s8 def_wep_key_idx;
+
+ u16 tx_seq_no;
+
+ union {
+ struct {
+ u32 uapsd;
+ } sta;
+ struct {
+ /* 512 stations */
+ u8 tim_bitmap[64];
+ u8 tim_len;
+ u32 ssid_len;
+ u8 ssid[IEEE80211_MAX_SSID_LEN] __nonstring;
+ bool hidden_ssid;
+ /* P2P_IE with NoA attribute for P2P_GO case */
+ u32 noa_len;
+ u8 *noa_data;
+ } ap;
+ } u;
+
+ bool use_cts_prot;
+ bool nohwcrypt;
+ int num_legacy_stations;
+ int txpower;
+ bool ftm_responder;
+ struct wmi_wmm_params_all_arg wmm_params;
+ struct work_struct ap_csa_work;
+ struct delayed_work connection_loss_work;
+ struct cfg80211_bitrate_mask bitrate_mask;
+
+ /* For setting VHT peer fixed rate, protected by conf_mutex */
+ int vht_num_rates;
+ u8 vht_pfr;
+ u32 tid_conf_changed[ATH10K_TID_MAX];
+ int noack[ATH10K_TID_MAX];
+ int retry_long[ATH10K_TID_MAX];
+ int ampdu[ATH10K_TID_MAX];
+ u8 rate_ctrl[ATH10K_TID_MAX];
+ u32 rate_code[ATH10K_TID_MAX];
+ int rtscts[ATH10K_TID_MAX];
+ u32 tids_rst;
+};
+
+struct ath10k_vif_iter {
+ u32 vdev_id;
+ struct ath10k_vif *arvif;
+};
+
+/* Copy Engine register dump, protected by ce-lock */
+struct ath10k_ce_crash_data {
+ __le32 base_addr;
+ __le32 src_wr_idx;
+ __le32 src_r_idx;
+ __le32 dst_wr_idx;
+ __le32 dst_r_idx;
+};
+
+struct ath10k_ce_crash_hdr {
+ __le32 ce_count;
+ __le32 reserved[3]; /* for future use */
+ struct ath10k_ce_crash_data entries[];
+};
+
+#define MAX_MEM_DUMP_TYPE 5
+
+/* used for crash-dump storage, protected by data-lock */
+struct ath10k_fw_crash_data {
+ guid_t guid;
+ struct timespec64 timestamp;
+ __le32 registers[REG_DUMP_COUNT_QCA988X];
+ struct ath10k_ce_crash_data ce_crash_data[CE_COUNT_MAX];
+
+ u8 *ramdump_buf;
+ size_t ramdump_buf_len;
+};
+
+struct ath10k_debug {
+ struct dentry *debugfs_phy;
+
+ struct ath10k_fw_stats fw_stats;
+ struct completion fw_stats_complete;
+ bool fw_stats_done;
+
+ unsigned long htt_stats_mask;
+ unsigned long reset_htt_stats;
+ struct delayed_work htt_stats_dwork;
+ struct ath10k_dfs_stats dfs_stats;
+ struct ath_dfs_pool_stats dfs_pool_stats;
+
+ /* used for tpc-dump storage, protected by data-lock */
+ struct ath10k_tpc_stats *tpc_stats;
+ struct ath10k_tpc_stats_final *tpc_stats_final;
+
+ struct completion tpc_complete;
+
+ /* protected by conf_mutex */
+ u64 fw_dbglog_mask;
+ u32 fw_dbglog_level;
+ u32 reg_addr;
+ u32 nf_cal_period;
+ void *cal_data;
+ u32 enable_extd_tx_stats;
+ u8 fw_dbglog_mode;
+};
+
+enum ath10k_state {
+ ATH10K_STATE_OFF = 0,
+ ATH10K_STATE_ON,
+
+ /* When doing firmware recovery the device is first powered down.
+ * mac80211 is supposed to call in to start() hook later on. It is
+ * however possible that driver unloading and firmware crash overlap.
+ * mac80211 can wait on conf_mutex in stop() while the device is
+ * stopped in ath10k_core_restart() work holding conf_mutex. The state
+ * RESTARTED means that the device is up and mac80211 has started hw
+ * reconfiguration. Once mac80211 is done with the reconfiguration we
+ * set the state to STATE_ON in reconfig_complete().
+ */
+ ATH10K_STATE_RESTARTING,
+ ATH10K_STATE_RESTARTED,
+
+ /* The device has crashed while restarting hw. This state is like ON
+ * but commands are blocked in HTC and -ECOMM response is given. This
+ * prevents completion timeouts and makes the driver more responsive to
+ * userspace commands. This also prevents recursive recovery.
+ */
+ ATH10K_STATE_WEDGED,
+
+ /* factory tests */
+ ATH10K_STATE_UTF,
+};
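+
+/*
+ * A rough summary of the recovery transitions described in the comments
+ * above:
+ *
+ *   ON -> RESTARTING          firmware crashes, ath10k_core_restart() runs
+ *   RESTARTING -> RESTARTED   mac80211 begins hw reconfiguration
+ *   RESTARTED -> ON           reconfig_complete()
+ *   RESTARTING -> WEDGED      the device crashes again while restarting
+ */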
+
+enum ath10k_firmware_mode {
+ /* the default mode, standard 802.11 functionality */
+ ATH10K_FIRMWARE_MODE_NORMAL,
+
+ /* factory tests etc */
+ ATH10K_FIRMWARE_MODE_UTF,
+};
+
+enum ath10k_fw_features {
+ /* wmi_mgmt_rx_hdr contains extra RSSI information */
+ ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX = 0,
+
+ /* Firmware from 10X branch. Deprecated, don't use in new code. */
+ ATH10K_FW_FEATURE_WMI_10X = 1,
+
+ /* firmware supports management frame tx over WMI, otherwise it's HTT */
+ ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX = 2,
+
+ /* Firmware does not support P2P */
+ ATH10K_FW_FEATURE_NO_P2P = 3,
+
+ /* Firmware 10.2 feature bit. The ATH10K_FW_FEATURE_WMI_10X feature
+ * bit is required to be set as well. Deprecated, don't use in new
+ * code.
+ */
+ ATH10K_FW_FEATURE_WMI_10_2 = 4,
+
+ /* Some firmware revisions lack proper multi-interface client powersave
+ * implementation. Enabling PS could result in connection drops,
+ * traffic stalls, etc.
+ */
+ ATH10K_FW_FEATURE_MULTI_VIF_PS_SUPPORT = 5,
+
+ /* Some firmware revisions have an incomplete WoWLAN implementation
+ * despite WMI service bit being advertised. This feature flag is used
+ * to distinguish whether WoWLAN is really supported or not.
+ */
+ ATH10K_FW_FEATURE_WOWLAN_SUPPORT = 6,
+
+ /* Don't trust error code from otp.bin */
+ ATH10K_FW_FEATURE_IGNORE_OTP_RESULT = 7,
+
+ /* Some firmware revisions pad 4th hw address to 4 byte boundary making
+ * it 8 bytes long in Native Wifi Rx decap.
+ */
+ ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING = 8,
+
+ /* Firmware supports bypassing PLL setting on init. */
+ ATH10K_FW_FEATURE_SUPPORTS_SKIP_CLOCK_INIT = 9,
+
+ /* Raw mode support. If supported, FW supports receiving and transmitting
+ * frames in raw mode.
+ */
+ ATH10K_FW_FEATURE_RAW_MODE_SUPPORT = 10,
+
+ /* Firmware supports Adaptive CCA */
+ ATH10K_FW_FEATURE_SUPPORTS_ADAPTIVE_CCA = 11,
+
+ /* Firmware supports management frame protection */
+ ATH10K_FW_FEATURE_MFP_SUPPORT = 12,
+
+ /* Firmware supports pull-push model where host shares its software
+ * queue state with firmware and firmware generates fetch requests
+ * telling host which queues to dequeue tx from.
+ *
+ * Primary function of this is improved MU-MIMO performance with
+ * multiple clients.
+ */
+ ATH10K_FW_FEATURE_PEER_FLOW_CONTROL = 13,
+
+ /* Firmware supports BT-Coex without reloading firmware via pdev param.
+ * To support the Bluetooth coexistence pdev param, WMI_COEX_GPIO_SUPPORT
+ * of the extended resource config should always be enabled. This
+ * firmware IE is used to configure WMI_COEX_GPIO_SUPPORT.
+ */
+ ATH10K_FW_FEATURE_BTCOEX_PARAM = 14,
+
+ /* Unused flag, proven not to work; enable this if you want to
+ * experiment with sending NULL func data frames in HTT TX
+ */
+ ATH10K_FW_FEATURE_SKIP_NULL_FUNC_WAR = 15,
+
+ /* Firmware allows other BSS mesh broadcast/multicast frames without
+ * creating a monitor interface. Appropriate rx filters are programmed
+ * for the mesh vdev by firmware itself. This feature flag is used to
+ * avoid creating a monitor vdev while configuring a mesh node.
+ */
+ ATH10K_FW_FEATURE_ALLOWS_MESH_BCAST = 16,
+
+ /* Firmware does not support power save in station mode. */
+ ATH10K_FW_FEATURE_NO_PS = 17,
+
+ /* Firmware allows management tx by reference instead of by value. */
+ ATH10K_FW_FEATURE_MGMT_TX_BY_REF = 18,
+
+ /* Firmware load is done externally, not by bmi */
+ ATH10K_FW_FEATURE_NON_BMI = 19,
+
+ /* Firmware sends only one chan_info event per channel */
+ ATH10K_FW_FEATURE_SINGLE_CHAN_INFO_PER_CHANNEL = 20,
+
+ /* Firmware allows setting peer fixed rate */
+ ATH10K_FW_FEATURE_PEER_FIXED_RATE = 21,
+
+ /* Firmware supports IRAM recovery */
+ ATH10K_FW_FEATURE_IRAM_RECOVERY = 22,
+
+ /* keep last */
+ ATH10K_FW_FEATURE_COUNT,
+};
+
+enum ath10k_dev_flags {
+ /* Indicates that the ath10k device is in the CAC phase of DFS */
+ ATH10K_CAC_RUNNING,
+ ATH10K_FLAG_CORE_REGISTERED,
+
+ /* Device has crashed and needs to restart. This indicates any pending
+ * waiters should immediately cancel instead of waiting for a time out.
+ */
+ ATH10K_FLAG_CRASH_FLUSH,
+
+ /* Use Raw mode instead of native WiFi Tx/Rx encap mode.
+ * Raw mode supports both hardware and software crypto. Native WiFi only
+ * supports hardware crypto.
+ */
+ ATH10K_FLAG_RAW_MODE,
+
+ /* Disable HW crypto engine */
+ ATH10K_FLAG_HW_CRYPTO_DISABLED,
+
+ /* Bluetooth coexistence enabled */
+ ATH10K_FLAG_BTCOEX,
+
+ /* Per Station statistics service */
+ ATH10K_FLAG_PEER_STATS,
+
+ /* protected by conf_mutex */
+ ATH10K_FLAG_NAPI_ENABLED,
+};
+
+enum ath10k_cal_mode {
+ ATH10K_CAL_MODE_FILE,
+ ATH10K_CAL_MODE_OTP,
+ ATH10K_CAL_MODE_DT,
+ ATH10K_CAL_MODE_NVMEM,
+ ATH10K_PRE_CAL_MODE_FILE,
+ ATH10K_PRE_CAL_MODE_DT,
+ ATH10K_PRE_CAL_MODE_NVMEM,
+ ATH10K_CAL_MODE_EEPROM,
+};
+
+enum ath10k_crypt_mode {
+ /* Only use hardware crypto engine */
+ ATH10K_CRYPT_MODE_HW,
+ /* Only use software crypto engine */
+ ATH10K_CRYPT_MODE_SW,
+};
+
+static inline const char *ath10k_cal_mode_str(enum ath10k_cal_mode mode)
+{
+ switch (mode) {
+ case ATH10K_CAL_MODE_FILE:
+ return "file";
+ case ATH10K_CAL_MODE_OTP:
+ return "otp";
+ case ATH10K_CAL_MODE_DT:
+ return "dt";
+ case ATH10K_CAL_MODE_NVMEM:
+ return "nvmem";
+ case ATH10K_PRE_CAL_MODE_FILE:
+ return "pre-cal-file";
+ case ATH10K_PRE_CAL_MODE_DT:
+ return "pre-cal-dt";
+ case ATH10K_PRE_CAL_MODE_NVMEM:
+ return "pre-cal-nvmem";
+ case ATH10K_CAL_MODE_EEPROM:
+ return "eeprom";
+ }
+
+ return "unknown";
+}
+
+enum ath10k_scan_state {
+ ATH10K_SCAN_IDLE,
+ ATH10K_SCAN_STARTING,
+ ATH10K_SCAN_RUNNING,
+ ATH10K_SCAN_ABORTING,
+};
+
+static inline const char *ath10k_scan_state_str(enum ath10k_scan_state state)
+{
+ switch (state) {
+ case ATH10K_SCAN_IDLE:
+ return "idle";
+ case ATH10K_SCAN_STARTING:
+ return "starting";
+ case ATH10K_SCAN_RUNNING:
+ return "running";
+ case ATH10K_SCAN_ABORTING:
+ return "aborting";
+ }
+
+ return "unknown";
+}
+
+enum ath10k_tx_pause_reason {
+ ATH10K_TX_PAUSE_Q_FULL,
+ ATH10K_TX_PAUSE_MAX,
+};
+
+struct ath10k_fw_file {
+ const struct firmware *firmware;
+
+ char fw_version[ETHTOOL_FWVERS_LEN];
+
+ DECLARE_BITMAP(fw_features, ATH10K_FW_FEATURE_COUNT);
+
+ enum ath10k_fw_wmi_op_version wmi_op_version;
+ enum ath10k_fw_htt_op_version htt_op_version;
+
+ const void *firmware_data;
+ size_t firmware_len;
+
+ const void *otp_data;
+ size_t otp_len;
+
+ const void *codeswap_data;
+ size_t codeswap_len;
+
+ /* The original idea of struct ath10k_fw_file was that it only
+ * contains struct firmware and pointers to various parts (actual
+ * firmware binary, otp, metadata etc) of the file. This seg_info
+ * is actually created separately, but as it is used similarly to
+ * the other firmware components it's more convenient to have it
+ * here.
+ */
+ struct ath10k_swap_code_seg_info *firmware_swap_code_seg_info;
+};
+
+struct ath10k_fw_components {
+ const struct firmware *board;
+ const void *board_data;
+ size_t board_len;
+ const struct firmware *ext_board;
+ const void *ext_board_data;
+ size_t ext_board_len;
+
+ struct ath10k_fw_file fw_file;
+};
+
+struct ath10k_per_peer_tx_stats {
+ u32 succ_bytes;
+ u32 retry_bytes;
+ u32 failed_bytes;
+ u8 ratecode;
+ u8 flags;
+ u16 peer_id;
+ u16 succ_pkts;
+ u16 retry_pkts;
+ u16 failed_pkts;
+ u16 duration;
+ u32 reserved1;
+ u32 reserved2;
+};
+
+enum ath10k_dev_type {
+ ATH10K_DEV_TYPE_LL,
+ ATH10K_DEV_TYPE_HL,
+};
+
+struct ath10k_bus_params {
+ u32 chip_id;
+ enum ath10k_dev_type dev_type;
+ bool link_can_suspend;
+ bool hl_msdu_ids;
+};
+
+struct ath10k {
+ struct ath_common ath_common;
+ struct ieee80211_hw *hw;
+ struct ieee80211_ops *ops;
+ struct device *dev;
+ struct msa_region {
+ dma_addr_t paddr;
+ u32 mem_size;
+ void *vaddr;
+ } msa;
+ u8 mac_addr[ETH_ALEN];
+
+ enum ath10k_hw_rev hw_rev;
+ u16 dev_id;
+ u32 chip_id;
+ u32 target_version;
+ u8 fw_version_major;
+ u32 fw_version_minor;
+ u16 fw_version_release;
+ u16 fw_version_build;
+ u32 fw_stats_req_mask;
+ u32 phy_capability;
+ u32 hw_min_tx_power;
+ u32 hw_max_tx_power;
+ u32 hw_eeprom_rd;
+ u32 ht_cap_info;
+ u32 vht_cap_info;
+ u32 vht_supp_mcs;
+ u32 num_rf_chains;
+ u32 max_spatial_stream;
+ /* protected by conf_mutex */
+ u32 low_2ghz_chan;
+ u32 high_2ghz_chan;
+ u32 low_5ghz_chan;
+ u32 high_5ghz_chan;
+ bool ani_enabled;
+ u32 sys_cap_info;
+
+ /* protected by data_lock */
+ bool hw_rfkill_on;
+
+ /* protected by conf_mutex */
+ u8 ps_state_enable;
+
+ bool nlo_enabled;
+ bool p2p;
+
+ struct {
+ enum ath10k_bus bus;
+ const struct ath10k_hif_ops *ops;
+ } hif;
+
+ struct completion target_suspend;
+ struct completion driver_recovery;
+
+ const struct ath10k_hw_regs *regs;
+ const struct ath10k_hw_ce_regs *hw_ce_regs;
+ const struct ath10k_hw_values *hw_values;
+ struct ath10k_bmi bmi;
+ struct ath10k_wmi wmi;
+ struct ath10k_htc htc;
+ struct ath10k_htt htt;
+
+ struct ath10k_hw_params hw_params;
+
+ /* contains the firmware images used with ATH10K_FIRMWARE_MODE_NORMAL */
+ struct ath10k_fw_components normal_mode_fw;
+
+ /* READ-ONLY images of the running firmware, which can be either
+ * normal or UTF. Do not modify, release etc!
+ */
+ const struct ath10k_fw_components *running_fw;
+
+ const char *board_name;
+
+ const struct firmware *pre_cal_file;
+ const struct firmware *cal_file;
+
+ struct {
+ u32 vendor;
+ u32 device;
+ u32 subsystem_vendor;
+ u32 subsystem_device;
+
+ bool bmi_ids_valid;
+ bool qmi_ids_valid;
+ u32 qmi_board_id;
+ u32 qmi_chip_id;
+ u8 bmi_board_id;
+ u8 bmi_eboard_id;
+ u8 bmi_chip_id;
+ bool ext_bid_supported;
+
+ char bdf_ext[ATH10K_SMBIOS_BDF_EXT_STR_LENGTH];
+ } id;
+
+ int fw_api;
+ int bd_api;
+ enum ath10k_cal_mode cal_mode;
+
+ struct {
+ struct completion started;
+ struct completion completed;
+ struct completion on_channel;
+ struct delayed_work timeout;
+ enum ath10k_scan_state state;
+ bool is_roc;
+ int vdev_id;
+ int roc_freq;
+ bool roc_notify;
+ } scan;
+
+ struct {
+ struct ieee80211_supported_band sbands[NUM_NL80211_BANDS];
+ } mac;
+
+ /* should never be NULL; needed for regular htt rx */
+ struct ieee80211_channel *rx_channel;
+
+ /* valid during scan; needed for mgmt rx during scan */
+ struct ieee80211_channel *scan_channel;
+
+ /* current operating channel definition */
+ struct cfg80211_chan_def chandef;
+
+ /* currently configured operating channel in firmware */
+ struct ieee80211_channel *tgt_oper_chan;
+
+ unsigned long long free_vdev_map;
+ struct ath10k_vif *monitor_arvif;
+ bool monitor;
+ int monitor_vdev_id;
+ bool monitor_started;
+ unsigned int filter_flags;
+ unsigned long dev_flags;
+ bool dfs_block_radar_events;
+
+ /* protected by conf_mutex */
+ bool radar_enabled;
+ int num_started_vdevs;
+
+ /* Protected by conf-mutex */
+ u8 cfg_tx_chainmask;
+ u8 cfg_rx_chainmask;
+
+ struct completion install_key_done;
+
+ int last_wmi_vdev_start_status;
+ struct completion vdev_setup_done;
+ struct completion vdev_delete_done;
+ struct completion peer_stats_info_complete;
+
+ struct workqueue_struct *workqueue;
+ /* Auxiliary workqueue */
+ struct workqueue_struct *workqueue_aux;
+ struct workqueue_struct *workqueue_tx_complete;
+ /* prevents concurrent FW reconfiguration */
+ struct mutex conf_mutex;
+
+ /* protects coredump data */
+ struct mutex dump_mutex;
+
+ /* protects shared structure data */
+ spinlock_t data_lock;
+
+ /* serialize wake_tx_queue calls per ac */
+ spinlock_t queue_lock[IEEE80211_NUM_ACS];
+
+ struct list_head arvifs;
+ struct list_head peers;
+ struct ath10k_peer *peer_map[ATH10K_MAX_NUM_PEER_IDS];
+ wait_queue_head_t peer_mapping_wq;
+
+ /* protected by conf_mutex */
+ int num_peers;
+ int num_stations;
+
+ int max_num_peers;
+ int max_num_stations;
+ int max_num_vdevs;
+ int max_num_tdls_vdevs;
+ int num_active_peers;
+ int num_tids;
+
+ struct work_struct svc_rdy_work;
+ struct sk_buff *svc_rdy_skb;
+
+ struct work_struct offchan_tx_work;
+ struct sk_buff_head offchan_tx_queue;
+ struct completion offchan_tx_completed;
+ struct sk_buff *offchan_tx_skb;
+
+ struct work_struct wmi_mgmt_tx_work;
+ struct sk_buff_head wmi_mgmt_tx_queue;
+
+ enum ath10k_state state;
+
+ struct work_struct register_work;
+ struct work_struct restart_work;
+ struct work_struct recovery_check_work;
+ struct work_struct bundle_tx_work;
+ struct work_struct tx_complete_work;
+
+ atomic_t pending_recovery;
+ unsigned int recovery_count;
+ /* continuous recovery fail count */
+ atomic_t fail_cont_count;
+
+ /* cycle count is reported twice for each visited channel during scan.
+ * access protected by data_lock
+ */
+ u32 survey_last_rx_clear_count;
+ u32 survey_last_cycle_count;
+ struct survey_info survey[ATH10K_NUM_CHANS];
+
+ /* Channel info events are expected to come in pairs without and with
+ * COMPLETE flag set respectively for each channel visit during scan.
+ *
+ * However there are deviations from this rule. This flag is used to
+ * avoid reporting garbage data.
+ */
+ bool ch_info_can_report_survey;
+ struct completion bss_survey_done;
+
+ struct dfs_pattern_detector *dfs_detector;
+
+ unsigned long tx_paused; /* see ATH10K_TX_PAUSE_ */
+
+#ifdef CONFIG_ATH10K_DEBUGFS
+ struct ath10k_debug debug;
+ struct {
+ /* relay(fs) channel for spectral scan */
+ struct rchan *rfs_chan_spec_scan;
+
+ /* spectral_mode and spec_config are protected by conf_mutex */
+ enum ath10k_spectral_mode mode;
+ struct ath10k_spec_scan config;
+ } spectral;
+#endif
+
+ u32 pktlog_filter;
+
+#ifdef CONFIG_DEV_COREDUMP
+ struct {
+ struct ath10k_fw_crash_data *fw_crash_data;
+ } coredump;
+#endif
+
+ struct {
+ /* protected by conf_mutex */
+ struct ath10k_fw_components utf_mode_fw;
+ u8 ftm_msgref;
+
+ /* protected by data_lock */
+ bool utf_monitor;
+ u32 data_pos;
+ u32 expected_seq;
+ u8 *eventdata;
+ } testmode;
+
+ struct {
+ struct gpio_led wifi_led;
+ struct led_classdev cdev;
+ char label[48];
+ u32 gpio_state_pin;
+ } leds;
+
+ struct {
+ /* protected by data_lock */
+ u32 rx_crc_err_drop;
+ u32 fw_crash_counter;
+ u32 fw_warm_reset_counter;
+ u32 fw_cold_reset_counter;
+ } stats;
+
+ struct ath10k_thermal thermal;
+ struct ath10k_wow wow;
+ struct ath10k_per_peer_tx_stats peer_tx_stats;
+
+ /* NAPI */
+ struct net_device *napi_dev;
+ struct napi_struct napi;
+
+ struct work_struct set_coverage_class_work;
+ /* protected by conf_mutex */
+ struct {
+ /* writing also protected by data_lock */
+ s16 coverage_class;
+
+ u32 reg_phyclk;
+ u32 reg_slottime_conf;
+ u32 reg_slottime_orig;
+ u32 reg_ack_cts_timeout_conf;
+ u32 reg_ack_cts_timeout_orig;
+ } fw_coverage;
+
+ u32 ampdu_reference;
+
+ const u8 *wmi_key_cipher;
+ void *ce_priv;
+
+ u32 sta_tid_stats_mask;
+
+ /* protected by data_lock */
+ enum ath10k_radar_confirmation_state radar_conf_state;
+ struct ath10k_radar_found_info last_radar_info;
+ struct work_struct radar_confirmation_work;
+ struct ath10k_bus_params bus_param;
+ struct completion peer_delete_done;
+
+ bool coex_support;
+ int coex_gpio_pin;
+
+ s32 tx_power_2g_limit;
+ s32 tx_power_5g_limit;
+
+ /* must be last */
+ u8 drv_priv[] __aligned(sizeof(void *));
+};
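+
+/*
+ * Usage sketch for drv_priv (struct my_bus_priv is hypothetical; the
+ * pattern follows the priv_size argument of ath10k_core_create() declared
+ * below): a bus backend reserves its private state at the end of struct
+ * ath10k:
+ *
+ *   struct ath10k *ar = ath10k_core_create(sizeof(struct my_bus_priv),
+ *                                          dev, bus, hw_rev, hif_ops);
+ *   struct my_bus_priv *priv = (void *)ar->drv_priv;
+ */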
+
+static inline bool ath10k_peer_stats_enabled(struct ath10k *ar)
+{
+ if (test_bit(ATH10K_FLAG_PEER_STATS, &ar->dev_flags) &&
+ test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map))
+ return true;
+
+ return false;
+}
+
+extern unsigned int ath10k_frame_mode;
+extern unsigned long ath10k_coredump_mask;
+
+void ath10k_core_napi_sync_disable(struct ath10k *ar);
+void ath10k_core_napi_enable(struct ath10k *ar);
+struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
+ enum ath10k_bus bus,
+ enum ath10k_hw_rev hw_rev,
+ const struct ath10k_hif_ops *hif_ops);
+void ath10k_core_destroy(struct ath10k *ar);
+void ath10k_core_get_fw_features_str(struct ath10k *ar,
+ char *buf,
+ size_t max_len);
+int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name,
+ struct ath10k_fw_file *fw_file);
+
+int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
+ const struct ath10k_fw_components *fw_components);
+int ath10k_wait_for_suspend(struct ath10k *ar, u32 suspend_opt);
+void ath10k_core_stop(struct ath10k *ar);
+void ath10k_core_start_recovery(struct ath10k *ar);
+int ath10k_core_register(struct ath10k *ar,
+ const struct ath10k_bus_params *bus_params);
+void ath10k_core_unregister(struct ath10k *ar);
+int ath10k_core_fetch_board_file(struct ath10k *ar, int bd_ie_type);
+int ath10k_core_check_dt(struct ath10k *ar);
+void ath10k_core_free_board_files(struct ath10k *ar);
+
+#endif /* _CORE_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/coredump.c b/drivers/net/wireless/ath/ath10k/coredump.c
new file mode 100644
index 000000000000..50d0c4213ecf
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/coredump.c
@@ -0,0 +1,1666 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#include "coredump.h"
+
+#include <linux/devcoredump.h>
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/utsname.h>
+
+#include "debug.h"
+#include "hw.h"
+
+static const struct ath10k_mem_section qca6174_hw21_register_sections[] = {
+ {0x800, 0x810},
+ {0x820, 0x82C},
+ {0x830, 0x8F4},
+ {0x90C, 0x91C},
+ {0xA14, 0xA18},
+ {0xA84, 0xA94},
+ {0xAA8, 0xAD4},
+ {0xADC, 0xB40},
+ {0x1000, 0x10A4},
+ {0x10BC, 0x111C},
+ {0x1134, 0x1138},
+ {0x1144, 0x114C},
+ {0x1150, 0x115C},
+ {0x1160, 0x1178},
+ {0x1240, 0x1260},
+ {0x2000, 0x207C},
+ {0x3000, 0x3014},
+ {0x4000, 0x4014},
+ {0x5000, 0x5124},
+ {0x6000, 0x6040},
+ {0x6080, 0x60CC},
+ {0x6100, 0x611C},
+ {0x6140, 0x61D8},
+ {0x6200, 0x6238},
+ {0x6240, 0x628C},
+ {0x62C0, 0x62EC},
+ {0x6380, 0x63E8},
+ {0x6400, 0x6440},
+ {0x6480, 0x64CC},
+ {0x6500, 0x651C},
+ {0x6540, 0x6580},
+ {0x6600, 0x6638},
+ {0x6640, 0x668C},
+ {0x66C0, 0x66EC},
+ {0x6780, 0x67E8},
+ {0x7080, 0x708C},
+ {0x70C0, 0x70C8},
+ {0x7400, 0x741C},
+ {0x7440, 0x7454},
+ {0x7800, 0x7818},
+ {0x8000, 0x8004},
+ {0x8010, 0x8064},
+ {0x8080, 0x8084},
+ {0x80A0, 0x80A4},
+ {0x80C0, 0x80C4},
+ {0x80E0, 0x80F4},
+ {0x8100, 0x8104},
+ {0x8110, 0x812C},
+ {0x9000, 0x9004},
+ {0x9800, 0x982C},
+ {0x9830, 0x9838},
+ {0x9840, 0x986C},
+ {0x9870, 0x9898},
+ {0x9A00, 0x9C00},
+ {0xD580, 0xD59C},
+ {0xF000, 0xF0E0},
+ {0xF140, 0xF190},
+ {0xF250, 0xF25C},
+ {0xF260, 0xF268},
+ {0xF26C, 0xF2A8},
+ {0x10008, 0x1000C},
+ {0x10014, 0x10018},
+ {0x1001C, 0x10020},
+ {0x10024, 0x10028},
+ {0x10030, 0x10034},
+ {0x10040, 0x10054},
+ {0x10058, 0x1007C},
+ {0x10080, 0x100C4},
+ {0x100C8, 0x10114},
+ {0x1012C, 0x10130},
+ {0x10138, 0x10144},
+ {0x10200, 0x10220},
+ {0x10230, 0x10250},
+ {0x10260, 0x10280},
+ {0x10290, 0x102B0},
+ {0x102C0, 0x102DC},
+ {0x102E0, 0x102F4},
+ {0x102FC, 0x1037C},
+ {0x10380, 0x10390},
+ {0x10800, 0x10828},
+ {0x10840, 0x10844},
+ {0x10880, 0x10884},
+ {0x108C0, 0x108E8},
+ {0x10900, 0x10928},
+ {0x10940, 0x10944},
+ {0x10980, 0x10984},
+ {0x109C0, 0x109E8},
+ {0x10A00, 0x10A28},
+ {0x10A40, 0x10A50},
+ {0x11000, 0x11028},
+ {0x11030, 0x11034},
+ {0x11038, 0x11068},
+ {0x11070, 0x11074},
+ {0x11078, 0x110A8},
+ {0x110B0, 0x110B4},
+ {0x110B8, 0x110E8},
+ {0x110F0, 0x110F4},
+ {0x110F8, 0x11128},
+ {0x11138, 0x11144},
+ {0x11178, 0x11180},
+ {0x111B8, 0x111C0},
+ {0x111F8, 0x11200},
+ {0x11238, 0x1123C},
+ {0x11270, 0x11274},
+ {0x11278, 0x1127C},
+ {0x112B0, 0x112B4},
+ {0x112B8, 0x112BC},
+ {0x112F0, 0x112F4},
+ {0x112F8, 0x112FC},
+ {0x11338, 0x1133C},
+ {0x11378, 0x1137C},
+ {0x113B8, 0x113BC},
+ {0x113F8, 0x113FC},
+ {0x11438, 0x11440},
+ {0x11478, 0x11480},
+ {0x114B8, 0x114BC},
+ {0x114F8, 0x114FC},
+ {0x11538, 0x1153C},
+ {0x11578, 0x1157C},
+ {0x115B8, 0x115BC},
+ {0x115F8, 0x115FC},
+ {0x11638, 0x1163C},
+ {0x11678, 0x1167C},
+ {0x116B8, 0x116BC},
+ {0x116F8, 0x116FC},
+ {0x11738, 0x1173C},
+ {0x11778, 0x1177C},
+ {0x117B8, 0x117BC},
+ {0x117F8, 0x117FC},
+ {0x17000, 0x1701C},
+ {0x17020, 0x170AC},
+ {0x18000, 0x18050},
+ {0x18054, 0x18074},
+ {0x18080, 0x180D4},
+ {0x180DC, 0x18104},
+ {0x18108, 0x1813C},
+ {0x18144, 0x18148},
+ {0x18168, 0x18174},
+ {0x18178, 0x18180},
+ {0x181C8, 0x181E0},
+ {0x181E4, 0x181E8},
+ {0x181EC, 0x1820C},
+ {0x1825C, 0x18280},
+ {0x18284, 0x18290},
+ {0x18294, 0x182A0},
+ {0x18300, 0x18304},
+ {0x18314, 0x18320},
+ {0x18328, 0x18350},
+ {0x1835C, 0x1836C},
+ {0x18370, 0x18390},
+ {0x18398, 0x183AC},
+ {0x183BC, 0x183D8},
+ {0x183DC, 0x183F4},
+ {0x18400, 0x186F4},
+ {0x186F8, 0x1871C},
+ {0x18720, 0x18790},
+ {0x19800, 0x19830},
+ {0x19834, 0x19840},
+ {0x19880, 0x1989C},
+ {0x198A4, 0x198B0},
+ {0x198BC, 0x19900},
+ {0x19C00, 0x19C88},
+ {0x19D00, 0x19D20},
+ {0x19E00, 0x19E7C},
+ {0x19E80, 0x19E94},
+ {0x19E98, 0x19EAC},
+ {0x19EB0, 0x19EBC},
+ {0x19F70, 0x19F74},
+ {0x19F80, 0x19F8C},
+ {0x19FA0, 0x19FB4},
+ {0x19FC0, 0x19FD8},
+ {0x1A000, 0x1A200},
+ {0x1A204, 0x1A210},
+ {0x1A228, 0x1A22C},
+ {0x1A230, 0x1A248},
+ {0x1A250, 0x1A270},
+ {0x1A280, 0x1A290},
+ {0x1A2A0, 0x1A2A4},
+ {0x1A2C0, 0x1A2EC},
+ {0x1A300, 0x1A3BC},
+ {0x1A3F0, 0x1A3F4},
+ {0x1A3F8, 0x1A434},
+ {0x1A438, 0x1A444},
+ {0x1A448, 0x1A468},
+ {0x1A580, 0x1A58C},
+ {0x1A644, 0x1A654},
+ {0x1A670, 0x1A698},
+ {0x1A6AC, 0x1A6B0},
+ {0x1A6D0, 0x1A6D4},
+ {0x1A6EC, 0x1A70C},
+ {0x1A710, 0x1A738},
+ {0x1A7C0, 0x1A7D0},
+ {0x1A7D4, 0x1A7D8},
+ {0x1A7DC, 0x1A7E4},
+ {0x1A7F0, 0x1A7F8},
+ {0x1A888, 0x1A89C},
+ {0x1A8A8, 0x1A8AC},
+ {0x1A8C0, 0x1A8DC},
+ {0x1A8F0, 0x1A8FC},
+ {0x1AE04, 0x1AE08},
+ {0x1AE18, 0x1AE24},
+ {0x1AF80, 0x1AF8C},
+ {0x1AFA0, 0x1AFB4},
+ {0x1B000, 0x1B200},
+ {0x1B284, 0x1B288},
+ {0x1B2D0, 0x1B2D8},
+ {0x1B2DC, 0x1B2EC},
+ {0x1B300, 0x1B340},
+ {0x1B374, 0x1B378},
+ {0x1B380, 0x1B384},
+ {0x1B388, 0x1B38C},
+ {0x1B404, 0x1B408},
+ {0x1B420, 0x1B428},
+ {0x1B440, 0x1B444},
+ {0x1B448, 0x1B44C},
+ {0x1B450, 0x1B458},
+ {0x1B45C, 0x1B468},
+ {0x1B584, 0x1B58C},
+ {0x1B68C, 0x1B690},
+ {0x1B6AC, 0x1B6B0},
+ {0x1B7F0, 0x1B7F8},
+ {0x1C800, 0x1CC00},
+ {0x1CE00, 0x1CE04},
+ {0x1CF80, 0x1CF84},
+ {0x1D200, 0x1D800},
+ {0x1E000, 0x20014},
+ {0x20100, 0x20124},
+ {0x21400, 0x217A8},
+ {0x21800, 0x21BA8},
+ {0x21C00, 0x21FA8},
+ {0x22000, 0x223A8},
+ {0x22400, 0x227A8},
+ {0x22800, 0x22BA8},
+ {0x22C00, 0x22FA8},
+ {0x23000, 0x233A8},
+ {0x24000, 0x24034},
+ {0x26000, 0x26064},
+ {0x27000, 0x27024},
+ {0x34000, 0x3400C},
+ {0x34400, 0x3445C},
+ {0x34800, 0x3485C},
+ {0x34C00, 0x34C5C},
+ {0x35000, 0x3505C},
+ {0x35400, 0x3545C},
+ {0x35800, 0x3585C},
+ {0x35C00, 0x35C5C},
+ {0x36000, 0x3605C},
+ {0x38000, 0x38064},
+ {0x38070, 0x380E0},
+ {0x3A000, 0x3A064},
+ {0x40000, 0x400A4},
+ {0x80000, 0x8000C},
+ {0x80010, 0x80020},
+};
+
+static const struct ath10k_mem_section qca6174_hw30_sdio_register_sections[] = {
+ {0x800, 0x810},
+ {0x820, 0x82C},
+ {0x830, 0x8F4},
+ {0x90C, 0x91C},
+ {0xA14, 0xA18},
+ {0xA84, 0xA94},
+ {0xAA8, 0xAD4},
+ {0xADC, 0xB40},
+ {0x1000, 0x10A4},
+ {0x10BC, 0x111C},
+ {0x1134, 0x1138},
+ {0x1144, 0x114C},
+ {0x1150, 0x115C},
+ {0x1160, 0x1178},
+ {0x1240, 0x1260},
+ {0x2000, 0x207C},
+ {0x3000, 0x3014},
+ {0x4000, 0x4014},
+ {0x5000, 0x5124},
+ {0x6000, 0x6040},
+ {0x6080, 0x60CC},
+ {0x6100, 0x611C},
+ {0x6140, 0x61D8},
+ {0x6200, 0x6238},
+ {0x6240, 0x628C},
+ {0x62C0, 0x62EC},
+ {0x6380, 0x63E8},
+ {0x6400, 0x6440},
+ {0x6480, 0x64CC},
+ {0x6500, 0x651C},
+ {0x6540, 0x6580},
+ {0x6600, 0x6638},
+ {0x6640, 0x668C},
+ {0x66C0, 0x66EC},
+ {0x6780, 0x67E8},
+ {0x7080, 0x708C},
+ {0x70C0, 0x70C8},
+ {0x7400, 0x741C},
+ {0x7440, 0x7454},
+ {0x7800, 0x7818},
+ {0x8010, 0x8060},
+ {0x8080, 0x8084},
+ {0x80A0, 0x80A4},
+ {0x80C0, 0x80C4},
+ {0x80E0, 0x80ec},
+ {0x8110, 0x8128},
+ {0x9000, 0x9004},
+ {0xF000, 0xF0E0},
+ {0xF140, 0xF190},
+ {0xF250, 0xF25C},
+ {0xF260, 0xF268},
+ {0xF26C, 0xF2A8},
+ {0x10008, 0x1000C},
+ {0x10014, 0x10018},
+ {0x1001C, 0x10020},
+ {0x10024, 0x10028},
+ {0x10030, 0x10034},
+ {0x10040, 0x10054},
+ {0x10058, 0x1007C},
+ {0x10080, 0x100C4},
+ {0x100C8, 0x10114},
+ {0x1012C, 0x10130},
+ {0x10138, 0x10144},
+ {0x10200, 0x10220},
+ {0x10230, 0x10250},
+ {0x10260, 0x10280},
+ {0x10290, 0x102B0},
+ {0x102C0, 0x102DC},
+ {0x102E0, 0x102F4},
+ {0x102FC, 0x1037C},
+ {0x10380, 0x10390},
+ {0x10800, 0x10828},
+ {0x10840, 0x10844},
+ {0x10880, 0x10884},
+ {0x108C0, 0x108E8},
+ {0x10900, 0x10928},
+ {0x10940, 0x10944},
+ {0x10980, 0x10984},
+ {0x109C0, 0x109E8},
+ {0x10A00, 0x10A28},
+ {0x10A40, 0x10A50},
+ {0x11000, 0x11028},
+ {0x11030, 0x11034},
+ {0x11038, 0x11068},
+ {0x11070, 0x11074},
+ {0x11078, 0x110A8},
+ {0x110B0, 0x110B4},
+ {0x110B8, 0x110E8},
+ {0x110F0, 0x110F4},
+ {0x110F8, 0x11128},
+ {0x11138, 0x11144},
+ {0x11178, 0x11180},
+ {0x111B8, 0x111C0},
+ {0x111F8, 0x11200},
+ {0x11238, 0x1123C},
+ {0x11270, 0x11274},
+ {0x11278, 0x1127C},
+ {0x112B0, 0x112B4},
+ {0x112B8, 0x112BC},
+ {0x112F0, 0x112F4},
+ {0x112F8, 0x112FC},
+ {0x11338, 0x1133C},
+ {0x11378, 0x1137C},
+ {0x113B8, 0x113BC},
+ {0x113F8, 0x113FC},
+ {0x11438, 0x11440},
+ {0x11478, 0x11480},
+ {0x114B8, 0x114BC},
+ {0x114F8, 0x114FC},
+ {0x11538, 0x1153C},
+ {0x11578, 0x1157C},
+ {0x115B8, 0x115BC},
+ {0x115F8, 0x115FC},
+ {0x11638, 0x1163C},
+ {0x11678, 0x1167C},
+ {0x116B8, 0x116BC},
+ {0x116F8, 0x116FC},
+ {0x11738, 0x1173C},
+ {0x11778, 0x1177C},
+ {0x117B8, 0x117BC},
+ {0x117F8, 0x117FC},
+ {0x17000, 0x1701C},
+ {0x17020, 0x170AC},
+ {0x18000, 0x18050},
+ {0x18054, 0x18074},
+ {0x18080, 0x180D4},
+ {0x180DC, 0x18104},
+ {0x18108, 0x1813C},
+ {0x18144, 0x18148},
+ {0x18168, 0x18174},
+ {0x18178, 0x18180},
+ {0x181C8, 0x181E0},
+ {0x181E4, 0x181E8},
+ {0x181EC, 0x1820C},
+ {0x1825C, 0x18280},
+ {0x18284, 0x18290},
+ {0x18294, 0x182A0},
+ {0x18300, 0x18304},
+ {0x18314, 0x18320},
+ {0x18328, 0x18350},
+ {0x1835C, 0x1836C},
+ {0x18370, 0x18390},
+ {0x18398, 0x183AC},
+ {0x183BC, 0x183D8},
+ {0x183DC, 0x183F4},
+ {0x18400, 0x186F4},
+ {0x186F8, 0x1871C},
+ {0x18720, 0x18790},
+ {0x19800, 0x19830},
+ {0x19834, 0x19840},
+ {0x19880, 0x1989C},
+ {0x198A4, 0x198B0},
+ {0x198BC, 0x19900},
+ {0x19C00, 0x19C88},
+ {0x19D00, 0x19D20},
+ {0x19E00, 0x19E7C},
+ {0x19E80, 0x19E94},
+ {0x19E98, 0x19EAC},
+ {0x19EB0, 0x19EBC},
+ {0x19F70, 0x19F74},
+ {0x19F80, 0x19F8C},
+ {0x19FA0, 0x19FB4},
+ {0x19FC0, 0x19FD8},
+ {0x1A000, 0x1A200},
+ {0x1A204, 0x1A210},
+ {0x1A228, 0x1A22C},
+ {0x1A230, 0x1A248},
+ {0x1A250, 0x1A270},
+ {0x1A280, 0x1A290},
+ {0x1A2A0, 0x1A2A4},
+ {0x1A2C0, 0x1A2EC},
+ {0x1A300, 0x1A3BC},
+ {0x1A3F0, 0x1A3F4},
+ {0x1A3F8, 0x1A434},
+ {0x1A438, 0x1A444},
+ {0x1A448, 0x1A468},
+ {0x1A580, 0x1A58C},
+ {0x1A644, 0x1A654},
+ {0x1A670, 0x1A698},
+ {0x1A6AC, 0x1A6B0},
+ {0x1A6D0, 0x1A6D4},
+ {0x1A6EC, 0x1A70C},
+ {0x1A710, 0x1A738},
+ {0x1A7C0, 0x1A7D0},
+ {0x1A7D4, 0x1A7D8},
+ {0x1A7DC, 0x1A7E4},
+ {0x1A7F0, 0x1A7F8},
+ {0x1A888, 0x1A89C},
+ {0x1A8A8, 0x1A8AC},
+ {0x1A8C0, 0x1A8DC},
+ {0x1A8F0, 0x1A8FC},
+ {0x1AE04, 0x1AE08},
+ {0x1AE18, 0x1AE24},
+ {0x1AF80, 0x1AF8C},
+ {0x1AFA0, 0x1AFB4},
+ {0x1B000, 0x1B200},
+ {0x1B284, 0x1B288},
+ {0x1B2D0, 0x1B2D8},
+ {0x1B2DC, 0x1B2EC},
+ {0x1B300, 0x1B340},
+ {0x1B374, 0x1B378},
+ {0x1B380, 0x1B384},
+ {0x1B388, 0x1B38C},
+ {0x1B404, 0x1B408},
+ {0x1B420, 0x1B428},
+ {0x1B440, 0x1B444},
+ {0x1B448, 0x1B44C},
+ {0x1B450, 0x1B458},
+ {0x1B45C, 0x1B468},
+ {0x1B584, 0x1B58C},
+ {0x1B68C, 0x1B690},
+ {0x1B6AC, 0x1B6B0},
+ {0x1B7F0, 0x1B7F8},
+ {0x1C800, 0x1CC00},
+ {0x1CE00, 0x1CE04},
+ {0x1CF80, 0x1CF84},
+ {0x1D200, 0x1D800},
+ {0x1E000, 0x20014},
+ {0x20100, 0x20124},
+ {0x21400, 0x217A8},
+ {0x21800, 0x21BA8},
+ {0x21C00, 0x21FA8},
+ {0x22000, 0x223A8},
+ {0x22400, 0x227A8},
+ {0x22800, 0x22BA8},
+ {0x22C00, 0x22FA8},
+ {0x23000, 0x233A8},
+ {0x24000, 0x24034},
+
+ /* EFUSE0,1,2 are disabled here
+ * because their state may be reset
+ *
+ * {0x24800, 0x24804},
+ * {0x25000, 0x25004},
+ * {0x25800, 0x25804},
+ */
+
+ {0x26000, 0x26064},
+ {0x27000, 0x27024},
+ {0x34000, 0x3400C},
+ {0x34400, 0x3445C},
+ {0x34800, 0x3485C},
+ {0x34C00, 0x34C5C},
+ {0x35000, 0x3505C},
+ {0x35400, 0x3545C},
+ {0x35800, 0x3585C},
+ {0x35C00, 0x35C5C},
+ {0x36000, 0x3605C},
+ {0x38000, 0x38064},
+ {0x38070, 0x380E0},
+ {0x3A000, 0x3A074},
+
+ /* The DBI window is skipped here; it can only be accessed when pcie
+ * is active (not in reset) and CORE_CTRL_PCIE_LTSSM_EN = 0 &&
+ * PCIE_CTRL_APP_LTSSM_ENABLE = 0.
+ * {0x3C000, 0x3C004},
+ */
+
+ {0x40000, 0x400A4},
+
+ /* SI register is skipped here
+ * because reading it will cause a bus hang
+ *
+ * {0x50000, 0x50018},
+ */
+
+ {0x80000, 0x8000C},
+ {0x80010, 0x80020},
+};
+
+static const struct ath10k_mem_section qca6174_hw30_register_sections[] = {
+ {0x800, 0x810},
+ {0x820, 0x82C},
+ {0x830, 0x8F4},
+ {0x90C, 0x91C},
+ {0xA14, 0xA18},
+ {0xA84, 0xA94},
+ {0xAA8, 0xAD4},
+ {0xADC, 0xB40},
+ {0x1000, 0x10A4},
+ {0x10BC, 0x111C},
+ {0x1134, 0x1138},
+ {0x1144, 0x114C},
+ {0x1150, 0x115C},
+ {0x1160, 0x1178},
+ {0x1240, 0x1260},
+ {0x2000, 0x207C},
+ {0x3000, 0x3014},
+ {0x4000, 0x4014},
+ {0x5000, 0x5124},
+ {0x6000, 0x6040},
+ {0x6080, 0x60CC},
+ {0x6100, 0x611C},
+ {0x6140, 0x61D8},
+ {0x6200, 0x6238},
+ {0x6240, 0x628C},
+ {0x62C0, 0x62EC},
+ {0x6380, 0x63E8},
+ {0x6400, 0x6440},
+ {0x6480, 0x64CC},
+ {0x6500, 0x651C},
+ {0x6540, 0x6580},
+ {0x6600, 0x6638},
+ {0x6640, 0x668C},
+ {0x66C0, 0x66EC},
+ {0x6780, 0x67E8},
+ {0x7080, 0x708C},
+ {0x70C0, 0x70C8},
+ {0x7400, 0x741C},
+ {0x7440, 0x7454},
+ {0x7800, 0x7818},
+ {0x8000, 0x8004},
+ {0x8010, 0x8064},
+ {0x8080, 0x8084},
+ {0x80A0, 0x80A4},
+ {0x80C0, 0x80C4},
+ {0x80E0, 0x80F4},
+ {0x8100, 0x8104},
+ {0x8110, 0x812C},
+ {0x9000, 0x9004},
+ {0x9800, 0x982C},
+ {0x9830, 0x9838},
+ {0x9840, 0x986C},
+ {0x9870, 0x9898},
+ {0x9A00, 0x9C00},
+ {0xD580, 0xD59C},
+ {0xF000, 0xF0E0},
+ {0xF140, 0xF190},
+ {0xF250, 0xF25C},
+ {0xF260, 0xF268},
+ {0xF26C, 0xF2A8},
+ {0x10008, 0x1000C},
+ {0x10014, 0x10018},
+ {0x1001C, 0x10020},
+ {0x10024, 0x10028},
+ {0x10030, 0x10034},
+ {0x10040, 0x10054},
+ {0x10058, 0x1007C},
+ {0x10080, 0x100C4},
+ {0x100C8, 0x10114},
+ {0x1012C, 0x10130},
+ {0x10138, 0x10144},
+ {0x10200, 0x10220},
+ {0x10230, 0x10250},
+ {0x10260, 0x10280},
+ {0x10290, 0x102B0},
+ {0x102C0, 0x102DC},
+ {0x102E0, 0x102F4},
+ {0x102FC, 0x1037C},
+ {0x10380, 0x10390},
+ {0x10800, 0x10828},
+ {0x10840, 0x10844},
+ {0x10880, 0x10884},
+ {0x108C0, 0x108E8},
+ {0x10900, 0x10928},
+ {0x10940, 0x10944},
+ {0x10980, 0x10984},
+ {0x109C0, 0x109E8},
+ {0x10A00, 0x10A28},
+ {0x10A40, 0x10A50},
+ {0x11000, 0x11028},
+ {0x11030, 0x11034},
+ {0x11038, 0x11068},
+ {0x11070, 0x11074},
+ {0x11078, 0x110A8},
+ {0x110B0, 0x110B4},
+ {0x110B8, 0x110E8},
+ {0x110F0, 0x110F4},
+ {0x110F8, 0x11128},
+ {0x11138, 0x11144},
+ {0x11178, 0x11180},
+ {0x111B8, 0x111C0},
+ {0x111F8, 0x11200},
+ {0x11238, 0x1123C},
+ {0x11270, 0x11274},
+ {0x11278, 0x1127C},
+ {0x112B0, 0x112B4},
+ {0x112B8, 0x112BC},
+ {0x112F0, 0x112F4},
+ {0x112F8, 0x112FC},
+ {0x11338, 0x1133C},
+ {0x11378, 0x1137C},
+ {0x113B8, 0x113BC},
+ {0x113F8, 0x113FC},
+ {0x11438, 0x11440},
+ {0x11478, 0x11480},
+ {0x114B8, 0x114BC},
+ {0x114F8, 0x114FC},
+ {0x11538, 0x1153C},
+ {0x11578, 0x1157C},
+ {0x115B8, 0x115BC},
+ {0x115F8, 0x115FC},
+ {0x11638, 0x1163C},
+ {0x11678, 0x1167C},
+ {0x116B8, 0x116BC},
+ {0x116F8, 0x116FC},
+ {0x11738, 0x1173C},
+ {0x11778, 0x1177C},
+ {0x117B8, 0x117BC},
+ {0x117F8, 0x117FC},
+ {0x17000, 0x1701C},
+ {0x17020, 0x170AC},
+ {0x18000, 0x18050},
+ {0x18054, 0x18074},
+ {0x18080, 0x180D4},
+ {0x180DC, 0x18104},
+ {0x18108, 0x1813C},
+ {0x18144, 0x18148},
+ {0x18168, 0x18174},
+ {0x18178, 0x18180},
+ {0x181C8, 0x181E0},
+ {0x181E4, 0x181E8},
+ {0x181EC, 0x1820C},
+ {0x1825C, 0x18280},
+ {0x18284, 0x18290},
+ {0x18294, 0x182A0},
+ {0x18300, 0x18304},
+ {0x18314, 0x18320},
+ {0x18328, 0x18350},
+ {0x1835C, 0x1836C},
+ {0x18370, 0x18390},
+ {0x18398, 0x183AC},
+ {0x183BC, 0x183D8},
+ {0x183DC, 0x183F4},
+ {0x18400, 0x186F4},
+ {0x186F8, 0x1871C},
+ {0x18720, 0x18790},
+ {0x19800, 0x19830},
+ {0x19834, 0x19840},
+ {0x19880, 0x1989C},
+ {0x198A4, 0x198B0},
+ {0x198BC, 0x19900},
+ {0x19C00, 0x19C88},
+ {0x19D00, 0x19D20},
+ {0x19E00, 0x19E7C},
+ {0x19E80, 0x19E94},
+ {0x19E98, 0x19EAC},
+ {0x19EB0, 0x19EBC},
+ {0x19F70, 0x19F74},
+ {0x19F80, 0x19F8C},
+ {0x19FA0, 0x19FB4},
+ {0x19FC0, 0x19FD8},
+ {0x1A000, 0x1A200},
+ {0x1A204, 0x1A210},
+ {0x1A228, 0x1A22C},
+ {0x1A230, 0x1A248},
+ {0x1A250, 0x1A270},
+ {0x1A280, 0x1A290},
+ {0x1A2A0, 0x1A2A4},
+ {0x1A2C0, 0x1A2EC},
+ {0x1A300, 0x1A3BC},
+ {0x1A3F0, 0x1A3F4},
+ {0x1A3F8, 0x1A434},
+ {0x1A438, 0x1A444},
+ {0x1A448, 0x1A468},
+ {0x1A580, 0x1A58C},
+ {0x1A644, 0x1A654},
+ {0x1A670, 0x1A698},
+ {0x1A6AC, 0x1A6B0},
+ {0x1A6D0, 0x1A6D4},
+ {0x1A6EC, 0x1A70C},
+ {0x1A710, 0x1A738},
+ {0x1A7C0, 0x1A7D0},
+ {0x1A7D4, 0x1A7D8},
+ {0x1A7DC, 0x1A7E4},
+ {0x1A7F0, 0x1A7F8},
+ {0x1A888, 0x1A89C},
+ {0x1A8A8, 0x1A8AC},
+ {0x1A8C0, 0x1A8DC},
+ {0x1A8F0, 0x1A8FC},
+ {0x1AE04, 0x1AE08},
+ {0x1AE18, 0x1AE24},
+ {0x1AF80, 0x1AF8C},
+ {0x1AFA0, 0x1AFB4},
+ {0x1B000, 0x1B200},
+ {0x1B284, 0x1B288},
+ {0x1B2D0, 0x1B2D8},
+ {0x1B2DC, 0x1B2EC},
+ {0x1B300, 0x1B340},
+ {0x1B374, 0x1B378},
+ {0x1B380, 0x1B384},
+ {0x1B388, 0x1B38C},
+ {0x1B404, 0x1B408},
+ {0x1B420, 0x1B428},
+ {0x1B440, 0x1B444},
+ {0x1B448, 0x1B44C},
+ {0x1B450, 0x1B458},
+ {0x1B45C, 0x1B468},
+ {0x1B584, 0x1B58C},
+ {0x1B68C, 0x1B690},
+ {0x1B6AC, 0x1B6B0},
+ {0x1B7F0, 0x1B7F8},
+ {0x1C800, 0x1CC00},
+ {0x1CE00, 0x1CE04},
+ {0x1CF80, 0x1CF84},
+ {0x1D200, 0x1D800},
+ {0x1E000, 0x20014},
+ {0x20100, 0x20124},
+ {0x21400, 0x217A8},
+ {0x21800, 0x21BA8},
+ {0x21C00, 0x21FA8},
+ {0x22000, 0x223A8},
+ {0x22400, 0x227A8},
+ {0x22800, 0x22BA8},
+ {0x22C00, 0x22FA8},
+ {0x23000, 0x233A8},
+ {0x24000, 0x24034},
+ {0x26000, 0x26064},
+ {0x27000, 0x27024},
+ {0x34000, 0x3400C},
+ {0x34400, 0x3445C},
+ {0x34800, 0x3485C},
+ {0x34C00, 0x34C5C},
+ {0x35000, 0x3505C},
+ {0x35400, 0x3545C},
+ {0x35800, 0x3585C},
+ {0x35C00, 0x35C5C},
+ {0x36000, 0x3605C},
+ {0x38000, 0x38064},
+ {0x38070, 0x380E0},
+ {0x3A000, 0x3A074},
+ {0x40000, 0x400A4},
+ {0x80000, 0x8000C},
+ {0x80010, 0x80020},
+};
+
+static const struct ath10k_mem_region qca6174_hw10_mem_regions[] = {
+ {
+ .type = ATH10K_MEM_REGION_TYPE_DRAM,
+ .start = 0x400000,
+ .len = 0x70000,
+ .name = "DRAM",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+ {
+ .type = ATH10K_MEM_REGION_TYPE_REG,
+
+ /* RTC_SOC_BASE_ADDRESS */
+ .start = 0x0,
+
+ /* WLAN_MBOX_BASE_ADDRESS - RTC_SOC_BASE_ADDRESS */
+ .len = 0x800 - 0x0,
+
+ .name = "REG_PART1",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+ {
+ .type = ATH10K_MEM_REGION_TYPE_REG,
+
+ /* STEREO_BASE_ADDRESS */
+ .start = 0x27000,
+
+ /* USB_BASE_ADDRESS - STEREO_BASE_ADDRESS */
+ .len = 0x60000 - 0x27000,
+
+ .name = "REG_PART2",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+};
+
+static const struct ath10k_mem_region qca6174_hw21_mem_regions[] = {
+ {
+ .type = ATH10K_MEM_REGION_TYPE_DRAM,
+ .start = 0x400000,
+ .len = 0x70000,
+ .name = "DRAM",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+ {
+ .type = ATH10K_MEM_REGION_TYPE_AXI,
+ .start = 0xa0000,
+ .len = 0x18000,
+ .name = "AXI",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+ {
+ .type = ATH10K_MEM_REGION_TYPE_REG,
+ .start = 0x800,
+ .len = 0x80020 - 0x800,
+ .name = "REG_TOTAL",
+ .section_table = {
+ .sections = qca6174_hw21_register_sections,
+ .size = ARRAY_SIZE(qca6174_hw21_register_sections),
+ },
+ },
+};
+
+static const struct ath10k_mem_region qca6174_hw30_sdio_mem_regions[] = {
+ {
+ .type = ATH10K_MEM_REGION_TYPE_DRAM,
+ .start = 0x400000,
+ .len = 0xa8000,
+ .name = "DRAM",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+ {
+ .type = ATH10K_MEM_REGION_TYPE_AXI,
+ .start = 0xa0000,
+ .len = 0x18000,
+ .name = "AXI",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+ {
+ .type = ATH10K_MEM_REGION_TYPE_IRAM1,
+ .start = 0x00980000,
+ .len = 0x00080000,
+ .name = "IRAM1",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+ {
+ .type = ATH10K_MEM_REGION_TYPE_IRAM2,
+ .start = 0x00a00000,
+ .len = 0x00040000,
+ .name = "IRAM2",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+ {
+ .type = ATH10K_MEM_REGION_TYPE_REG,
+ .start = 0x800,
+ .len = 0x80020 - 0x800,
+ .name = "REG_TOTAL",
+ .section_table = {
+ .sections = qca6174_hw30_sdio_register_sections,
+ .size = ARRAY_SIZE(qca6174_hw30_sdio_register_sections),
+ },
+ },
+};
+
+static const struct ath10k_mem_region qca6174_hw30_mem_regions[] = {
+ {
+ .type = ATH10K_MEM_REGION_TYPE_DRAM,
+ .start = 0x400000,
+ .len = 0xa8000,
+ .name = "DRAM",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+ {
+ .type = ATH10K_MEM_REGION_TYPE_AXI,
+ .start = 0xa0000,
+ .len = 0x18000,
+ .name = "AXI",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+ {
+ .type = ATH10K_MEM_REGION_TYPE_REG,
+ .start = 0x800,
+ .len = 0x80020 - 0x800,
+ .name = "REG_TOTAL",
+ .section_table = {
+ .sections = qca6174_hw30_register_sections,
+ .size = ARRAY_SIZE(qca6174_hw30_register_sections),
+ },
+ },
+
+ /* IRAM dump must be put last */
+ {
+ .type = ATH10K_MEM_REGION_TYPE_IRAM1,
+ .start = 0x00980000,
+ .len = 0x00080000,
+ .name = "IRAM1",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+ {
+ .type = ATH10K_MEM_REGION_TYPE_IRAM2,
+ .start = 0x00a00000,
+ .len = 0x00040000,
+ .name = "IRAM2",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+};
+
+static const struct ath10k_mem_region qca988x_hw20_mem_regions[] = {
+ {
+ .type = ATH10K_MEM_REGION_TYPE_DRAM,
+ .start = 0x400000,
+ .len = 0x50000,
+ .name = "DRAM",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+ {
+ .type = ATH10K_MEM_REGION_TYPE_REG,
+ .start = 0x4000,
+ .len = 0x2000,
+ .name = "REG_PART1",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+ {
+ .type = ATH10K_MEM_REGION_TYPE_REG,
+ .start = 0x8000,
+ .len = 0x58000,
+ .name = "REG_PART2",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+};
+
+static const struct ath10k_mem_region qca99x0_hw20_mem_regions[] = {
+ {
+ .type = ATH10K_MEM_REGION_TYPE_DRAM,
+ .start = 0x400000,
+ .len = 0x60000,
+ .name = "DRAM",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+ {
+ .type = ATH10K_MEM_REGION_TYPE_REG,
+ .start = 0x980000,
+ .len = 0x50000,
+ .name = "IRAM",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+ {
+ .type = ATH10K_MEM_REGION_TYPE_IOSRAM,
+ .start = 0xC0000,
+ .len = 0x40000,
+ .name = "SRAM",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+ {
+ .type = ATH10K_MEM_REGION_TYPE_IOREG,
+ .start = 0x30000,
+ .len = 0x7000,
+ .name = "APB REG 1",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+ {
+ .type = ATH10K_MEM_REGION_TYPE_IOREG,
+ .start = 0x3f000,
+ .len = 0x3000,
+ .name = "APB REG 2",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+ {
+ .type = ATH10K_MEM_REGION_TYPE_IOREG,
+ .start = 0x43000,
+ .len = 0x3000,
+ .name = "WIFI REG",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+ {
+ .type = ATH10K_MEM_REGION_TYPE_IOREG,
+ .start = 0x4A000,
+ .len = 0x5000,
+ .name = "CE REG",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+ {
+ .type = ATH10K_MEM_REGION_TYPE_IOREG,
+ .start = 0x80000,
+ .len = 0x6000,
+ .name = "SOC REG",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+};
+
+static const struct ath10k_mem_region qca9984_hw10_mem_regions[] = {
+ {
+ .type = ATH10K_MEM_REGION_TYPE_DRAM,
+ .start = 0x400000,
+ .len = 0x80000,
+ .name = "DRAM",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+ {
+ .type = ATH10K_MEM_REGION_TYPE_REG,
+ .start = 0x980000,
+ .len = 0x50000,
+ .name = "IRAM",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+ {
+ .type = ATH10K_MEM_REGION_TYPE_IOSRAM,
+ .start = 0xC0000,
+ .len = 0x40000,
+ .name = "SRAM",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+ {
+ .type = ATH10K_MEM_REGION_TYPE_IOREG,
+ .start = 0x30000,
+ .len = 0x7000,
+ .name = "APB REG 1",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+ {
+ .type = ATH10K_MEM_REGION_TYPE_IOREG,
+ .start = 0x3f000,
+ .len = 0x3000,
+ .name = "APB REG 2",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+ {
+ .type = ATH10K_MEM_REGION_TYPE_IOREG,
+ .start = 0x43000,
+ .len = 0x3000,
+ .name = "WIFI REG",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+ {
+ .type = ATH10K_MEM_REGION_TYPE_IOREG,
+ .start = 0x4A000,
+ .len = 0x5000,
+ .name = "CE REG",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+ {
+ .type = ATH10K_MEM_REGION_TYPE_IOREG,
+ .start = 0x80000,
+ .len = 0x6000,
+ .name = "SOC REG",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+};
+
+static const struct ath10k_mem_section ipq4019_soc_reg_range[] = {
+ {0x080000, 0x080004},
+ {0x080020, 0x080024},
+ {0x080028, 0x080050},
+ {0x0800d4, 0x0800ec},
+ {0x08010c, 0x080118},
+ {0x080284, 0x080290},
+ {0x0802a8, 0x0802b8},
+ {0x0802dc, 0x08030c},
+ {0x082000, 0x083fff}
+};
+
+static const struct ath10k_mem_region qca4019_hw10_mem_regions[] = {
+ {
+ .type = ATH10K_MEM_REGION_TYPE_DRAM,
+ .start = 0x400000,
+ .len = 0x68000,
+ .name = "DRAM",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+ {
+ .type = ATH10K_MEM_REGION_TYPE_REG,
+ .start = 0xC0000,
+ .len = 0x40000,
+ .name = "SRAM",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+ {
+ .type = ATH10K_MEM_REGION_TYPE_REG,
+ .start = 0x980000,
+ .len = 0x50000,
+ .name = "IRAM",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+ {
+ .type = ATH10K_MEM_REGION_TYPE_IOREG,
+ .start = 0x30000,
+ .len = 0x7000,
+ .name = "APB REG 1",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+ {
+ .type = ATH10K_MEM_REGION_TYPE_IOREG,
+ .start = 0x3f000,
+ .len = 0x3000,
+ .name = "APB REG 2",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+ {
+ .type = ATH10K_MEM_REGION_TYPE_IOREG,
+ .start = 0x43000,
+ .len = 0x3000,
+ .name = "WIFI REG",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+ {
+ .type = ATH10K_MEM_REGION_TYPE_IOREG,
+ .start = 0x4A000,
+ .len = 0x5000,
+ .name = "CE REG",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+ {
+ .type = ATH10K_MEM_REGION_TYPE_REG,
+ .start = 0x080000,
+ .len = 0x083fff - 0x080000,
+ .name = "REG_TOTAL",
+ .section_table = {
+ .sections = ipq4019_soc_reg_range,
+ .size = ARRAY_SIZE(ipq4019_soc_reg_range),
+ },
+ },
+};
+
+static const struct ath10k_mem_region wcn399x_hw10_mem_regions[] = {
+ {
+ /* MSA region start is not fixed, hence it is assigned at runtime */
+ .type = ATH10K_MEM_REGION_TYPE_MSA,
+ .len = 0x100000,
+ .name = "DRAM",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+};
+
+static const struct ath10k_hw_mem_layout hw_mem_layouts[] = {
+ {
+ .hw_id = QCA6174_HW_1_0_VERSION,
+ .hw_rev = ATH10K_HW_QCA6174,
+ .bus = ATH10K_BUS_PCI,
+ .region_table = {
+ .regions = qca6174_hw10_mem_regions,
+ .size = ARRAY_SIZE(qca6174_hw10_mem_regions),
+ },
+ },
+ {
+ .hw_id = QCA6174_HW_1_1_VERSION,
+ .hw_rev = ATH10K_HW_QCA6174,
+ .bus = ATH10K_BUS_PCI,
+ .region_table = {
+ .regions = qca6174_hw10_mem_regions,
+ .size = ARRAY_SIZE(qca6174_hw10_mem_regions),
+ },
+ },
+ {
+ .hw_id = QCA6174_HW_1_3_VERSION,
+ .hw_rev = ATH10K_HW_QCA6174,
+ .bus = ATH10K_BUS_PCI,
+ .region_table = {
+ .regions = qca6174_hw10_mem_regions,
+ .size = ARRAY_SIZE(qca6174_hw10_mem_regions),
+ },
+ },
+ {
+ .hw_id = QCA6174_HW_2_1_VERSION,
+ .hw_rev = ATH10K_HW_QCA6174,
+ .bus = ATH10K_BUS_PCI,
+ .region_table = {
+ .regions = qca6174_hw21_mem_regions,
+ .size = ARRAY_SIZE(qca6174_hw21_mem_regions),
+ },
+ },
+ {
+ .hw_id = QCA6174_HW_3_0_VERSION,
+ .hw_rev = ATH10K_HW_QCA6174,
+ .bus = ATH10K_BUS_PCI,
+ .region_table = {
+ .regions = qca6174_hw30_mem_regions,
+ .size = ARRAY_SIZE(qca6174_hw30_mem_regions),
+ },
+ },
+ {
+ .hw_id = QCA6174_HW_3_2_VERSION,
+ .hw_rev = ATH10K_HW_QCA6174,
+ .bus = ATH10K_BUS_PCI,
+ .region_table = {
+ .regions = qca6174_hw30_mem_regions,
+ .size = ARRAY_SIZE(qca6174_hw30_mem_regions),
+ },
+ },
+ {
+ .hw_id = QCA6174_HW_3_2_VERSION,
+ .hw_rev = ATH10K_HW_QCA6174,
+ .bus = ATH10K_BUS_SDIO,
+ .region_table = {
+ .regions = qca6174_hw30_sdio_mem_regions,
+ .size = ARRAY_SIZE(qca6174_hw30_sdio_mem_regions),
+ },
+ },
+ {
+ .hw_id = QCA9377_HW_1_1_DEV_VERSION,
+ .hw_rev = ATH10K_HW_QCA9377,
+ .bus = ATH10K_BUS_PCI,
+ .region_table = {
+ .regions = qca6174_hw30_mem_regions,
+ .size = ARRAY_SIZE(qca6174_hw30_mem_regions),
+ },
+ },
+ {
+ .hw_id = QCA988X_HW_2_0_VERSION,
+ .hw_rev = ATH10K_HW_QCA988X,
+ .bus = ATH10K_BUS_PCI,
+ .region_table = {
+ .regions = qca988x_hw20_mem_regions,
+ .size = ARRAY_SIZE(qca988x_hw20_mem_regions),
+ },
+ },
+ {
+ .hw_id = QCA9984_HW_1_0_DEV_VERSION,
+ .hw_rev = ATH10K_HW_QCA9984,
+ .bus = ATH10K_BUS_PCI,
+ .region_table = {
+ .regions = qca9984_hw10_mem_regions,
+ .size = ARRAY_SIZE(qca9984_hw10_mem_regions),
+ },
+ },
+ {
+ .hw_id = QCA9888_HW_2_0_DEV_VERSION,
+ .hw_rev = ATH10K_HW_QCA9888,
+ .bus = ATH10K_BUS_PCI,
+ .region_table = {
+ .regions = qca9984_hw10_mem_regions,
+ .size = ARRAY_SIZE(qca9984_hw10_mem_regions),
+ },
+ },
+ {
+ .hw_id = QCA99X0_HW_2_0_DEV_VERSION,
+ .hw_rev = ATH10K_HW_QCA99X0,
+ .bus = ATH10K_BUS_PCI,
+ .region_table = {
+ .regions = qca99x0_hw20_mem_regions,
+ .size = ARRAY_SIZE(qca99x0_hw20_mem_regions),
+ },
+ },
+ {
+ .hw_id = QCA4019_HW_1_0_DEV_VERSION,
+ .hw_rev = ATH10K_HW_QCA4019,
+ .bus = ATH10K_BUS_AHB,
+ .region_table = {
+ .regions = qca4019_hw10_mem_regions,
+ .size = ARRAY_SIZE(qca4019_hw10_mem_regions),
+ },
+ },
+ {
+ .hw_id = WCN3990_HW_1_0_DEV_VERSION,
+ .hw_rev = ATH10K_HW_WCN3990,
+ .bus = ATH10K_BUS_SNOC,
+ .region_table = {
+ .regions = wcn399x_hw10_mem_regions,
+ .size = ARRAY_SIZE(wcn399x_hw10_mem_regions),
+ },
+ },
+};
+
+static u32 ath10k_coredump_get_ramdump_size(struct ath10k *ar)
+{
+ const struct ath10k_hw_mem_layout *hw;
+ const struct ath10k_mem_region *mem_region;
+ size_t size = 0;
+ int i;
+
+ hw = ath10k_coredump_get_mem_layout(ar);
+
+ if (!hw)
+ return 0;
+
+ mem_region = &hw->region_table.regions[0];
+
+ for (i = 0; i < hw->region_table.size; i++) {
+ size += mem_region->len;
+ mem_region++;
+ }
+
+ /* reserve space for the headers */
+ size += hw->region_table.size * sizeof(struct ath10k_dump_ram_data_hdr);
+
+ /* make sure it is aligned to 16 bytes for the debug message printout */
+ size = ALIGN(size, 16);
+
+ return size;
+}
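+
+/* Worked example (illustrative): for qca988x_hw20_mem_regions the region
+ * lengths sum to 0x50000 + 0x2000 + 0x58000 = 0xaa000 bytes. Adding three
+ * ath10k_dump_ram_data_hdr headers (three __le32 fields, 12 bytes each)
+ * and rounding up to 16 gives a 0xaa030 byte ramdump buffer.
+ */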
+
+const struct ath10k_hw_mem_layout *ath10k_coredump_get_mem_layout(struct ath10k *ar)
+{
+ if (!test_bit(ATH10K_FW_CRASH_DUMP_RAM_DATA, &ath10k_coredump_mask))
+ return NULL;
+
+ return _ath10k_coredump_get_mem_layout(ar);
+}
+EXPORT_SYMBOL(ath10k_coredump_get_mem_layout);
+
+const struct ath10k_hw_mem_layout *_ath10k_coredump_get_mem_layout(struct ath10k *ar)
+{
+ int i;
+
+ if (WARN_ON(ar->target_version == 0))
+ return NULL;
+
+ for (i = 0; i < ARRAY_SIZE(hw_mem_layouts); i++) {
+ if (ar->target_version == hw_mem_layouts[i].hw_id &&
+ ar->hw_rev == hw_mem_layouts[i].hw_rev &&
+ hw_mem_layouts[i].bus == ar->hif.bus)
+ return &hw_mem_layouts[i];
+ }
+
+ return NULL;
+}
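+
+/* For example, a QCA6174 hw3.2 target is matched on the full
+ * (hw_id, hw_rev, bus) triple, so the same target_version resolves to
+ * qca6174_hw30_mem_regions on PCI but to qca6174_hw30_sdio_mem_regions
+ * on SDIO (see hw_mem_layouts above).
+ */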
+
+struct ath10k_fw_crash_data *ath10k_coredump_new(struct ath10k *ar)
+{
+ struct ath10k_fw_crash_data *crash_data = ar->coredump.fw_crash_data;
+
+ lockdep_assert_held(&ar->dump_mutex);
+
+ if (ath10k_coredump_mask == 0)
+ /* coredump disabled */
+ return NULL;
+
+ guid_gen(&crash_data->guid);
+ ktime_get_real_ts64(&crash_data->timestamp);
+
+ return crash_data;
+}
+EXPORT_SYMBOL(ath10k_coredump_new);
+
+static struct ath10k_dump_file_data *ath10k_coredump_build(struct ath10k *ar)
+{
+ struct ath10k_fw_crash_data *crash_data = ar->coredump.fw_crash_data;
+ struct ath10k_ce_crash_hdr *ce_hdr;
+ struct ath10k_dump_file_data *dump_data;
+ struct ath10k_tlv_dump_data *dump_tlv;
+ size_t hdr_len = sizeof(*dump_data);
+ size_t len, sofar = 0;
+ unsigned char *buf;
+
+ len = hdr_len;
+
+ if (test_bit(ATH10K_FW_CRASH_DUMP_REGISTERS, &ath10k_coredump_mask))
+ len += sizeof(*dump_tlv) + sizeof(crash_data->registers);
+
+ if (test_bit(ATH10K_FW_CRASH_DUMP_CE_DATA, &ath10k_coredump_mask))
+ len += sizeof(*dump_tlv) + sizeof(*ce_hdr) +
+ CE_COUNT * sizeof(ce_hdr->entries[0]);
+
+ if (test_bit(ATH10K_FW_CRASH_DUMP_RAM_DATA, &ath10k_coredump_mask))
+ len += sizeof(*dump_tlv) + crash_data->ramdump_buf_len;
+
+ sofar += hdr_len;
+
+ /* This is going to get big when we start dumping FW RAM and such,
+ * so go ahead and use vmalloc.
+ */
+ buf = vzalloc(len);
+ if (!buf)
+ return NULL;
+
+ mutex_lock(&ar->dump_mutex);
+
+ dump_data = (struct ath10k_dump_file_data *)(buf);
+ strscpy(dump_data->df_magic, "ATH10K-FW-DUMP",
+ sizeof(dump_data->df_magic));
+ dump_data->len = cpu_to_le32(len);
+
+ dump_data->version = cpu_to_le32(ATH10K_FW_CRASH_DUMP_VERSION);
+
+ guid_copy(&dump_data->guid, &crash_data->guid);
+ dump_data->chip_id = cpu_to_le32(ar->bus_param.chip_id);
+ dump_data->bus_type = cpu_to_le32(0);
+ dump_data->target_version = cpu_to_le32(ar->target_version);
+ dump_data->fw_version_major = cpu_to_le32(ar->fw_version_major);
+ dump_data->fw_version_minor = cpu_to_le32(ar->fw_version_minor);
+ dump_data->fw_version_release = cpu_to_le32(ar->fw_version_release);
+ dump_data->fw_version_build = cpu_to_le32(ar->fw_version_build);
+ dump_data->phy_capability = cpu_to_le32(ar->phy_capability);
+ dump_data->hw_min_tx_power = cpu_to_le32(ar->hw_min_tx_power);
+ dump_data->hw_max_tx_power = cpu_to_le32(ar->hw_max_tx_power);
+ dump_data->ht_cap_info = cpu_to_le32(ar->ht_cap_info);
+ dump_data->vht_cap_info = cpu_to_le32(ar->vht_cap_info);
+ dump_data->num_rf_chains = cpu_to_le32(ar->num_rf_chains);
+
+ strscpy(dump_data->fw_ver, ar->hw->wiphy->fw_version,
+ sizeof(dump_data->fw_ver));
+
+ dump_data->kernel_ver_code = 0;
+ strscpy(dump_data->kernel_ver, init_utsname()->release,
+ sizeof(dump_data->kernel_ver));
+
+ dump_data->tv_sec = cpu_to_le64(crash_data->timestamp.tv_sec);
+ dump_data->tv_nsec = cpu_to_le64(crash_data->timestamp.tv_nsec);
+
+ if (test_bit(ATH10K_FW_CRASH_DUMP_REGISTERS, &ath10k_coredump_mask)) {
+ dump_tlv = (struct ath10k_tlv_dump_data *)(buf + sofar);
+ dump_tlv->type = cpu_to_le32(ATH10K_FW_CRASH_DUMP_REGISTERS);
+ dump_tlv->tlv_len = cpu_to_le32(sizeof(crash_data->registers));
+ memcpy(dump_tlv->tlv_data, &crash_data->registers,
+ sizeof(crash_data->registers));
+ sofar += sizeof(*dump_tlv) + sizeof(crash_data->registers);
+ }
+
+ if (test_bit(ATH10K_FW_CRASH_DUMP_CE_DATA, &ath10k_coredump_mask)) {
+ dump_tlv = (struct ath10k_tlv_dump_data *)(buf + sofar);
+ dump_tlv->type = cpu_to_le32(ATH10K_FW_CRASH_DUMP_CE_DATA);
+ dump_tlv->tlv_len = cpu_to_le32(struct_size(ce_hdr, entries,
+ CE_COUNT));
+ ce_hdr = (struct ath10k_ce_crash_hdr *)(dump_tlv->tlv_data);
+ ce_hdr->ce_count = cpu_to_le32(CE_COUNT);
+ memset(ce_hdr->reserved, 0, sizeof(ce_hdr->reserved));
+ memcpy(ce_hdr->entries, crash_data->ce_crash_data,
+ CE_COUNT * sizeof(ce_hdr->entries[0]));
+ sofar += sizeof(*dump_tlv) + sizeof(*ce_hdr) +
+ CE_COUNT * sizeof(ce_hdr->entries[0]);
+ }
+
+ /* Gather ram dump */
+ if (test_bit(ATH10K_FW_CRASH_DUMP_RAM_DATA, &ath10k_coredump_mask)) {
+ dump_tlv = (struct ath10k_tlv_dump_data *)(buf + sofar);
+ dump_tlv->type = cpu_to_le32(ATH10K_FW_CRASH_DUMP_RAM_DATA);
+ dump_tlv->tlv_len = cpu_to_le32(crash_data->ramdump_buf_len);
+ if (crash_data->ramdump_buf_len) {
+ memcpy(dump_tlv->tlv_data, crash_data->ramdump_buf,
+ crash_data->ramdump_buf_len);
+ sofar += sizeof(*dump_tlv) + crash_data->ramdump_buf_len;
+ }
+ }
+
+ mutex_unlock(&ar->dump_mutex);
+
+ return dump_data;
+}
+
+int ath10k_coredump_submit(struct ath10k *ar)
+{
+ struct ath10k_dump_file_data *dump;
+
+ if (ath10k_coredump_mask == 0)
+ /* coredump disabled */
+ return 0;
+
+ dump = ath10k_coredump_build(ar);
+ if (!dump) {
+ ath10k_warn(ar, "no crash dump data found for devcoredump");
+ return -ENODATA;
+ }
+
+ dev_coredumpv(ar->dev, dump, le32_to_cpu(dump->len), GFP_KERNEL);
+
+ return 0;
+}
+
+int ath10k_coredump_create(struct ath10k *ar)
+{
+ if (ath10k_coredump_mask == 0)
+ /* coredump disabled */
+ return 0;
+
+ ar->coredump.fw_crash_data = vzalloc(sizeof(*ar->coredump.fw_crash_data));
+ if (!ar->coredump.fw_crash_data)
+ return -ENOMEM;
+
+ return 0;
+}
+
+int ath10k_coredump_register(struct ath10k *ar)
+{
+ struct ath10k_fw_crash_data *crash_data = ar->coredump.fw_crash_data;
+
+ if (test_bit(ATH10K_FW_CRASH_DUMP_RAM_DATA, &ath10k_coredump_mask)) {
+ crash_data->ramdump_buf_len = ath10k_coredump_get_ramdump_size(ar);
+
+ if (!crash_data->ramdump_buf_len)
+ return 0;
+
+ crash_data->ramdump_buf = vzalloc(crash_data->ramdump_buf_len);
+ if (!crash_data->ramdump_buf)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+void ath10k_coredump_unregister(struct ath10k *ar)
+{
+ struct ath10k_fw_crash_data *crash_data = ar->coredump.fw_crash_data;
+
+ vfree(crash_data->ramdump_buf);
+}
+
+void ath10k_coredump_destroy(struct ath10k *ar)
+{
+ if (ar->coredump.fw_crash_data->ramdump_buf) {
+ vfree(ar->coredump.fw_crash_data->ramdump_buf);
+ ar->coredump.fw_crash_data->ramdump_buf = NULL;
+ ar->coredump.fw_crash_data->ramdump_buf_len = 0;
+ }
+
+ vfree(ar->coredump.fw_crash_data);
+ ar->coredump.fw_crash_data = NULL;
+}
diff --git a/drivers/net/wireless/ath/ath10k/coredump.h b/drivers/net/wireless/ath/ath10k/coredump.h
new file mode 100644
index 000000000000..8d274e0f374b
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/coredump.h
@@ -0,0 +1,231 @@
+/* SPDX-License-Identifier: ISC */
+/*
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _COREDUMP_H_
+#define _COREDUMP_H_
+
+#include "core.h"
+
+#define ATH10K_FW_CRASH_DUMP_VERSION 1
+
+/**
+ * enum ath10k_fw_crash_dump_type - types of data in the dump file
+ * @ATH10K_FW_CRASH_DUMP_REGISTERS: Register crash dump in binary format
+ * @ATH10K_FW_CRASH_DUMP_CE_DATA: Copy Engine crash dump data
+ * @ATH10K_FW_CRASH_DUMP_RAM_DATA: RAM crash dump data, contains multiple
+ * struct ath10k_dump_ram_data_hdr
+ * @ATH10K_FW_CRASH_DUMP_MAX: Maximum enumeration
+ */
+enum ath10k_fw_crash_dump_type {
+ ATH10K_FW_CRASH_DUMP_REGISTERS = 0,
+ ATH10K_FW_CRASH_DUMP_CE_DATA = 1,
+
+ /* contains multiple struct ath10k_dump_ram_data_hdr */
+ ATH10K_FW_CRASH_DUMP_RAM_DATA = 2,
+
+ ATH10K_FW_CRASH_DUMP_MAX,
+};
+
+struct ath10k_tlv_dump_data {
+ /* see ath10k_fw_crash_dump_type above */
+ __le32 type;
+
+ /* in bytes */
+ __le32 tlv_len;
+
+ /* pad to 32-bit boundaries as needed */
+ u8 tlv_data[];
+} __packed;
+
+struct ath10k_dump_file_data {
+ /* dump file information */
+
+ /* "ATH10K-FW-DUMP" */
+ char df_magic[16];
+
+ __le32 len;
+
+ /* file dump version */
+ __le32 version;
+
+ /* some info we can get from ath10k struct that might help */
+
+ guid_t guid;
+
+ __le32 chip_id;
+
+ /* 0 for now, in place for later hardware */
+ __le32 bus_type;
+
+ __le32 target_version;
+ __le32 fw_version_major;
+ __le32 fw_version_minor;
+ __le32 fw_version_release;
+ __le32 fw_version_build;
+ __le32 phy_capability;
+ __le32 hw_min_tx_power;
+ __le32 hw_max_tx_power;
+ __le32 ht_cap_info;
+ __le32 vht_cap_info;
+ __le32 num_rf_chains;
+
+ /* firmware version string */
+ char fw_ver[ETHTOOL_FWVERS_LEN];
+
+ /* Kernel related information */
+
+ /* time-of-day stamp */
+ __le64 tv_sec;
+
+ /* time-of-day stamp, nano-seconds */
+ __le64 tv_nsec;
+
+ /* LINUX_VERSION_CODE */
+ __le32 kernel_ver_code;
+
+ /* kernel version string (utsname release) */
+ char kernel_ver[64];
+
+ /* room for growth w/out changing binary format */
+ u8 unused[128];
+
+ /* struct ath10k_tlv_dump_data + more */
+ u8 data[];
+} __packed;
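+
+/* Illustrative layout of a complete dump file (which TLVs are present
+ * depends on the enabled ath10k_coredump_mask bits):
+ *
+ * struct ath10k_dump_file_data (header above)
+ * struct ath10k_tlv_dump_data, type REGISTERS, register dump payload
+ * struct ath10k_tlv_dump_data, type CE_DATA, ath10k_ce_crash_hdr + entries
+ * struct ath10k_tlv_dump_data, type RAM_DATA, ram data header(s) + payload
+ */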
+
+struct ath10k_dump_ram_data_hdr {
+ /* enum ath10k_mem_region_type */
+ __le32 region_type;
+
+ __le32 start;
+
+ /* length of payload data, not including this header */
+ __le32 length;
+
+ u8 data[];
+};
+
+/* magic number used to fill the holes in regions that the section tables leave uncopied */
+#define ATH10K_MAGIC_NOT_COPIED 0xAA
+
+/* part of user space ABI */
+enum ath10k_mem_region_type {
+ ATH10K_MEM_REGION_TYPE_REG = 1,
+ ATH10K_MEM_REGION_TYPE_DRAM = 2,
+ ATH10K_MEM_REGION_TYPE_AXI = 3,
+ ATH10K_MEM_REGION_TYPE_IRAM1 = 4,
+ ATH10K_MEM_REGION_TYPE_IRAM2 = 5,
+ ATH10K_MEM_REGION_TYPE_IOSRAM = 6,
+ ATH10K_MEM_REGION_TYPE_IOREG = 7,
+ ATH10K_MEM_REGION_TYPE_MSA = 8,
+};
+
+/* Define a section of a region which should be copied. Not all parts of
+ * the memory are safe to copy (some registers, for example), so sections
+ * can be used to define what is safe to copy.
+ *
+ * To minimize the size of the array, the list must obey the format:
+ * '{start0,stop0},{start1,stop1},{start2,stop2}....' The values must
+ * also satisfy 'start0 < stop0 < start1 < stop1 < start2 < ...',
+ * otherwise the dump processing may fail.
+ */
+struct ath10k_mem_section {
+ u32 start;
+ u32 end;
+};
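+
+/* A minimal illustrative section table (hypothetical addresses) obeying
+ * the constraints above:
+ *
+ * static const struct ath10k_mem_section example_sections[] = {
+ * {0x080000, 0x080004},
+ * {0x082000, 0x083fff},
+ * };
+ *
+ * The gap 0x080004..0x082000 is not copied and appears in the dump
+ * filled with ATH10K_MAGIC_NOT_COPIED.
+ */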
+
+/* One region of a memory layout. If the sections field is NULL, the
+ * entire region is copied. If sections is non-NULL, only the areas
+ * specified in sections are copied and the rest are filled with
+ * ATH10K_MAGIC_NOT_COPIED.
+ */
+struct ath10k_mem_region {
+ enum ath10k_mem_region_type type;
+ u32 start;
+ u32 len;
+
+ const char *name;
+
+ struct {
+ const struct ath10k_mem_section *sections;
+ u32 size;
+ } section_table;
+};
+
+/* Contains the memory layout of a hardware version identified with the
+ * hardware id, split into regions.
+ */
+struct ath10k_hw_mem_layout {
+ u32 hw_id;
+ u32 hw_rev;
+ enum ath10k_bus bus;
+
+ struct {
+ const struct ath10k_mem_region *regions;
+ int size;
+ } region_table;
+};
+
+/* FIXME: where to put this? */
+extern unsigned long ath10k_coredump_mask;
+
+#ifdef CONFIG_DEV_COREDUMP
+
+int ath10k_coredump_submit(struct ath10k *ar);
+struct ath10k_fw_crash_data *ath10k_coredump_new(struct ath10k *ar);
+int ath10k_coredump_create(struct ath10k *ar);
+int ath10k_coredump_register(struct ath10k *ar);
+void ath10k_coredump_unregister(struct ath10k *ar);
+void ath10k_coredump_destroy(struct ath10k *ar);
+
+const struct ath10k_hw_mem_layout *_ath10k_coredump_get_mem_layout(struct ath10k *ar);
+const struct ath10k_hw_mem_layout *ath10k_coredump_get_mem_layout(struct ath10k *ar);
+
+#else /* CONFIG_DEV_COREDUMP */
+
+static inline int ath10k_coredump_submit(struct ath10k *ar)
+{
+ return 0;
+}
+
+static inline struct ath10k_fw_crash_data *ath10k_coredump_new(struct ath10k *ar)
+{
+ return NULL;
+}
+
+static inline int ath10k_coredump_create(struct ath10k *ar)
+{
+ return 0;
+}
+
+static inline int ath10k_coredump_register(struct ath10k *ar)
+{
+ return 0;
+}
+
+static inline void ath10k_coredump_unregister(struct ath10k *ar)
+{
+}
+
+static inline void ath10k_coredump_destroy(struct ath10k *ar)
+{
+}
+
+static inline const struct ath10k_hw_mem_layout *
+ath10k_coredump_get_mem_layout(struct ath10k *ar)
+{
+ return NULL;
+}
+
+static inline const struct ath10k_hw_mem_layout *
+_ath10k_coredump_get_mem_layout(struct ath10k *ar)
+{
+ return NULL;
+}
+
+#endif /* CONFIG_DEV_COREDUMP */
+
+#endif /* _COREDUMP_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
new file mode 100644
index 000000000000..b7520220465a
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -0,0 +1,2710 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#include <linux/module.h>
+#include <linux/debugfs.h>
+#include <linux/export.h>
+#include <linux/vmalloc.h>
+#include <linux/crc32.h>
+#include <linux/firmware.h>
+#include <linux/kstrtox.h>
+
+#include "core.h"
+#include "debug.h"
+#include "hif.h"
+#include "wmi-ops.h"
+
+/* ms */
+#define ATH10K_DEBUG_HTT_STATS_INTERVAL 1000
+
+#define ATH10K_DEBUG_CAL_DATA_LEN 12064
+
+void ath10k_info(struct ath10k *ar, const char *fmt, ...)
+{
+ struct va_format vaf = {
+ .fmt = fmt,
+ };
+ va_list args;
+
+ va_start(args, fmt);
+ vaf.va = &args;
+ dev_info(ar->dev, "%pV", &vaf);
+ trace_ath10k_log_info(ar, &vaf);
+ va_end(args);
+}
+EXPORT_SYMBOL(ath10k_info);
+
+void ath10k_debug_print_hwfw_info(struct ath10k *ar)
+{
+ const struct firmware *firmware;
+ char fw_features[128] = {};
+ u32 crc = 0;
+
+ ath10k_core_get_fw_features_str(ar, fw_features, sizeof(fw_features));
+
+ ath10k_info(ar, "%s target 0x%08x chip_id 0x%08x sub %04x:%04x",
+ ar->hw_params.name,
+ ar->target_version,
+ ar->bus_param.chip_id,
+ ar->id.subsystem_vendor, ar->id.subsystem_device);
+
+ ath10k_info(ar, "kconfig debug %d debugfs %d tracing %d dfs %d testmode %d\n",
+ IS_ENABLED(CONFIG_ATH10K_DEBUG),
+ IS_ENABLED(CONFIG_ATH10K_DEBUGFS),
+ IS_ENABLED(CONFIG_ATH10K_TRACING),
+ IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED),
+ IS_ENABLED(CONFIG_NL80211_TESTMODE));
+
+ firmware = ar->normal_mode_fw.fw_file.firmware;
+ if (firmware)
+ crc = crc32_le(0, firmware->data, firmware->size);
+
+ ath10k_info(ar, "firmware ver %s api %d features %s crc32 %08x\n",
+ ar->hw->wiphy->fw_version,
+ ar->fw_api,
+ fw_features,
+ crc);
+}
+
+void ath10k_debug_print_board_info(struct ath10k *ar)
+{
+ char boardinfo[100];
+ const struct firmware *board;
+ u32 crc;
+
+ if (ar->id.bmi_ids_valid)
+ scnprintf(boardinfo, sizeof(boardinfo), "%d:%d",
+ ar->id.bmi_chip_id, ar->id.bmi_board_id);
+ else
+ scnprintf(boardinfo, sizeof(boardinfo), "N/A");
+
+ board = ar->normal_mode_fw.board;
+ if (!IS_ERR_OR_NULL(board))
+ crc = crc32_le(0, board->data, board->size);
+ else
+ crc = 0;
+
+ ath10k_info(ar, "board_file api %d bmi_id %s crc32 %08x",
+ ar->bd_api,
+ boardinfo,
+ crc);
+}
+
+void ath10k_debug_print_boot_info(struct ath10k *ar)
+{
+ ath10k_info(ar, "htt-ver %d.%d wmi-op %d htt-op %d cal %s max-sta %d raw %d hwcrypto %d\n",
+ ar->htt.target_version_major,
+ ar->htt.target_version_minor,
+ ar->normal_mode_fw.fw_file.wmi_op_version,
+ ar->normal_mode_fw.fw_file.htt_op_version,
+ ath10k_cal_mode_str(ar->cal_mode),
+ ar->max_num_stations,
+ test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags),
+ !test_bit(ATH10K_FLAG_HW_CRYPTO_DISABLED, &ar->dev_flags));
+}
+
+void ath10k_print_driver_info(struct ath10k *ar)
+{
+ ath10k_debug_print_hwfw_info(ar);
+ ath10k_debug_print_board_info(ar);
+ ath10k_debug_print_boot_info(ar);
+}
+EXPORT_SYMBOL(ath10k_print_driver_info);
+
+void ath10k_err(struct ath10k *ar, const char *fmt, ...)
+{
+ struct va_format vaf = {
+ .fmt = fmt,
+ };
+ va_list args;
+
+ va_start(args, fmt);
+ vaf.va = &args;
+ dev_err(ar->dev, "%pV", &vaf);
+ trace_ath10k_log_err(ar, &vaf);
+ va_end(args);
+}
+EXPORT_SYMBOL(ath10k_err);
+
+void ath10k_warn(struct ath10k *ar, const char *fmt, ...)
+{
+ struct va_format vaf = {
+ .fmt = fmt,
+ };
+ va_list args;
+
+ va_start(args, fmt);
+ vaf.va = &args;
+ dev_warn_ratelimited(ar->dev, "%pV", &vaf);
+ trace_ath10k_log_warn(ar, &vaf);
+
+ va_end(args);
+}
+EXPORT_SYMBOL(ath10k_warn);
+
+#ifdef CONFIG_ATH10K_DEBUGFS
+
+static ssize_t ath10k_read_wmi_services(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ char *buf;
+ size_t len = 0, buf_len = 8192;
+ const char *name;
+ ssize_t ret_cnt;
+ bool enabled;
+ int i;
+
+ buf = kzalloc(buf_len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ mutex_lock(&ar->conf_mutex);
+
+ spin_lock_bh(&ar->data_lock);
+ for (i = 0; i < WMI_SERVICE_MAX; i++) {
+ enabled = test_bit(i, ar->wmi.svc_map);
+ name = wmi_service_name(i);
+
+ if (!name) {
+ if (enabled)
+ len += scnprintf(buf + len, buf_len - len,
+ "%-40s %s (bit %d)\n",
+ "unknown", "enabled", i);
+
+ continue;
+ }
+
+ len += scnprintf(buf + len, buf_len - len,
+ "%-40s %s\n",
+ name, enabled ? "enabled" : "-");
+ }
+ spin_unlock_bh(&ar->data_lock);
+
+ ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+
+ mutex_unlock(&ar->conf_mutex);
+
+ kfree(buf);
+ return ret_cnt;
+}
+
+static const struct file_operations fops_wmi_services = {
+ .read = ath10k_read_wmi_services,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static void ath10k_fw_stats_pdevs_free(struct list_head *head)
+{
+ struct ath10k_fw_stats_pdev *i, *tmp;
+
+ list_for_each_entry_safe(i, tmp, head, list) {
+ list_del(&i->list);
+ kfree(i);
+ }
+}
+
+static void ath10k_fw_stats_vdevs_free(struct list_head *head)
+{
+ struct ath10k_fw_stats_vdev *i, *tmp;
+
+ list_for_each_entry_safe(i, tmp, head, list) {
+ list_del(&i->list);
+ kfree(i);
+ }
+}
+
+static void ath10k_fw_stats_peers_free(struct list_head *head)
+{
+ struct ath10k_fw_stats_peer *i, *tmp;
+
+ list_for_each_entry_safe(i, tmp, head, list) {
+ list_del(&i->list);
+ kfree(i);
+ }
+}
+
+static void ath10k_fw_extd_stats_peers_free(struct list_head *head)
+{
+ struct ath10k_fw_extd_stats_peer *i, *tmp;
+
+ list_for_each_entry_safe(i, tmp, head, list) {
+ list_del(&i->list);
+ kfree(i);
+ }
+}
+
+static void ath10k_debug_fw_stats_reset(struct ath10k *ar)
+{
+ spin_lock_bh(&ar->data_lock);
+ ar->debug.fw_stats_done = false;
+ ar->debug.fw_stats.extended = false;
+ ath10k_fw_stats_pdevs_free(&ar->debug.fw_stats.pdevs);
+ ath10k_fw_stats_vdevs_free(&ar->debug.fw_stats.vdevs);
+ ath10k_fw_stats_peers_free(&ar->debug.fw_stats.peers);
+ ath10k_fw_extd_stats_peers_free(&ar->debug.fw_stats.peers_extd);
+ spin_unlock_bh(&ar->data_lock);
+}
+
+void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct ath10k_fw_stats stats = {};
+ bool is_start, is_started, is_end;
+ size_t num_peers;
+ size_t num_vdevs;
+ int ret;
+
+ INIT_LIST_HEAD(&stats.pdevs);
+ INIT_LIST_HEAD(&stats.vdevs);
+ INIT_LIST_HEAD(&stats.peers);
+ INIT_LIST_HEAD(&stats.peers_extd);
+
+ spin_lock_bh(&ar->data_lock);
+ ret = ath10k_wmi_pull_fw_stats(ar, skb, &stats);
+ if (ret) {
+ ath10k_warn(ar, "failed to pull fw stats: %d\n", ret);
+ goto free;
+ }
+
+ /* Stat data may exceed the htc-wmi buffer limit. In such a case the
+ * firmware splits the stats data and delivers it in a ping-pong fashion
+ * of request-cmd/update-event exchanges.
+ *
+ * However there is no explicit end-of-data. Instead, start-of-data is
+ * used as an implicit one. This works as follows:
+ * a) discard stat update events until one with pdev stats is
+ * delivered - this skips the session started at the end of (b)
+ * b) consume stat update events until another one with pdev stats is
+ * delivered, which is treated as end-of-data and is itself discarded
+ */
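+ /* Example with a split delivery:
+ * event 1 (contains pdev stats) -> is_start, pdev list spliced
+ * event 2 (peers/vdevs only) -> is_started && !is_end, stats spliced
+ * event 3 (contains pdev stats) -> is_end, fw_stats_done set and the
+ * event payload discarded
+ */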
+ if (ath10k_peer_stats_enabled(ar))
+ ath10k_sta_update_rx_duration(ar, &stats);
+
+ if (ar->debug.fw_stats_done) {
+ if (!ath10k_peer_stats_enabled(ar))
+ ath10k_warn(ar, "received unsolicited stats update event\n");
+
+ goto free;
+ }
+
+ num_peers = list_count_nodes(&ar->debug.fw_stats.peers);
+ num_vdevs = list_count_nodes(&ar->debug.fw_stats.vdevs);
+ is_start = (list_empty(&ar->debug.fw_stats.pdevs) &&
+ !list_empty(&stats.pdevs));
+ is_end = (!list_empty(&ar->debug.fw_stats.pdevs) &&
+ !list_empty(&stats.pdevs));
+
+ if (is_start)
+ list_splice_tail_init(&stats.pdevs, &ar->debug.fw_stats.pdevs);
+
+ if (is_end)
+ ar->debug.fw_stats_done = true;
+
+ if (stats.extended)
+ ar->debug.fw_stats.extended = true;
+
+ is_started = !list_empty(&ar->debug.fw_stats.pdevs);
+
+ if (is_started && !is_end) {
+ if (num_peers >= ATH10K_MAX_NUM_PEER_IDS) {
+ /* Although this is unlikely, impose a sane limit to
+ * prevent the firmware from DoS-ing the host.
+ */
+ ath10k_fw_stats_peers_free(&ar->debug.fw_stats.peers);
+ ath10k_fw_extd_stats_peers_free(&ar->debug.fw_stats.peers_extd);
+ ath10k_warn(ar, "dropping fw peer stats\n");
+ goto free;
+ }
+
+ if (num_vdevs >= BITS_PER_LONG) {
+ ath10k_fw_stats_vdevs_free(&ar->debug.fw_stats.vdevs);
+ ath10k_warn(ar, "dropping fw vdev stats\n");
+ goto free;
+ }
+
+ if (!list_empty(&stats.peers))
+ list_splice_tail_init(&stats.peers_extd,
+ &ar->debug.fw_stats.peers_extd);
+
+ list_splice_tail_init(&stats.peers, &ar->debug.fw_stats.peers);
+ list_splice_tail_init(&stats.vdevs, &ar->debug.fw_stats.vdevs);
+ }
+
+ complete(&ar->debug.fw_stats_complete);
+
+free:
+ /* In some cases lists have been spliced and cleared. Free up
+ * resources if that is not the case.
+ */
+ ath10k_fw_stats_pdevs_free(&stats.pdevs);
+ ath10k_fw_stats_vdevs_free(&stats.vdevs);
+ ath10k_fw_stats_peers_free(&stats.peers);
+ ath10k_fw_extd_stats_peers_free(&stats.peers_extd);
+
+ spin_unlock_bh(&ar->data_lock);
+}
+
+int ath10k_debug_fw_stats_request(struct ath10k *ar)
+{
+ unsigned long timeout, time_left;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ timeout = jiffies + msecs_to_jiffies(1000);
+
+ ath10k_debug_fw_stats_reset(ar);
+
+ for (;;) {
+ if (time_after(jiffies, timeout))
+ return -ETIMEDOUT;
+
+ reinit_completion(&ar->debug.fw_stats_complete);
+
+ ret = ath10k_wmi_request_stats(ar, ar->fw_stats_req_mask);
+ if (ret) {
+ ath10k_warn(ar, "could not request stats (%d)\n", ret);
+ return ret;
+ }
+
+ time_left =
+ wait_for_completion_timeout(&ar->debug.fw_stats_complete,
+ 1 * HZ);
+ if (!time_left)
+ return -ETIMEDOUT;
+
+ spin_lock_bh(&ar->data_lock);
+ if (ar->debug.fw_stats_done) {
+ spin_unlock_bh(&ar->data_lock);
+ break;
+ }
+ spin_unlock_bh(&ar->data_lock);
+ }
+
+ return 0;
+}
+
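+/* The fw_stats file snapshots the firmware statistics into a vmalloc'ed
+ * buffer at open time; reads serve that snapshot and release frees it,
+ * so a single open sees one consistent set of stats.
+ */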
+static int ath10k_fw_stats_open(struct inode *inode, struct file *file)
+{
+ struct ath10k *ar = inode->i_private;
+ void *buf = NULL;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH10K_STATE_ON) {
+ ret = -ENETDOWN;
+ goto err_unlock;
+ }
+
+ buf = vmalloc(ATH10K_FW_STATS_BUF_SIZE);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto err_unlock;
+ }
+
+ ret = ath10k_debug_fw_stats_request(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to request fw stats: %d\n", ret);
+ goto err_free;
+ }
+
+ ret = ath10k_wmi_fw_stats_fill(ar, &ar->debug.fw_stats, buf);
+ if (ret) {
+ ath10k_warn(ar, "failed to fill fw stats: %d\n", ret);
+ goto err_free;
+ }
+
+ file->private_data = buf;
+
+ mutex_unlock(&ar->conf_mutex);
+ return 0;
+
+err_free:
+ vfree(buf);
+
+err_unlock:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static int ath10k_fw_stats_release(struct inode *inode, struct file *file)
+{
+ vfree(file->private_data);
+
+ return 0;
+}
+
+static ssize_t ath10k_fw_stats_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ const char *buf = file->private_data;
+ size_t len = strlen(buf);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_fw_stats = {
+ .open = ath10k_fw_stats_open,
+ .release = ath10k_fw_stats_release,
+ .read = ath10k_fw_stats_read,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath10k_debug_fw_reset_stats_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ int ret;
+ size_t len = 0, buf_len = 500;
+ char *buf;
+
+ buf = kmalloc(buf_len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ spin_lock_bh(&ar->data_lock);
+
+ len += scnprintf(buf + len, buf_len - len,
+ "fw_crash_counter\t\t%d\n", ar->stats.fw_crash_counter);
+ len += scnprintf(buf + len, buf_len - len,
+ "fw_warm_reset_counter\t\t%d\n",
+ ar->stats.fw_warm_reset_counter);
+ len += scnprintf(buf + len, buf_len - len,
+ "fw_cold_reset_counter\t\t%d\n",
+ ar->stats.fw_cold_reset_counter);
+
+ spin_unlock_bh(&ar->data_lock);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+
+ kfree(buf);
+
+ return ret;
+}
+
+static const struct file_operations fops_fw_reset_stats = {
+ .open = simple_open,
+ .read = ath10k_debug_fw_reset_stats_read,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+/* This is a clean assert crash in firmware. */
+static int ath10k_debug_fw_assert(struct ath10k *ar)
+{
+ struct wmi_vdev_install_key_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd) + 16);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_vdev_install_key_cmd *)skb->data;
+ memset(cmd, 0, sizeof(*cmd));
+
+ /* big enough number so that firmware asserts */
+ cmd->vdev_id = __cpu_to_le32(0x7ffe);
+
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->vdev_install_key_cmdid);
+}
+
+static ssize_t ath10k_read_simulate_fw_crash(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ const char buf[] =
+ "To simulate firmware crash write one of the keywords to this file:\n"
+ "`soft` - this will send WMI_FORCE_FW_HANG_ASSERT to firmware if FW supports that command.\n"
+ "`hard` - this will send to firmware command with illegal parameters causing firmware crash.\n"
+ "`assert` - this will send special illegal parameter to firmware to cause assert failure and crash.\n"
+ "`hw-restart` - this will simply queue hw restart without fw/hw actually crashing.\n";
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf));
+}
+
+/* Simulate firmware crash:
+ * 'soft': Call a wmi command causing a firmware hang. This firmware hang
+ * is recoverable by a warm firmware reset.
+ * 'hard': Force a firmware crash by setting a vdev parameter for a
+ * disallowed vdev id. This is a hard firmware crash because it is
+ * recoverable only by a cold firmware reset.
+ */
+static ssize_t ath10k_write_simulate_fw_crash(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ char buf[32] = {};
+ ssize_t rc;
+ int ret;
+
+ /* filter partial writes and invalid commands */
+ if (*ppos != 0 || count >= sizeof(buf) || count == 0)
+ return -EINVAL;
+
+ rc = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);
+ if (rc < 0)
+ return rc;
+
+ /* drop the possible '\n' from the end */
+ if (buf[*ppos - 1] == '\n')
+ buf[*ppos - 1] = '\0';
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH10K_STATE_ON &&
+ ar->state != ATH10K_STATE_RESTARTED) {
+ ret = -ENETDOWN;
+ goto exit;
+ }
+
+ if (!strcmp(buf, "soft")) {
+ ath10k_info(ar, "simulating soft firmware crash\n");
+ ret = ath10k_wmi_force_fw_hang(ar, WMI_FORCE_FW_HANG_ASSERT, 0);
+ } else if (!strcmp(buf, "hard")) {
+ ath10k_info(ar, "simulating hard firmware crash\n");
+ /* 0x7fff is vdev id, and it is always out of range for all
+ * firmware variants in order to force a firmware crash.
+ */
+ ret = ath10k_wmi_vdev_set_param(ar, 0x7fff,
+ ar->wmi.vdev_param->rts_threshold,
+ 0);
+ } else if (!strcmp(buf, "assert")) {
+ ath10k_info(ar, "simulating firmware assert crash\n");
+ ret = ath10k_debug_fw_assert(ar);
+ } else if (!strcmp(buf, "hw-restart")) {
+ ath10k_info(ar, "user requested hw restart\n");
+ ath10k_core_start_recovery(ar);
+ ret = 0;
+ } else {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ if (ret) {
+ ath10k_warn(ar, "failed to simulate firmware crash: %d\n", ret);
+ goto exit;
+ }
+
+ ret = count;
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static const struct file_operations fops_simulate_fw_crash = {
+ .read = ath10k_read_simulate_fw_crash,
+ .write = ath10k_write_simulate_fw_crash,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
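+
+/* Typical usage (assuming debugfs is mounted at /sys/kernel/debug and
+ * the device is phy0):
+ *
+ * echo soft > /sys/kernel/debug/ieee80211/phy0/ath10k/simulate_fw_crash
+ */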
+
+static ssize_t ath10k_read_chip_id(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ size_t len;
+ char buf[50];
+
+ len = scnprintf(buf, sizeof(buf), "0x%08x\n", ar->bus_param.chip_id);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_chip_id = {
+ .read = ath10k_read_chip_id,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath10k_reg_addr_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ u8 buf[32];
+ size_t len = 0;
+ u32 reg_addr;
+
+ mutex_lock(&ar->conf_mutex);
+ reg_addr = ar->debug.reg_addr;
+ mutex_unlock(&ar->conf_mutex);
+
+ len += scnprintf(buf + len, sizeof(buf) - len, "0x%x\n", reg_addr);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t ath10k_reg_addr_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ u32 reg_addr;
+ int ret;
+
+ ret = kstrtou32_from_user(user_buf, count, 0, &reg_addr);
+ if (ret)
+ return ret;
+
+ if (!IS_ALIGNED(reg_addr, 4))
+ return -EFAULT;
+
+ mutex_lock(&ar->conf_mutex);
+ ar->debug.reg_addr = reg_addr;
+ mutex_unlock(&ar->conf_mutex);
+
+ return count;
+}
+
+static const struct file_operations fops_reg_addr = {
+ .read = ath10k_reg_addr_read,
+ .write = ath10k_reg_addr_write,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath10k_reg_value_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ u8 buf[48];
+ size_t len;
+ u32 reg_addr, reg_val;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH10K_STATE_ON &&
+ ar->state != ATH10K_STATE_UTF) {
+ ret = -ENETDOWN;
+ goto exit;
+ }
+
+ reg_addr = ar->debug.reg_addr;
+
+ reg_val = ath10k_hif_read32(ar, reg_addr);
+ len = scnprintf(buf, sizeof(buf), "0x%08x:0x%08x\n", reg_addr, reg_val);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static ssize_t ath10k_reg_value_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ u32 reg_addr, reg_val;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH10K_STATE_ON &&
+ ar->state != ATH10K_STATE_UTF) {
+ ret = -ENETDOWN;
+ goto exit;
+ }
+
+ reg_addr = ar->debug.reg_addr;
+
+ ret = kstrtou32_from_user(user_buf, count, 0, &reg_val);
+ if (ret)
+ goto exit;
+
+ ath10k_hif_write32(ar, reg_addr, reg_val);
+
+ ret = count;
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static const struct file_operations fops_reg_value = {
+ .read = ath10k_reg_value_read,
+ .write = ath10k_reg_value_write,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath10k_mem_value_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ u8 *buf;
+ int ret;
+
+ if (*ppos < 0)
+ return -EINVAL;
+
+ if (!count)
+ return 0;
+
+ mutex_lock(&ar->conf_mutex);
+
+ buf = vmalloc(count);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ if (ar->state != ATH10K_STATE_ON &&
+ ar->state != ATH10K_STATE_UTF) {
+ ret = -ENETDOWN;
+ goto exit;
+ }
+
+ ret = ath10k_hif_diag_read(ar, *ppos, buf, count);
+ if (ret) {
+ ath10k_warn(ar, "failed to read address 0x%08x via diagnose window from debugfs: %d\n",
+ (u32)(*ppos), ret);
+ goto exit;
+ }
+
+ ret = copy_to_user(user_buf, buf, count);
+ if (ret) {
+ ret = -EFAULT;
+ goto exit;
+ }
+
+ count -= ret;
+ *ppos += count;
+ ret = count;
+
+exit:
+ vfree(buf);
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static ssize_t ath10k_mem_value_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ u8 *buf;
+ int ret;
+
+ if (*ppos < 0)
+ return -EINVAL;
+
+ if (!count)
+ return 0;
+
+ mutex_lock(&ar->conf_mutex);
+
+ buf = vmalloc(count);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ if (ar->state != ATH10K_STATE_ON &&
+ ar->state != ATH10K_STATE_UTF) {
+ ret = -ENETDOWN;
+ goto exit;
+ }
+
+ ret = copy_from_user(buf, user_buf, count);
+ if (ret) {
+ ret = -EFAULT;
+ goto exit;
+ }
+
+ ret = ath10k_hif_diag_write(ar, *ppos, buf, count);
+ if (ret) {
+ ath10k_warn(ar, "failed to write address 0x%08x via diagnose window from debugfs: %d\n",
+ (u32)(*ppos), ret);
+ goto exit;
+ }
+
+ *ppos += count;
+ ret = count;
+
+exit:
+ vfree(buf);
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static const struct file_operations fops_mem_value = {
+ .read = ath10k_mem_value_read,
+ .write = ath10k_mem_value_write,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
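+
+/* For mem_value the file offset is interpreted as the target memory
+ * address, so accesses can be positioned with lseek(2)/pread(2), e.g.
+ * (hypothetical address):
+ *
+ * dd if=mem_value bs=4 count=1 skip=$((0x400000 / 4))
+ */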
+
+static int ath10k_debug_htt_stats_req(struct ath10k *ar)
+{
+ u64 cookie;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (ar->debug.htt_stats_mask == 0)
+ /* htt stats are disabled */
+ return 0;
+
+ if (ar->state != ATH10K_STATE_ON)
+ return 0;
+
+ cookie = get_jiffies_64();
+
+ ret = ath10k_htt_h2t_stats_req(&ar->htt, ar->debug.htt_stats_mask,
+ ar->debug.reset_htt_stats, cookie);
+ if (ret) {
+ ath10k_warn(ar, "failed to send htt stats request: %d\n", ret);
+ return ret;
+ }
+
+ queue_delayed_work(ar->workqueue, &ar->debug.htt_stats_dwork,
+ msecs_to_jiffies(ATH10K_DEBUG_HTT_STATS_INTERVAL));
+
+ return 0;
+}
+
+static void ath10k_debug_htt_stats_dwork(struct work_struct *work)
+{
+ struct ath10k *ar = container_of(work, struct ath10k,
+ debug.htt_stats_dwork.work);
+
+ mutex_lock(&ar->conf_mutex);
+
+ ath10k_debug_htt_stats_req(ar);
+
+ mutex_unlock(&ar->conf_mutex);
+}
+
+static ssize_t ath10k_read_htt_stats_mask(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ char buf[32];
+ size_t len;
+
+ len = scnprintf(buf, sizeof(buf), "%lu\n", ar->debug.htt_stats_mask);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t ath10k_write_htt_stats_mask(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ unsigned long mask;
+ int ret;
+
+ ret = kstrtoul_from_user(user_buf, count, 0, &mask);
+ if (ret)
+ return ret;
+
+ /* max 17-bit mask (for now) */
+ if (mask > HTT_STATS_BIT_MASK)
+ return -E2BIG;
+
+ mutex_lock(&ar->conf_mutex);
+
+ ar->debug.htt_stats_mask = mask;
+
+ ret = ath10k_debug_htt_stats_req(ar);
+ if (ret)
+ goto out;
+
+ ret = count;
+
+out:
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static const struct file_operations fops_htt_stats_mask = {
+ .read = ath10k_read_htt_stats_mask,
+ .write = ath10k_write_htt_stats_mask,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath10k_read_htt_max_amsdu_ampdu(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ char buf[64];
+ u8 amsdu, ampdu;
+ size_t len;
+
+ mutex_lock(&ar->conf_mutex);
+
+ amsdu = ar->htt.max_num_amsdu;
+ ampdu = ar->htt.max_num_ampdu;
+ mutex_unlock(&ar->conf_mutex);
+
+ len = scnprintf(buf, sizeof(buf), "%u %u\n", amsdu, ampdu);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t ath10k_write_htt_max_amsdu_ampdu(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ int res;
+ char buf[64] = {};
+ unsigned int amsdu, ampdu;
+
+ res = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos,
+ user_buf, count);
+ if (res <= 0)
+ return res;
+
+ res = sscanf(buf, "%u %u", &amsdu, &ampdu);
+
+ if (res != 2)
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+
+ res = ath10k_htt_h2t_aggr_cfg_msg(&ar->htt, ampdu, amsdu);
+ if (res)
+ goto out;
+
+ res = count;
+ ar->htt.max_num_amsdu = amsdu;
+ ar->htt.max_num_ampdu = ampdu;
+
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return res;
+}
+
+static const struct file_operations fops_htt_max_amsdu_ampdu = {
+ .read = ath10k_read_htt_max_amsdu_ampdu,
+ .write = ath10k_write_htt_max_amsdu_ampdu,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath10k_read_fw_dbglog(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ size_t len;
+ char buf[96];
+
+ len = scnprintf(buf, sizeof(buf), "0x%016llx %u\n",
+ ar->debug.fw_dbglog_mask, ar->debug.fw_dbglog_level);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t ath10k_write_fw_dbglog(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ int ret;
+ char buf[96] = {};
+ unsigned int log_level;
+ u64 mask;
+
+ ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos,
+ user_buf, count);
+ if (ret <= 0)
+ return ret;
+
+ ret = sscanf(buf, "%llx %u", &mask, &log_level);
+
+ if (!ret)
+ return -EINVAL;
+
+ if (ret == 1)
+ /* default if user did not specify */
+ log_level = ATH10K_DBGLOG_LEVEL_WARN;
+
+ mutex_lock(&ar->conf_mutex);
+
+ ar->debug.fw_dbglog_mask = mask;
+ ar->debug.fw_dbglog_level = log_level;
+
+ if (ar->state == ATH10K_STATE_ON) {
+ ret = ath10k_wmi_dbglog_cfg(ar, ar->debug.fw_dbglog_mask,
+ ar->debug.fw_dbglog_level);
+ if (ret) {
+ ath10k_warn(ar, "dbglog cfg failed from debugfs: %d\n",
+ ret);
+ goto exit;
+ }
+ }
+
+ ret = count;
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+/* TODO: It would be nice to always support ethtool stats; that would
+ * require moving the stats storage out of ath10k_debug, or always having
+ * the ath10k_debug struct available.
+ */
+
+/* This generally corresponds to the debugfs fw_stats file */
+static const char ath10k_gstrings_stats[][ETH_GSTRING_LEN] = {
+ "tx_pkts_nic",
+ "tx_bytes_nic",
+ "rx_pkts_nic",
+ "rx_bytes_nic",
+ "d_noise_floor",
+ "d_cycle_count",
+ "d_phy_error",
+ "d_rts_bad",
+ "d_rts_good",
+ "d_tx_power", /* in .5 dbM I think */
+ "d_rx_crc_err", /* fcs_bad */
+ "d_rx_crc_err_drop", /* frame with FCS error, dropped late in kernel */
+ "d_no_beacon",
+ "d_tx_mpdus_queued",
+ "d_tx_msdu_queued",
+ "d_tx_msdu_dropped",
+ "d_local_enqued",
+ "d_local_freed",
+ "d_tx_ppdu_hw_queued",
+ "d_tx_ppdu_reaped",
+ "d_tx_fifo_underrun",
+ "d_tx_ppdu_abort",
+ "d_tx_mpdu_requeued",
+ "d_tx_excessive_retries",
+ "d_tx_hw_rate",
+ "d_tx_dropped_sw_retries",
+ "d_tx_illegal_rate",
+ "d_tx_continuous_xretries",
+ "d_tx_timeout",
+ "d_tx_mpdu_txop_limit",
+ "d_pdev_resets",
+ "d_rx_mid_ppdu_route_change",
+ "d_rx_status",
+ "d_rx_extra_frags_ring0",
+ "d_rx_extra_frags_ring1",
+ "d_rx_extra_frags_ring2",
+ "d_rx_extra_frags_ring3",
+ "d_rx_msdu_htt",
+ "d_rx_mpdu_htt",
+ "d_rx_msdu_stack",
+ "d_rx_mpdu_stack",
+ "d_rx_phy_err",
+ "d_rx_phy_err_drops",
+ "d_rx_mpdu_errors", /* FCS, MIC, ENC */
+ "d_fw_crash_count",
+ "d_fw_warm_reset_count",
+ "d_fw_cold_reset_count",
+};
+
+#define ATH10K_SSTATS_LEN ARRAY_SIZE(ath10k_gstrings_stats)
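+
+/* The order of ath10k_gstrings_stats must match the data[i++] assignments
+ * in ath10k_debug_get_et_stats() below; the WARN_ON there only catches a
+ * count mismatch, not a reordering.
+ */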
+
+void ath10k_debug_get_et_strings(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u32 sset, u8 *data)
+{
+ if (sset == ETH_SS_STATS)
+ memcpy(data, ath10k_gstrings_stats,
+ sizeof(ath10k_gstrings_stats));
+}
+
+int ath10k_debug_get_et_sset_count(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, int sset)
+{
+ if (sset == ETH_SS_STATS)
+ return ATH10K_SSTATS_LEN;
+
+ return 0;
+}
+
+void ath10k_debug_get_et_stats(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct ath10k *ar = hw->priv;
+ static const struct ath10k_fw_stats_pdev zero_stats = {};
+ const struct ath10k_fw_stats_pdev *pdev_stats;
+ int i = 0, ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state == ATH10K_STATE_ON) {
+ ret = ath10k_debug_fw_stats_request(ar);
+ if (ret) {
+ /* just print a warning and try to use older results */
+ ath10k_warn(ar,
+ "failed to get fw stats for ethtool: %d\n",
+ ret);
+ }
+ }
+
+ pdev_stats = list_first_entry_or_null(&ar->debug.fw_stats.pdevs,
+ struct ath10k_fw_stats_pdev,
+ list);
+ if (!pdev_stats) {
+ /* no results available so just return zeroes */
+ pdev_stats = &zero_stats;
+ }
+
+ spin_lock_bh(&ar->data_lock);
+
+ data[i++] = pdev_stats->hw_reaped; /* ppdu reaped */
+ data[i++] = 0; /* tx bytes */
+ data[i++] = pdev_stats->htt_mpdus;
+ data[i++] = 0; /* rx bytes */
+ data[i++] = pdev_stats->ch_noise_floor;
+ data[i++] = pdev_stats->cycle_count;
+ data[i++] = pdev_stats->phy_err_count;
+ data[i++] = pdev_stats->rts_bad;
+ data[i++] = pdev_stats->rts_good;
+ data[i++] = pdev_stats->chan_tx_power;
+ data[i++] = pdev_stats->fcs_bad;
+ data[i++] = ar->stats.rx_crc_err_drop;
+ data[i++] = pdev_stats->no_beacons;
+ data[i++] = pdev_stats->mpdu_enqued;
+ data[i++] = pdev_stats->msdu_enqued;
+ data[i++] = pdev_stats->wmm_drop;
+ data[i++] = pdev_stats->local_enqued;
+ data[i++] = pdev_stats->local_freed;
+ data[i++] = pdev_stats->hw_queued;
+ data[i++] = pdev_stats->hw_reaped;
+ data[i++] = pdev_stats->underrun;
+ data[i++] = pdev_stats->tx_abort;
+ data[i++] = pdev_stats->mpdus_requeued;
+ data[i++] = pdev_stats->tx_ko;
+ data[i++] = pdev_stats->data_rc;
+ data[i++] = pdev_stats->sw_retry_failure;
+ data[i++] = pdev_stats->illgl_rate_phy_err;
+ data[i++] = pdev_stats->pdev_cont_xretry;
+ data[i++] = pdev_stats->pdev_tx_timeout;
+ data[i++] = pdev_stats->txop_ovf;
+ data[i++] = pdev_stats->pdev_resets;
+ data[i++] = pdev_stats->mid_ppdu_route_change;
+ data[i++] = pdev_stats->status_rcvd;
+ data[i++] = pdev_stats->r0_frags;
+ data[i++] = pdev_stats->r1_frags;
+ data[i++] = pdev_stats->r2_frags;
+ data[i++] = pdev_stats->r3_frags;
+ data[i++] = pdev_stats->htt_msdus;
+ data[i++] = pdev_stats->htt_mpdus;
+ data[i++] = pdev_stats->loc_msdus;
+ data[i++] = pdev_stats->loc_mpdus;
+ data[i++] = pdev_stats->phy_errs;
+ data[i++] = pdev_stats->phy_err_drop;
+ data[i++] = pdev_stats->mpdu_errs;
+ data[i++] = ar->stats.fw_crash_counter;
+ data[i++] = ar->stats.fw_warm_reset_counter;
+ data[i++] = ar->stats.fw_cold_reset_counter;
+
+ spin_unlock_bh(&ar->data_lock);
+
+ mutex_unlock(&ar->conf_mutex);
+
+ WARN_ON(i != ATH10K_SSTATS_LEN);
+}
+
+static const struct file_operations fops_fw_dbglog = {
+ .read = ath10k_read_fw_dbglog,
+ .write = ath10k_write_fw_dbglog,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
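+
+/* Typical usage: write "<mask-in-hex> [level]", e.g.
+ *
+ * echo 0xffffffff 1 > fw_dbglog
+ *
+ * If the level is omitted it defaults to ATH10K_DBGLOG_LEVEL_WARN.
+ */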
+
+static int ath10k_debug_cal_data_fetch(struct ath10k *ar)
+{
+ u32 hi_addr;
+ __le32 addr;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (WARN_ON(ar->hw_params.cal_data_len > ATH10K_DEBUG_CAL_DATA_LEN))
+ return -EINVAL;
+
+ if (ar->hw_params.cal_data_len == 0)
+ return -EOPNOTSUPP;
+
+ hi_addr = host_interest_item_address(HI_ITEM(hi_board_data));
+
+ ret = ath10k_hif_diag_read(ar, hi_addr, &addr, sizeof(addr));
+ if (ret) {
+ ath10k_warn(ar, "failed to read hi_board_data address: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = ath10k_hif_diag_read(ar, le32_to_cpu(addr), ar->debug.cal_data,
+ ar->hw_params.cal_data_len);
+ if (ret) {
+ ath10k_warn(ar, "failed to read calibration data: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath10k_debug_cal_data_open(struct inode *inode, struct file *file)
+{
+ struct ath10k *ar = inode->i_private;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state == ATH10K_STATE_ON ||
+ ar->state == ATH10K_STATE_UTF) {
+ ath10k_debug_cal_data_fetch(ar);
+ }
+
+ file->private_data = ar;
+ mutex_unlock(&ar->conf_mutex);
+
+ return 0;
+}
+
+static ssize_t ath10k_debug_cal_data_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+
+ mutex_lock(&ar->conf_mutex);
+
+ count = simple_read_from_buffer(user_buf, count, ppos,
+ ar->debug.cal_data,
+ ar->hw_params.cal_data_len);
+
+ mutex_unlock(&ar->conf_mutex);
+
+ return count;
+}
+
+static ssize_t ath10k_write_ani_enable(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ int ret;
+ u8 enable;
+
+ if (kstrtou8_from_user(user_buf, count, 0, &enable))
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->ani_enabled == enable) {
+ ret = count;
+ goto exit;
+ }
+
+ ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->ani_enable,
+ enable);
+ if (ret) {
+ ath10k_warn(ar, "ani_enable failed from debugfs: %d\n", ret);
+ goto exit;
+ }
+ ar->ani_enabled = enable;
+
+ ret = count;
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static ssize_t ath10k_read_ani_enable(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ size_t len;
+ char buf[32];
+
+ len = scnprintf(buf, sizeof(buf), "%d\n", ar->ani_enabled);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_ani_enable = {
+ .read = ath10k_read_ani_enable,
+ .write = ath10k_write_ani_enable,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static const struct file_operations fops_cal_data = {
+ .open = ath10k_debug_cal_data_open,
+ .read = ath10k_debug_cal_data_read,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath10k_read_nf_cal_period(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ size_t len;
+ char buf[32];
+
+ len = scnprintf(buf, sizeof(buf), "%d\n", ar->debug.nf_cal_period);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t ath10k_write_nf_cal_period(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ unsigned long period;
+ int ret;
+
+ ret = kstrtoul_from_user(user_buf, count, 0, &period);
+ if (ret)
+ return ret;
+
+ if (period > WMI_PDEV_PARAM_CAL_PERIOD_MAX)
+ return -EINVAL;
+
+ /* there's no way to switch back to the firmware default */
+ if (period == 0)
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+
+ ar->debug.nf_cal_period = period;
+
+ if (ar->state != ATH10K_STATE_ON) {
+ /* firmware is not running, nothing else to do */
+ ret = count;
+ goto exit;
+ }
+
+ ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->cal_period,
+ ar->debug.nf_cal_period);
+ if (ret) {
+ ath10k_warn(ar, "cal period cfg failed from debugfs: %d\n",
+ ret);
+ goto exit;
+ }
+
+ ret = count;
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static const struct file_operations fops_nf_cal_period = {
+ .read = ath10k_read_nf_cal_period,
+ .write = ath10k_write_nf_cal_period,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+#define ATH10K_TPC_CONFIG_BUF_SIZE (1024 * 1024)
+
+static int ath10k_debug_tpc_stats_request(struct ath10k *ar)
+{
+ int ret;
+ unsigned long time_left;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ reinit_completion(&ar->debug.tpc_complete);
+
+ ret = ath10k_wmi_pdev_get_tpc_config(ar, WMI_TPC_CONFIG_PARAM);
+ if (ret) {
+ ath10k_warn(ar, "failed to request tpc config: %d\n", ret);
+ return ret;
+ }
+
+ time_left = wait_for_completion_timeout(&ar->debug.tpc_complete,
+ 1 * HZ);
+ if (time_left == 0)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+void ath10k_debug_tpc_stats_process(struct ath10k *ar,
+ struct ath10k_tpc_stats *tpc_stats)
+{
+ spin_lock_bh(&ar->data_lock);
+
+ kfree(ar->debug.tpc_stats);
+ ar->debug.tpc_stats = tpc_stats;
+ complete(&ar->debug.tpc_complete);
+
+ spin_unlock_bh(&ar->data_lock);
+}
+
+void
+ath10k_debug_tpc_stats_final_process(struct ath10k *ar,
+ struct ath10k_tpc_stats_final *tpc_stats)
+{
+ spin_lock_bh(&ar->data_lock);
+
+ kfree(ar->debug.tpc_stats_final);
+ ar->debug.tpc_stats_final = tpc_stats;
+ complete(&ar->debug.tpc_complete);
+
+ spin_unlock_bh(&ar->data_lock);
+}
+
+static void ath10k_tpc_stats_print(struct ath10k_tpc_stats *tpc_stats,
+ unsigned int j, char *buf, size_t *len)
+{
+ int i;
+ size_t buf_len;
+ static const char table_str[][5] = { "CDD",
+ "STBC",
+ "TXBF" };
+ static const char pream_str[][6] = { "CCK",
+ "OFDM",
+ "HT20",
+ "HT40",
+ "VHT20",
+ "VHT40",
+ "VHT80",
+ "HTCUP" };
+
+ buf_len = ATH10K_TPC_CONFIG_BUF_SIZE;
+ *len += scnprintf(buf + *len, buf_len - *len,
+ "********************************\n");
+ *len += scnprintf(buf + *len, buf_len - *len,
+ "******************* %s POWER TABLE ****************\n",
+ table_str[j]);
+ *len += scnprintf(buf + *len, buf_len - *len,
+ "********************************\n");
+ *len += scnprintf(buf + *len, buf_len - *len,
+ "No. Preamble Rate_code ");
+
+ for (i = 0; i < tpc_stats->num_tx_chain; i++)
+ *len += scnprintf(buf + *len, buf_len - *len,
+ "tpc_value%d ", i);
+
+ *len += scnprintf(buf + *len, buf_len - *len, "\n");
+
+ for (i = 0; i < tpc_stats->rate_max; i++) {
+ *len += scnprintf(buf + *len, buf_len - *len,
+ "%8d %s 0x%2x %s\n", i,
+ pream_str[tpc_stats->tpc_table[j].pream_idx[i]],
+ tpc_stats->tpc_table[j].rate_code[i],
+ tpc_stats->tpc_table[j].tpc_value[i]);
+ }
+
+ *len += scnprintf(buf + *len, buf_len - *len,
+ "***********************************\n");
+}
+
+static void ath10k_tpc_stats_fill(struct ath10k *ar,
+ struct ath10k_tpc_stats *tpc_stats,
+ char *buf)
+{
+ int j;
+ size_t len, buf_len;
+
+ len = 0;
+ buf_len = ATH10K_TPC_CONFIG_BUF_SIZE;
+
+ spin_lock_bh(&ar->data_lock);
+
+ if (!tpc_stats) {
+ ath10k_warn(ar, "failed to get tpc stats\n");
+ goto unlock;
+ }
+
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ len += scnprintf(buf + len, buf_len - len,
+ "*************************************\n");
+ len += scnprintf(buf + len, buf_len - len,
+ "TPC config for channel %4d mode %d\n",
+ tpc_stats->chan_freq,
+ tpc_stats->phy_mode);
+ len += scnprintf(buf + len, buf_len - len,
+ "*************************************\n");
+ len += scnprintf(buf + len, buf_len - len,
+ "CTL = 0x%2x Reg. Domain = %2d\n",
+ tpc_stats->ctl,
+ tpc_stats->reg_domain);
+ len += scnprintf(buf + len, buf_len - len,
+ "Antenna Gain = %2d Reg. Max Antenna Gain = %2d\n",
+ tpc_stats->twice_antenna_gain,
+ tpc_stats->twice_antenna_reduction);
+ len += scnprintf(buf + len, buf_len - len,
+ "Power Limit = %2d Reg. Max Power = %2d\n",
+ tpc_stats->power_limit,
+ tpc_stats->twice_max_rd_power / 2);
+ len += scnprintf(buf + len, buf_len - len,
+ "Num tx chains = %2d Num supported rates = %2d\n",
+ tpc_stats->num_tx_chain,
+ tpc_stats->rate_max);
+
+ for (j = 0; j < WMI_TPC_FLAG; j++) {
+ switch (j) {
+ case WMI_TPC_TABLE_TYPE_CDD:
+ if (tpc_stats->flag[j] == ATH10K_TPC_TABLE_TYPE_FLAG) {
+ len += scnprintf(buf + len, buf_len - len,
+ "CDD not supported\n");
+ break;
+ }
+
+ ath10k_tpc_stats_print(tpc_stats, j, buf, &len);
+ break;
+ case WMI_TPC_TABLE_TYPE_STBC:
+ if (tpc_stats->flag[j] == ATH10K_TPC_TABLE_TYPE_FLAG) {
+ len += scnprintf(buf + len, buf_len - len,
+ "STBC not supported\n");
+ break;
+ }
+
+ ath10k_tpc_stats_print(tpc_stats, j, buf, &len);
+ break;
+ case WMI_TPC_TABLE_TYPE_TXBF:
+ if (tpc_stats->flag[j] == ATH10K_TPC_TABLE_TYPE_FLAG) {
+ len += scnprintf(buf + len, buf_len - len,
+ "TXBF not supported\n***************************\n");
+ break;
+ }
+
+ ath10k_tpc_stats_print(tpc_stats, j, buf, &len);
+ break;
+ default:
+ len += scnprintf(buf + len, buf_len - len,
+ "Invalid Type\n");
+ break;
+ }
+ }
+
+unlock:
+ spin_unlock_bh(&ar->data_lock);
+
+ if (len >= buf_len)
+ buf[len - 1] = 0;
+ else
+ buf[len] = 0;
+}
+
+static int ath10k_tpc_stats_open(struct inode *inode, struct file *file)
+{
+ struct ath10k *ar = inode->i_private;
+ void *buf = NULL;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH10K_STATE_ON) {
+ ret = -ENETDOWN;
+ goto err_unlock;
+ }
+
+ buf = vmalloc(ATH10K_TPC_CONFIG_BUF_SIZE);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto err_unlock;
+ }
+
+ ret = ath10k_debug_tpc_stats_request(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to request tpc config stats: %d\n",
+ ret);
+ goto err_free;
+ }
+
+ ath10k_tpc_stats_fill(ar, ar->debug.tpc_stats, buf);
+ file->private_data = buf;
+
+ mutex_unlock(&ar->conf_mutex);
+ return 0;
+
+err_free:
+ vfree(buf);
+
+err_unlock:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static int ath10k_tpc_stats_release(struct inode *inode, struct file *file)
+{
+ vfree(file->private_data);
+
+ return 0;
+}
+
+static ssize_t ath10k_tpc_stats_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ const char *buf = file->private_data;
+ size_t len = strlen(buf);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_tpc_stats = {
+ .open = ath10k_tpc_stats_open,
+ .release = ath10k_tpc_stats_release,
+ .read = ath10k_tpc_stats_read,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+int ath10k_debug_start(struct ath10k *ar)
+{
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ret = ath10k_debug_htt_stats_req(ar);
+ if (ret)
+ /* continue normally anyway, this isn't serious */
+ ath10k_warn(ar, "failed to start htt stats workqueue: %d\n",
+ ret);
+
+ if (ar->debug.fw_dbglog_mask) {
+ ret = ath10k_wmi_dbglog_cfg(ar, ar->debug.fw_dbglog_mask,
+ ATH10K_DBGLOG_LEVEL_WARN);
+ if (ret)
+ /* not serious */
+ ath10k_warn(ar, "failed to enable dbglog during start: %d",
+ ret);
+ }
+
+ if (ar->pktlog_filter) {
+ ret = ath10k_wmi_pdev_pktlog_enable(ar,
+ ar->pktlog_filter);
+ if (ret)
+ /* not serious */
+ ath10k_warn(ar,
+ "failed to enable pktlog filter %x: %d\n",
+ ar->pktlog_filter, ret);
+ } else {
+ ret = ath10k_wmi_pdev_pktlog_disable(ar);
+ if (ret)
+ /* not serious */
+ ath10k_warn(ar, "failed to disable pktlog: %d\n", ret);
+ }
+
+ if (ar->debug.nf_cal_period &&
+ !test_bit(ATH10K_FW_FEATURE_NON_BMI,
+ ar->normal_mode_fw.fw_file.fw_features)) {
+ ret = ath10k_wmi_pdev_set_param(ar,
+ ar->wmi.pdev_param->cal_period,
+ ar->debug.nf_cal_period);
+ if (ret)
+ /* not serious */
+ ath10k_warn(ar, "cal period cfg failed from debug start: %d\n",
+ ret);
+ }
+
+ return ret;
+}
+
+void ath10k_debug_stop(struct ath10k *ar)
+{
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (!test_bit(ATH10K_FW_FEATURE_NON_BMI,
+ ar->normal_mode_fw.fw_file.fw_features))
+ ath10k_debug_cal_data_fetch(ar);
+
+ /* Must not use _sync to avoid deadlock; the sync cancel happens in
+ * ath10k_debug_unregister(). The check for htt_stats_mask is to avoid
+ * a warning from timer_delete().
+ */
+ if (ar->debug.htt_stats_mask != 0)
+ cancel_delayed_work(&ar->debug.htt_stats_dwork);
+
+ ath10k_wmi_pdev_pktlog_disable(ar);
+}
+
+static ssize_t ath10k_write_simulate_radar(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ struct ath10k_vif *arvif;
+
+ /* Check only the first vif: all vifs share the same channel, so if
+ * the channel is disabled they will all report the same 'is_started'
+ * state.
+ */
+ arvif = list_first_entry(&ar->arvifs, typeof(*arvif), list);
+ if (!arvif->is_started)
+ return -EINVAL;
+
+ ieee80211_radar_detected(ar->hw, NULL);
+
+ return count;
+}
+
+static const struct file_operations fops_simulate_radar = {
+ .write = ath10k_write_simulate_radar,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
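+ /* Both helpers assume "ar", "buf", "len" and "size" are visible in the
+ * calling scope.
+ */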
+#define ATH10K_DFS_STAT(s, p) (\
+ len += scnprintf(buf + len, size - len, "%-28s : %10u\n", s, \
+ ar->debug.dfs_stats.p))
+
+#define ATH10K_DFS_POOL_STAT(s, p) (\
+ len += scnprintf(buf + len, size - len, "%-28s : %10u\n", s, \
+ ar->debug.dfs_pool_stats.p))
+
+static ssize_t ath10k_read_dfs_stats(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ int retval = 0, len = 0;
+ const int size = 8000;
+ struct ath10k *ar = file->private_data;
+ char *buf;
+
+ buf = kzalloc(size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ if (!ar->dfs_detector) {
+ len += scnprintf(buf + len, size - len, "DFS not enabled\n");
+ goto exit;
+ }
+
+ ar->debug.dfs_pool_stats =
+ ar->dfs_detector->get_stats(ar->dfs_detector);
+
+ len += scnprintf(buf + len, size - len, "Pulse detector statistics:\n");
+
+ ATH10K_DFS_STAT("reported phy errors", phy_errors);
+ ATH10K_DFS_STAT("pulse events reported", pulses_total);
+ ATH10K_DFS_STAT("DFS pulses detected", pulses_detected);
+ ATH10K_DFS_STAT("DFS pulses discarded", pulses_discarded);
+ ATH10K_DFS_STAT("Radars detected", radar_detected);
+
+ len += scnprintf(buf + len, size - len, "Global Pool statistics:\n");
+ ATH10K_DFS_POOL_STAT("Pool references", pool_reference);
+ ATH10K_DFS_POOL_STAT("Pulses allocated", pulse_allocated);
+ ATH10K_DFS_POOL_STAT("Pulses alloc error", pulse_alloc_error);
+ ATH10K_DFS_POOL_STAT("Pulses in use", pulse_used);
+ ATH10K_DFS_POOL_STAT("Seqs. allocated", pseq_allocated);
+ ATH10K_DFS_POOL_STAT("Seqs. alloc error", pseq_alloc_error);
+ ATH10K_DFS_POOL_STAT("Seqs. in use", pseq_used);
+
+exit:
+ if (len > size)
+ len = size;
+
+ retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+
+ return retval;
+}
+
+static const struct file_operations fops_dfs_stats = {
+ .read = ath10k_read_dfs_stats,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath10k_write_pktlog_filter(struct file *file,
+ const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ u32 filter;
+ int ret;
+
+ if (kstrtouint_from_user(ubuf, count, 0, &filter))
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH10K_STATE_ON) {
+ ar->pktlog_filter = filter;
+ ret = count;
+ goto out;
+ }
+
+ if (filter == ar->pktlog_filter) {
+ ret = count;
+ goto out;
+ }
+
+ if (filter) {
+ ret = ath10k_wmi_pdev_pktlog_enable(ar, filter);
+ if (ret) {
+ ath10k_warn(ar, "failed to enable pktlog filter %x: %d\n",
+ ar->pktlog_filter, ret);
+ goto out;
+ }
+ } else {
+ ret = ath10k_wmi_pdev_pktlog_disable(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to disable pktlog: %d\n", ret);
+ goto out;
+ }
+ }
+
+ ar->pktlog_filter = filter;
+ ret = count;
+
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static ssize_t ath10k_read_pktlog_filter(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ char buf[32];
+ struct ath10k *ar = file->private_data;
+ int len = 0;
+
+ mutex_lock(&ar->conf_mutex);
+ len = scnprintf(buf, sizeof(buf) - len, "%08x\n",
+ ar->pktlog_filter);
+ mutex_unlock(&ar->conf_mutex);
+
+ return simple_read_from_buffer(ubuf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_pktlog_filter = {
+ .read = ath10k_read_pktlog_filter,
+ .write = ath10k_write_pktlog_filter,
+ .open = simple_open
+};
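+
+ /* Example (hypothetical phy name): enable rx and tx pktlog events, i.e.
+ * ATH10K_PKTLOG_RX | ATH10K_PKTLOG_TX:
+ *
+ * echo 0x3 > /sys/kernel/debug/ieee80211/phy0/ath10k/pktlog_filter
+ */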
+
+static ssize_t ath10k_write_quiet_period(struct file *file,
+ const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ u32 period;
+
+ if (kstrtouint_from_user(ubuf, count, 0, &period))
+ return -EINVAL;
+
+ if (period < ATH10K_QUIET_PERIOD_MIN) {
+ ath10k_warn(ar, "Quiet period %u can not be lesser than 25ms\n",
+ period);
+ return -EINVAL;
+ }
+ mutex_lock(&ar->conf_mutex);
+ ar->thermal.quiet_period = period;
+ ath10k_thermal_set_throttling(ar);
+ mutex_unlock(&ar->conf_mutex);
+
+ return count;
+}
+
+static ssize_t ath10k_read_quiet_period(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ char buf[32];
+ struct ath10k *ar = file->private_data;
+ int len = 0;
+
+ mutex_lock(&ar->conf_mutex);
+ len = scnprintf(buf, sizeof(buf) - len, "%d\n",
+ ar->thermal.quiet_period);
+ mutex_unlock(&ar->conf_mutex);
+
+ return simple_read_from_buffer(ubuf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_quiet_period = {
+ .read = ath10k_read_quiet_period,
+ .write = ath10k_write_quiet_period,
+ .open = simple_open
+};
+
+static ssize_t ath10k_write_btcoex(struct file *file,
+ const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ ssize_t ret;
+ bool val;
+ u32 pdev_param;
+
+ ret = kstrtobool_from_user(ubuf, count, &val);
+ if (ret)
+ return ret;
+
+ if (!ar->coex_support)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH10K_STATE_ON &&
+ ar->state != ATH10K_STATE_RESTARTED) {
+ ret = -ENETDOWN;
+ goto exit;
+ }
+
+ if (!(test_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags) ^ val)) {
+ ret = count;
+ goto exit;
+ }
+
+ pdev_param = ar->wmi.pdev_param->enable_btcoex;
+ if (test_bit(ATH10K_FW_FEATURE_BTCOEX_PARAM,
+ ar->running_fw->fw_file.fw_features)) {
+ ret = ath10k_wmi_pdev_set_param(ar, pdev_param, val);
+ if (ret) {
+ ath10k_warn(ar, "failed to enable btcoex: %zd\n", ret);
+ ret = count;
+ goto exit;
+ }
+ } else {
+ ath10k_info(ar, "restarting firmware due to btcoex change");
+ ath10k_core_start_recovery(ar);
+ }
+
+ if (val)
+ set_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags);
+ else
+ clear_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags);
+
+ ret = count;
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static ssize_t ath10k_read_btcoex(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ char buf[32];
+ struct ath10k *ar = file->private_data;
+ int len = 0;
+
+ mutex_lock(&ar->conf_mutex);
+ len = scnprintf(buf, sizeof(buf) - len, "%d\n",
+ test_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags));
+ mutex_unlock(&ar->conf_mutex);
+
+ return simple_read_from_buffer(ubuf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_btcoex = {
+ .read = ath10k_read_btcoex,
+ .write = ath10k_write_btcoex,
+ .open = simple_open
+};
+
+static ssize_t ath10k_write_enable_extd_tx_stats(struct file *file,
+ const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ u32 filter;
+ int ret;
+
+ if (kstrtouint_from_user(ubuf, count, 0, &filter))
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH10K_STATE_ON) {
+ ar->debug.enable_extd_tx_stats = filter;
+ ret = count;
+ goto out;
+ }
+
+ if (filter == ar->debug.enable_extd_tx_stats) {
+ ret = count;
+ goto out;
+ }
+
+ ar->debug.enable_extd_tx_stats = filter;
+ ret = count;
+
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static ssize_t ath10k_read_enable_extd_tx_stats(struct file *file,
+ char __user *ubuf,
+ size_t count, loff_t *ppos)
+
+{
+ char buf[32];
+ struct ath10k *ar = file->private_data;
+ int len = 0;
+
+ mutex_lock(&ar->conf_mutex);
+ len = scnprintf(buf, sizeof(buf) - len, "%08x\n",
+ ar->debug.enable_extd_tx_stats);
+ mutex_unlock(&ar->conf_mutex);
+
+ return simple_read_from_buffer(ubuf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_enable_extd_tx_stats = {
+ .read = ath10k_read_enable_extd_tx_stats,
+ .write = ath10k_write_enable_extd_tx_stats,
+ .open = simple_open
+};
+
+static ssize_t ath10k_write_peer_stats(struct file *file,
+ const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ ssize_t ret;
+ bool val;
+
+ ret = kstrtobool_from_user(ubuf, count, &val);
+ if (ret)
+ return ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH10K_STATE_ON &&
+ ar->state != ATH10K_STATE_RESTARTED) {
+ ret = -ENETDOWN;
+ goto exit;
+ }
+
+ if (!(test_bit(ATH10K_FLAG_PEER_STATS, &ar->dev_flags) ^ val)) {
+ ret = count;
+ goto exit;
+ }
+
+ if (val)
+ set_bit(ATH10K_FLAG_PEER_STATS, &ar->dev_flags);
+ else
+ clear_bit(ATH10K_FLAG_PEER_STATS, &ar->dev_flags);
+
+ ath10k_info(ar, "restarting firmware due to Peer stats change");
+
+ ath10k_core_start_recovery(ar);
+ ret = count;
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static ssize_t ath10k_read_peer_stats(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+
+{
+ char buf[32];
+ struct ath10k *ar = file->private_data;
+ int len = 0;
+
+ mutex_lock(&ar->conf_mutex);
+ len = scnprintf(buf, sizeof(buf) - len, "%d\n",
+ test_bit(ATH10K_FLAG_PEER_STATS, &ar->dev_flags));
+ mutex_unlock(&ar->conf_mutex);
+
+ return simple_read_from_buffer(ubuf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_peer_stats = {
+ .read = ath10k_read_peer_stats,
+ .write = ath10k_write_peer_stats,
+ .open = simple_open
+};
+
+static ssize_t ath10k_debug_fw_checksums_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ size_t len = 0, buf_len = 4096;
+ ssize_t ret_cnt;
+ char *buf;
+
+ buf = kzalloc(buf_len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ mutex_lock(&ar->conf_mutex);
+
+ len += scnprintf(buf + len, buf_len - len,
+ "firmware-N.bin\t\t%08x\n",
+ crc32_le(0, ar->normal_mode_fw.fw_file.firmware->data,
+ ar->normal_mode_fw.fw_file.firmware->size));
+ len += scnprintf(buf + len, buf_len - len,
+ "athwlan\t\t\t%08x\n",
+ crc32_le(0, ar->normal_mode_fw.fw_file.firmware_data,
+ ar->normal_mode_fw.fw_file.firmware_len));
+ len += scnprintf(buf + len, buf_len - len,
+ "otp\t\t\t%08x\n",
+ crc32_le(0, ar->normal_mode_fw.fw_file.otp_data,
+ ar->normal_mode_fw.fw_file.otp_len));
+ len += scnprintf(buf + len, buf_len - len,
+ "codeswap\t\t%08x\n",
+ crc32_le(0, ar->normal_mode_fw.fw_file.codeswap_data,
+ ar->normal_mode_fw.fw_file.codeswap_len));
+ len += scnprintf(buf + len, buf_len - len,
+ "board-N.bin\t\t%08x\n",
+ crc32_le(0, ar->normal_mode_fw.board->data,
+ ar->normal_mode_fw.board->size));
+ len += scnprintf(buf + len, buf_len - len,
+ "board\t\t\t%08x\n",
+ crc32_le(0, ar->normal_mode_fw.board_data,
+ ar->normal_mode_fw.board_len));
+
+ ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+
+ mutex_unlock(&ar->conf_mutex);
+
+ kfree(buf);
+ return ret_cnt;
+}
+
+static const struct file_operations fops_fw_checksums = {
+ .read = ath10k_debug_fw_checksums_read,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath10k_sta_tid_stats_mask_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ char buf[32];
+ size_t len;
+
+ len = scnprintf(buf, sizeof(buf), "0x%08x\n", ar->sta_tid_stats_mask);
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t ath10k_sta_tid_stats_mask_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ ssize_t ret;
+ u32 mask;
+
+ ret = kstrtouint_from_user(user_buf, count, 0, &mask);
+ if (ret)
+ return ret;
+
+ ar->sta_tid_stats_mask = mask;
+
+ return count;
+}
+
+static const struct file_operations fops_sta_tid_stats_mask = {
+ .read = ath10k_sta_tid_stats_mask_read,
+ .write = ath10k_sta_tid_stats_mask_write,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
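+
+ /* Example (hypothetical phy name): account only TIDs 0 and 4:
+ *
+ * echo 0x11 > /sys/kernel/debug/ieee80211/phy0/ath10k/sta_tid_stats_mask
+ */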
+
+static int ath10k_debug_tpc_stats_final_request(struct ath10k *ar)
+{
+ int ret;
+ unsigned long time_left;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ reinit_completion(&ar->debug.tpc_complete);
+
+ ret = ath10k_wmi_pdev_get_tpc_table_cmdid(ar, WMI_TPC_CONFIG_PARAM);
+ if (ret) {
+ ath10k_warn(ar, "failed to request tpc table cmdid: %d\n", ret);
+ return ret;
+ }
+
+ time_left = wait_for_completion_timeout(&ar->debug.tpc_complete,
+ 1 * HZ);
+ if (time_left == 0)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int ath10k_tpc_stats_final_open(struct inode *inode, struct file *file)
+{
+ struct ath10k *ar = inode->i_private;
+ void *buf;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH10K_STATE_ON) {
+ ret = -ENETDOWN;
+ goto err_unlock;
+ }
+
+ buf = vmalloc(ATH10K_TPC_CONFIG_BUF_SIZE);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto err_unlock;
+ }
+
+ ret = ath10k_debug_tpc_stats_final_request(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to request tpc stats final: %d\n",
+ ret);
+ goto err_free;
+ }
+
+ ath10k_tpc_stats_fill(ar, ar->debug.tpc_stats, buf);
+ file->private_data = buf;
+
+ mutex_unlock(&ar->conf_mutex);
+ return 0;
+
+err_free:
+ vfree(buf);
+
+err_unlock:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static int ath10k_tpc_stats_final_release(struct inode *inode,
+ struct file *file)
+{
+ vfree(file->private_data);
+
+ return 0;
+}
+
+static ssize_t ath10k_tpc_stats_final_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ const char *buf = file->private_data;
+ size_t len = strlen(buf);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_tpc_stats_final = {
+ .open = ath10k_tpc_stats_final_open,
+ .release = ath10k_tpc_stats_final_release,
+ .read = ath10k_tpc_stats_final_read,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath10k_write_warm_hw_reset(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ int ret;
+ bool val;
+
+ if (kstrtobool_from_user(user_buf, count, &val))
+ return -EFAULT;
+
+ if (!val)
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH10K_STATE_ON) {
+ ret = -ENETDOWN;
+ goto exit;
+ }
+
+ ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->pdev_reset,
+ WMI_RST_MODE_WARM_RESET);
+
+ if (ret) {
+ ath10k_warn(ar, "failed to enable warm hw reset: %d\n", ret);
+ goto exit;
+ }
+
+ ret = count;
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static const struct file_operations fops_warm_hw_reset = {
+ .write = ath10k_write_warm_hw_reset,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static void ath10k_peer_ps_state_disable(void *data,
+ struct ieee80211_sta *sta)
+{
+ struct ath10k *ar = data;
+ struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+
+ spin_lock_bh(&ar->data_lock);
+ arsta->peer_ps_state = WMI_PEER_PS_STATE_DISABLED;
+ spin_unlock_bh(&ar->data_lock);
+}
+
+static ssize_t ath10k_write_ps_state_enable(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ int ret;
+ u32 param;
+ u8 ps_state_enable;
+
+ if (kstrtou8_from_user(user_buf, count, 0, &ps_state_enable))
+ return -EINVAL;
+
+ if (ps_state_enable > 1)
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->ps_state_enable == ps_state_enable) {
+ ret = count;
+ goto exit;
+ }
+
+ param = ar->wmi.pdev_param->peer_sta_ps_statechg_enable;
+ ret = ath10k_wmi_pdev_set_param(ar, param, ps_state_enable);
+ if (ret) {
+ ath10k_warn(ar, "failed to enable ps_state_enable: %d\n",
+ ret);
+ goto exit;
+ }
+ ar->ps_state_enable = ps_state_enable;
+
+ if (!ar->ps_state_enable)
+ ieee80211_iterate_stations_atomic(ar->hw,
+ ath10k_peer_ps_state_disable,
+ ar);
+
+ ret = count;
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static ssize_t ath10k_read_ps_state_enable(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ int len = 0;
+ char buf[32];
+
+ mutex_lock(&ar->conf_mutex);
+ len = scnprintf(buf, sizeof(buf) - len, "%d\n",
+ ar->ps_state_enable);
+ mutex_unlock(&ar->conf_mutex);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_ps_state_enable = {
+ .read = ath10k_read_ps_state_enable,
+ .write = ath10k_write_ps_state_enable,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath10k_write_reset_htt_stats(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ unsigned long reset;
+ int ret;
+
+ ret = kstrtoul_from_user(user_buf, count, 0, &reset);
+ if (ret)
+ return ret;
+
+ if (reset == 0 || reset > 0x1ffff)
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+
+ ar->debug.reset_htt_stats = reset;
+
+ ret = ath10k_debug_htt_stats_req(ar);
+ if (ret)
+ goto out;
+
+ ar->debug.reset_htt_stats = 0;
+ ret = count;
+
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static const struct file_operations fops_reset_htt_stats = {
+ .write = ath10k_write_reset_htt_stats,
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .llseek = default_llseek,
+};
+
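+ /* Rough lifecycle: create() allocates state at probe time, register()
+ * populates debugfs, start()/stop() follow the firmware state with
+ * conf_mutex held and destroy() releases everything on teardown.
+ */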
+int ath10k_debug_create(struct ath10k *ar)
+{
+ ar->debug.cal_data = vzalloc(ATH10K_DEBUG_CAL_DATA_LEN);
+ if (!ar->debug.cal_data)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&ar->debug.fw_stats.pdevs);
+ INIT_LIST_HEAD(&ar->debug.fw_stats.vdevs);
+ INIT_LIST_HEAD(&ar->debug.fw_stats.peers);
+ INIT_LIST_HEAD(&ar->debug.fw_stats.peers_extd);
+
+ return 0;
+}
+
+void ath10k_debug_destroy(struct ath10k *ar)
+{
+ vfree(ar->debug.cal_data);
+ ar->debug.cal_data = NULL;
+
+ ath10k_debug_fw_stats_reset(ar);
+
+ kfree(ar->debug.tpc_stats);
+ kfree(ar->debug.tpc_stats_final);
+}
+
+int ath10k_debug_register(struct ath10k *ar)
+{
+ ar->debug.debugfs_phy = debugfs_create_dir("ath10k",
+ ar->hw->wiphy->debugfsdir);
+ if (IS_ERR_OR_NULL(ar->debug.debugfs_phy)) {
+ if (IS_ERR(ar->debug.debugfs_phy))
+ return PTR_ERR(ar->debug.debugfs_phy);
+
+ return -ENOMEM;
+ }
+
+ INIT_DELAYED_WORK(&ar->debug.htt_stats_dwork,
+ ath10k_debug_htt_stats_dwork);
+
+ init_completion(&ar->debug.tpc_complete);
+ init_completion(&ar->debug.fw_stats_complete);
+
+ debugfs_create_file("fw_stats", 0400, ar->debug.debugfs_phy, ar,
+ &fops_fw_stats);
+
+ debugfs_create_file("fw_reset_stats", 0400, ar->debug.debugfs_phy, ar,
+ &fops_fw_reset_stats);
+
+ debugfs_create_file("wmi_services", 0400, ar->debug.debugfs_phy, ar,
+ &fops_wmi_services);
+
+ debugfs_create_file("simulate_fw_crash", 0600, ar->debug.debugfs_phy, ar,
+ &fops_simulate_fw_crash);
+
+ debugfs_create_file("reg_addr", 0600, ar->debug.debugfs_phy, ar,
+ &fops_reg_addr);
+
+ debugfs_create_file("reg_value", 0600, ar->debug.debugfs_phy, ar,
+ &fops_reg_value);
+
+ debugfs_create_file("mem_value", 0600, ar->debug.debugfs_phy, ar,
+ &fops_mem_value);
+
+ debugfs_create_file("chip_id", 0400, ar->debug.debugfs_phy, ar,
+ &fops_chip_id);
+
+ debugfs_create_file("htt_stats_mask", 0600, ar->debug.debugfs_phy, ar,
+ &fops_htt_stats_mask);
+
+ debugfs_create_file("htt_max_amsdu_ampdu", 0600, ar->debug.debugfs_phy, ar,
+ &fops_htt_max_amsdu_ampdu);
+
+ debugfs_create_file("fw_dbglog", 0600, ar->debug.debugfs_phy, ar,
+ &fops_fw_dbglog);
+
+ if (!test_bit(ATH10K_FW_FEATURE_NON_BMI,
+ ar->normal_mode_fw.fw_file.fw_features)) {
+ debugfs_create_file("cal_data", 0400, ar->debug.debugfs_phy, ar,
+ &fops_cal_data);
+
+ debugfs_create_file("nf_cal_period", 0600, ar->debug.debugfs_phy, ar,
+ &fops_nf_cal_period);
+ }
+
+ debugfs_create_file("ani_enable", 0600, ar->debug.debugfs_phy, ar,
+ &fops_ani_enable);
+
+ if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED)) {
+ debugfs_create_file("dfs_simulate_radar", 0200, ar->debug.debugfs_phy,
+ ar, &fops_simulate_radar);
+
+ debugfs_create_bool("dfs_block_radar_events", 0200,
+ ar->debug.debugfs_phy,
+ &ar->dfs_block_radar_events);
+
+ debugfs_create_file("dfs_stats", 0400, ar->debug.debugfs_phy, ar,
+ &fops_dfs_stats);
+ }
+
+ debugfs_create_file("pktlog_filter", 0644, ar->debug.debugfs_phy, ar,
+ &fops_pktlog_filter);
+
+ if (test_bit(WMI_SERVICE_THERM_THROT, ar->wmi.svc_map))
+ debugfs_create_file("quiet_period", 0644, ar->debug.debugfs_phy, ar,
+ &fops_quiet_period);
+
+ debugfs_create_file("tpc_stats", 0400, ar->debug.debugfs_phy, ar,
+ &fops_tpc_stats);
+
+ if (test_bit(WMI_SERVICE_COEX_GPIO, ar->wmi.svc_map))
+ debugfs_create_file("btcoex", 0644, ar->debug.debugfs_phy, ar,
+ &fops_btcoex);
+
+ if (test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map)) {
+ debugfs_create_file("peer_stats", 0644, ar->debug.debugfs_phy, ar,
+ &fops_peer_stats);
+
+ debugfs_create_file("enable_extd_tx_stats", 0644,
+ ar->debug.debugfs_phy, ar,
+ &fops_enable_extd_tx_stats);
+ }
+
+ debugfs_create_file("fw_checksums", 0400, ar->debug.debugfs_phy, ar,
+ &fops_fw_checksums);
+
+ if (IS_ENABLED(CONFIG_MAC80211_DEBUGFS))
+ debugfs_create_file("sta_tid_stats_mask", 0600,
+ ar->debug.debugfs_phy,
+ ar, &fops_sta_tid_stats_mask);
+
+ if (test_bit(WMI_SERVICE_TPC_STATS_FINAL, ar->wmi.svc_map))
+ debugfs_create_file("tpc_stats_final", 0400,
+ ar->debug.debugfs_phy, ar,
+ &fops_tpc_stats_final);
+
+ if (test_bit(WMI_SERVICE_RESET_CHIP, ar->wmi.svc_map))
+ debugfs_create_file("warm_hw_reset", 0600,
+ ar->debug.debugfs_phy, ar,
+ &fops_warm_hw_reset);
+
+ debugfs_create_file("ps_state_enable", 0600, ar->debug.debugfs_phy, ar,
+ &fops_ps_state_enable);
+
+ debugfs_create_file("reset_htt_stats", 0200, ar->debug.debugfs_phy, ar,
+ &fops_reset_htt_stats);
+
+ return 0;
+}
+
+void ath10k_debug_unregister(struct ath10k *ar)
+{
+ cancel_delayed_work_sync(&ar->debug.htt_stats_dwork);
+}
+
+#endif /* CONFIG_ATH10K_DEBUGFS */
+
+#ifdef CONFIG_ATH10K_DEBUG
+void __ath10k_dbg(struct ath10k *ar, enum ath10k_debug_mask mask,
+ const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+
+ va_start(args, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ if (ath10k_debug_mask & mask)
+ dev_printk(KERN_DEBUG, ar->dev, "%pV", &vaf);
+
+ trace_ath10k_log_dbg(ar, mask, &vaf);
+
+ va_end(args);
+}
+EXPORT_SYMBOL(__ath10k_dbg);
+
+void ath10k_dbg_dump(struct ath10k *ar,
+ enum ath10k_debug_mask mask,
+ const char *msg, const char *prefix,
+ const void *buf, size_t len)
+{
+ char linebuf[256];
+ size_t linebuflen;
+ const void *ptr;
+
+ if (ath10k_debug_mask & mask) {
+ if (msg)
+ __ath10k_dbg(ar, mask, "%s\n", msg);
+
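+ /* dump 16 bytes per line, each line prefixed with its buffer offset */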
+ for (ptr = buf; (ptr - buf) < len; ptr += 16) {
+ linebuflen = 0;
+ linebuflen += scnprintf(linebuf + linebuflen,
+ sizeof(linebuf) - linebuflen,
+ "%s%08x: ",
+ (prefix ? prefix : ""),
+ (unsigned int)(ptr - buf));
+ hex_dump_to_buffer(ptr, len - (ptr - buf), 16, 1,
+ linebuf + linebuflen,
+ sizeof(linebuf) - linebuflen, true);
+ dev_printk(KERN_DEBUG, ar->dev, "%s\n", linebuf);
+ }
+ }
+
+ /* tracing code doesn't like null strings :/ */
+ trace_ath10k_log_dbg_dump(ar, msg ? msg : "", prefix ? prefix : "",
+ buf, len);
+}
+EXPORT_SYMBOL(ath10k_dbg_dump);
+
+#endif /* CONFIG_ATH10K_DEBUG */
diff --git a/drivers/net/wireless/ath/ath10k/debug.h b/drivers/net/wireless/ath/ath10k/debug.h
new file mode 100644
index 000000000000..0af787f49b33
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/debug.h
@@ -0,0 +1,288 @@
+/* SPDX-License-Identifier: ISC */
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DEBUG_H_
+#define _DEBUG_H_
+
+#include <linux/types.h>
+#include "trace.h"
+
+enum ath10k_debug_mask {
+ ATH10K_DBG_PCI = 0x00000001,
+ ATH10K_DBG_WMI = 0x00000002,
+ ATH10K_DBG_HTC = 0x00000004,
+ ATH10K_DBG_HTT = 0x00000008,
+ ATH10K_DBG_MAC = 0x00000010,
+ ATH10K_DBG_BOOT = 0x00000020,
+ ATH10K_DBG_PCI_DUMP = 0x00000040,
+ ATH10K_DBG_HTT_DUMP = 0x00000080,
+ ATH10K_DBG_MGMT = 0x00000100,
+ ATH10K_DBG_DATA = 0x00000200,
+ ATH10K_DBG_BMI = 0x00000400,
+ ATH10K_DBG_REGULATORY = 0x00000800,
+ ATH10K_DBG_TESTMODE = 0x00001000,
+ ATH10K_DBG_WMI_PRINT = 0x00002000,
+ ATH10K_DBG_PCI_PS = 0x00004000,
+ ATH10K_DBG_AHB = 0x00008000,
+ ATH10K_DBG_SDIO = 0x00010000,
+ ATH10K_DBG_SDIO_DUMP = 0x00020000,
+ ATH10K_DBG_USB = 0x00040000,
+ ATH10K_DBG_USB_BULK = 0x00080000,
+ ATH10K_DBG_SNOC = 0x00100000,
+ ATH10K_DBG_QMI = 0x00200000,
+ ATH10K_DBG_STA = 0x00400000,
+ ATH10K_DBG_ANY = 0xffffffff,
+};
+
+enum ath10k_pktlog_filter {
+ ATH10K_PKTLOG_RX = 0x000000001,
+ ATH10K_PKTLOG_TX = 0x000000002,
+ ATH10K_PKTLOG_RCFIND = 0x000000004,
+ ATH10K_PKTLOG_RCUPDATE = 0x000000008,
+ ATH10K_PKTLOG_DBG_PRINT = 0x000000010,
+ ATH10K_PKTLOG_PEER_STATS = 0x000000040,
+ ATH10K_PKTLOG_ANY = 0x00000005f,
+};
+
+enum ath10k_dbg_aggr_mode {
+ ATH10K_DBG_AGGR_MODE_AUTO,
+ ATH10K_DBG_AGGR_MODE_MANUAL,
+ ATH10K_DBG_AGGR_MODE_MAX,
+};
+
+/* Types of packet log events */
+enum ath_pktlog_type {
+ ATH_PKTLOG_TYPE_TX_CTRL = 1,
+ ATH_PKTLOG_TYPE_TX_STAT,
+};
+
+struct ath10k_pktlog_hdr {
+ __le16 flags;
+ __le16 missed_cnt;
+ __le16 log_type; /* Type of log information following this header */
+ __le16 size; /* Size of variable length log information in bytes */
+ __le32 timestamp;
+ u8 payload[];
+} __packed;
+
+/* FIXME: How to calculate the buffer size sanely? */
+#define ATH10K_FW_STATS_BUF_SIZE (1024 * 1024)
+
+#define ATH10K_TX_POWER_MAX_VAL 70
+#define ATH10K_TX_POWER_MIN_VAL 0
+
+extern unsigned int ath10k_debug_mask;
+
+__printf(2, 3) void ath10k_info(struct ath10k *ar, const char *fmt, ...);
+__printf(2, 3) void ath10k_err(struct ath10k *ar, const char *fmt, ...);
+__printf(2, 3) void ath10k_warn(struct ath10k *ar, const char *fmt, ...);
+
+void ath10k_debug_print_hwfw_info(struct ath10k *ar);
+void ath10k_debug_print_board_info(struct ath10k *ar);
+void ath10k_debug_print_boot_info(struct ath10k *ar);
+void ath10k_print_driver_info(struct ath10k *ar);
+
+#ifdef CONFIG_ATH10K_DEBUGFS
+int ath10k_debug_start(struct ath10k *ar);
+void ath10k_debug_stop(struct ath10k *ar);
+int ath10k_debug_create(struct ath10k *ar);
+void ath10k_debug_destroy(struct ath10k *ar);
+int ath10k_debug_register(struct ath10k *ar);
+void ath10k_debug_unregister(struct ath10k *ar);
+void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_debug_tpc_stats_process(struct ath10k *ar,
+ struct ath10k_tpc_stats *tpc_stats);
+void
+ath10k_debug_tpc_stats_final_process(struct ath10k *ar,
+ struct ath10k_tpc_stats_final *tpc_stats);
+void ath10k_debug_dbglog_add(struct ath10k *ar, u8 *buffer, int len);
+
+#define ATH10K_DFS_STAT_INC(ar, c) (ar->debug.dfs_stats.c++)
+
+void ath10k_debug_get_et_strings(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u32 sset, u8 *data);
+int ath10k_debug_get_et_sset_count(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, int sset);
+void ath10k_debug_get_et_stats(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ethtool_stats *stats, u64 *data);
+
+static inline u64 ath10k_debug_get_fw_dbglog_mask(struct ath10k *ar)
+{
+ return ar->debug.fw_dbglog_mask;
+}
+
+static inline u32 ath10k_debug_get_fw_dbglog_level(struct ath10k *ar)
+{
+ return ar->debug.fw_dbglog_level;
+}
+
+static inline int ath10k_debug_is_extd_tx_stats_enabled(struct ath10k *ar)
+{
+ return ar->debug.enable_extd_tx_stats;
+}
+
+int ath10k_debug_fw_stats_request(struct ath10k *ar);
+
+#else
+
+static inline int ath10k_debug_start(struct ath10k *ar)
+{
+ return 0;
+}
+
+static inline void ath10k_debug_stop(struct ath10k *ar)
+{
+}
+
+static inline int ath10k_debug_create(struct ath10k *ar)
+{
+ return 0;
+}
+
+static inline void ath10k_debug_destroy(struct ath10k *ar)
+{
+}
+
+static inline int ath10k_debug_register(struct ath10k *ar)
+{
+ return 0;
+}
+
+static inline void ath10k_debug_unregister(struct ath10k *ar)
+{
+}
+
+static inline void ath10k_debug_fw_stats_process(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+}
+
+static inline void ath10k_debug_tpc_stats_process(struct ath10k *ar,
+ struct ath10k_tpc_stats *tpc_stats)
+{
+ kfree(tpc_stats);
+}
+
+static inline void
+ath10k_debug_tpc_stats_final_process(struct ath10k *ar,
+ struct ath10k_tpc_stats_final *tpc_stats)
+{
+ kfree(tpc_stats);
+}
+
+static inline void ath10k_debug_dbglog_add(struct ath10k *ar, u8 *buffer,
+ int len)
+{
+}
+
+static inline u64 ath10k_debug_get_fw_dbglog_mask(struct ath10k *ar)
+{
+ return 0;
+}
+
+static inline u32 ath10k_debug_get_fw_dbglog_level(struct ath10k *ar)
+{
+ return 0;
+}
+
+static inline int ath10k_debug_is_extd_tx_stats_enabled(struct ath10k *ar)
+{
+ return 0;
+}
+
+static inline int ath10k_debug_fw_stats_request(struct ath10k *ar)
+{
+ return 0;
+}
+
+#define ATH10K_DFS_STAT_INC(ar, c) do { } while (0)
+
+#define ath10k_debug_get_et_strings NULL
+#define ath10k_debug_get_et_sset_count NULL
+#define ath10k_debug_get_et_stats NULL
+
+#endif /* CONFIG_ATH10K_DEBUGFS */
+#ifdef CONFIG_MAC80211_DEBUGFS
+void ath10k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, struct dentry *dir);
+void ath10k_sta_update_rx_duration(struct ath10k *ar,
+ struct ath10k_fw_stats *stats);
+void ath10k_sta_update_rx_tid_stats(struct ath10k *ar, u8 *first_hdr,
+ unsigned long num_msdus,
+ enum ath10k_pkt_rx_err err,
+ unsigned long unchain_cnt,
+ unsigned long drop_cnt,
+ unsigned long drop_cnt_filter,
+ unsigned long queued_msdus);
+void ath10k_sta_update_rx_tid_stats_ampdu(struct ath10k *ar,
+ u16 peer_id, u8 tid,
+ struct htt_rx_indication_mpdu_range *ranges,
+ int num_ranges);
+#else
+static inline
+void ath10k_sta_update_rx_duration(struct ath10k *ar,
+ struct ath10k_fw_stats *stats)
+{
+}
+
+static inline
+void ath10k_sta_update_rx_tid_stats(struct ath10k *ar, u8 *first_hdr,
+ unsigned long num_msdus,
+ enum ath10k_pkt_rx_err err,
+ unsigned long unchain_cnt,
+ unsigned long drop_cnt,
+ unsigned long drop_cnt_filter,
+ unsigned long queued_msdus)
+{
+}
+
+static inline
+void ath10k_sta_update_rx_tid_stats_ampdu(struct ath10k *ar,
+ u16 peer_id, u8 tid,
+ struct htt_rx_indication_mpdu_range *ranges,
+ int num_ranges)
+{
+}
+#endif /* CONFIG_MAC80211_DEBUGFS */
+
+#ifdef CONFIG_ATH10K_DEBUG
+__printf(3, 4) void __ath10k_dbg(struct ath10k *ar,
+ enum ath10k_debug_mask mask,
+ const char *fmt, ...);
+void ath10k_dbg_dump(struct ath10k *ar,
+ enum ath10k_debug_mask mask,
+ const char *msg, const char *prefix,
+ const void *buf, size_t len);
+#else /* CONFIG_ATH10K_DEBUG */
+
+static inline int __ath10k_dbg(struct ath10k *ar,
+ enum ath10k_debug_mask dbg_mask,
+ const char *fmt, ...)
+{
+ return 0;
+}
+
+static inline void ath10k_dbg_dump(struct ath10k *ar,
+ enum ath10k_debug_mask mask,
+ const char *msg, const char *prefix,
+ const void *buf, size_t len)
+{
+}
+#endif /* CONFIG_ATH10K_DEBUG */
+
+/* Avoid calling __ath10k_dbg() if debug_mask is not set and tracing
+ * disabled.
+ */
+#define ath10k_dbg(ar, dbg_mask, fmt, ...) \
+do { \
+ if ((ath10k_debug_mask & dbg_mask) || \
+ trace_ath10k_log_dbg_enabled()) \
+ __ath10k_dbg(ar, dbg_mask, fmt, ##__VA_ARGS__); \
+} while (0)
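+
+ /* Usage sketch (hypothetical call site): the mask test lives in the macro,
+ * so the format arguments are not evaluated at all when both the module
+ * debug mask and tracing are disabled:
+ *
+ * ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi cmd %d\n", cmd_id);
+ */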
+#endif /* _DEBUG_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/debugfs_sta.c b/drivers/net/wireless/ath/ath10k/debugfs_sta.c
new file mode 100644
index 000000000000..b9fb192e0b48
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/debugfs_sta.c
@@ -0,0 +1,779 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (c) 2014-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#include "core.h"
+#include "wmi-ops.h"
+#include "txrx.h"
+#include "debug.h"
+
+static void ath10k_rx_stats_update_amsdu_subfrm(struct ath10k *ar,
+ struct ath10k_sta_tid_stats *stats,
+ u32 msdu_count)
+{
+ if (msdu_count == 1)
+ stats->rx_pkt_amsdu[ATH10K_AMSDU_SUBFRM_NUM_1]++;
+ else if (msdu_count == 2)
+ stats->rx_pkt_amsdu[ATH10K_AMSDU_SUBFRM_NUM_2]++;
+ else if (msdu_count == 3)
+ stats->rx_pkt_amsdu[ATH10K_AMSDU_SUBFRM_NUM_3]++;
+ else if (msdu_count == 4)
+ stats->rx_pkt_amsdu[ATH10K_AMSDU_SUBFRM_NUM_4]++;
+ else if (msdu_count > 4)
+ stats->rx_pkt_amsdu[ATH10K_AMSDU_SUBFRM_NUM_MORE]++;
+}
+
+static void ath10k_rx_stats_update_ampdu_subfrm(struct ath10k *ar,
+ struct ath10k_sta_tid_stats *stats,
+ u32 mpdu_count)
+{
+ if (mpdu_count <= 10)
+ stats->rx_pkt_ampdu[ATH10K_AMPDU_SUBFRM_NUM_10]++;
+ else if (mpdu_count <= 20)
+ stats->rx_pkt_ampdu[ATH10K_AMPDU_SUBFRM_NUM_20]++;
+ else if (mpdu_count <= 30)
+ stats->rx_pkt_ampdu[ATH10K_AMPDU_SUBFRM_NUM_30]++;
+ else if (mpdu_count <= 40)
+ stats->rx_pkt_ampdu[ATH10K_AMPDU_SUBFRM_NUM_40]++;
+ else if (mpdu_count <= 50)
+ stats->rx_pkt_ampdu[ATH10K_AMPDU_SUBFRM_NUM_50]++;
+ else if (mpdu_count <= 60)
+ stats->rx_pkt_ampdu[ATH10K_AMPDU_SUBFRM_NUM_60]++;
+ else if (mpdu_count > 60)
+ stats->rx_pkt_ampdu[ATH10K_AMPDU_SUBFRM_NUM_MORE]++;
+}
+
+void ath10k_sta_update_rx_tid_stats_ampdu(struct ath10k *ar, u16 peer_id, u8 tid,
+ struct htt_rx_indication_mpdu_range *ranges,
+ int num_ranges)
+{
+ struct ath10k_sta *arsta;
+ struct ath10k_peer *peer;
+ int i;
+
+ if (tid > IEEE80211_NUM_TIDS || !(ar->sta_tid_stats_mask & BIT(tid)))
+ return;
+
+ rcu_read_lock();
+ spin_lock_bh(&ar->data_lock);
+
+ peer = ath10k_peer_find_by_id(ar, peer_id);
+ if (!peer || !peer->sta)
+ goto out;
+
+ arsta = (struct ath10k_sta *)peer->sta->drv_priv;
+
+ for (i = 0; i < num_ranges; i++)
+ ath10k_rx_stats_update_ampdu_subfrm(ar,
+ &arsta->tid_stats[tid],
+ ranges[i].mpdu_count);
+
+out:
+ spin_unlock_bh(&ar->data_lock);
+ rcu_read_unlock();
+}
+
+void ath10k_sta_update_rx_tid_stats(struct ath10k *ar, u8 *first_hdr,
+ unsigned long num_msdus,
+ enum ath10k_pkt_rx_err err,
+ unsigned long unchain_cnt,
+ unsigned long drop_cnt,
+ unsigned long drop_cnt_filter,
+ unsigned long queued_msdus)
+{
+ struct ieee80211_sta *sta;
+ struct ath10k_sta *arsta;
+ struct ieee80211_hdr *hdr;
+ struct ath10k_sta_tid_stats *stats;
+ u8 tid = IEEE80211_NUM_TIDS;
+ bool non_data_frm = false;
+
+ hdr = (struct ieee80211_hdr *)first_hdr;
+ if (!ieee80211_is_data(hdr->frame_control))
+ non_data_frm = true;
+
+ if (ieee80211_is_data_qos(hdr->frame_control))
+ tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
+
+ if (!(ar->sta_tid_stats_mask & BIT(tid)) || non_data_frm)
+ return;
+
+ rcu_read_lock();
+
+ sta = ieee80211_find_sta_by_ifaddr(ar->hw, hdr->addr2, NULL);
+ if (!sta)
+ goto exit;
+
+ arsta = (struct ath10k_sta *)sta->drv_priv;
+
+ spin_lock_bh(&ar->data_lock);
+ stats = &arsta->tid_stats[tid];
+ stats->rx_pkt_from_fw += num_msdus;
+ stats->rx_pkt_unchained += unchain_cnt;
+ stats->rx_pkt_drop_chained += drop_cnt;
+ stats->rx_pkt_drop_filter += drop_cnt_filter;
+ if (err != ATH10K_PKT_RX_ERR_MAX)
+ stats->rx_pkt_err[err] += queued_msdus;
+ stats->rx_pkt_queued_for_mac += queued_msdus;
+ ath10k_rx_stats_update_amsdu_subfrm(ar, &arsta->tid_stats[tid],
+ num_msdus);
+ spin_unlock_bh(&ar->data_lock);
+
+exit:
+ rcu_read_unlock();
+}
+
+static void ath10k_sta_update_extd_stats_rx_duration(struct ath10k *ar,
+ struct ath10k_fw_stats *stats)
+{
+ struct ath10k_fw_extd_stats_peer *peer;
+ struct ieee80211_sta *sta;
+ struct ath10k_sta *arsta;
+
+ rcu_read_lock();
+ list_for_each_entry(peer, &stats->peers_extd, list) {
+ sta = ieee80211_find_sta_by_ifaddr(ar->hw, peer->peer_macaddr,
+ NULL);
+ if (!sta)
+ continue;
+ arsta = (struct ath10k_sta *)sta->drv_priv;
+ arsta->rx_duration += (u64)peer->rx_duration;
+ }
+ rcu_read_unlock();
+}
+
+static void ath10k_sta_update_stats_rx_duration(struct ath10k *ar,
+ struct ath10k_fw_stats *stats)
+{
+ struct ath10k_fw_stats_peer *peer;
+ struct ieee80211_sta *sta;
+ struct ath10k_sta *arsta;
+
+ rcu_read_lock();
+ list_for_each_entry(peer, &stats->peers, list) {
+ sta = ieee80211_find_sta_by_ifaddr(ar->hw, peer->peer_macaddr,
+ NULL);
+ if (!sta)
+ continue;
+ arsta = (struct ath10k_sta *)sta->drv_priv;
+ arsta->rx_duration += (u64)peer->rx_duration;
+ }
+ rcu_read_unlock();
+}
+
+void ath10k_sta_update_rx_duration(struct ath10k *ar,
+ struct ath10k_fw_stats *stats)
+{
+ if (stats->extended)
+ ath10k_sta_update_extd_stats_rx_duration(ar, stats);
+ else
+ ath10k_sta_update_stats_rx_duration(ar, stats);
+}
+
+static ssize_t ath10k_dbg_sta_read_aggr_mode(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_sta *sta = file->private_data;
+ struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+ struct ath10k *ar = arsta->arvif->ar;
+ char buf[32];
+ int len = 0;
+
+ mutex_lock(&ar->conf_mutex);
+ len = scnprintf(buf, sizeof(buf) - len, "aggregation mode: %s\n",
+ (arsta->aggr_mode == ATH10K_DBG_AGGR_MODE_AUTO) ?
+ "auto" : "manual");
+ mutex_unlock(&ar->conf_mutex);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t ath10k_dbg_sta_write_aggr_mode(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_sta *sta = file->private_data;
+ struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+ struct ath10k *ar = arsta->arvif->ar;
+ u32 aggr_mode;
+ int ret;
+
+ if (kstrtouint_from_user(user_buf, count, 0, &aggr_mode))
+ return -EINVAL;
+
+ if (aggr_mode >= ATH10K_DBG_AGGR_MODE_MAX)
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+ if ((ar->state != ATH10K_STATE_ON) ||
+ (aggr_mode == arsta->aggr_mode)) {
+ ret = count;
+ goto out;
+ }
+
+ ret = ath10k_wmi_addba_clear_resp(ar, arsta->arvif->vdev_id, sta->addr);
+ if (ret) {
+ ath10k_warn(ar, "failed to clear addba session ret: %d\n", ret);
+ goto out;
+ }
+
+ arsta->aggr_mode = aggr_mode;
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static const struct file_operations fops_aggr_mode = {
+ .read = ath10k_dbg_sta_read_aggr_mode,
+ .write = ath10k_dbg_sta_write_aggr_mode,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath10k_dbg_sta_write_addba(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_sta *sta = file->private_data;
+ struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+ struct ath10k *ar = arsta->arvif->ar;
+ u32 tid, buf_size;
+ int ret;
+ char buf[64] = {};
+
+ ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos,
+ user_buf, count);
+ if (ret <= 0)
+ return ret;
+
+ ret = sscanf(buf, "%u %u", &tid, &buf_size);
+ if (ret != 2)
+ return -EINVAL;
+
+ /* Valid TID values are 0 through 15 */
+ if (tid > HTT_DATA_TX_EXT_TID_MGMT - 2)
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+ if ((ar->state != ATH10K_STATE_ON) ||
+ (arsta->aggr_mode != ATH10K_DBG_AGGR_MODE_MANUAL)) {
+ ret = count;
+ goto out;
+ }
+
+ ret = ath10k_wmi_addba_send(ar, arsta->arvif->vdev_id, sta->addr,
+ tid, buf_size);
+ if (ret) {
+ ath10k_warn(ar, "failed to send addba request: vdev_id %u peer %pM tid %u buf_size %u\n",
+ arsta->arvif->vdev_id, sta->addr, tid, buf_size);
+ }
+
+ ret = count;
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static const struct file_operations fops_addba = {
+ .write = ath10k_dbg_sta_write_addba,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
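+
+ /* Example (hypothetical paths): with aggr_mode set to manual, request an
+ * ADDBA for TID 0 with a 64-frame buffer from the station's debugfs
+ * directory:
+ *
+ * echo "0 64" > .../stations/<mac>/addba
+ */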
+
+static ssize_t ath10k_dbg_sta_write_addba_resp(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_sta *sta = file->private_data;
+ struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+ struct ath10k *ar = arsta->arvif->ar;
+ u32 tid, status;
+ int ret;
+ char buf[64] = {};
+
+ ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos,
+ user_buf, count);
+ if (ret <= 0)
+ return ret;
+
+ ret = sscanf(buf, "%u %u", &tid, &status);
+ if (ret != 2)
+ return -EINVAL;
+
+ /* Valid TID values are 0 through 15 */
+ if (tid > HTT_DATA_TX_EXT_TID_MGMT - 2)
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+ if ((ar->state != ATH10K_STATE_ON) ||
+ (arsta->aggr_mode != ATH10K_DBG_AGGR_MODE_MANUAL)) {
+ ret = count;
+ goto out;
+ }
+
+ ret = ath10k_wmi_addba_set_resp(ar, arsta->arvif->vdev_id, sta->addr,
+ tid, status);
+ if (ret) {
+ ath10k_warn(ar, "failed to send addba response: vdev_id %u peer %pM tid %u status%u\n",
+ arsta->arvif->vdev_id, sta->addr, tid, status);
+ }
+ ret = count;
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static const struct file_operations fops_addba_resp = {
+ .write = ath10k_dbg_sta_write_addba_resp,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath10k_dbg_sta_write_delba(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_sta *sta = file->private_data;
+ struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+ struct ath10k *ar = arsta->arvif->ar;
+ u32 tid, initiator, reason;
+ int ret;
+ char buf[64] = {};
+
+ ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos,
+ user_buf, count);
+ if (ret <= 0)
+ return ret;
+
+ ret = sscanf(buf, "%u %u %u", &tid, &initiator, &reason);
+ if (ret != 3)
+ return -EINVAL;
+
+ /* Valid TID values are 0 through 15 */
+ if (tid > HTT_DATA_TX_EXT_TID_MGMT - 2)
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+ if ((ar->state != ATH10K_STATE_ON) ||
+ (arsta->aggr_mode != ATH10K_DBG_AGGR_MODE_MANUAL)) {
+ ret = count;
+ goto out;
+ }
+
+ ret = ath10k_wmi_delba_send(ar, arsta->arvif->vdev_id, sta->addr,
+ tid, initiator, reason);
+ if (ret) {
+ ath10k_warn(ar, "failed to send delba: vdev_id %u peer %pM tid %u initiator %u reason %u\n",
+ arsta->arvif->vdev_id, sta->addr, tid, initiator,
+ reason);
+ }
+ ret = count;
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static const struct file_operations fops_delba = {
+ .write = ath10k_dbg_sta_write_delba,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath10k_dbg_sta_read_peer_debug_trigger(struct file *file,
+ char __user *user_buf,
+ size_t count,
+ loff_t *ppos)
+{
+ struct ieee80211_sta *sta = file->private_data;
+ struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+ struct ath10k *ar = arsta->arvif->ar;
+ char buf[48];
+ int len = 0;
+
+ mutex_lock(&ar->conf_mutex);
+ len = scnprintf(buf, sizeof(buf) - len,
+ "Write 1 to trigger the debug logs once\n");
+ mutex_unlock(&ar->conf_mutex);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t
+ath10k_dbg_sta_write_peer_debug_trigger(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_sta *sta = file->private_data;
+ struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+ struct ath10k *ar = arsta->arvif->ar;
+ u8 peer_debug_trigger;
+ int ret;
+
+ if (kstrtou8_from_user(user_buf, count, 0, &peer_debug_trigger))
+ return -EINVAL;
+
+ if (peer_debug_trigger != 1)
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH10K_STATE_ON) {
+ ret = -ENETDOWN;
+ goto out;
+ }
+
+ ret = ath10k_wmi_peer_set_param(ar, arsta->arvif->vdev_id, sta->addr,
+ ar->wmi.peer_param->debug, peer_debug_trigger);
+ if (ret) {
+ ath10k_warn(ar, "failed to set param to trigger peer tid logs for station ret: %d\n",
+ ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret ?: count;
+}
+
+static const struct file_operations fops_peer_debug_trigger = {
+ .open = simple_open,
+ .read = ath10k_dbg_sta_read_peer_debug_trigger,
+ .write = ath10k_dbg_sta_write_peer_debug_trigger,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath10k_dbg_sta_read_peer_ps_state(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_sta *sta = file->private_data;
+ struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+ struct ath10k *ar = arsta->arvif->ar;
+ char buf[20];
+ int len = 0;
+
+ spin_lock_bh(&ar->data_lock);
+
+ len = scnprintf(buf, sizeof(buf) - len, "%d\n",
+ arsta->peer_ps_state);
+
+ spin_unlock_bh(&ar->data_lock);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_peer_ps_state = {
+ .open = simple_open,
+ .read = ath10k_dbg_sta_read_peer_ps_state,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static char *get_err_str(enum ath10k_pkt_rx_err i)
+{
+ switch (i) {
+ case ATH10K_PKT_RX_ERR_FCS:
+ return "fcs_err";
+ case ATH10K_PKT_RX_ERR_TKIP:
+ return "tkip_err";
+ case ATH10K_PKT_RX_ERR_CRYPT:
+ return "crypt_err";
+ case ATH10K_PKT_RX_ERR_PEER_IDX_INVAL:
+ return "peer_idx_inval";
+ case ATH10K_PKT_RX_ERR_MAX:
+ return "unknown";
+ }
+
+ return "unknown";
+}
+
+static char *get_num_ampdu_subfrm_str(enum ath10k_ampdu_subfrm_num i)
+{
+ switch (i) {
+ case ATH10K_AMPDU_SUBFRM_NUM_10:
+ return "up to 10";
+ case ATH10K_AMPDU_SUBFRM_NUM_20:
+ return "11-20";
+ case ATH10K_AMPDU_SUBFRM_NUM_30:
+ return "21-30";
+ case ATH10K_AMPDU_SUBFRM_NUM_40:
+ return "31-40";
+ case ATH10K_AMPDU_SUBFRM_NUM_50:
+ return "41-50";
+ case ATH10K_AMPDU_SUBFRM_NUM_60:
+ return "51-60";
+ case ATH10K_AMPDU_SUBFRM_NUM_MORE:
+ return ">60";
+ case ATH10K_AMPDU_SUBFRM_NUM_MAX:
+ return "0";
+ }
+
+ return "0";
+}
+
+static char *get_num_amsdu_subfrm_str(enum ath10k_amsdu_subfrm_num i)
+{
+ switch (i) {
+ case ATH10K_AMSDU_SUBFRM_NUM_1:
+ return "1";
+ case ATH10K_AMSDU_SUBFRM_NUM_2:
+ return "2";
+ case ATH10K_AMSDU_SUBFRM_NUM_3:
+ return "3";
+ case ATH10K_AMSDU_SUBFRM_NUM_4:
+ return "4";
+ case ATH10K_AMSDU_SUBFRM_NUM_MORE:
+ return ">4";
+ case ATH10K_AMSDU_SUBFRM_NUM_MAX:
+ return "0";
+ }
+
+ return "0";
+}
+
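+ /* Expects "ar", "stats", "buf", "buf_len", "len" and the loop variable "j"
+ * in the calling scope; prints one "[tid] count" cell per enabled TID,
+ * eight cells per line.
+ */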
+#define PRINT_TID_STATS(_field, _tabs) \
+ do { \
+ int k = 0; \
+ for (j = 0; j <= IEEE80211_NUM_TIDS; j++) { \
+ if (ar->sta_tid_stats_mask & BIT(j)) { \
+ len += scnprintf(buf + len, buf_len - len, \
+ "[%02d] %-10lu ", \
+ j, stats[j]._field); \
+ k++; \
+ if (k % 8 == 0) { \
+ len += scnprintf(buf + len, \
+ buf_len - len, "\n"); \
+ len += scnprintf(buf + len, \
+ buf_len - len, \
+ _tabs); \
+ } \
+ } \
+ } \
+ len += scnprintf(buf + len, buf_len - len, "\n"); \
+ } while (0)
+
+static ssize_t ath10k_dbg_sta_read_tid_stats(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_sta *sta = file->private_data;
+ struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+ struct ath10k *ar = arsta->arvif->ar;
+ struct ath10k_sta_tid_stats *stats = arsta->tid_stats;
+ size_t len = 0, buf_len = 1024 * IEEE80211_NUM_TIDS;
+ char *buf;
+ int i, j;
+ ssize_t ret;
+
+ buf = kzalloc(buf_len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ mutex_lock(&ar->conf_mutex);
+
+ spin_lock_bh(&ar->data_lock);
+
+ len += scnprintf(buf + len, buf_len - len,
+ "\n\t\tDriver Rx pkt stats per tid, ([tid] count)\n");
+ len += scnprintf(buf + len, buf_len - len,
+ "\t\t------------------------------------------\n");
+ len += scnprintf(buf + len, buf_len - len, "MSDUs from FW\t\t\t");
+ PRINT_TID_STATS(rx_pkt_from_fw, "\t\t\t\t");
+
+ len += scnprintf(buf + len, buf_len - len, "MSDUs unchained\t\t\t");
+ PRINT_TID_STATS(rx_pkt_unchained, "\t\t\t\t");
+
+ len += scnprintf(buf + len, buf_len - len,
+ "MSDUs locally dropped:chained\t");
+ PRINT_TID_STATS(rx_pkt_drop_chained, "\t\t\t\t");
+
+ len += scnprintf(buf + len, buf_len - len,
+ "MSDUs locally dropped:filtered\t");
+ PRINT_TID_STATS(rx_pkt_drop_filter, "\t\t\t\t");
+
+ len += scnprintf(buf + len, buf_len - len,
+ "MSDUs queued for mac80211\t");
+ PRINT_TID_STATS(rx_pkt_queued_for_mac, "\t\t\t\t");
+
+ for (i = 0; i < ATH10K_PKT_RX_ERR_MAX; i++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "MSDUs with error:%s\t", get_err_str(i));
+ PRINT_TID_STATS(rx_pkt_err[i], "\t\t\t\t");
+ }
+
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ for (i = 0; i < ATH10K_AMPDU_SUBFRM_NUM_MAX; i++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "A-MPDU num subframes %s\t",
+ get_num_ampdu_subfrm_str(i));
+ PRINT_TID_STATS(rx_pkt_ampdu[i], "\t\t\t\t");
+ }
+
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ for (i = 0; i < ATH10K_AMSDU_SUBFRM_NUM_MAX; i++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "A-MSDU num subframes %s\t\t",
+ get_num_amsdu_subfrm_str(i));
+ PRINT_TID_STATS(rx_pkt_amsdu[i], "\t\t\t\t");
+ }
+
+ spin_unlock_bh(&ar->data_lock);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+
+ kfree(buf);
+
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static const struct file_operations fops_tid_stats_dump = {
+ .open = simple_open,
+ .read = ath10k_dbg_sta_read_tid_stats,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath10k_dbg_sta_dump_tx_stats(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_sta *sta = file->private_data;
+ struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+ struct ath10k *ar = arsta->arvif->ar;
+ struct ath10k_htt_data_stats *stats;
+ const char *str_name[ATH10K_STATS_TYPE_MAX] = {"succ", "fail",
+ "retry", "ampdu"};
+ const char *str[ATH10K_COUNTER_TYPE_MAX] = {"bytes", "packets"};
+ int len = 0, i, j, k, retval = 0;
+ const int size = 16 * 4096;
+ char *buf;
+
+ buf = kzalloc(size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (!arsta->tx_stats) {
+ ath10k_warn(ar, "failed to get tx stats");
+ mutex_unlock(&ar->conf_mutex);
+ kfree(buf);
+ return 0;
+ }
+
+ spin_lock_bh(&ar->data_lock);
+ for (k = 0; k < ATH10K_STATS_TYPE_MAX; k++) {
+ for (j = 0; j < ATH10K_COUNTER_TYPE_MAX; j++) {
+ stats = &arsta->tx_stats->stats[k];
+ len += scnprintf(buf + len, size - len, "%s_%s\n",
+ str_name[k],
+ str[j]);
+ len += scnprintf(buf + len, size - len,
+ " VHT MCS %s\n",
+ str[j]);
+ for (i = 0; i < ATH10K_VHT_MCS_NUM; i++)
+ len += scnprintf(buf + len, size - len,
+ " %llu ",
+ stats->vht[j][i]);
+ len += scnprintf(buf + len, size - len, "\n");
+ len += scnprintf(buf + len, size - len, " HT MCS %s\n",
+ str[j]);
+ for (i = 0; i < ATH10K_HT_MCS_NUM; i++)
+ len += scnprintf(buf + len, size - len,
+ " %llu ", stats->ht[j][i]);
+ len += scnprintf(buf + len, size - len, "\n");
+ len += scnprintf(buf + len, size - len,
+ " BW %s (20,5,10,40,80,160 MHz)\n", str[j]);
+ len += scnprintf(buf + len, size - len,
+ " %llu %llu %llu %llu %llu %llu\n",
+ stats->bw[j][0], stats->bw[j][1],
+ stats->bw[j][2], stats->bw[j][3],
+ stats->bw[j][4], stats->bw[j][5]);
+ len += scnprintf(buf + len, size - len,
+ " NSS %s (1x1,2x2,3x3,4x4)\n", str[j]);
+ len += scnprintf(buf + len, size - len,
+ " %llu %llu %llu %llu\n",
+ stats->nss[j][0], stats->nss[j][1],
+ stats->nss[j][2], stats->nss[j][3]);
+ len += scnprintf(buf + len, size - len,
+ " GI %s (LGI,SGI)\n",
+ str[j]);
+ len += scnprintf(buf + len, size - len, " %llu %llu\n",
+ stats->gi[j][0], stats->gi[j][1]);
+ len += scnprintf(buf + len, size - len,
+ " legacy rate %s (1,2 ... Mbps)\n ",
+ str[j]);
+ for (i = 0; i < ATH10K_LEGACY_NUM; i++)
+ len += scnprintf(buf + len, size - len, "%llu ",
+ stats->legacy[j][i]);
+ len += scnprintf(buf + len, size - len, "\n");
+ len += scnprintf(buf + len, size - len,
+ " Rate table %s (1,2 ... Mbps)\n ",
+ str[j]);
+ for (i = 0; i < ATH10K_RATE_TABLE_NUM; i++) {
+ len += scnprintf(buf + len, size - len, "%llu ",
+ stats->rate_table[j][i]);
+ if (!((i + 1) % 8))
+ len += scnprintf(buf + len, size - len, "\n ");
+ }
+ }
+ }
+
+ len += scnprintf(buf + len, size - len,
+ "\nTX duration\n %llu usecs\n",
+ arsta->tx_stats->tx_duration);
+ len += scnprintf(buf + len, size - len,
+ "BA fails\n %llu\n", arsta->tx_stats->ba_fails);
+ len += scnprintf(buf + len, size - len,
+ "ack fails\n %llu\n", arsta->tx_stats->ack_fails);
+ spin_unlock_bh(&ar->data_lock);
+
+ if (len > size)
+ len = size;
+ retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+
+ mutex_unlock(&ar->conf_mutex);
+ return retval;
+}
+
+static const struct file_operations fops_tx_stats = {
+ .read = ath10k_dbg_sta_dump_tx_stats,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+void ath10k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, struct dentry *dir)
+{
+ struct ath10k *ar = hw->priv;
+
+ debugfs_create_file("aggr_mode", 0644, dir, sta, &fops_aggr_mode);
+ debugfs_create_file("addba", 0200, dir, sta, &fops_addba);
+ debugfs_create_file("addba_resp", 0200, dir, sta, &fops_addba_resp);
+ debugfs_create_file("delba", 0200, dir, sta, &fops_delba);
+ debugfs_create_file("peer_debug_trigger", 0600, dir, sta,
+ &fops_peer_debug_trigger);
+ debugfs_create_file("dump_tid_stats", 0400, dir, sta,
+ &fops_tid_stats_dump);
+
+ if (ath10k_peer_stats_enabled(ar) &&
+ ath10k_debug_is_extd_tx_stats_enabled(ar))
+ debugfs_create_file("tx_stats", 0400, dir, sta,
+ &fops_tx_stats);
+ debugfs_create_file("peer_ps_state", 0400, dir, sta,
+ &fops_peer_ps_state);
+}
diff --git a/drivers/net/wireless/ath/ath10k/hif.h b/drivers/net/wireless/ath/ath10k/hif.h
new file mode 100644
index 000000000000..9e45fd9073a6
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/hif.h
@@ -0,0 +1,258 @@
+/* SPDX-License-Identifier: ISC */
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2015,2017 Qualcomm Atheros, Inc.
+ */
+
+#ifndef _HIF_H_
+#define _HIF_H_
+
+#include <linux/kernel.h>
+#include "core.h"
+#include "bmi.h"
+#include "debug.h"
+
+/* Types of fw logging mode */
+enum ath_dbg_mode {
+ ATH10K_ENABLE_FW_LOG_DIAG,
+ ATH10K_ENABLE_FW_LOG_CE,
+};
+
+struct ath10k_hif_sg_item {
+ u16 transfer_id;
+ void *transfer_context; /* NULL = tx completion callback not called */
+ void *vaddr; /* for debugging mostly */
+ dma_addr_t paddr;
+ u16 len;
+};
+
+struct ath10k_hif_ops {
+ /* send a scatter-gather list to the target */
+ int (*tx_sg)(struct ath10k *ar, u8 pipe_id,
+ struct ath10k_hif_sg_item *items, int n_items);
+
+ /* read firmware memory through the diagnose interface */
+ int (*diag_read)(struct ath10k *ar, u32 address, void *buf,
+ size_t buf_len);
+
+ int (*diag_write)(struct ath10k *ar, u32 address, const void *data,
+ int nbytes);
+ /*
+ * API to handle HIF-specific BMI message exchanges, this API is
+ * synchronous and only allowed to be called from a context that
+ * can block (sleep)
+ */
+ int (*exchange_bmi_msg)(struct ath10k *ar,
+ void *request, u32 request_len,
+ void *response, u32 *response_len);
+
+ /* Post BMI phase, after FW is loaded. Starts regular operation */
+ int (*start)(struct ath10k *ar);
+
+ /* Clean up what start() did. This does not revert to the BMI phase.
+ * If that is desired, call power_down() and then power_up().
+ */
+ void (*stop)(struct ath10k *ar);
+
+ int (*start_post)(struct ath10k *ar);
+
+ int (*get_htt_tx_complete)(struct ath10k *ar);
+
+ int (*map_service_to_pipe)(struct ath10k *ar, u16 service_id,
+ u8 *ul_pipe, u8 *dl_pipe);
+
+ void (*get_default_pipe)(struct ath10k *ar, u8 *ul_pipe, u8 *dl_pipe);
+
+ /*
+ * Check if prior sends have completed.
+ *
+ * Check whether the pipe in question has any completed
+ * sends that have not yet been processed.
+ * This function is only relevant for HIF pipes that are configured
+ * to be polled rather than interrupt-driven.
+ */
+ void (*send_complete_check)(struct ath10k *ar, u8 pipe_id, int force);
+
+ u16 (*get_free_queue_number)(struct ath10k *ar, u8 pipe_id);
+
+ u32 (*read32)(struct ath10k *ar, u32 address);
+
+ void (*write32)(struct ath10k *ar, u32 address, u32 value);
+
+ /* Power up the device and enter BMI transfer mode for FW download */
+ int (*power_up)(struct ath10k *ar, enum ath10k_firmware_mode fw_mode);
+
+ /* Power down the device and free up resources. stop() must be called
+ * before this if start() was called earlier
+ */
+ void (*power_down)(struct ath10k *ar);
+
+ int (*suspend)(struct ath10k *ar);
+ int (*resume)(struct ath10k *ar);
+
+ /* fetch calibration data from target eeprom */
+ int (*fetch_cal_eeprom)(struct ath10k *ar, void **data,
+ size_t *data_len);
+
+ int (*get_target_info)(struct ath10k *ar,
+ struct bmi_target_info *target_info);
+ int (*set_target_log_mode)(struct ath10k *ar, u8 fw_log_mode);
+};
+
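+
+/*
+ * The wrappers below are thin dispatchers into ar->hif.ops. Ops that a
+ * bus backend may leave NULL are checked first and fall back to
+ * -EOPNOTSUPP (or a warning plus a dummy value for register access), so
+ * callers can probe for optional functionality without crashing.
+ */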
+static inline int ath10k_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
+ struct ath10k_hif_sg_item *items,
+ int n_items)
+{
+ return ar->hif.ops->tx_sg(ar, pipe_id, items, n_items);
+}
+
+static inline int ath10k_hif_diag_read(struct ath10k *ar, u32 address, void *buf,
+ size_t buf_len)
+{
+ return ar->hif.ops->diag_read(ar, address, buf, buf_len);
+}
+
+static inline int ath10k_hif_diag_write(struct ath10k *ar, u32 address,
+ const void *data, int nbytes)
+{
+ if (!ar->hif.ops->diag_write)
+ return -EOPNOTSUPP;
+
+ return ar->hif.ops->diag_write(ar, address, data, nbytes);
+}
+
+static inline int ath10k_hif_exchange_bmi_msg(struct ath10k *ar,
+ void *request, u32 request_len,
+ void *response, u32 *response_len)
+{
+ return ar->hif.ops->exchange_bmi_msg(ar, request, request_len,
+ response, response_len);
+}
+
+static inline int ath10k_hif_start(struct ath10k *ar)
+{
+ return ar->hif.ops->start(ar);
+}
+
+static inline void ath10k_hif_stop(struct ath10k *ar)
+{
+ return ar->hif.ops->stop(ar);
+}
+
+static inline int ath10k_hif_start_post(struct ath10k *ar)
+{
+ if (ar->hif.ops->start_post)
+ return ar->hif.ops->start_post(ar);
+ return 0;
+}
+
+static inline int ath10k_hif_get_htt_tx_complete(struct ath10k *ar)
+{
+ if (ar->hif.ops->get_htt_tx_complete)
+ return ar->hif.ops->get_htt_tx_complete(ar);
+ return 0;
+}
+
+static inline int ath10k_hif_map_service_to_pipe(struct ath10k *ar,
+ u16 service_id,
+ u8 *ul_pipe, u8 *dl_pipe)
+{
+ return ar->hif.ops->map_service_to_pipe(ar, service_id,
+ ul_pipe, dl_pipe);
+}
+
+static inline void ath10k_hif_get_default_pipe(struct ath10k *ar,
+ u8 *ul_pipe, u8 *dl_pipe)
+{
+ ar->hif.ops->get_default_pipe(ar, ul_pipe, dl_pipe);
+}
+
+static inline void ath10k_hif_send_complete_check(struct ath10k *ar,
+ u8 pipe_id, int force)
+{
+ if (ar->hif.ops->send_complete_check)
+ ar->hif.ops->send_complete_check(ar, pipe_id, force);
+}
+
+static inline u16 ath10k_hif_get_free_queue_number(struct ath10k *ar,
+ u8 pipe_id)
+{
+ return ar->hif.ops->get_free_queue_number(ar, pipe_id);
+}
+
+static inline int ath10k_hif_power_up(struct ath10k *ar,
+ enum ath10k_firmware_mode fw_mode)
+{
+ return ar->hif.ops->power_up(ar, fw_mode);
+}
+
+static inline void ath10k_hif_power_down(struct ath10k *ar)
+{
+ ar->hif.ops->power_down(ar);
+}
+
+static inline int ath10k_hif_suspend(struct ath10k *ar)
+{
+ if (!ar->hif.ops->suspend)
+ return -EOPNOTSUPP;
+
+ return ar->hif.ops->suspend(ar);
+}
+
+static inline int ath10k_hif_resume(struct ath10k *ar)
+{
+ if (!ar->hif.ops->resume)
+ return -EOPNOTSUPP;
+
+ return ar->hif.ops->resume(ar);
+}
+
+static inline u32 ath10k_hif_read32(struct ath10k *ar, u32 address)
+{
+ if (!ar->hif.ops->read32) {
+ ath10k_warn(ar, "hif read32 not supported\n");
+ return 0xdeaddead;
+ }
+
+ return ar->hif.ops->read32(ar, address);
+}
+
+static inline void ath10k_hif_write32(struct ath10k *ar,
+ u32 address, u32 data)
+{
+ if (!ar->hif.ops->write32) {
+ ath10k_warn(ar, "hif write32 not supported\n");
+ return;
+ }
+
+ ar->hif.ops->write32(ar, address, data);
+}
+
+static inline int ath10k_hif_fetch_cal_eeprom(struct ath10k *ar,
+ void **data,
+ size_t *data_len)
+{
+ if (!ar->hif.ops->fetch_cal_eeprom)
+ return -EOPNOTSUPP;
+
+ return ar->hif.ops->fetch_cal_eeprom(ar, data, data_len);
+}
+
+static inline int ath10k_hif_get_target_info(struct ath10k *ar,
+ struct bmi_target_info *tgt_info)
+{
+ if (!ar->hif.ops->get_target_info)
+ return -EOPNOTSUPP;
+
+ return ar->hif.ops->get_target_info(ar, tgt_info);
+}
+
+static inline int ath10k_hif_set_target_log_mode(struct ath10k *ar,
+ u8 fw_log_mode)
+{
+ if (!ar->hif.ops->set_target_log_mode)
+ return -EOPNOTSUPP;
+
+ return ar->hif.ops->set_target_log_mode(ar, fw_log_mode);
+}
+#endif /* _HIF_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
new file mode 100644
index 000000000000..ce9b248c12dc
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/htc.c
@@ -0,0 +1,1324 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#include <linux/export.h>
+
+#include "core.h"
+#include "hif.h"
+#include "debug.h"
+
+/********/
+/* Send */
+/********/
+
+static void ath10k_htc_control_tx_complete(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ kfree_skb(skb);
+}
+
+static struct sk_buff *ath10k_htc_build_tx_ctrl_skb(void *ar)
+{
+ struct sk_buff *skb;
+ struct ath10k_skb_cb *skb_cb;
+
+ skb = dev_alloc_skb(ATH10K_HTC_CONTROL_BUFFER_SIZE);
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, 20); /* FIXME: why 20 bytes? */
+ WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");
+
+ skb_cb = ATH10K_SKB_CB(skb);
+ memset(skb_cb, 0, sizeof(*skb_cb));
+
+ ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: skb %p\n", __func__, skb);
+ return skb;
+}
+
+static inline void ath10k_htc_restore_tx_skb(struct ath10k_htc *htc,
+ struct sk_buff *skb)
+{
+ struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
+
+ if (htc->ar->bus_param.dev_type != ATH10K_DEV_TYPE_HL)
+ dma_unmap_single(htc->ar->dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE);
+ skb_pull(skb, sizeof(struct ath10k_htc_hdr));
+}
+
+void ath10k_htc_notify_tx_completion(struct ath10k_htc_ep *ep,
+ struct sk_buff *skb)
+{
+ struct ath10k *ar = ep->htc->ar;
+ struct ath10k_htc_hdr *hdr;
+
+ ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: ep %d skb %p\n", __func__,
+ ep->eid, skb);
+
+ /* Corner case: the copy completion can reach the host while the
+ * copy engine is still processing the buffer. If the host unmaps
+ * the memory at that point, an SMMU fault results. As a workaround,
+ * delay the unmap to avoid the fault.
+ */
+ if (ar->hw_params.delay_unmap_buffer &&
+ ep->ul_pipe_id == 3)
+ mdelay(2);
+
+ hdr = (struct ath10k_htc_hdr *)skb->data;
+ ath10k_htc_restore_tx_skb(ep->htc, skb);
+
+ if (!ep->ep_ops.ep_tx_complete) {
+ ath10k_warn(ar, "no tx handler for eid %d\n", ep->eid);
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
+ if (hdr->flags & ATH10K_HTC_FLAG_SEND_BUNDLE) {
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
+ ep->ep_ops.ep_tx_complete(ep->htc->ar, skb);
+}
+EXPORT_SYMBOL(ath10k_htc_notify_tx_completion);
+
+static void ath10k_htc_prepare_tx_skb(struct ath10k_htc_ep *ep,
+ struct sk_buff *skb)
+{
+ struct ath10k_htc_hdr *hdr;
+
+ hdr = (struct ath10k_htc_hdr *)skb->data;
+ memset(hdr, 0, sizeof(struct ath10k_htc_hdr));
+
+ hdr->eid = ep->eid;
+ hdr->len = __cpu_to_le16(skb->len - sizeof(*hdr));
+ hdr->flags = 0;
+ if (ep->tx_credit_flow_enabled && !ep->bundle_tx)
+ hdr->flags |= ATH10K_HTC_FLAG_NEED_CREDIT_UPDATE;
+
+ spin_lock_bh(&ep->htc->tx_lock);
+ hdr->seq_no = ep->seq_no++;
+ spin_unlock_bh(&ep->htc->tx_lock);
+}
+
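+/*
+ * Credit accounting sketch (numbers illustrative): with a credit size
+ * of 1792 bytes, a 2000-byte message costs DIV_ROUND_UP(2000, 1792) = 2
+ * credits. Credits consumed here come back either via
+ * ath10k_htc_release_credit() on error paths or via credit report
+ * records in rx trailers.
+ */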
+static int ath10k_htc_consume_credit(struct ath10k_htc_ep *ep,
+ unsigned int len,
+ bool consume)
+{
+ struct ath10k_htc *htc = ep->htc;
+ struct ath10k *ar = htc->ar;
+ enum ath10k_htc_ep_id eid = ep->eid;
+ int credits, ret = 0;
+
+ if (!ep->tx_credit_flow_enabled)
+ return 0;
+
+ credits = DIV_ROUND_UP(len, ep->tx_credit_size);
+ spin_lock_bh(&htc->tx_lock);
+
+ if (ep->tx_credits < credits) {
+ ath10k_dbg(ar, ATH10K_DBG_HTC,
+ "htc insufficient credits ep %d required %d available %d consume %d\n",
+ eid, credits, ep->tx_credits, consume);
+ ret = -EAGAIN;
+ goto unlock;
+ }
+
+ if (consume) {
+ ep->tx_credits -= credits;
+ ath10k_dbg(ar, ATH10K_DBG_HTC,
+ "htc ep %d consumed %d credits total %d\n",
+ eid, credits, ep->tx_credits);
+ }
+
+unlock:
+ spin_unlock_bh(&htc->tx_lock);
+ return ret;
+}
+
+static void ath10k_htc_release_credit(struct ath10k_htc_ep *ep, unsigned int len)
+{
+ struct ath10k_htc *htc = ep->htc;
+ struct ath10k *ar = htc->ar;
+ enum ath10k_htc_ep_id eid = ep->eid;
+ int credits;
+
+ if (!ep->tx_credit_flow_enabled)
+ return;
+
+ credits = DIV_ROUND_UP(len, ep->tx_credit_size);
+ spin_lock_bh(&htc->tx_lock);
+ ep->tx_credits += credits;
+ ath10k_dbg(ar, ATH10K_DBG_HTC,
+ "htc ep %d reverted %d credits back total %d\n",
+ eid, credits, ep->tx_credits);
+ spin_unlock_bh(&htc->tx_lock);
+
+ if (ep->ep_ops.ep_tx_credits)
+ ep->ep_ops.ep_tx_credits(htc->ar);
+}
+
+int ath10k_htc_send(struct ath10k_htc *htc,
+ enum ath10k_htc_ep_id eid,
+ struct sk_buff *skb)
+{
+ struct ath10k *ar = htc->ar;
+ struct ath10k_htc_ep *ep = &htc->endpoint[eid];
+ struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
+ struct ath10k_hif_sg_item sg_item;
+ struct device *dev = htc->ar->dev;
+ int ret;
+ unsigned int skb_len;
+
+ if (htc->ar->state == ATH10K_STATE_WEDGED)
+ return -ECOMM;
+
+ if (eid >= ATH10K_HTC_EP_COUNT) {
+ ath10k_warn(ar, "Invalid endpoint id: %d\n", eid);
+ return -ENOENT;
+ }
+
+ skb_push(skb, sizeof(struct ath10k_htc_hdr));
+
+ skb_len = skb->len;
+ ret = ath10k_htc_consume_credit(ep, skb_len, true);
+ if (ret)
+ goto err_pull;
+
+ ath10k_htc_prepare_tx_skb(ep, skb);
+
+ skb_cb->eid = eid;
+ if (ar->bus_param.dev_type != ATH10K_DEV_TYPE_HL) {
+ skb_cb->paddr = dma_map_single(dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
+ ret = dma_mapping_error(dev, skb_cb->paddr);
+ if (ret) {
+ ret = -EIO;
+ goto err_credits;
+ }
+ }
+
+ sg_item.transfer_id = ep->eid;
+ sg_item.transfer_context = skb;
+ sg_item.vaddr = skb->data;
+ sg_item.paddr = skb_cb->paddr;
+ sg_item.len = skb->len;
+
+ ret = ath10k_hif_tx_sg(htc->ar, ep->ul_pipe_id, &sg_item, 1);
+ if (ret)
+ goto err_unmap;
+
+ return 0;
+
+err_unmap:
+ if (ar->bus_param.dev_type != ATH10K_DEV_TYPE_HL)
+ dma_unmap_single(dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE);
+err_credits:
+ ath10k_htc_release_credit(ep, skb_len);
+err_pull:
+ skb_pull(skb, sizeof(struct ath10k_htc_hdr));
+ return ret;
+}
+
+void ath10k_htc_tx_completion_handler(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct ath10k_htc *htc = &ar->htc;
+ struct ath10k_skb_cb *skb_cb;
+ struct ath10k_htc_ep *ep;
+
+ if (WARN_ON_ONCE(!skb))
+ return;
+
+ skb_cb = ATH10K_SKB_CB(skb);
+ ep = &htc->endpoint[skb_cb->eid];
+
+ ath10k_htc_notify_tx_completion(ep, skb);
+ /* the skb now belongs to the completion handler */
+}
+EXPORT_SYMBOL(ath10k_htc_tx_completion_handler);
+
+/***********/
+/* Receive */
+/***********/
+
+static void
+ath10k_htc_process_credit_report(struct ath10k_htc *htc,
+ const struct ath10k_htc_credit_report *report,
+ int len,
+ enum ath10k_htc_ep_id eid)
+{
+ struct ath10k *ar = htc->ar;
+ struct ath10k_htc_ep *ep;
+ int i, n_reports;
+
+ if (len % sizeof(*report))
+ ath10k_warn(ar, "Uneven credit report len %d", len);
+
+ n_reports = len / sizeof(*report);
+
+ spin_lock_bh(&htc->tx_lock);
+ for (i = 0; i < n_reports; i++, report++) {
+ if (report->eid >= ATH10K_HTC_EP_COUNT)
+ break;
+
+ ep = &htc->endpoint[report->eid];
+ ep->tx_credits += report->credits;
+
+ ath10k_dbg(ar, ATH10K_DBG_HTC, "htc ep %d got %d credits (total %d)\n",
+ report->eid, report->credits, ep->tx_credits);
+
+ if (ep->ep_ops.ep_tx_credits) {
+ spin_unlock_bh(&htc->tx_lock);
+ ep->ep_ops.ep_tx_credits(htc->ar);
+ spin_lock_bh(&htc->tx_lock);
+ }
+ }
+ spin_unlock_bh(&htc->tx_lock);
+}
+
+static int
+ath10k_htc_process_lookahead(struct ath10k_htc *htc,
+ const struct ath10k_htc_lookahead_report *report,
+ int len,
+ enum ath10k_htc_ep_id eid,
+ void *next_lookaheads,
+ int *next_lookaheads_len)
+{
+ struct ath10k *ar = htc->ar;
+
+ /* Invalid lookahead flags are actually transmitted by
+ * the target in the HTC control message. Since this happens
+ * on every boot, silently ignore the lookahead in this case.
+ */
+ if (report->pre_valid != ((~report->post_valid) & 0xFF))
+ return 0;
+
+ if (next_lookaheads && next_lookaheads_len) {
+ ath10k_dbg(ar, ATH10K_DBG_HTC,
+ "htc rx lookahead found pre_valid 0x%x post_valid 0x%x\n",
+ report->pre_valid, report->post_valid);
+
+ /* look ahead bytes are valid, copy them over */
+ memcpy((u8 *)next_lookaheads, report->lookahead, 4);
+
+ *next_lookaheads_len = 1;
+ }
+
+ return 0;
+}
+
+static int
+ath10k_htc_process_lookahead_bundle(struct ath10k_htc *htc,
+ const struct ath10k_htc_lookahead_bundle *report,
+ int len,
+ enum ath10k_htc_ep_id eid,
+ void *next_lookaheads,
+ int *next_lookaheads_len)
+{
+ struct ath10k *ar = htc->ar;
+ int bundle_cnt = len / sizeof(*report);
+
+ if (!bundle_cnt || (bundle_cnt > htc->max_msgs_per_htc_bundle)) {
+ ath10k_warn(ar, "Invalid lookahead bundle count: %d\n",
+ bundle_cnt);
+ return -EINVAL;
+ }
+
+ if (next_lookaheads && next_lookaheads_len) {
+ int i;
+
+ for (i = 0; i < bundle_cnt; i++) {
+ memcpy(((u8 *)next_lookaheads) + 4 * i,
+ report->lookahead, 4);
+ report++;
+ }
+
+ *next_lookaheads_len = bundle_cnt;
+ }
+
+ return 0;
+}
+
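+/*
+ * A trailer is a sequence of <record_hdr><value> records, e.g.
+ * (layout illustrative):
+ *
+ *   | hdr: id=CREDITS len=4 | credit_report | hdr: id=LOOKAHEAD len=12 | ... |
+ *
+ * Each iteration below advances by sizeof(record->hdr) + record->hdr.len
+ * and bails out if the remaining length cannot hold the claimed record.
+ */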
+int ath10k_htc_process_trailer(struct ath10k_htc *htc,
+ u8 *buffer,
+ int length,
+ enum ath10k_htc_ep_id src_eid,
+ void *next_lookaheads,
+ int *next_lookaheads_len)
+{
+ struct ath10k_htc_lookahead_bundle *bundle;
+ struct ath10k *ar = htc->ar;
+ int status = 0;
+ struct ath10k_htc_record *record;
+ u8 *orig_buffer;
+ int orig_length;
+ size_t len;
+
+ orig_buffer = buffer;
+ orig_length = length;
+
+ while (length > 0) {
+ record = (struct ath10k_htc_record *)buffer;
+
+ if (length < sizeof(record->hdr)) {
+ status = -EINVAL;
+ break;
+ }
+
+ if (record->hdr.len > length) {
+ /* no room left in buffer for record */
+ ath10k_warn(ar, "Invalid record length: %d\n",
+ record->hdr.len);
+ status = -EINVAL;
+ break;
+ }
+
+ switch (record->hdr.id) {
+ case ATH10K_HTC_RECORD_CREDITS:
+ len = sizeof(struct ath10k_htc_credit_report);
+ if (record->hdr.len < len) {
+ ath10k_warn(ar, "Credit report too long\n");
+ status = -EINVAL;
+ break;
+ }
+ ath10k_htc_process_credit_report(htc,
+ record->credit_report,
+ record->hdr.len,
+ src_eid);
+ break;
+ case ATH10K_HTC_RECORD_LOOKAHEAD:
+ len = sizeof(struct ath10k_htc_lookahead_report);
+ if (record->hdr.len < len) {
+ ath10k_warn(ar, "Lookahead report too long\n");
+ status = -EINVAL;
+ break;
+ }
+ status = ath10k_htc_process_lookahead(htc,
+ record->lookahead_report,
+ record->hdr.len,
+ src_eid,
+ next_lookaheads,
+ next_lookaheads_len);
+ break;
+ case ATH10K_HTC_RECORD_LOOKAHEAD_BUNDLE:
+ bundle = record->lookahead_bundle;
+ status = ath10k_htc_process_lookahead_bundle(htc,
+ bundle,
+ record->hdr.len,
+ src_eid,
+ next_lookaheads,
+ next_lookaheads_len);
+ break;
+ default:
+ ath10k_warn(ar, "Unhandled record: id:%d length:%d\n",
+ record->hdr.id, record->hdr.len);
+ break;
+ }
+
+ if (status)
+ break;
+
+ /* multiple records may be present in a trailer */
+ buffer += sizeof(record->hdr) + record->hdr.len;
+ length -= sizeof(record->hdr) + record->hdr.len;
+ }
+
+ if (status)
+ ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc rx bad trailer", "",
+ orig_buffer, orig_length);
+
+ return status;
+}
+EXPORT_SYMBOL(ath10k_htc_process_trailer);
+
+void ath10k_htc_rx_completion_handler(struct ath10k *ar, struct sk_buff *skb)
+{
+ int status = 0;
+ struct ath10k_htc *htc = &ar->htc;
+ struct ath10k_htc_hdr *hdr;
+ struct ath10k_htc_ep *ep;
+ u16 payload_len;
+ u32 trailer_len = 0;
+ size_t min_len;
+ u8 eid;
+ bool trailer_present;
+
+ hdr = (struct ath10k_htc_hdr *)skb->data;
+ skb_pull(skb, sizeof(*hdr));
+
+ eid = hdr->eid;
+
+ if (eid >= ATH10K_HTC_EP_COUNT) {
+ ath10k_warn(ar, "HTC Rx: invalid eid %d\n", eid);
+ ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc bad header", "",
+ hdr, sizeof(*hdr));
+ goto out;
+ }
+
+ ep = &htc->endpoint[eid];
+ if (ep->service_id == ATH10K_HTC_SVC_ID_UNUSED) {
+ ath10k_warn(ar, "htc rx endpoint %d is not connected\n", eid);
+ goto out;
+ }
+
+ payload_len = __le16_to_cpu(hdr->len);
+
+ if (payload_len + sizeof(*hdr) > ATH10K_HTC_MAX_LEN) {
+ ath10k_warn(ar, "HTC rx frame too long, len: %zu\n",
+ payload_len + sizeof(*hdr));
+ ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc bad rx pkt len", "",
+ hdr, sizeof(*hdr));
+ goto out;
+ }
+
+ if (skb->len < payload_len) {
+ ath10k_dbg(ar, ATH10K_DBG_HTC,
+ "HTC Rx: insufficient length, got %d, expected %d\n",
+ skb->len, payload_len);
+ ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc bad rx pkt len",
+ "", hdr, sizeof(*hdr));
+ goto out;
+ }
+
+ /* get flags to check for trailer */
+ trailer_present = hdr->flags & ATH10K_HTC_FLAG_TRAILER_PRESENT;
+ if (trailer_present) {
+ u8 *trailer;
+
+ trailer_len = hdr->trailer_len;
+ min_len = sizeof(struct ath10k_ath10k_htc_record_hdr);
+
+ if ((trailer_len < min_len) ||
+ (trailer_len > payload_len)) {
+ ath10k_warn(ar, "Invalid trailer length: %d\n",
+ trailer_len);
+ goto out;
+ }
+
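+ /* The trailer occupies the last trailer_len bytes of the payload,
+ * so e.g. (numbers illustrative) payload_len 100 and trailer_len 12
+ * put the trailer at offset sizeof(*hdr) + 88 from the frame start.
+ */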
+ trailer = (u8 *)hdr;
+ trailer += sizeof(*hdr);
+ trailer += payload_len;
+ trailer -= trailer_len;
+ status = ath10k_htc_process_trailer(htc, trailer,
+ trailer_len, hdr->eid,
+ NULL, NULL);
+ if (status)
+ goto out;
+
+ skb_trim(skb, skb->len - trailer_len);
+ }
+
+ if (((int)payload_len - (int)trailer_len) <= 0)
+ /* zero length packet with trailer data, just drop these */
+ goto out;
+
+ ath10k_dbg(ar, ATH10K_DBG_HTC, "htc rx completion ep %d skb %p\n",
+ eid, skb);
+ ep->ep_ops.ep_rx_complete(ar, skb);
+
+ /* skb is now owned by the rx completion handler */
+ skb = NULL;
+out:
+ kfree_skb(skb);
+}
+EXPORT_SYMBOL(ath10k_htc_rx_completion_handler);
+
+static void ath10k_htc_control_rx_complete(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ struct ath10k_htc *htc = &ar->htc;
+ struct ath10k_htc_msg *msg = (struct ath10k_htc_msg *)skb->data;
+
+ switch (__le16_to_cpu(msg->hdr.message_id)) {
+ case ATH10K_HTC_MSG_READY_ID:
+ case ATH10K_HTC_MSG_CONNECT_SERVICE_RESP_ID:
+ /* handle HTC control message */
+ if (completion_done(&htc->ctl_resp)) {
+ /* this is a fatal error, target should not be
+ * sending unsolicited messages on the ep 0
+ */
+ ath10k_warn(ar, "HTC rx ctrl still processing\n");
+ complete(&htc->ctl_resp);
+ goto out;
+ }
+
+ htc->control_resp_len =
+ min_t(int, skb->len,
+ ATH10K_HTC_MAX_CTRL_MSG_LEN);
+
+ memcpy(htc->control_resp_buffer, skb->data,
+ htc->control_resp_len);
+
+ complete(&htc->ctl_resp);
+ break;
+ case ATH10K_HTC_MSG_SEND_SUSPEND_COMPLETE:
+ htc->htc_ops.target_send_suspend_complete(ar);
+ break;
+ default:
+ ath10k_warn(ar, "ignoring unsolicited htc ep0 event\n");
+ break;
+ }
+
+out:
+ kfree_skb(skb);
+}
+
+/***************/
+/* Init/Deinit */
+/***************/
+
+static const char *htc_service_name(enum ath10k_htc_svc_id id)
+{
+ switch (id) {
+ case ATH10K_HTC_SVC_ID_RESERVED:
+ return "Reserved";
+ case ATH10K_HTC_SVC_ID_RSVD_CTRL:
+ return "Control";
+ case ATH10K_HTC_SVC_ID_WMI_CONTROL:
+ return "WMI";
+ case ATH10K_HTC_SVC_ID_WMI_DATA_BE:
+ return "DATA BE";
+ case ATH10K_HTC_SVC_ID_WMI_DATA_BK:
+ return "DATA BK";
+ case ATH10K_HTC_SVC_ID_WMI_DATA_VI:
+ return "DATA VI";
+ case ATH10K_HTC_SVC_ID_WMI_DATA_VO:
+ return "DATA VO";
+ case ATH10K_HTC_SVC_ID_NMI_CONTROL:
+ return "NMI Control";
+ case ATH10K_HTC_SVC_ID_NMI_DATA:
+ return "NMI Data";
+ case ATH10K_HTC_SVC_ID_HTT_DATA_MSG:
+ return "HTT Data";
+ case ATH10K_HTC_SVC_ID_HTT_DATA2_MSG:
+ return "HTT Data";
+ case ATH10K_HTC_SVC_ID_HTT_DATA3_MSG:
+ return "HTT Data";
+ case ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS:
+ return "RAW";
+ case ATH10K_HTC_SVC_ID_HTT_LOG_MSG:
+ return "PKTLOG";
+ }
+
+ return "Unknown";
+}
+
+static void ath10k_htc_reset_endpoint_states(struct ath10k_htc *htc)
+{
+ struct ath10k_htc_ep *ep;
+ int i;
+
+ for (i = ATH10K_HTC_EP_0; i < ATH10K_HTC_EP_COUNT; i++) {
+ ep = &htc->endpoint[i];
+ ep->service_id = ATH10K_HTC_SVC_ID_UNUSED;
+ ep->max_ep_message_len = 0;
+ ep->max_tx_queue_depth = 0;
+ ep->eid = i;
+ ep->htc = htc;
+ ep->tx_credit_flow_enabled = true;
+ }
+}
+
+static u8 ath10k_htc_get_credit_allocation(struct ath10k_htc *htc,
+ u16 service_id)
+{
+ u8 allocation = 0;
+
+ /* The WMI control service is the only service with flow control.
+ * Let it have all transmit credits.
+ */
+ if (service_id == ATH10K_HTC_SVC_ID_WMI_CONTROL)
+ allocation = htc->total_transmit_credits;
+
+ return allocation;
+}
+
+static int ath10k_htc_send_bundle(struct ath10k_htc_ep *ep,
+ struct sk_buff *bundle_skb,
+ struct sk_buff_head *tx_save_head)
+{
+ struct ath10k_hif_sg_item sg_item;
+ struct ath10k_htc *htc = ep->htc;
+ struct ath10k *ar = htc->ar;
+ struct sk_buff *skb;
+ int ret, cn = 0;
+ unsigned int skb_len;
+
+ ath10k_dbg(ar, ATH10K_DBG_HTC, "bundle skb len %d\n", bundle_skb->len);
+ skb_len = bundle_skb->len;
+ ret = ath10k_htc_consume_credit(ep, skb_len, true);
+
+ if (!ret) {
+ sg_item.transfer_id = ep->eid;
+ sg_item.transfer_context = bundle_skb;
+ sg_item.vaddr = bundle_skb->data;
+ sg_item.len = bundle_skb->len;
+
+ ret = ath10k_hif_tx_sg(htc->ar, ep->ul_pipe_id, &sg_item, 1);
+ if (ret)
+ ath10k_htc_release_credit(ep, skb_len);
+ }
+
+ if (ret)
+ dev_kfree_skb_any(bundle_skb);
+
+ for (cn = 0; (skb = skb_dequeue_tail(tx_save_head)); cn++) {
+ if (ret) {
+ skb_pull(skb, sizeof(struct ath10k_htc_hdr));
+ skb_queue_head(&ep->tx_req_head, skb);
+ } else {
+ skb_queue_tail(&ep->tx_complete_head, skb);
+ }
+ }
+
+ if (!ret)
+ queue_work(ar->workqueue_tx_complete, &ar->tx_complete_work);
+
+ ath10k_dbg(ar, ATH10K_DBG_HTC,
+ "bundle tx status %d eid %d req count %d count %d len %d\n",
+ ret, ep->eid, skb_queue_len(&ep->tx_req_head), cn, skb_len);
+ return ret;
+}
+
+static void ath10k_htc_send_one_skb(struct ath10k_htc_ep *ep, struct sk_buff *skb)
+{
+ struct ath10k_htc *htc = ep->htc;
+ struct ath10k *ar = htc->ar;
+ int ret;
+
+ ret = ath10k_htc_send(htc, ep->eid, skb);
+
+ if (ret)
+ skb_queue_head(&ep->tx_req_head, skb);
+
+ ath10k_dbg(ar, ATH10K_DBG_HTC, "tx one status %d eid %d len %d pending count %d\n",
+ ret, ep->eid, skb->len, skb_queue_len(&ep->tx_req_head));
+}
+
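+/*
+ * Bundling sketch (numbers illustrative): each dequeued skb is padded up
+ * to a multiple of the endpoint credit size, so with tx_credit_size 1792
+ * a 1000-byte message occupies one full 1792-byte slot in the bundle.
+ * When the next message no longer fits in the
+ * ATH10K_MAX_MSG_PER_HTC_TX_BUNDLE * tx_credit_size buffer, the bundle
+ * is flushed and a fresh buffer is allocated.
+ */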
+static int ath10k_htc_send_bundle_skbs(struct ath10k_htc_ep *ep)
+{
+ struct ath10k_htc *htc = ep->htc;
+ struct sk_buff *bundle_skb, *skb;
+ struct sk_buff_head tx_save_head;
+ struct ath10k_htc_hdr *hdr;
+ u8 *bundle_buf;
+ int ret = 0, credit_pad, credit_remainder, trans_len, bundles_left = 0;
+
+ if (htc->ar->state == ATH10K_STATE_WEDGED)
+ return -ECOMM;
+
+ if (ep->tx_credit_flow_enabled &&
+ ep->tx_credits < ATH10K_MIN_CREDIT_PER_HTC_TX_BUNDLE)
+ return 0;
+
+ bundles_left = ATH10K_MAX_MSG_PER_HTC_TX_BUNDLE * ep->tx_credit_size;
+ bundle_skb = dev_alloc_skb(bundles_left);
+
+ if (!bundle_skb)
+ return -ENOMEM;
+
+ bundle_buf = bundle_skb->data;
+ skb_queue_head_init(&tx_save_head);
+
+ while (true) {
+ skb = skb_dequeue(&ep->tx_req_head);
+ if (!skb)
+ break;
+
+ credit_pad = 0;
+ trans_len = skb->len + sizeof(*hdr);
+ credit_remainder = trans_len % ep->tx_credit_size;
+
+ if (credit_remainder != 0) {
+ credit_pad = ep->tx_credit_size - credit_remainder;
+ trans_len += credit_pad;
+ }
+
+ ret = ath10k_htc_consume_credit(ep,
+ bundle_buf + trans_len - bundle_skb->data,
+ false);
+ if (ret) {
+ skb_queue_head(&ep->tx_req_head, skb);
+ break;
+ }
+
+ if (bundles_left < trans_len) {
+ bundle_skb->len = bundle_buf - bundle_skb->data;
+ ret = ath10k_htc_send_bundle(ep, bundle_skb, &tx_save_head);
+
+ if (ret) {
+ skb_queue_head(&ep->tx_req_head, skb);
+ return ret;
+ }
+
+ if (skb_queue_len(&ep->tx_req_head) == 0) {
+ ath10k_htc_send_one_skb(ep, skb);
+ return ret;
+ }
+
+ if (ep->tx_credit_flow_enabled &&
+ ep->tx_credits < ATH10K_MIN_CREDIT_PER_HTC_TX_BUNDLE) {
+ skb_queue_head(&ep->tx_req_head, skb);
+ return 0;
+ }
+
+ bundles_left =
+ ATH10K_MAX_MSG_PER_HTC_TX_BUNDLE * ep->tx_credit_size;
+ bundle_skb = dev_alloc_skb(bundles_left);
+
+ if (!bundle_skb) {
+ skb_queue_head(&ep->tx_req_head, skb);
+ return -ENOMEM;
+ }
+ bundle_buf = bundle_skb->data;
+ skb_queue_head_init(&tx_save_head);
+ }
+
+ skb_push(skb, sizeof(struct ath10k_htc_hdr));
+ ath10k_htc_prepare_tx_skb(ep, skb);
+
+ memcpy(bundle_buf, skb->data, skb->len);
+ hdr = (struct ath10k_htc_hdr *)bundle_buf;
+ hdr->flags |= ATH10K_HTC_FLAG_SEND_BUNDLE;
+ hdr->pad_len = __cpu_to_le16(credit_pad);
+ bundle_buf += trans_len;
+ bundles_left -= trans_len;
+ skb_queue_tail(&tx_save_head, skb);
+ }
+
+ if (bundle_buf != bundle_skb->data) {
+ bundle_skb->len = bundle_buf - bundle_skb->data;
+ ret = ath10k_htc_send_bundle(ep, bundle_skb, &tx_save_head);
+ } else {
+ dev_kfree_skb_any(bundle_skb);
+ }
+
+ return ret;
+}
+
+static void ath10k_htc_bundle_tx_work(struct work_struct *work)
+{
+ struct ath10k *ar = container_of(work, struct ath10k, bundle_tx_work);
+ struct ath10k_htc_ep *ep;
+ struct sk_buff *skb;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ar->htc.endpoint); i++) {
+ ep = &ar->htc.endpoint[i];
+
+ if (!ep->bundle_tx)
+ continue;
+
+ ath10k_dbg(ar, ATH10K_DBG_HTC, "bundle tx work eid %d count %d\n",
+ ep->eid, skb_queue_len(&ep->tx_req_head));
+
+ if (skb_queue_len(&ep->tx_req_head) >=
+ ATH10K_MIN_MSG_PER_HTC_TX_BUNDLE) {
+ ath10k_htc_send_bundle_skbs(ep);
+ } else {
+ skb = skb_dequeue(&ep->tx_req_head);
+
+ if (!skb)
+ continue;
+ ath10k_htc_send_one_skb(ep, skb);
+ }
+ }
+}
+
+static void ath10k_htc_tx_complete_work(struct work_struct *work)
+{
+ struct ath10k *ar = container_of(work, struct ath10k, tx_complete_work);
+ struct ath10k_htc_ep *ep;
+ enum ath10k_htc_ep_id eid;
+ struct sk_buff *skb;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ar->htc.endpoint); i++) {
+ ep = &ar->htc.endpoint[i];
+ eid = ep->eid;
+ if (ep->bundle_tx && eid == ar->htt.eid) {
+ ath10k_dbg(ar, ATH10K_DBG_HTC, "bundle tx complete eid %d pending complete count%d\n",
+ ep->eid, skb_queue_len(&ep->tx_complete_head));
+
+ while (true) {
+ skb = skb_dequeue(&ep->tx_complete_head);
+ if (!skb)
+ break;
+ ath10k_htc_notify_tx_completion(ep, skb);
+ }
+ }
+ }
+}
+
+int ath10k_htc_send_hl(struct ath10k_htc *htc,
+ enum ath10k_htc_ep_id eid,
+ struct sk_buff *skb)
+{
+ struct ath10k_htc_ep *ep = &htc->endpoint[eid];
+ struct ath10k *ar = htc->ar;
+
+ if (sizeof(struct ath10k_htc_hdr) + skb->len > ep->tx_credit_size) {
+ ath10k_dbg(ar, ATH10K_DBG_HTC, "tx exceed max len %d\n", skb->len);
+ return -ENOMEM;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_HTC, "htc send hl eid %d bundle %d tx count %d len %d\n",
+ eid, ep->bundle_tx, skb_queue_len(&ep->tx_req_head), skb->len);
+
+ if (ep->bundle_tx) {
+ skb_queue_tail(&ep->tx_req_head, skb);
+ queue_work(ar->workqueue, &ar->bundle_tx_work);
+ return 0;
+ } else {
+ return ath10k_htc_send(htc, eid, skb);
+ }
+}
+
+void ath10k_htc_setup_tx_req(struct ath10k_htc_ep *ep)
+{
+ if (ep->htc->max_msgs_per_htc_bundle >= ATH10K_MIN_MSG_PER_HTC_TX_BUNDLE &&
+ !ep->bundle_tx) {
+ ep->bundle_tx = true;
+ skb_queue_head_init(&ep->tx_req_head);
+ skb_queue_head_init(&ep->tx_complete_head);
+ }
+}
+
+void ath10k_htc_stop_hl(struct ath10k *ar)
+{
+ struct ath10k_htc_ep *ep;
+ int i;
+
+ cancel_work_sync(&ar->bundle_tx_work);
+ cancel_work_sync(&ar->tx_complete_work);
+
+ for (i = 0; i < ARRAY_SIZE(ar->htc.endpoint); i++) {
+ ep = &ar->htc.endpoint[i];
+
+ if (!ep->bundle_tx)
+ continue;
+
+ ath10k_dbg(ar, ATH10K_DBG_HTC, "stop tx work eid %d count %d\n",
+ ep->eid, skb_queue_len(&ep->tx_req_head));
+
+ skb_queue_purge(&ep->tx_req_head);
+ }
+}
+
+int ath10k_htc_wait_target(struct ath10k_htc *htc)
+{
+ struct ath10k *ar = htc->ar;
+ int i, status = 0;
+ unsigned long time_left;
+ struct ath10k_htc_msg *msg;
+ u16 message_id;
+
+ time_left = wait_for_completion_timeout(&htc->ctl_resp,
+ ATH10K_HTC_WAIT_TIMEOUT_HZ);
+ if (!time_left) {
+ /* Workaround: in some cases the PCI HIF doesn't
+ * receive an interrupt for the control response message
+ * even though the buffer was completed. It is suspected
+ * that iomap writes unmasking PCI CE irqs sometimes aren't
+ * propagated properly under KVM PCI passthrough.
+ */
+ ath10k_warn(ar, "failed to receive control response completion, polling..\n");
+
+ for (i = 0; i < CE_COUNT; i++)
+ ath10k_hif_send_complete_check(htc->ar, i, 1);
+
+ time_left =
+ wait_for_completion_timeout(&htc->ctl_resp,
+ ATH10K_HTC_WAIT_TIMEOUT_HZ);
+
+ if (!time_left)
+ status = -ETIMEDOUT;
+ }
+
+ if (status < 0) {
+ ath10k_err(ar, "ctl_resp never came in (%d)\n", status);
+ return status;
+ }
+
+ if (htc->control_resp_len < sizeof(msg->hdr) + sizeof(msg->ready)) {
+ ath10k_err(ar, "Invalid HTC ready msg len:%d\n",
+ htc->control_resp_len);
+ return -ECOMM;
+ }
+
+ msg = (struct ath10k_htc_msg *)htc->control_resp_buffer;
+ message_id = __le16_to_cpu(msg->hdr.message_id);
+
+ if (message_id != ATH10K_HTC_MSG_READY_ID) {
+ ath10k_err(ar, "Invalid HTC ready msg: 0x%x\n", message_id);
+ return -ECOMM;
+ }
+
+ if (ar->hw_params.use_fw_tx_credits)
+ htc->total_transmit_credits = __le16_to_cpu(msg->ready.credit_count);
+ else
+ htc->total_transmit_credits = 1;
+
+ htc->target_credit_size = __le16_to_cpu(msg->ready.credit_size);
+
+ ath10k_dbg(ar, ATH10K_DBG_HTC,
+ "Target ready! transmit resources: %d size:%d actual credits:%d\n",
+ htc->total_transmit_credits,
+ htc->target_credit_size,
+ msg->ready.credit_count);
+
+ if ((htc->total_transmit_credits == 0) ||
+ (htc->target_credit_size == 0)) {
+ ath10k_err(ar, "Invalid credit size received\n");
+ return -ECOMM;
+ }
+
+ /* The only way to determine if the ready message is an extended
+ * message is from the size.
+ */
+ if (htc->control_resp_len >=
+ sizeof(msg->hdr) + sizeof(msg->ready_ext)) {
+ htc->alt_data_credit_size =
+ __le16_to_cpu(msg->ready_ext.reserved) &
+ ATH10K_HTC_MSG_READY_EXT_ALT_DATA_MASK;
+ htc->max_msgs_per_htc_bundle =
+ min_t(u8, msg->ready_ext.max_msgs_per_htc_bundle,
+ HTC_HOST_MAX_MSG_PER_RX_BUNDLE);
+ ath10k_dbg(ar, ATH10K_DBG_HTC,
+ "Extended ready message RX bundle size %d alt size %d\n",
+ htc->max_msgs_per_htc_bundle,
+ htc->alt_data_credit_size);
+ }
+
+ INIT_WORK(&ar->bundle_tx_work, ath10k_htc_bundle_tx_work);
+ INIT_WORK(&ar->tx_complete_work, ath10k_htc_tx_complete_work);
+
+ return 0;
+}
+
+void ath10k_htc_change_tx_credit_flow(struct ath10k_htc *htc,
+ enum ath10k_htc_ep_id eid,
+ bool enable)
+{
+ struct ath10k *ar = htc->ar;
+ struct ath10k_htc_ep *ep = &ar->htc.endpoint[eid];
+
+ ep->tx_credit_flow_enabled = enable;
+}
+
+int ath10k_htc_connect_service(struct ath10k_htc *htc,
+ struct ath10k_htc_svc_conn_req *conn_req,
+ struct ath10k_htc_svc_conn_resp *conn_resp)
+{
+ struct ath10k *ar = htc->ar;
+ struct ath10k_htc_msg *msg;
+ struct ath10k_htc_conn_svc *req_msg;
+ struct ath10k_htc_conn_svc_response resp_msg_dummy;
+ struct ath10k_htc_conn_svc_response *resp_msg = &resp_msg_dummy;
+ enum ath10k_htc_ep_id assigned_eid = ATH10K_HTC_EP_COUNT;
+ struct ath10k_htc_ep *ep;
+ struct sk_buff *skb;
+ unsigned int max_msg_size = 0;
+ int length, status;
+ unsigned long time_left;
+ bool disable_credit_flow_ctrl = false;
+ u16 message_id, service_id, flags = 0;
+ u8 tx_alloc = 0;
+
+ /* special case for HTC pseudo control service */
+ if (conn_req->service_id == ATH10K_HTC_SVC_ID_RSVD_CTRL) {
+ disable_credit_flow_ctrl = true;
+ assigned_eid = ATH10K_HTC_EP_0;
+ max_msg_size = ATH10K_HTC_MAX_CTRL_MSG_LEN;
+ memset(&resp_msg_dummy, 0, sizeof(resp_msg_dummy));
+ goto setup;
+ }
+
+ tx_alloc = ath10k_htc_get_credit_allocation(htc,
+ conn_req->service_id);
+ if (!tx_alloc)
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "boot htc service %s does not allocate target credits\n",
+ htc_service_name(conn_req->service_id));
+
+ skb = ath10k_htc_build_tx_ctrl_skb(htc->ar);
+ if (!skb) {
+ ath10k_err(ar, "Failed to allocate HTC packet\n");
+ return -ENOMEM;
+ }
+
+ length = sizeof(msg->hdr) + sizeof(msg->connect_service);
+ skb_put(skb, length);
+ memset(skb->data, 0, length);
+
+ msg = (struct ath10k_htc_msg *)skb->data;
+ msg->hdr.message_id =
+ __cpu_to_le16(ATH10K_HTC_MSG_CONNECT_SERVICE_ID);
+
+ flags |= SM(tx_alloc, ATH10K_HTC_CONN_FLAGS_RECV_ALLOC);
+
+ /* Only enable credit flow control for WMI ctrl service */
+ if (conn_req->service_id != ATH10K_HTC_SVC_ID_WMI_CONTROL) {
+ flags |= ATH10K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
+ disable_credit_flow_ctrl = true;
+ }
+
+ req_msg = &msg->connect_service;
+ req_msg->flags = __cpu_to_le16(flags);
+ req_msg->service_id = __cpu_to_le16(conn_req->service_id);
+
+ reinit_completion(&htc->ctl_resp);
+
+ status = ath10k_htc_send(htc, ATH10K_HTC_EP_0, skb);
+ if (status) {
+ kfree_skb(skb);
+ return status;
+ }
+
+ /* wait for response */
+ time_left = wait_for_completion_timeout(&htc->ctl_resp,
+ ATH10K_HTC_CONN_SVC_TIMEOUT_HZ);
+ if (!time_left) {
+ ath10k_err(ar, "Service connect timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ /* we controlled the buffer creation, it's aligned */
+ msg = (struct ath10k_htc_msg *)htc->control_resp_buffer;
+ resp_msg = &msg->connect_service_response;
+ message_id = __le16_to_cpu(msg->hdr.message_id);
+ service_id = __le16_to_cpu(resp_msg->service_id);
+
+ if ((message_id != ATH10K_HTC_MSG_CONNECT_SERVICE_RESP_ID) ||
+ (htc->control_resp_len < sizeof(msg->hdr) +
+ sizeof(msg->connect_service_response))) {
+ ath10k_err(ar, "Invalid resp message ID 0x%x", message_id);
+ return -EPROTO;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_HTC,
+ "HTC Service %s connect response: status: 0x%x, assigned ep: 0x%x\n",
+ htc_service_name(service_id),
+ resp_msg->status, resp_msg->eid);
+
+ conn_resp->connect_resp_code = resp_msg->status;
+
+ /* check response status */
+ if (resp_msg->status != ATH10K_HTC_CONN_SVC_STATUS_SUCCESS) {
+ ath10k_err(ar, "HTC Service %s connect request failed: 0x%x)\n",
+ htc_service_name(service_id),
+ resp_msg->status);
+ return -EPROTO;
+ }
+
+ assigned_eid = (enum ath10k_htc_ep_id)resp_msg->eid;
+ max_msg_size = __le16_to_cpu(resp_msg->max_msg_size);
+
+setup:
+
+ if (assigned_eid >= ATH10K_HTC_EP_COUNT)
+ return -EPROTO;
+
+ if (max_msg_size == 0)
+ return -EPROTO;
+
+ ep = &htc->endpoint[assigned_eid];
+ ep->eid = assigned_eid;
+
+ if (ep->service_id != ATH10K_HTC_SVC_ID_UNUSED)
+ return -EPROTO;
+
+ /* return assigned endpoint to caller */
+ conn_resp->eid = assigned_eid;
+ conn_resp->max_msg_len = __le16_to_cpu(resp_msg->max_msg_size);
+
+ /* setup the endpoint */
+ ep->service_id = conn_req->service_id;
+ ep->max_tx_queue_depth = conn_req->max_send_queue_depth;
+ ep->max_ep_message_len = __le16_to_cpu(resp_msg->max_msg_size);
+ ep->tx_credits = tx_alloc;
+ ep->tx_credit_size = htc->target_credit_size;
+
+ if (conn_req->service_id == ATH10K_HTC_SVC_ID_HTT_DATA_MSG &&
+ htc->alt_data_credit_size != 0)
+ ep->tx_credit_size = htc->alt_data_credit_size;
+
+ /* copy all the callbacks */
+ ep->ep_ops = conn_req->ep_ops;
+
+ status = ath10k_hif_map_service_to_pipe(htc->ar,
+ ep->service_id,
+ &ep->ul_pipe_id,
+ &ep->dl_pipe_id);
+ if (status) {
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "unsupported HTC service id: %d\n",
+ ep->service_id);
+ return status;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "boot htc service '%s' ul pipe %d dl pipe %d eid %d ready\n",
+ htc_service_name(ep->service_id), ep->ul_pipe_id,
+ ep->dl_pipe_id, ep->eid);
+
+ if (disable_credit_flow_ctrl && ep->tx_credit_flow_enabled) {
+ ep->tx_credit_flow_enabled = false;
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "boot htc service '%s' eid %d TX flow control disabled\n",
+ htc_service_name(ep->service_id), assigned_eid);
+ }
+
+ return status;
+}
+
+struct sk_buff *ath10k_htc_alloc_skb(struct ath10k *ar, int size)
+{
+ struct sk_buff *skb;
+
+ skb = dev_alloc_skb(size + sizeof(struct ath10k_htc_hdr));
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, sizeof(struct ath10k_htc_hdr));
+
+ /* FW/HTC requires 4-byte aligned streams */
+ if (!IS_ALIGNED((unsigned long)skb->data, 4))
+ ath10k_warn(ar, "Unaligned HTC tx skb\n");
+
+ return skb;
+}
+
+static void ath10k_htc_pktlog_process_rx(struct ath10k *ar, struct sk_buff *skb)
+{
+ trace_ath10k_htt_pktlog(ar, skb->data, skb->len);
+ dev_kfree_skb_any(skb);
+}
+
+static int ath10k_htc_pktlog_connect(struct ath10k *ar)
+{
+ struct ath10k_htc_svc_conn_resp conn_resp;
+ struct ath10k_htc_svc_conn_req conn_req;
+ int status;
+
+ memset(&conn_req, 0, sizeof(conn_req));
+ memset(&conn_resp, 0, sizeof(conn_resp));
+
+ conn_req.ep_ops.ep_tx_complete = NULL;
+ conn_req.ep_ops.ep_rx_complete = ath10k_htc_pktlog_process_rx;
+ conn_req.ep_ops.ep_tx_credits = NULL;
+
+ /* connect to control service */
+ conn_req.service_id = ATH10K_HTC_SVC_ID_HTT_LOG_MSG;
+ status = ath10k_htc_connect_service(&ar->htc, &conn_req, &conn_resp);
+ if (status) {
+ ath10k_warn(ar, "failed to connect to PKTLOG service: %d\n",
+ status);
+ return status;
+ }
+
+ return 0;
+}
+
+static bool ath10k_htc_pktlog_svc_supported(struct ath10k *ar)
+{
+ u8 ul_pipe_id;
+ u8 dl_pipe_id;
+ int status;
+
+ status = ath10k_hif_map_service_to_pipe(ar, ATH10K_HTC_SVC_ID_HTT_LOG_MSG,
+ &ul_pipe_id,
+ &dl_pipe_id);
+ if (status) {
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "unsupported HTC pktlog service id: %d\n",
+ ATH10K_HTC_SVC_ID_HTT_LOG_MSG);
+
+ return false;
+ }
+
+ return true;
+}
+
+int ath10k_htc_start(struct ath10k_htc *htc)
+{
+ struct ath10k *ar = htc->ar;
+ struct sk_buff *skb;
+ int status = 0;
+ struct ath10k_htc_msg *msg;
+
+ skb = ath10k_htc_build_tx_ctrl_skb(htc->ar);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put(skb, sizeof(msg->hdr) + sizeof(msg->setup_complete_ext));
+ memset(skb->data, 0, skb->len);
+
+ msg = (struct ath10k_htc_msg *)skb->data;
+ msg->hdr.message_id =
+ __cpu_to_le16(ATH10K_HTC_MSG_SETUP_COMPLETE_EX_ID);
+
+ if (ar->hif.bus == ATH10K_BUS_SDIO) {
+ /* Extra setup params used by SDIO */
+ msg->setup_complete_ext.flags =
+ __cpu_to_le32(ATH10K_HTC_SETUP_COMPLETE_FLAGS_RX_BNDL_EN);
+ msg->setup_complete_ext.max_msgs_per_bundled_recv =
+ htc->max_msgs_per_htc_bundle;
+ }
+ ath10k_dbg(ar, ATH10K_DBG_HTC, "HTC is using TX credit flow control\n");
+
+ status = ath10k_htc_send(htc, ATH10K_HTC_EP_0, skb);
+ if (status) {
+ kfree_skb(skb);
+ return status;
+ }
+
+ if (ath10k_htc_pktlog_svc_supported(ar)) {
+ status = ath10k_htc_pktlog_connect(ar);
+ if (status) {
+ ath10k_err(ar, "failed to connect to pktlog: %d\n", status);
+ return status;
+ }
+ }
+
+ return 0;
+}
+
+/* registered target arrival callback from the HIF layer */
+int ath10k_htc_init(struct ath10k *ar)
+{
+ int status;
+ struct ath10k_htc *htc = &ar->htc;
+ struct ath10k_htc_svc_conn_req conn_req;
+ struct ath10k_htc_svc_conn_resp conn_resp;
+
+ spin_lock_init(&htc->tx_lock);
+
+ ath10k_htc_reset_endpoint_states(htc);
+
+ htc->ar = ar;
+
+ /* setup our pseudo HTC control endpoint connection */
+ memset(&conn_req, 0, sizeof(conn_req));
+ memset(&conn_resp, 0, sizeof(conn_resp));
+ conn_req.ep_ops.ep_tx_complete = ath10k_htc_control_tx_complete;
+ conn_req.ep_ops.ep_rx_complete = ath10k_htc_control_rx_complete;
+ conn_req.max_send_queue_depth = ATH10K_NUM_CONTROL_TX_BUFFERS;
+ conn_req.service_id = ATH10K_HTC_SVC_ID_RSVD_CTRL;
+
+ /* connect fake service */
+ status = ath10k_htc_connect_service(htc, &conn_req, &conn_resp);
+ if (status) {
+ ath10k_err(ar, "could not connect to htc service (%d)\n",
+ status);
+ return status;
+ }
+
+ init_completion(&htc->ctl_resp);
+
+ return 0;
+}
diff --git a/drivers/net/wireless/ath/ath10k/htc.h b/drivers/net/wireless/ath/ath10k/htc.h
new file mode 100644
index 000000000000..7ff665020015
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/htc.h
@@ -0,0 +1,421 @@
+/* SPDX-License-Identifier: ISC */
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2016 Qualcomm Atheros, Inc.
+ */
+
+#ifndef _HTC_H_
+#define _HTC_H_
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/bug.h>
+#include <linux/skbuff.h>
+#include <linux/timer.h>
+#include <linux/bitfield.h>
+
+struct ath10k;
+
+/****************/
+/* HTC protocol */
+/****************/
+
+/*
+ * HTC - host-target control protocol
+ *
+ * tx packets are generally <htc_hdr><payload>
+ * rx packets are more complex: <htc_hdr><payload><trailer>
+ *
+ * The payload + trailer length is stored in len.
+ * To get the payload-only length, subtract trailer_len from len.
+ *
+ * The trailer contains (possibly) multiple <htc_record> entries.
+ * Each record is an id-len-value triple.
+ *
+ * The HTC header flags, control_byte0 and control_byte1
+ * have different meanings depending on whether the packet
+ * is tx or rx.
+ *
+ * Alignment: htc_hdr, payload and trailer are
+ * 4-byte aligned.
+ */
+
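+/*
+ * Worked example (byte counts made up): an rx frame with len = 96 and
+ * trailer_len = 12 carries an 8-byte htc_hdr, 84 bytes of payload and a
+ * 12-byte trailer; the payload-only length is 96 - 12 = 84.
+ */
+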
+#define HTC_HOST_MAX_MSG_PER_RX_BUNDLE 32
+
+enum ath10k_htc_tx_flags {
+ ATH10K_HTC_FLAG_NEED_CREDIT_UPDATE = 0x01,
+ ATH10K_HTC_FLAG_SEND_BUNDLE = 0x02
+};
+
+enum ath10k_htc_rx_flags {
+ ATH10K_HTC_FLAGS_RECV_1MORE_BLOCK = 0x01,
+ ATH10K_HTC_FLAG_TRAILER_PRESENT = 0x02,
+};
+
+#define ATH10K_HTC_FLAG_BUNDLE_MASK GENMASK(7, 4)
+
+/* bits 2-3 of the flags carry bits 4-5 of the extra bundle count */
+#define ATH10K_HTC_BUNDLE_EXTRA_MASK GENMASK(3, 2)
+#define ATH10K_HTC_BUNDLE_EXTRA_SHIFT 4
+
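+/*
+ * Example (flags value made up): for max_msgs 32 and flags 0x34,
+ * bits 7-4 give count = 3 and bits 3-2 give extra = 1 << 4 = 16,
+ * so the bundle holds 19 messages.
+ */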
+static inline unsigned int ath10k_htc_get_bundle_count(u8 max_msgs, u8 flags)
+{
+ unsigned int count, extra_count = 0;
+
+ count = FIELD_GET(ATH10K_HTC_FLAG_BUNDLE_MASK, flags);
+
+ if (max_msgs > 16)
+ extra_count = FIELD_GET(ATH10K_HTC_BUNDLE_EXTRA_MASK, flags) <<
+ ATH10K_HTC_BUNDLE_EXTRA_SHIFT;
+
+ return count + extra_count;
+}
+
+struct ath10k_htc_hdr {
+ u8 eid; /* @enum ath10k_htc_ep_id */
+ u8 flags; /* @enum ath10k_htc_tx_flags, ath10k_htc_rx_flags */
+ __le16 len;
+ union {
+ u8 trailer_len; /* for rx */
+ u8 control_byte0;
+ } __packed;
+ union {
+ u8 seq_no; /* for tx */
+ u8 control_byte1;
+ } __packed;
+ union {
+ __le16 pad_len;
+ struct {
+ u8 pad0;
+ u8 pad1;
+ } __packed;
+ } __packed;
+
+} __packed __aligned(4);
+
+enum ath10k_ath10k_htc_msg_id {
+ ATH10K_HTC_MSG_READY_ID = 1,
+ ATH10K_HTC_MSG_CONNECT_SERVICE_ID = 2,
+ ATH10K_HTC_MSG_CONNECT_SERVICE_RESP_ID = 3,
+ ATH10K_HTC_MSG_SETUP_COMPLETE_ID = 4,
+ ATH10K_HTC_MSG_SETUP_COMPLETE_EX_ID = 5,
+ ATH10K_HTC_MSG_SEND_SUSPEND_COMPLETE = 6
+};
+
+enum ath10k_htc_version {
+ ATH10K_HTC_VERSION_2P0 = 0x00, /* 2.0 */
+ ATH10K_HTC_VERSION_2P1 = 0x01, /* 2.1 */
+};
+
+enum ath10k_htc_conn_flags {
+ ATH10K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_ONE_FOURTH = 0x0,
+ ATH10K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_ONE_HALF = 0x1,
+ ATH10K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_THREE_FOURTHS = 0x2,
+ ATH10K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_UNITY = 0x3,
+#define ATH10K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_MASK 0x3
+ ATH10K_HTC_CONN_FLAGS_REDUCE_CREDIT_DRIBBLE = 1 << 2,
+ ATH10K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL = 1 << 3
+#define ATH10K_HTC_CONN_FLAGS_RECV_ALLOC_MASK 0xFF00
+#define ATH10K_HTC_CONN_FLAGS_RECV_ALLOC_LSB 8
+};
+
+#define ATH10K_HTC_MSG_READY_EXT_ALT_DATA_MASK 0xFFF
+
+enum ath10k_htc_conn_svc_status {
+ ATH10K_HTC_CONN_SVC_STATUS_SUCCESS = 0,
+ ATH10K_HTC_CONN_SVC_STATUS_NOT_FOUND = 1,
+ ATH10K_HTC_CONN_SVC_STATUS_FAILED = 2,
+ ATH10K_HTC_CONN_SVC_STATUS_NO_RESOURCES = 3,
+ ATH10K_HTC_CONN_SVC_STATUS_NO_MORE_EP = 4
+};
+
+#define ATH10K_MAX_MSG_PER_HTC_TX_BUNDLE 32
+#define ATH10K_MIN_MSG_PER_HTC_TX_BUNDLE 2
+#define ATH10K_MIN_CREDIT_PER_HTC_TX_BUNDLE 2
+
+enum ath10k_htc_setup_complete_flags {
+ ATH10K_HTC_SETUP_COMPLETE_FLAGS_RX_BNDL_EN = 1
+};
+
+struct ath10k_ath10k_htc_msg_hdr {
+ __le16 message_id; /* @enum htc_message_id */
+} __packed;
+
+struct ath10k_htc_unknown {
+ u8 pad0;
+ u8 pad1;
+} __packed;
+
+struct ath10k_htc_ready {
+ __le16 credit_count;
+ __le16 credit_size;
+ u8 max_endpoints;
+ u8 pad0;
+} __packed;
+
+struct ath10k_htc_ready_extended {
+ struct ath10k_htc_ready base;
+ u8 htc_version; /* @enum ath10k_htc_version */
+ u8 max_msgs_per_htc_bundle;
+ union {
+ __le16 reserved;
+ struct {
+ u8 pad0;
+ u8 pad1;
+ } __packed;
+ } __packed;
+
+} __packed;
+
+struct ath10k_htc_conn_svc {
+ __le16 service_id;
+ __le16 flags; /* @enum ath10k_htc_conn_flags */
+ u8 pad0;
+ u8 pad1;
+} __packed;
+
+struct ath10k_htc_conn_svc_response {
+ __le16 service_id;
+ u8 status; /* @enum ath10k_htc_conn_svc_status */
+ u8 eid;
+ __le16 max_msg_size;
+} __packed;
+
+struct ath10k_htc_setup_complete_extended {
+ u8 pad0;
+ u8 pad1;
+ __le32 flags; /* @enum htc_setup_complete_flags */
+ u8 max_msgs_per_bundled_recv;
+ u8 pad2;
+ u8 pad3;
+ u8 pad4;
+} __packed;
+
+struct ath10k_htc_msg {
+ struct ath10k_ath10k_htc_msg_hdr hdr;
+ union {
+ /* host-to-target */
+ struct ath10k_htc_conn_svc connect_service;
+ struct ath10k_htc_ready ready;
+ struct ath10k_htc_ready_extended ready_ext;
+ struct ath10k_htc_unknown unknown;
+ struct ath10k_htc_setup_complete_extended setup_complete_ext;
+
+ /* target-to-host */
+ struct ath10k_htc_conn_svc_response connect_service_response;
+ };
+} __packed __aligned(4);
+
+enum ath10k_ath10k_htc_record_id {
+ ATH10K_HTC_RECORD_NULL = 0,
+ ATH10K_HTC_RECORD_CREDITS = 1,
+ ATH10K_HTC_RECORD_LOOKAHEAD = 2,
+ ATH10K_HTC_RECORD_LOOKAHEAD_BUNDLE = 3,
+};
+
+struct ath10k_ath10k_htc_record_hdr {
+ u8 id; /* @enum ath10k_ath10k_htc_record_id */
+ u8 len;
+ u8 pad0;
+ u8 pad1;
+} __packed;
+
+struct ath10k_htc_credit_report {
+ u8 eid; /* @enum ath10k_htc_ep_id */
+ u8 credits;
+ u8 pad0;
+ u8 pad1;
+} __packed;
+
+struct ath10k_htc_lookahead_report {
+ u8 pre_valid;
+ u8 pad0;
+ u8 pad1;
+ u8 pad2;
+ u8 lookahead[4];
+ u8 post_valid;
+ u8 pad3;
+ u8 pad4;
+ u8 pad5;
+} __packed;
+
+struct ath10k_htc_lookahead_bundle {
+ u8 lookahead[4];
+} __packed;
+
+struct ath10k_htc_record {
+ struct ath10k_ath10k_htc_record_hdr hdr;
+ union {
+ DECLARE_FLEX_ARRAY(struct ath10k_htc_credit_report, credit_report);
+ DECLARE_FLEX_ARRAY(struct ath10k_htc_lookahead_report, lookahead_report);
+ DECLARE_FLEX_ARRAY(struct ath10k_htc_lookahead_bundle, lookahead_bundle);
+ };
+} __packed __aligned(4);
+
+/*******************/
+/* Host-side stuff */
+/*******************/
+
+enum ath10k_htc_svc_gid {
+ ATH10K_HTC_SVC_GRP_RSVD = 0,
+ ATH10K_HTC_SVC_GRP_WMI = 1,
+ ATH10K_HTC_SVC_GRP_NMI = 2,
+ ATH10K_HTC_SVC_GRP_HTT = 3,
+ ATH10K_LOG_SERVICE_GROUP = 6,
+
+ ATH10K_HTC_SVC_GRP_TEST = 254,
+ ATH10K_HTC_SVC_GRP_LAST = 255,
+};
+
+#define SVC(group, idx) \
+ (int)(((int)(group) << 8) | (int)(idx))
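+
+/* e.g. SVC(ATH10K_HTC_SVC_GRP_WMI, 1) == 0x0101: group in the high byte,
+ * index in the low byte
+ */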
+
+enum ath10k_htc_svc_id {
+ /* NOTE: service ID of 0x0000 is reserved and should never be used */
+ ATH10K_HTC_SVC_ID_RESERVED = 0x0000,
+ ATH10K_HTC_SVC_ID_UNUSED = ATH10K_HTC_SVC_ID_RESERVED,
+
+ ATH10K_HTC_SVC_ID_RSVD_CTRL = SVC(ATH10K_HTC_SVC_GRP_RSVD, 1),
+ ATH10K_HTC_SVC_ID_WMI_CONTROL = SVC(ATH10K_HTC_SVC_GRP_WMI, 0),
+ ATH10K_HTC_SVC_ID_WMI_DATA_BE = SVC(ATH10K_HTC_SVC_GRP_WMI, 1),
+ ATH10K_HTC_SVC_ID_WMI_DATA_BK = SVC(ATH10K_HTC_SVC_GRP_WMI, 2),
+ ATH10K_HTC_SVC_ID_WMI_DATA_VI = SVC(ATH10K_HTC_SVC_GRP_WMI, 3),
+ ATH10K_HTC_SVC_ID_WMI_DATA_VO = SVC(ATH10K_HTC_SVC_GRP_WMI, 4),
+
+ ATH10K_HTC_SVC_ID_NMI_CONTROL = SVC(ATH10K_HTC_SVC_GRP_NMI, 0),
+ ATH10K_HTC_SVC_ID_NMI_DATA = SVC(ATH10K_HTC_SVC_GRP_NMI, 1),
+
+ ATH10K_HTC_SVC_ID_HTT_DATA_MSG = SVC(ATH10K_HTC_SVC_GRP_HTT, 0),
+
+ ATH10K_HTC_SVC_ID_HTT_DATA2_MSG = SVC(ATH10K_HTC_SVC_GRP_HTT, 1),
+ ATH10K_HTC_SVC_ID_HTT_DATA3_MSG = SVC(ATH10K_HTC_SVC_GRP_HTT, 2),
+ ATH10K_HTC_SVC_ID_HTT_LOG_MSG = SVC(ATH10K_LOG_SERVICE_GROUP, 0),
+ /* raw stream service (i.e. flash, tcmd, calibration apps) */
+ ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS = SVC(ATH10K_HTC_SVC_GRP_TEST, 0),
+};
+
+#undef SVC
+
+enum ath10k_htc_ep_id {
+ ATH10K_HTC_EP_UNUSED = -1,
+ ATH10K_HTC_EP_0 = 0,
+ ATH10K_HTC_EP_1 = 1,
+ ATH10K_HTC_EP_2,
+ ATH10K_HTC_EP_3,
+ ATH10K_HTC_EP_4,
+ ATH10K_HTC_EP_5,
+ ATH10K_HTC_EP_6,
+ ATH10K_HTC_EP_7,
+ ATH10K_HTC_EP_8,
+ ATH10K_HTC_EP_COUNT,
+};
+
+struct ath10k_htc_ops {
+ void (*target_send_suspend_complete)(struct ath10k *ar);
+};
+
+struct ath10k_htc_ep_ops {
+ void (*ep_tx_complete)(struct ath10k *, struct sk_buff *);
+ void (*ep_rx_complete)(struct ath10k *, struct sk_buff *);
+ void (*ep_tx_credits)(struct ath10k *);
+};
+
+/* service connection information */
+struct ath10k_htc_svc_conn_req {
+ u16 service_id;
+ struct ath10k_htc_ep_ops ep_ops;
+ int max_send_queue_depth;
+};
+
+/* service connection response information */
+struct ath10k_htc_svc_conn_resp {
+ u8 buffer_len;
+ u8 actual_len;
+ enum ath10k_htc_ep_id eid;
+ unsigned int max_msg_len;
+ u8 connect_resp_code;
+};
+
+#define ATH10K_NUM_CONTROL_TX_BUFFERS 2
+#define ATH10K_HTC_MAX_LEN 4096
+#define ATH10K_HTC_MAX_CTRL_MSG_LEN 256
+#define ATH10K_HTC_WAIT_TIMEOUT_HZ (1 * HZ)
+#define ATH10K_HTC_CONTROL_BUFFER_SIZE (ATH10K_HTC_MAX_CTRL_MSG_LEN + \
+ sizeof(struct ath10k_htc_hdr))
+#define ATH10K_HTC_CONN_SVC_TIMEOUT_HZ (1 * HZ)
+
+struct ath10k_htc_ep {
+ struct ath10k_htc *htc;
+ enum ath10k_htc_ep_id eid;
+ enum ath10k_htc_svc_id service_id;
+ struct ath10k_htc_ep_ops ep_ops;
+
+ int max_tx_queue_depth;
+ int max_ep_message_len;
+ u8 ul_pipe_id;
+ u8 dl_pipe_id;
+
+ u8 seq_no; /* for debugging */
+ int tx_credits;
+ int tx_credit_size;
+ bool tx_credit_flow_enabled;
+ bool bundle_tx;
+ struct sk_buff_head tx_req_head;
+ struct sk_buff_head tx_complete_head;
+
+};
+
+struct ath10k_htc_svc_tx_credits {
+ u16 service_id;
+ u8 credit_allocation;
+};
+
+struct ath10k_htc {
+ struct ath10k *ar;
+ struct ath10k_htc_ep endpoint[ATH10K_HTC_EP_COUNT];
+
+ /* protects endpoints */
+ spinlock_t tx_lock;
+
+ struct ath10k_htc_ops htc_ops;
+
+ u8 control_resp_buffer[ATH10K_HTC_MAX_CTRL_MSG_LEN];
+ int control_resp_len;
+
+ struct completion ctl_resp;
+
+ int total_transmit_credits;
+ int target_credit_size;
+ u8 max_msgs_per_htc_bundle;
+ int alt_data_credit_size;
+};
+
+int ath10k_htc_init(struct ath10k *ar);
+int ath10k_htc_wait_target(struct ath10k_htc *htc);
+void ath10k_htc_setup_tx_req(struct ath10k_htc_ep *ep);
+int ath10k_htc_start(struct ath10k_htc *htc);
+int ath10k_htc_connect_service(struct ath10k_htc *htc,
+ struct ath10k_htc_svc_conn_req *conn_req,
+ struct ath10k_htc_svc_conn_resp *conn_resp);
+void ath10k_htc_change_tx_credit_flow(struct ath10k_htc *htc,
+ enum ath10k_htc_ep_id eid,
+ bool enable);
+int ath10k_htc_send(struct ath10k_htc *htc, enum ath10k_htc_ep_id eid,
+ struct sk_buff *packet);
+void ath10k_htc_stop_hl(struct ath10k *ar);
+
+int ath10k_htc_send_hl(struct ath10k_htc *htc, enum ath10k_htc_ep_id eid,
+ struct sk_buff *packet);
+struct sk_buff *ath10k_htc_alloc_skb(struct ath10k *ar, int size);
+void ath10k_htc_tx_completion_handler(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_htc_rx_completion_handler(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_htc_notify_tx_completion(struct ath10k_htc_ep *ep,
+ struct sk_buff *skb);
+int ath10k_htc_process_trailer(struct ath10k_htc *htc,
+ u8 *buffer,
+ int length,
+ enum ath10k_htc_ep_id src_eid,
+ void *next_lookaheads,
+ int *next_lookaheads_len);
+
+#endif
diff --git a/drivers/net/wireless/ath/ath10k/htt.c b/drivers/net/wireless/ath/ath10k/htt.c
new file mode 100644
index 000000000000..dbaf262cd7c1
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/htt.c
@@ -0,0 +1,437 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/slab.h>
+#include <linux/if_ether.h>
+
+#include "htt.h"
+#include "core.h"
+#include "debug.h"
+#include "hif.h"
+
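+/*
+ * Different firmware flavors number their T2H messages differently. The
+ * tables below remap each firmware-specific message id (the array index)
+ * to the unified enum htt_t2h_msg_type used by the rest of the driver.
+ */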
+static const enum htt_t2h_msg_type htt_main_t2h_msg_types[] = {
+ [HTT_MAIN_T2H_MSG_TYPE_VERSION_CONF] = HTT_T2H_MSG_TYPE_VERSION_CONF,
+ [HTT_MAIN_T2H_MSG_TYPE_RX_IND] = HTT_T2H_MSG_TYPE_RX_IND,
+ [HTT_MAIN_T2H_MSG_TYPE_RX_FLUSH] = HTT_T2H_MSG_TYPE_RX_FLUSH,
+ [HTT_MAIN_T2H_MSG_TYPE_PEER_MAP] = HTT_T2H_MSG_TYPE_PEER_MAP,
+ [HTT_MAIN_T2H_MSG_TYPE_PEER_UNMAP] = HTT_T2H_MSG_TYPE_PEER_UNMAP,
+ [HTT_MAIN_T2H_MSG_TYPE_RX_ADDBA] = HTT_T2H_MSG_TYPE_RX_ADDBA,
+ [HTT_MAIN_T2H_MSG_TYPE_RX_DELBA] = HTT_T2H_MSG_TYPE_RX_DELBA,
+ [HTT_MAIN_T2H_MSG_TYPE_TX_COMPL_IND] = HTT_T2H_MSG_TYPE_TX_COMPL_IND,
+ [HTT_MAIN_T2H_MSG_TYPE_PKTLOG] = HTT_T2H_MSG_TYPE_PKTLOG,
+ [HTT_MAIN_T2H_MSG_TYPE_STATS_CONF] = HTT_T2H_MSG_TYPE_STATS_CONF,
+ [HTT_MAIN_T2H_MSG_TYPE_RX_FRAG_IND] = HTT_T2H_MSG_TYPE_RX_FRAG_IND,
+ [HTT_MAIN_T2H_MSG_TYPE_SEC_IND] = HTT_T2H_MSG_TYPE_SEC_IND,
+ [HTT_MAIN_T2H_MSG_TYPE_TX_INSPECT_IND] =
+ HTT_T2H_MSG_TYPE_TX_INSPECT_IND,
+ [HTT_MAIN_T2H_MSG_TYPE_MGMT_TX_COMPL_IND] =
+ HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION,
+ [HTT_MAIN_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND] =
+ HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND,
+ [HTT_MAIN_T2H_MSG_TYPE_RX_PN_IND] = HTT_T2H_MSG_TYPE_RX_PN_IND,
+ [HTT_MAIN_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND] =
+ HTT_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND,
+ [HTT_MAIN_T2H_MSG_TYPE_TEST] = HTT_T2H_MSG_TYPE_TEST,
+};
+
+static const enum htt_t2h_msg_type htt_10x_t2h_msg_types[] = {
+ [HTT_10X_T2H_MSG_TYPE_VERSION_CONF] = HTT_T2H_MSG_TYPE_VERSION_CONF,
+ [HTT_10X_T2H_MSG_TYPE_RX_IND] = HTT_T2H_MSG_TYPE_RX_IND,
+ [HTT_10X_T2H_MSG_TYPE_RX_FLUSH] = HTT_T2H_MSG_TYPE_RX_FLUSH,
+ [HTT_10X_T2H_MSG_TYPE_PEER_MAP] = HTT_T2H_MSG_TYPE_PEER_MAP,
+ [HTT_10X_T2H_MSG_TYPE_PEER_UNMAP] = HTT_T2H_MSG_TYPE_PEER_UNMAP,
+ [HTT_10X_T2H_MSG_TYPE_RX_ADDBA] = HTT_T2H_MSG_TYPE_RX_ADDBA,
+ [HTT_10X_T2H_MSG_TYPE_RX_DELBA] = HTT_T2H_MSG_TYPE_RX_DELBA,
+ [HTT_10X_T2H_MSG_TYPE_TX_COMPL_IND] = HTT_T2H_MSG_TYPE_TX_COMPL_IND,
+ [HTT_10X_T2H_MSG_TYPE_PKTLOG] = HTT_T2H_MSG_TYPE_PKTLOG,
+ [HTT_10X_T2H_MSG_TYPE_STATS_CONF] = HTT_T2H_MSG_TYPE_STATS_CONF,
+ [HTT_10X_T2H_MSG_TYPE_RX_FRAG_IND] = HTT_T2H_MSG_TYPE_RX_FRAG_IND,
+ [HTT_10X_T2H_MSG_TYPE_SEC_IND] = HTT_T2H_MSG_TYPE_SEC_IND,
+ [HTT_10X_T2H_MSG_TYPE_RC_UPDATE_IND] = HTT_T2H_MSG_TYPE_RC_UPDATE_IND,
+ [HTT_10X_T2H_MSG_TYPE_TX_INSPECT_IND] = HTT_T2H_MSG_TYPE_TX_INSPECT_IND,
+ [HTT_10X_T2H_MSG_TYPE_TEST] = HTT_T2H_MSG_TYPE_TEST,
+ [HTT_10X_T2H_MSG_TYPE_CHAN_CHANGE] = HTT_T2H_MSG_TYPE_CHAN_CHANGE,
+ [HTT_10X_T2H_MSG_TYPE_AGGR_CONF] = HTT_T2H_MSG_TYPE_AGGR_CONF,
+ [HTT_10X_T2H_MSG_TYPE_STATS_NOUPLOAD] = HTT_T2H_MSG_TYPE_STATS_NOUPLOAD,
+ [HTT_10X_T2H_MSG_TYPE_MGMT_TX_COMPL_IND] =
+ HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION,
+};
+
+static const enum htt_t2h_msg_type htt_tlv_t2h_msg_types[] = {
+ [HTT_TLV_T2H_MSG_TYPE_VERSION_CONF] = HTT_T2H_MSG_TYPE_VERSION_CONF,
+ [HTT_TLV_T2H_MSG_TYPE_RX_IND] = HTT_T2H_MSG_TYPE_RX_IND,
+ [HTT_TLV_T2H_MSG_TYPE_RX_FLUSH] = HTT_T2H_MSG_TYPE_RX_FLUSH,
+ [HTT_TLV_T2H_MSG_TYPE_PEER_MAP] = HTT_T2H_MSG_TYPE_PEER_MAP,
+ [HTT_TLV_T2H_MSG_TYPE_PEER_UNMAP] = HTT_T2H_MSG_TYPE_PEER_UNMAP,
+ [HTT_TLV_T2H_MSG_TYPE_RX_ADDBA] = HTT_T2H_MSG_TYPE_RX_ADDBA,
+ [HTT_TLV_T2H_MSG_TYPE_RX_DELBA] = HTT_T2H_MSG_TYPE_RX_DELBA,
+ [HTT_TLV_T2H_MSG_TYPE_TX_COMPL_IND] = HTT_T2H_MSG_TYPE_TX_COMPL_IND,
+ [HTT_TLV_T2H_MSG_TYPE_PKTLOG] = HTT_T2H_MSG_TYPE_PKTLOG,
+ [HTT_TLV_T2H_MSG_TYPE_STATS_CONF] = HTT_T2H_MSG_TYPE_STATS_CONF,
+ [HTT_TLV_T2H_MSG_TYPE_RX_FRAG_IND] = HTT_T2H_MSG_TYPE_RX_FRAG_IND,
+ [HTT_TLV_T2H_MSG_TYPE_SEC_IND] = HTT_T2H_MSG_TYPE_SEC_IND,
+ [HTT_TLV_T2H_MSG_TYPE_RC_UPDATE_IND] = HTT_T2H_MSG_TYPE_RC_UPDATE_IND,
+ [HTT_TLV_T2H_MSG_TYPE_TX_INSPECT_IND] = HTT_T2H_MSG_TYPE_TX_INSPECT_IND,
+ [HTT_TLV_T2H_MSG_TYPE_MGMT_TX_COMPL_IND] =
+ HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION,
+ [HTT_TLV_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND] =
+ HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND,
+ [HTT_TLV_T2H_MSG_TYPE_RX_PN_IND] = HTT_T2H_MSG_TYPE_RX_PN_IND,
+ [HTT_TLV_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND] =
+ HTT_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND,
+ [HTT_TLV_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND] =
+ HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND,
+ [HTT_TLV_T2H_MSG_TYPE_WDI_IPA_OP_RESPONSE] =
+ HTT_T2H_MSG_TYPE_WDI_IPA_OP_RESPONSE,
+ [HTT_TLV_T2H_MSG_TYPE_CHAN_CHANGE] = HTT_T2H_MSG_TYPE_CHAN_CHANGE,
+ [HTT_TLV_T2H_MSG_TYPE_RX_OFLD_PKT_ERR] =
+ HTT_T2H_MSG_TYPE_RX_OFLD_PKT_ERR,
+ [HTT_TLV_T2H_MSG_TYPE_TEST] = HTT_T2H_MSG_TYPE_TEST,
+};
+
+static const enum htt_t2h_msg_type htt_10_4_t2h_msg_types[] = {
+ [HTT_10_4_T2H_MSG_TYPE_VERSION_CONF] = HTT_T2H_MSG_TYPE_VERSION_CONF,
+ [HTT_10_4_T2H_MSG_TYPE_RX_IND] = HTT_T2H_MSG_TYPE_RX_IND,
+ [HTT_10_4_T2H_MSG_TYPE_RX_FLUSH] = HTT_T2H_MSG_TYPE_RX_FLUSH,
+ [HTT_10_4_T2H_MSG_TYPE_PEER_MAP] = HTT_T2H_MSG_TYPE_PEER_MAP,
+ [HTT_10_4_T2H_MSG_TYPE_PEER_UNMAP] = HTT_T2H_MSG_TYPE_PEER_UNMAP,
+ [HTT_10_4_T2H_MSG_TYPE_RX_ADDBA] = HTT_T2H_MSG_TYPE_RX_ADDBA,
+ [HTT_10_4_T2H_MSG_TYPE_RX_DELBA] = HTT_T2H_MSG_TYPE_RX_DELBA,
+ [HTT_10_4_T2H_MSG_TYPE_TX_COMPL_IND] = HTT_T2H_MSG_TYPE_TX_COMPL_IND,
+ [HTT_10_4_T2H_MSG_TYPE_PKTLOG] = HTT_T2H_MSG_TYPE_PKTLOG,
+ [HTT_10_4_T2H_MSG_TYPE_STATS_CONF] = HTT_T2H_MSG_TYPE_STATS_CONF,
+ [HTT_10_4_T2H_MSG_TYPE_RX_FRAG_IND] = HTT_T2H_MSG_TYPE_RX_FRAG_IND,
+ [HTT_10_4_T2H_MSG_TYPE_SEC_IND] = HTT_T2H_MSG_TYPE_SEC_IND,
+ [HTT_10_4_T2H_MSG_TYPE_RC_UPDATE_IND] = HTT_T2H_MSG_TYPE_RC_UPDATE_IND,
+ [HTT_10_4_T2H_MSG_TYPE_TX_INSPECT_IND] =
+ HTT_T2H_MSG_TYPE_TX_INSPECT_IND,
+ [HTT_10_4_T2H_MSG_TYPE_MGMT_TX_COMPL_IND] =
+ HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION,
+ [HTT_10_4_T2H_MSG_TYPE_CHAN_CHANGE] = HTT_T2H_MSG_TYPE_CHAN_CHANGE,
+ [HTT_10_4_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND] =
+ HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND,
+ [HTT_10_4_T2H_MSG_TYPE_RX_PN_IND] = HTT_T2H_MSG_TYPE_RX_PN_IND,
+ [HTT_10_4_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND] =
+ HTT_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND,
+ [HTT_10_4_T2H_MSG_TYPE_TEST] = HTT_T2H_MSG_TYPE_TEST,
+ [HTT_10_4_T2H_MSG_TYPE_EN_STATS] = HTT_T2H_MSG_TYPE_EN_STATS,
+ [HTT_10_4_T2H_MSG_TYPE_AGGR_CONF] = HTT_T2H_MSG_TYPE_AGGR_CONF,
+ [HTT_10_4_T2H_MSG_TYPE_TX_FETCH_IND] =
+ HTT_T2H_MSG_TYPE_TX_FETCH_IND,
+ [HTT_10_4_T2H_MSG_TYPE_TX_FETCH_CONFIRM] =
+ HTT_T2H_MSG_TYPE_TX_FETCH_CONFIRM,
+ [HTT_10_4_T2H_MSG_TYPE_STATS_NOUPLOAD] =
+ HTT_T2H_MSG_TYPE_STATS_NOUPLOAD,
+ [HTT_10_4_T2H_MSG_TYPE_TX_MODE_SWITCH_IND] =
+ HTT_T2H_MSG_TYPE_TX_MODE_SWITCH_IND,
+ [HTT_10_4_T2H_MSG_TYPE_PEER_STATS] =
+ HTT_T2H_MSG_TYPE_PEER_STATS,
+};
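+
+/*
+ * Illustrative sketch, not part of this patch: the tables above remap the
+ * firmware-flavour-specific on-the-wire message IDs to the unified
+ * enum htt_t2h_msg_type. A consumer would bounds-check the raw ID against
+ * t2h_msg_types_max before indexing, roughly as below (the helper name is
+ * hypothetical).
+ */
+#if 0 /* example only */
+static int ath10k_htt_example_remap_t2h(struct ath10k_htt *htt, u8 raw_type,
+ enum htt_t2h_msg_type *type)
+{
+ if (raw_type >= htt->t2h_msg_types_max)
+ return -EINVAL; /* unknown message for this firmware flavour */
+
+ *type = htt->t2h_msg_types[raw_type];
+ return 0;
+}
+#endif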
+
+const struct ath10k_htt_rx_desc_ops qca988x_rx_desc_ops = {
+ .rx_desc_size = sizeof(struct htt_rx_desc_v1),
+ .rx_desc_msdu_payload_offset = offsetof(struct htt_rx_desc_v1, msdu_payload)
+};
+
+static int ath10k_qca99x0_rx_desc_get_l3_pad_bytes(struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v1 *rx_desc = container_of(rxd,
+ struct htt_rx_desc_v1,
+ base);
+
+ return MS(__le32_to_cpu(rx_desc->msdu_end.qca99x0.info1),
+ RX_MSDU_END_INFO1_L3_HDR_PAD);
+}
+
+static bool ath10k_qca99x0_rx_desc_msdu_limit_error(struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v1 *rx_desc = container_of(rxd,
+ struct htt_rx_desc_v1,
+ base);
+
+ return !!(rx_desc->msdu_end.common.info0 &
+ __cpu_to_le32(RX_MSDU_END_INFO0_MSDU_LIMIT_ERR));
+}
+
+const struct ath10k_htt_rx_desc_ops qca99x0_rx_desc_ops = {
+ .rx_desc_size = sizeof(struct htt_rx_desc_v1),
+ .rx_desc_msdu_payload_offset = offsetof(struct htt_rx_desc_v1, msdu_payload),
+
+ .rx_desc_get_l3_pad_bytes = ath10k_qca99x0_rx_desc_get_l3_pad_bytes,
+ .rx_desc_get_msdu_limit_error = ath10k_qca99x0_rx_desc_msdu_limit_error,
+};
+
+static void ath10k_rx_desc_wcn3990_get_offsets(struct htt_rx_ring_rx_desc_offsets *off)
+{
+#define desc_offset(x) (offsetof(struct htt_rx_desc_v2, x) / 4)
+ off->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status));
+ off->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload));
+ off->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start));
+ off->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end));
+ off->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start));
+ off->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end));
+ off->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start));
+ off->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end));
+ off->rx_attention_offset = __cpu_to_le16(desc_offset(attention));
+ off->frag_info_offset = __cpu_to_le16(desc_offset(frag_info));
+#undef desc_offset
+}
+
+static struct htt_rx_desc *
+ath10k_rx_desc_wcn3990_from_raw_buffer(void *buff)
+{
+ return &((struct htt_rx_desc_v2 *)buff)->base;
+}
+
+static struct rx_attention *
+ath10k_rx_desc_wcn3990_get_attention(struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v2 *rx_desc = container_of(rxd, struct htt_rx_desc_v2, base);
+
+ return &rx_desc->attention;
+}
+
+static struct rx_frag_info_common *
+ath10k_rx_desc_wcn3990_get_frag_info(struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v2 *rx_desc = container_of(rxd, struct htt_rx_desc_v2, base);
+
+ return &rx_desc->frag_info.common;
+}
+
+static struct rx_mpdu_start *
+ath10k_rx_desc_wcn3990_get_mpdu_start(struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v2 *rx_desc = container_of(rxd, struct htt_rx_desc_v2, base);
+
+ return &rx_desc->mpdu_start;
+}
+
+static struct rx_mpdu_end *
+ath10k_rx_desc_wcn3990_get_mpdu_end(struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v2 *rx_desc = container_of(rxd, struct htt_rx_desc_v2, base);
+
+ return &rx_desc->mpdu_end;
+}
+
+static struct rx_msdu_start_common *
+ath10k_rx_desc_wcn3990_get_msdu_start(struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v2 *rx_desc = container_of(rxd, struct htt_rx_desc_v2, base);
+
+ return &rx_desc->msdu_start.common;
+}
+
+static struct rx_msdu_end_common *
+ath10k_rx_desc_wcn3990_get_msdu_end(struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v2 *rx_desc = container_of(rxd, struct htt_rx_desc_v2, base);
+
+ return &rx_desc->msdu_end.common;
+}
+
+static struct rx_ppdu_start *
+ath10k_rx_desc_wcn3990_get_ppdu_start(struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v2 *rx_desc = container_of(rxd, struct htt_rx_desc_v2, base);
+
+ return &rx_desc->ppdu_start;
+}
+
+static struct rx_ppdu_end_common *
+ath10k_rx_desc_wcn3990_get_ppdu_end(struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v2 *rx_desc = container_of(rxd, struct htt_rx_desc_v2, base);
+
+ return &rx_desc->ppdu_end.common;
+}
+
+static u8 *
+ath10k_rx_desc_wcn3990_get_rx_hdr_status(struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v2 *rx_desc = container_of(rxd, struct htt_rx_desc_v2, base);
+
+ return rx_desc->rx_hdr_status;
+}
+
+static u8 *
+ath10k_rx_desc_wcn3990_get_msdu_payload(struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v2 *rx_desc = container_of(rxd, struct htt_rx_desc_v2, base);
+
+ return rx_desc->msdu_payload;
+}
+
+const struct ath10k_htt_rx_desc_ops wcn3990_rx_desc_ops = {
+ .rx_desc_size = sizeof(struct htt_rx_desc_v2),
+ .rx_desc_msdu_payload_offset = offsetof(struct htt_rx_desc_v2, msdu_payload),
+
+ .rx_desc_from_raw_buffer = ath10k_rx_desc_wcn3990_from_raw_buffer,
+ .rx_desc_get_offsets = ath10k_rx_desc_wcn3990_get_offsets,
+ .rx_desc_get_attention = ath10k_rx_desc_wcn3990_get_attention,
+ .rx_desc_get_frag_info = ath10k_rx_desc_wcn3990_get_frag_info,
+ .rx_desc_get_mpdu_start = ath10k_rx_desc_wcn3990_get_mpdu_start,
+ .rx_desc_get_mpdu_end = ath10k_rx_desc_wcn3990_get_mpdu_end,
+ .rx_desc_get_msdu_start = ath10k_rx_desc_wcn3990_get_msdu_start,
+ .rx_desc_get_msdu_end = ath10k_rx_desc_wcn3990_get_msdu_end,
+ .rx_desc_get_ppdu_start = ath10k_rx_desc_wcn3990_get_ppdu_start,
+ .rx_desc_get_ppdu_end = ath10k_rx_desc_wcn3990_get_ppdu_end,
+ .rx_desc_get_rx_hdr_status = ath10k_rx_desc_wcn3990_get_rx_hdr_status,
+ .rx_desc_get_msdu_payload = ath10k_rx_desc_wcn3990_get_msdu_payload,
+};
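+
+/*
+ * Illustrative sketch, not part of this patch: consumers are expected to go
+ * through the per-hardware ops table above instead of casting to
+ * struct htt_rx_desc_v1/v2 directly. Note the QCA988x/QCA99x0 tables leave
+ * most hooks NULL, so real callers need a v1 fallback. Where the ops pointer
+ * actually lives is an assumption here (shown as a plain parameter), and the
+ * helper name is hypothetical.
+ */
+#if 0 /* example only */
+static struct rx_attention *
+htt_example_rx_desc_attention(const struct ath10k_htt_rx_desc_ops *ops,
+ struct htt_rx_desc *rxd)
+{
+ return ops->rx_desc_get_attention(rxd);
+}
+#endif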
+
+int ath10k_htt_connect(struct ath10k_htt *htt)
+{
+ struct ath10k_htc_svc_conn_req conn_req;
+ struct ath10k_htc_svc_conn_resp conn_resp;
+ struct ath10k *ar = htt->ar;
+ struct ath10k_htc_ep *ep;
+ int status;
+
+ memset(&conn_req, 0, sizeof(conn_req));
+ memset(&conn_resp, 0, sizeof(conn_resp));
+
+ conn_req.ep_ops.ep_tx_complete = ath10k_htt_htc_tx_complete;
+ conn_req.ep_ops.ep_rx_complete = ath10k_htt_htc_t2h_msg_handler;
+ conn_req.ep_ops.ep_tx_credits = ath10k_htt_op_ep_tx_credits;
+
+ /* connect to control service */
+ conn_req.service_id = ATH10K_HTC_SVC_ID_HTT_DATA_MSG;
+
+ status = ath10k_htc_connect_service(&htt->ar->htc, &conn_req,
+ &conn_resp);
+
+ if (status)
+ return status;
+
+ htt->eid = conn_resp.eid;
+
+ if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL) {
+ ep = &ar->htc.endpoint[htt->eid];
+ ath10k_htc_setup_tx_req(ep);
+ }
+
+ htt->disable_tx_comp = ath10k_hif_get_htt_tx_complete(htt->ar);
+ if (htt->disable_tx_comp)
+ ath10k_htc_change_tx_credit_flow(&htt->ar->htc, htt->eid, true);
+
+ return 0;
+}
+
+int ath10k_htt_init(struct ath10k *ar)
+{
+ struct ath10k_htt *htt = &ar->htt;
+
+ htt->ar = ar;
+
+ /*
+ * Prefetch enough data to satisfy target
+ * classification engine.
+ * This is for LL chips. HL chips will probably
+ * transfer all frames in the tx fragment.
+ */
+ htt->prefetch_len =
+ 36 + /* 802.11 + qos + ht */
+ 4 + /* 802.1q */
+ 8 + /* llc snap */
+ 2; /* ip4 dscp or ip6 priority */
+
+ switch (ar->running_fw->fw_file.htt_op_version) {
+ case ATH10K_FW_HTT_OP_VERSION_10_4:
+ ar->htt.t2h_msg_types = htt_10_4_t2h_msg_types;
+ ar->htt.t2h_msg_types_max = HTT_10_4_T2H_NUM_MSGS;
+ break;
+ case ATH10K_FW_HTT_OP_VERSION_10_1:
+ ar->htt.t2h_msg_types = htt_10x_t2h_msg_types;
+ ar->htt.t2h_msg_types_max = HTT_10X_T2H_NUM_MSGS;
+ break;
+ case ATH10K_FW_HTT_OP_VERSION_TLV:
+ ar->htt.t2h_msg_types = htt_tlv_t2h_msg_types;
+ ar->htt.t2h_msg_types_max = HTT_TLV_T2H_NUM_MSGS;
+ break;
+ case ATH10K_FW_HTT_OP_VERSION_MAIN:
+ ar->htt.t2h_msg_types = htt_main_t2h_msg_types;
+ ar->htt.t2h_msg_types_max = HTT_MAIN_T2H_NUM_MSGS;
+ break;
+ case ATH10K_FW_HTT_OP_VERSION_MAX:
+ case ATH10K_FW_HTT_OP_VERSION_UNSET:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+ ath10k_htt_set_tx_ops(htt);
+ ath10k_htt_set_rx_ops(htt);
+
+ return 0;
+}
+
+#define HTT_TARGET_VERSION_TIMEOUT_HZ (3 * HZ)
+
+static int ath10k_htt_verify_version(struct ath10k_htt *htt)
+{
+ struct ath10k *ar = htt->ar;
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt target version %d.%d\n",
+ htt->target_version_major, htt->target_version_minor);
+
+ if (htt->target_version_major != 2 &&
+ htt->target_version_major != 3) {
+ ath10k_err(ar, "unsupported htt major version %d. supported versions are 2 and 3\n",
+ htt->target_version_major);
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+int ath10k_htt_setup(struct ath10k_htt *htt)
+{
+ struct ath10k *ar = htt->ar;
+ int status;
+
+ init_completion(&htt->target_version_received);
+
+ status = ath10k_htt_h2t_ver_req_msg(htt);
+ if (status)
+ return status;
+
+ status = wait_for_completion_timeout(&htt->target_version_received,
+ HTT_TARGET_VERSION_TIMEOUT_HZ);
+ if (status == 0) {
+ ath10k_warn(ar, "htt version request timed out\n");
+ return -ETIMEDOUT;
+ }
+
+ status = ath10k_htt_verify_version(htt);
+ if (status) {
+ ath10k_warn(ar, "failed to verify htt version: %d\n",
+ status);
+ return status;
+ }
+
+ status = ath10k_htt_send_frag_desc_bank_cfg(htt);
+ if (status)
+ return status;
+
+ status = ath10k_htt_send_rx_ring_cfg(htt);
+ if (status) {
+ ath10k_warn(ar, "failed to setup rx ring: %d\n",
+ status);
+ return status;
+ }
+
+ status = ath10k_htt_h2t_aggr_cfg_msg(htt,
+ htt->max_num_ampdu,
+ htt->max_num_amsdu);
+ if (status) {
+ ath10k_warn(ar, "failed to setup amsdu/ampdu limit: %d\n",
+ status);
+ return status;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h
new file mode 100644
index 000000000000..603f6de62b0a
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/htt.h
@@ -0,0 +1,2465 @@
+/* SPDX-License-Identifier: ISC */
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021, 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _HTT_H_
+#define _HTT_H_
+
+#include <linux/bug.h>
+#include <linux/interrupt.h>
+#include <linux/dmapool.h>
+#include <linux/hashtable.h>
+#include <linux/kfifo.h>
+#include <net/mac80211.h>
+
+#include "htc.h"
+#include "hw.h"
+#include "rx_desc.h"
+
+enum htt_dbg_stats_type {
+ HTT_DBG_STATS_WAL_PDEV_TXRX = 1 << 0,
+ HTT_DBG_STATS_RX_REORDER = 1 << 1,
+ HTT_DBG_STATS_RX_RATE_INFO = 1 << 2,
+ HTT_DBG_STATS_TX_PPDU_LOG = 1 << 3,
+ HTT_DBG_STATS_TX_RATE_INFO = 1 << 4,
+ /* bits 5-23 currently reserved */
+
+ HTT_DBG_NUM_STATS /* keep this last */
+};
+
+enum htt_h2t_msg_type { /* host-to-target */
+ HTT_H2T_MSG_TYPE_VERSION_REQ = 0,
+ HTT_H2T_MSG_TYPE_TX_FRM = 1,
+ HTT_H2T_MSG_TYPE_RX_RING_CFG = 2,
+ HTT_H2T_MSG_TYPE_STATS_REQ = 3,
+ HTT_H2T_MSG_TYPE_SYNC = 4,
+ HTT_H2T_MSG_TYPE_AGGR_CFG = 5,
+ HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG = 6,
+
+ /* This command is used for sending management frames in HTT < 3.0.
+ * HTT >= 3.0 uses TX_FRM for everything.
+ */
+ HTT_H2T_MSG_TYPE_MGMT_TX = 7,
+ HTT_H2T_MSG_TYPE_TX_FETCH_RESP = 11,
+
+ HTT_H2T_NUM_MSGS /* keep this last */
+};
+
+struct htt_cmd_hdr {
+ u8 msg_type;
+} __packed;
+
+struct htt_ver_req {
+ u8 pad[sizeof(u32) - sizeof(struct htt_cmd_hdr)];
+} __packed;
+
+/*
+ * HTT tx MSDU descriptor
+ *
+ * The HTT tx MSDU descriptor is created by the host HTT SW for each
+ * tx MSDU. The HTT tx MSDU descriptor contains the information that
+ * the target firmware needs for the FW's tx processing, particularly
+ * for creating the HW msdu descriptor.
+ * The same HTT tx descriptor is used for HL and LL systems, though
+ * a few fields within the tx descriptor are used only by LL or
+ * only by HL.
+ * The HTT tx descriptor is defined in two manners: by a struct with
+ * bitfields, and by a series of [dword offset, bit mask, bit shift]
+ * definitions.
+ * The target should use the struct def, for simplicity and clarity,
+ * but the host shall use the bit-mask + bit-shift defs, to be endian-
+ * neutral. Specifically, the host shall use the get/set macros built
+ * around the mask + shift defs (see the illustrative sketch after the
+ * FLAGS0 definitions below).
+ */
+struct htt_data_tx_desc_frag {
+ union {
+ struct double_word_addr {
+ __le32 paddr;
+ __le32 len;
+ } __packed dword_addr;
+ struct triple_word_addr {
+ __le32 paddr_lo;
+ __le16 paddr_hi;
+ __le16 len_16;
+ } __packed tword_addr;
+ } __packed;
+} __packed;
+
+struct htt_msdu_ext_desc {
+ __le32 tso_flag[3];
+ __le16 ip_identification;
+ u8 flags;
+ u8 reserved;
+ struct htt_data_tx_desc_frag frags[6];
+};
+
+struct htt_msdu_ext_desc_64 {
+ __le32 tso_flag[5];
+ __le16 ip_identification;
+ u8 flags;
+ u8 reserved;
+ struct htt_data_tx_desc_frag frags[6];
+};
+
+#define HTT_MSDU_EXT_DESC_FLAG_IPV4_CSUM_ENABLE BIT(0)
+#define HTT_MSDU_EXT_DESC_FLAG_UDP_IPV4_CSUM_ENABLE BIT(1)
+#define HTT_MSDU_EXT_DESC_FLAG_UDP_IPV6_CSUM_ENABLE BIT(2)
+#define HTT_MSDU_EXT_DESC_FLAG_TCP_IPV4_CSUM_ENABLE BIT(3)
+#define HTT_MSDU_EXT_DESC_FLAG_TCP_IPV6_CSUM_ENABLE BIT(4)
+
+#define HTT_MSDU_CHECKSUM_ENABLE (HTT_MSDU_EXT_DESC_FLAG_IPV4_CSUM_ENABLE \
+ | HTT_MSDU_EXT_DESC_FLAG_UDP_IPV4_CSUM_ENABLE \
+ | HTT_MSDU_EXT_DESC_FLAG_UDP_IPV6_CSUM_ENABLE \
+ | HTT_MSDU_EXT_DESC_FLAG_TCP_IPV4_CSUM_ENABLE \
+ | HTT_MSDU_EXT_DESC_FLAG_TCP_IPV6_CSUM_ENABLE)
+
+#define HTT_MSDU_EXT_DESC_FLAG_IPV4_CSUM_ENABLE_64 BIT(16)
+#define HTT_MSDU_EXT_DESC_FLAG_UDP_IPV4_CSUM_ENABLE_64 BIT(17)
+#define HTT_MSDU_EXT_DESC_FLAG_UDP_IPV6_CSUM_ENABLE_64 BIT(18)
+#define HTT_MSDU_EXT_DESC_FLAG_TCP_IPV4_CSUM_ENABLE_64 BIT(19)
+#define HTT_MSDU_EXT_DESC_FLAG_TCP_IPV6_CSUM_ENABLE_64 BIT(20)
+#define HTT_MSDU_EXT_DESC_FLAG_PARTIAL_CSUM_ENABLE_64 BIT(21)
+
+#define HTT_MSDU_CHECKSUM_ENABLE_64 (HTT_MSDU_EXT_DESC_FLAG_IPV4_CSUM_ENABLE_64 \
+ | HTT_MSDU_EXT_DESC_FLAG_UDP_IPV4_CSUM_ENABLE_64 \
+ | HTT_MSDU_EXT_DESC_FLAG_UDP_IPV6_CSUM_ENABLE_64 \
+ | HTT_MSDU_EXT_DESC_FLAG_TCP_IPV4_CSUM_ENABLE_64 \
+ | HTT_MSDU_EXT_DESC_FLAG_TCP_IPV6_CSUM_ENABLE_64)
+
+enum htt_data_tx_desc_flags0 {
+ HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT = 1 << 0,
+ HTT_DATA_TX_DESC_FLAGS0_NO_AGGR = 1 << 1,
+ HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT = 1 << 2,
+ HTT_DATA_TX_DESC_FLAGS0_NO_CLASSIFY = 1 << 3,
+ HTT_DATA_TX_DESC_FLAGS0_RSVD0 = 1 << 4
+#define HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE_MASK 0xE0
+#define HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE_LSB 5
+};
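+
+/*
+ * Illustrative sketch, not part of this patch: the endian-neutral
+ * mask + shift pattern prescribed by the tx descriptor comment above,
+ * applied to the FLAGS0 pkt_type field (hypothetical helpers, example
+ * only).
+ */
+#if 0 /* example only */
+static inline u8 htt_example_flags0_pkt_type_get(u8 flags0)
+{
+ return (flags0 & HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE_MASK) >>
+ HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE_LSB;
+}
+
+static inline u8 htt_example_flags0_pkt_type_set(u8 flags0, u8 pkt_type)
+{
+ flags0 &= ~HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE_MASK;
+ flags0 |= (pkt_type << HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE_LSB) &
+ HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE_MASK;
+ return flags0;
+}
+#endif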
+
+enum htt_data_tx_desc_flags1 {
+#define HTT_DATA_TX_DESC_FLAGS1_VDEV_ID_BITS 6
+#define HTT_DATA_TX_DESC_FLAGS1_VDEV_ID_MASK 0x003F
+#define HTT_DATA_TX_DESC_FLAGS1_VDEV_ID_LSB 0
+#define HTT_DATA_TX_DESC_FLAGS1_EXT_TID_BITS 5
+#define HTT_DATA_TX_DESC_FLAGS1_EXT_TID_MASK 0x07C0
+#define HTT_DATA_TX_DESC_FLAGS1_EXT_TID_LSB 6
+ HTT_DATA_TX_DESC_FLAGS1_POSTPONED = 1 << 11,
+ HTT_DATA_TX_DESC_FLAGS1_MORE_IN_BATCH = 1 << 12,
+ HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD = 1 << 13,
+ HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD = 1 << 14,
+ HTT_DATA_TX_DESC_FLAGS1_TX_COMPLETE = 1 << 15
+};
+
+#define HTT_TX_CREDIT_DELTA_ABS_M 0xffff0000
+#define HTT_TX_CREDIT_DELTA_ABS_S 16
+#define HTT_TX_CREDIT_DELTA_ABS_GET(word) \
+ (((word) & HTT_TX_CREDIT_DELTA_ABS_M) >> HTT_TX_CREDIT_DELTA_ABS_S)
+
+#define HTT_TX_CREDIT_SIGN_BIT_M 0x00000100
+#define HTT_TX_CREDIT_SIGN_BIT_S 8
+#define HTT_TX_CREDIT_SIGN_BIT_GET(word) \
+ (((word) & HTT_TX_CREDIT_SIGN_BIT_M) >> HTT_TX_CREDIT_SIGN_BIT_S)
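+
+/*
+ * Illustrative sketch, not part of this patch: the credit update word
+ * carries a magnitude in bits 31:16 and a sign flag in bit 8, where a set
+ * sign bit means credits are being removed. Hypothetical helper, assuming
+ * the word has already been converted to CPU endianness.
+ */
+#if 0 /* example only */
+static inline int htt_example_tx_credit_delta(u32 word)
+{
+ int delta = HTT_TX_CREDIT_DELTA_ABS_GET(word);
+
+ return HTT_TX_CREDIT_SIGN_BIT_GET(word) ? -delta : delta;
+}
+#endif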
+
+enum htt_data_tx_ext_tid {
+ HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST = 16,
+ HTT_DATA_TX_EXT_TID_MGMT = 17,
+ HTT_DATA_TX_EXT_TID_INVALID = 31
+};
+
+#define HTT_INVALID_PEERID 0xFFFF
+
+/*
+ * htt_data_tx_desc - used for data tx path
+ *
+ * Note: vdev_id irrelevant for pkt_type == raw and no_classify == 1.
+ * ext_tid: for qos-data frames (0-15), see %HTT_DATA_TX_EXT_TID_
+ * for special kinds of tids
+ * postponed: only for HL hosts. indicates if this is a resend
+ * (HL hosts manage queues on the host)
+ * more_in_batch: only for HL hosts. indicates if more packets are
+ * pending. this allows target to wait and aggregate
+ * freq: 0 means home channel of the given vdev; intended for offchannel tx
+ */
+struct htt_data_tx_desc {
+ u8 flags0; /* %HTT_DATA_TX_DESC_FLAGS0_ */
+ __le16 flags1; /* %HTT_DATA_TX_DESC_FLAGS1_ */
+ __le16 len;
+ __le16 id;
+ __le32 frags_paddr;
+ union {
+ __le32 peerid;
+ struct {
+ __le16 peerid;
+ __le16 freq;
+ } __packed offchan_tx;
+ } __packed;
+ u8 prefetch[0]; /* start of frame, for FW classification engine */
+} __packed;
+
+struct htt_data_tx_desc_64 {
+ u8 flags0; /* %HTT_DATA_TX_DESC_FLAGS0_ */
+ __le16 flags1; /* %HTT_DATA_TX_DESC_FLAGS1_ */
+ __le16 len;
+ __le16 id;
+ __le64 frags_paddr;
+ union {
+ __le32 peerid;
+ struct {
+ __le16 peerid;
+ __le16 freq;
+ } __packed offchan_tx;
+ } __packed;
+ u8 prefetch[0]; /* start of frame, for FW classification engine */
+} __packed;
+
+enum htt_rx_ring_flags {
+ HTT_RX_RING_FLAGS_MAC80211_HDR = 1 << 0,
+ HTT_RX_RING_FLAGS_MSDU_PAYLOAD = 1 << 1,
+ HTT_RX_RING_FLAGS_PPDU_START = 1 << 2,
+ HTT_RX_RING_FLAGS_PPDU_END = 1 << 3,
+ HTT_RX_RING_FLAGS_MPDU_START = 1 << 4,
+ HTT_RX_RING_FLAGS_MPDU_END = 1 << 5,
+ HTT_RX_RING_FLAGS_MSDU_START = 1 << 6,
+ HTT_RX_RING_FLAGS_MSDU_END = 1 << 7,
+ HTT_RX_RING_FLAGS_RX_ATTENTION = 1 << 8,
+ HTT_RX_RING_FLAGS_FRAG_INFO = 1 << 9,
+ HTT_RX_RING_FLAGS_UNICAST_RX = 1 << 10,
+ HTT_RX_RING_FLAGS_MULTICAST_RX = 1 << 11,
+ HTT_RX_RING_FLAGS_CTRL_RX = 1 << 12,
+ HTT_RX_RING_FLAGS_MGMT_RX = 1 << 13,
+ HTT_RX_RING_FLAGS_NULL_RX = 1 << 14,
+ HTT_RX_RING_FLAGS_PHY_DATA_RX = 1 << 15
+};
+
+#define HTT_RX_RING_SIZE_MIN 128
+#define HTT_RX_RING_SIZE_MAX 2048
+#define HTT_RX_RING_SIZE HTT_RX_RING_SIZE_MAX
+#define HTT_RX_RING_FILL_LEVEL (((HTT_RX_RING_SIZE) / 2) - 1)
+#define HTT_RX_RING_FILL_LEVEL_DUAL_MAC (HTT_RX_RING_SIZE - 1)
+
+struct htt_rx_ring_rx_desc_offsets {
+ /* the following offsets are in 4-byte units */
+ __le16 mac80211_hdr_offset;
+ __le16 msdu_payload_offset;
+ __le16 ppdu_start_offset;
+ __le16 ppdu_end_offset;
+ __le16 mpdu_start_offset;
+ __le16 mpdu_end_offset;
+ __le16 msdu_start_offset;
+ __le16 msdu_end_offset;
+ __le16 rx_attention_offset;
+ __le16 frag_info_offset;
+} __packed;
+
+struct htt_rx_ring_setup_ring32 {
+ __le32 fw_idx_shadow_reg_paddr;
+ __le32 rx_ring_base_paddr;
+ __le16 rx_ring_len; /* in 4-byte words */
+ __le16 rx_ring_bufsize; /* rx skb size - in bytes */
+ __le16 flags; /* %HTT_RX_RING_FLAGS_ */
+ __le16 fw_idx_init_val;
+
+ struct htt_rx_ring_rx_desc_offsets offsets;
+} __packed;
+
+struct htt_rx_ring_setup_ring64 {
+ __le64 fw_idx_shadow_reg_paddr;
+ __le64 rx_ring_base_paddr;
+ __le16 rx_ring_len; /* in 4-byte words */
+ __le16 rx_ring_bufsize; /* rx skb size - in bytes */
+ __le16 flags; /* %HTT_RX_RING_FLAGS_ */
+ __le16 fw_idx_init_val;
+
+ struct htt_rx_ring_rx_desc_offsets offsets;
+} __packed;
+
+struct htt_rx_ring_setup_hdr {
+ u8 num_rings; /* supported values: 1, 2 */
+ __le16 rsvd0;
+} __packed;
+
+struct htt_rx_ring_setup_32 {
+ struct htt_rx_ring_setup_hdr hdr;
+ struct htt_rx_ring_setup_ring32 rings[];
+} __packed;
+
+struct htt_rx_ring_setup_64 {
+ struct htt_rx_ring_setup_hdr hdr;
+ struct htt_rx_ring_setup_ring64 rings[];
+} __packed;
+
+/*
+ * htt_stats_req - request target to send specified statistics
+ *
+ * @msg_type: hardcoded %HTT_H2T_MSG_TYPE_STATS_REQ
+ * @upload_types: see %htt_dbg_stats_type. this is actually a 24-bit field,
+ * so make sure it's little-endian.
+ * @reset_types: see %htt_dbg_stats_type. this is actually a 24-bit field,
+ * so make sure it's little-endian.
+ * @cfg_val: stat_type specific configuration
+ * @stat_type: see %htt_dbg_stats_type
+ * @cookie_lsb: used for confirmation message from target->host
+ * @cookie_msb: ditto as %cookie
+ */
+struct htt_stats_req {
+ u8 upload_types[3];
+ u8 rsvd0;
+ u8 reset_types[3];
+ struct {
+ u8 mpdu_bytes;
+ u8 mpdu_num_msdus;
+ u8 msdu_bytes;
+ } __packed;
+ u8 stat_type;
+ __le32 cookie_lsb;
+ __le32 cookie_msb;
+} __packed;
+
+#define HTT_STATS_REQ_CFG_STAT_TYPE_INVALID 0xff
+#define HTT_STATS_BIT_MASK GENMASK(16, 0)
+
+/*
+ * htt_oob_sync_req - request out-of-band sync
+ *
+ * The HTT SYNC tells the target to suspend processing of subsequent
+ * HTT host-to-target messages until some other target agent locally
+ * informs the target HTT FW that the current sync counter is equal to
+ * or greater than (in a modulo sense) the sync counter specified in
+ * the SYNC message.
+ *
+ * This allows other host-target components to synchronize their operation
+ * with HTT, e.g. to ensure that tx frames don't get transmitted until a
+ * security key has been downloaded to and activated by the target.
+ * In the absence of any explicit synchronization counter value
+ * specification, the target HTT FW will use zero as the default current
+ * sync value.
+ *
+ * The HTT target FW will suspend its host->target message processing as long
+ * as 0 < (in-band sync counter - out-of-band sync counter) & 0xff < 128.
+ */
+struct htt_oob_sync_req {
+ u8 sync_count;
+ __le16 rsvd0;
+} __packed;
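+
+/*
+ * Illustrative sketch, not part of this patch: the "modulo sense"
+ * comparison described above means the target stays suspended only while
+ * the out-of-band counter trails the in-band counter by less than the
+ * half-window of 128 (hypothetical helper, example only).
+ */
+#if 0 /* example only */
+static inline bool htt_example_sync_blocked(u8 in_band, u8 out_of_band)
+{
+ u8 diff = (in_band - out_of_band) & 0xff;
+
+ return diff > 0 && diff < 128;
+}
+#endif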
+
+struct htt_aggr_conf {
+ u8 max_num_ampdu_subframes;
+ /* amsdu_subframes is limited by 0x1F mask */
+ u8 max_num_amsdu_subframes;
+} __packed;
+
+struct htt_aggr_conf_v2 {
+ u8 max_num_ampdu_subframes;
+ /* amsdu_subframes is limited by 0x1F mask */
+ u8 max_num_amsdu_subframes;
+ u8 reserved;
+} __packed;
+
+#define HTT_MGMT_FRM_HDR_DOWNLOAD_LEN 32
+struct htt_mgmt_tx_desc_qca99x0 {
+ __le32 rate;
+} __packed;
+
+struct htt_mgmt_tx_desc {
+ u8 pad[sizeof(u32) - sizeof(struct htt_cmd_hdr)];
+ __le32 msdu_paddr;
+ __le32 desc_id;
+ __le32 len;
+ __le32 vdev_id;
+ u8 hdr[HTT_MGMT_FRM_HDR_DOWNLOAD_LEN];
+ union {
+ struct htt_mgmt_tx_desc_qca99x0 qca99x0;
+ } __packed;
+} __packed;
+
+enum htt_mgmt_tx_status {
+ HTT_MGMT_TX_STATUS_OK = 0,
+ HTT_MGMT_TX_STATUS_RETRY = 1,
+ HTT_MGMT_TX_STATUS_DROP = 2
+};
+
+/*=== target -> host messages ===============================================*/
+
+enum htt_main_t2h_msg_type {
+ HTT_MAIN_T2H_MSG_TYPE_VERSION_CONF = 0x0,
+ HTT_MAIN_T2H_MSG_TYPE_RX_IND = 0x1,
+ HTT_MAIN_T2H_MSG_TYPE_RX_FLUSH = 0x2,
+ HTT_MAIN_T2H_MSG_TYPE_PEER_MAP = 0x3,
+ HTT_MAIN_T2H_MSG_TYPE_PEER_UNMAP = 0x4,
+ HTT_MAIN_T2H_MSG_TYPE_RX_ADDBA = 0x5,
+ HTT_MAIN_T2H_MSG_TYPE_RX_DELBA = 0x6,
+ HTT_MAIN_T2H_MSG_TYPE_TX_COMPL_IND = 0x7,
+ HTT_MAIN_T2H_MSG_TYPE_PKTLOG = 0x8,
+ HTT_MAIN_T2H_MSG_TYPE_STATS_CONF = 0x9,
+ HTT_MAIN_T2H_MSG_TYPE_RX_FRAG_IND = 0xa,
+ HTT_MAIN_T2H_MSG_TYPE_SEC_IND = 0xb,
+ HTT_MAIN_T2H_MSG_TYPE_TX_INSPECT_IND = 0xd,
+ HTT_MAIN_T2H_MSG_TYPE_MGMT_TX_COMPL_IND = 0xe,
+ HTT_MAIN_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND = 0xf,
+ HTT_MAIN_T2H_MSG_TYPE_RX_PN_IND = 0x10,
+ HTT_MAIN_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND = 0x11,
+ HTT_MAIN_T2H_MSG_TYPE_TEST,
+ /* keep this last */
+ HTT_MAIN_T2H_NUM_MSGS
+};
+
+enum htt_10x_t2h_msg_type {
+ HTT_10X_T2H_MSG_TYPE_VERSION_CONF = 0x0,
+ HTT_10X_T2H_MSG_TYPE_RX_IND = 0x1,
+ HTT_10X_T2H_MSG_TYPE_RX_FLUSH = 0x2,
+ HTT_10X_T2H_MSG_TYPE_PEER_MAP = 0x3,
+ HTT_10X_T2H_MSG_TYPE_PEER_UNMAP = 0x4,
+ HTT_10X_T2H_MSG_TYPE_RX_ADDBA = 0x5,
+ HTT_10X_T2H_MSG_TYPE_RX_DELBA = 0x6,
+ HTT_10X_T2H_MSG_TYPE_TX_COMPL_IND = 0x7,
+ HTT_10X_T2H_MSG_TYPE_PKTLOG = 0x8,
+ HTT_10X_T2H_MSG_TYPE_STATS_CONF = 0x9,
+ HTT_10X_T2H_MSG_TYPE_RX_FRAG_IND = 0xa,
+ HTT_10X_T2H_MSG_TYPE_SEC_IND = 0xb,
+ HTT_10X_T2H_MSG_TYPE_RC_UPDATE_IND = 0xc,
+ HTT_10X_T2H_MSG_TYPE_TX_INSPECT_IND = 0xd,
+ HTT_10X_T2H_MSG_TYPE_TEST = 0xe,
+ HTT_10X_T2H_MSG_TYPE_CHAN_CHANGE = 0xf,
+ HTT_10X_T2H_MSG_TYPE_AGGR_CONF = 0x11,
+ HTT_10X_T2H_MSG_TYPE_STATS_NOUPLOAD = 0x12,
+ HTT_10X_T2H_MSG_TYPE_MGMT_TX_COMPL_IND = 0x13,
+ /* keep this last */
+ HTT_10X_T2H_NUM_MSGS
+};
+
+enum htt_tlv_t2h_msg_type {
+ HTT_TLV_T2H_MSG_TYPE_VERSION_CONF = 0x0,
+ HTT_TLV_T2H_MSG_TYPE_RX_IND = 0x1,
+ HTT_TLV_T2H_MSG_TYPE_RX_FLUSH = 0x2,
+ HTT_TLV_T2H_MSG_TYPE_PEER_MAP = 0x3,
+ HTT_TLV_T2H_MSG_TYPE_PEER_UNMAP = 0x4,
+ HTT_TLV_T2H_MSG_TYPE_RX_ADDBA = 0x5,
+ HTT_TLV_T2H_MSG_TYPE_RX_DELBA = 0x6,
+ HTT_TLV_T2H_MSG_TYPE_TX_COMPL_IND = 0x7,
+ HTT_TLV_T2H_MSG_TYPE_PKTLOG = 0x8,
+ HTT_TLV_T2H_MSG_TYPE_STATS_CONF = 0x9,
+ HTT_TLV_T2H_MSG_TYPE_RX_FRAG_IND = 0xa,
+ HTT_TLV_T2H_MSG_TYPE_SEC_IND = 0xb,
+ HTT_TLV_T2H_MSG_TYPE_RC_UPDATE_IND = 0xc, /* deprecated */
+ HTT_TLV_T2H_MSG_TYPE_TX_INSPECT_IND = 0xd,
+ HTT_TLV_T2H_MSG_TYPE_MGMT_TX_COMPL_IND = 0xe,
+ HTT_TLV_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND = 0xf,
+ HTT_TLV_T2H_MSG_TYPE_RX_PN_IND = 0x10,
+ HTT_TLV_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND = 0x11,
+ HTT_TLV_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND = 0x12,
+ /* 0x13 reserved */
+ HTT_TLV_T2H_MSG_TYPE_WDI_IPA_OP_RESPONSE = 0x14,
+ HTT_TLV_T2H_MSG_TYPE_CHAN_CHANGE = 0x15,
+ HTT_TLV_T2H_MSG_TYPE_RX_OFLD_PKT_ERR = 0x16,
+ HTT_TLV_T2H_MSG_TYPE_TEST,
+ /* keep this last */
+ HTT_TLV_T2H_NUM_MSGS
+};
+
+enum htt_10_4_t2h_msg_type {
+ HTT_10_4_T2H_MSG_TYPE_VERSION_CONF = 0x0,
+ HTT_10_4_T2H_MSG_TYPE_RX_IND = 0x1,
+ HTT_10_4_T2H_MSG_TYPE_RX_FLUSH = 0x2,
+ HTT_10_4_T2H_MSG_TYPE_PEER_MAP = 0x3,
+ HTT_10_4_T2H_MSG_TYPE_PEER_UNMAP = 0x4,
+ HTT_10_4_T2H_MSG_TYPE_RX_ADDBA = 0x5,
+ HTT_10_4_T2H_MSG_TYPE_RX_DELBA = 0x6,
+ HTT_10_4_T2H_MSG_TYPE_TX_COMPL_IND = 0x7,
+ HTT_10_4_T2H_MSG_TYPE_PKTLOG = 0x8,
+ HTT_10_4_T2H_MSG_TYPE_STATS_CONF = 0x9,
+ HTT_10_4_T2H_MSG_TYPE_RX_FRAG_IND = 0xa,
+ HTT_10_4_T2H_MSG_TYPE_SEC_IND = 0xb,
+ HTT_10_4_T2H_MSG_TYPE_RC_UPDATE_IND = 0xc,
+ HTT_10_4_T2H_MSG_TYPE_TX_INSPECT_IND = 0xd,
+ HTT_10_4_T2H_MSG_TYPE_MGMT_TX_COMPL_IND = 0xe,
+ HTT_10_4_T2H_MSG_TYPE_CHAN_CHANGE = 0xf,
+ HTT_10_4_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND = 0x10,
+ HTT_10_4_T2H_MSG_TYPE_RX_PN_IND = 0x11,
+ HTT_10_4_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND = 0x12,
+ HTT_10_4_T2H_MSG_TYPE_TEST = 0x13,
+ HTT_10_4_T2H_MSG_TYPE_EN_STATS = 0x14,
+ HTT_10_4_T2H_MSG_TYPE_AGGR_CONF = 0x15,
+ HTT_10_4_T2H_MSG_TYPE_TX_FETCH_IND = 0x16,
+ HTT_10_4_T2H_MSG_TYPE_TX_FETCH_CONFIRM = 0x17,
+ HTT_10_4_T2H_MSG_TYPE_STATS_NOUPLOAD = 0x18,
+ /* 0x19 to 0x2f are reserved */
+ HTT_10_4_T2H_MSG_TYPE_TX_MODE_SWITCH_IND = 0x30,
+ HTT_10_4_T2H_MSG_TYPE_PEER_STATS = 0x31,
+ /* keep this last */
+ HTT_10_4_T2H_NUM_MSGS
+};
+
+enum htt_t2h_msg_type {
+ HTT_T2H_MSG_TYPE_VERSION_CONF,
+ HTT_T2H_MSG_TYPE_RX_IND,
+ HTT_T2H_MSG_TYPE_RX_FLUSH,
+ HTT_T2H_MSG_TYPE_PEER_MAP,
+ HTT_T2H_MSG_TYPE_PEER_UNMAP,
+ HTT_T2H_MSG_TYPE_RX_ADDBA,
+ HTT_T2H_MSG_TYPE_RX_DELBA,
+ HTT_T2H_MSG_TYPE_TX_COMPL_IND,
+ HTT_T2H_MSG_TYPE_PKTLOG,
+ HTT_T2H_MSG_TYPE_STATS_CONF,
+ HTT_T2H_MSG_TYPE_RX_FRAG_IND,
+ HTT_T2H_MSG_TYPE_SEC_IND,
+ HTT_T2H_MSG_TYPE_RC_UPDATE_IND,
+ HTT_T2H_MSG_TYPE_TX_INSPECT_IND,
+ HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION,
+ HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND,
+ HTT_T2H_MSG_TYPE_RX_PN_IND,
+ HTT_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND,
+ HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND,
+ HTT_T2H_MSG_TYPE_WDI_IPA_OP_RESPONSE,
+ HTT_T2H_MSG_TYPE_CHAN_CHANGE,
+ HTT_T2H_MSG_TYPE_RX_OFLD_PKT_ERR,
+ HTT_T2H_MSG_TYPE_AGGR_CONF,
+ HTT_T2H_MSG_TYPE_STATS_NOUPLOAD,
+ HTT_T2H_MSG_TYPE_TEST,
+ HTT_T2H_MSG_TYPE_EN_STATS,
+ HTT_T2H_MSG_TYPE_TX_FETCH_IND,
+ HTT_T2H_MSG_TYPE_TX_FETCH_CONFIRM,
+ HTT_T2H_MSG_TYPE_TX_MODE_SWITCH_IND,
+ HTT_T2H_MSG_TYPE_PEER_STATS,
+ /* keep this last */
+ HTT_T2H_NUM_MSGS
+};
+
+/*
+ * htt_resp_hdr - header for target-to-host messages
+ *
+ * msg_type: see htt_t2h_msg_type
+ */
+struct htt_resp_hdr {
+ u8 msg_type;
+} __packed;
+
+#define HTT_RESP_HDR_MSG_TYPE_OFFSET 0
+#define HTT_RESP_HDR_MSG_TYPE_MASK 0xff
+#define HTT_RESP_HDR_MSG_TYPE_LSB 0
+
+/* htt_ver_resp - response sent for htt_ver_req */
+struct htt_ver_resp {
+ u8 minor;
+ u8 major;
+ u8 rsvd0;
+} __packed;
+
+#define HTT_MGMT_TX_CMPL_FLAG_ACK_RSSI BIT(0)
+
+#define HTT_MGMT_TX_CMPL_INFO_ACK_RSSI_MASK GENMASK(7, 0)
+
+struct htt_mgmt_tx_completion {
+ u8 rsvd0;
+ u8 rsvd1;
+ u8 flags;
+ __le32 desc_id;
+ __le32 status;
+ __le32 ppdu_id;
+ __le32 info;
+} __packed;
+
+#define HTT_RX_INDICATION_INFO0_EXT_TID_MASK (0x1F)
+#define HTT_RX_INDICATION_INFO0_EXT_TID_LSB (0)
+#define HTT_RX_INDICATION_INFO0_FLUSH_VALID (1 << 5)
+#define HTT_RX_INDICATION_INFO0_RELEASE_VALID (1 << 6)
+#define HTT_RX_INDICATION_INFO0_PPDU_DURATION BIT(7)
+
+#define HTT_RX_INDICATION_INFO1_FLUSH_START_SEQNO_MASK 0x0000003F
+#define HTT_RX_INDICATION_INFO1_FLUSH_START_SEQNO_LSB 0
+#define HTT_RX_INDICATION_INFO1_FLUSH_END_SEQNO_MASK 0x00000FC0
+#define HTT_RX_INDICATION_INFO1_FLUSH_END_SEQNO_LSB 6
+#define HTT_RX_INDICATION_INFO1_RELEASE_START_SEQNO_MASK 0x0003F000
+#define HTT_RX_INDICATION_INFO1_RELEASE_START_SEQNO_LSB 12
+#define HTT_RX_INDICATION_INFO1_RELEASE_END_SEQNO_MASK 0x00FC0000
+#define HTT_RX_INDICATION_INFO1_RELEASE_END_SEQNO_LSB 18
+#define HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES_MASK 0xFF000000
+#define HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES_LSB 24
+
+#define HTT_TX_CMPL_FLAG_DATA_RSSI BIT(0)
+#define HTT_TX_CMPL_FLAG_PPID_PRESENT BIT(1)
+#define HTT_TX_CMPL_FLAG_PA_PRESENT BIT(2)
+#define HTT_TX_CMPL_FLAG_PPDU_DURATION_PRESENT BIT(3)
+
+#define HTT_TX_DATA_RSSI_ENABLE_WCN3990 BIT(3)
+#define HTT_TX_DATA_APPEND_RETRIES BIT(0)
+#define HTT_TX_DATA_APPEND_TIMESTAMP BIT(1)
+
+struct htt_rx_indication_hdr {
+ u8 info0; /* %HTT_RX_INDICATION_INFO0_ */
+ __le16 peer_id;
+ __le32 info1; /* %HTT_RX_INDICATION_INFO1_ */
+} __packed;
+
+#define HTT_RX_INDICATION_INFO0_PHY_ERR_VALID (1 << 0)
+#define HTT_RX_INDICATION_INFO0_LEGACY_RATE_MASK (0x1E)
+#define HTT_RX_INDICATION_INFO0_LEGACY_RATE_LSB (1)
+#define HTT_RX_INDICATION_INFO0_LEGACY_RATE_CCK (1 << 5)
+#define HTT_RX_INDICATION_INFO0_END_VALID (1 << 6)
+#define HTT_RX_INDICATION_INFO0_START_VALID (1 << 7)
+
+#define HTT_RX_INDICATION_INFO1_VHT_SIG_A1_MASK 0x00FFFFFF
+#define HTT_RX_INDICATION_INFO1_VHT_SIG_A1_LSB 0
+#define HTT_RX_INDICATION_INFO1_PREAMBLE_TYPE_MASK 0xFF000000
+#define HTT_RX_INDICATION_INFO1_PREAMBLE_TYPE_LSB 24
+
+#define HTT_RX_INDICATION_INFO2_VHT_SIG_A1_MASK 0x00FFFFFF
+#define HTT_RX_INDICATION_INFO2_VHT_SIG_A1_LSB 0
+#define HTT_RX_INDICATION_INFO2_SERVICE_MASK 0xFF000000
+#define HTT_RX_INDICATION_INFO2_SERVICE_LSB 24
+
+enum htt_rx_legacy_rate {
+ HTT_RX_OFDM_48 = 0,
+ HTT_RX_OFDM_24 = 1,
+ HTT_RX_OFDM_12,
+ HTT_RX_OFDM_6,
+ HTT_RX_OFDM_54,
+ HTT_RX_OFDM_36,
+ HTT_RX_OFDM_18,
+ HTT_RX_OFDM_9,
+
+ /* long preamble */
+ HTT_RX_CCK_11_LP = 0,
+ HTT_RX_CCK_5_5_LP = 1,
+ HTT_RX_CCK_2_LP,
+ HTT_RX_CCK_1_LP,
+ /* short preamble */
+ HTT_RX_CCK_11_SP,
+ HTT_RX_CCK_5_5_SP,
+ HTT_RX_CCK_2_SP
+};
+
+enum htt_rx_legacy_rate_type {
+ HTT_RX_LEGACY_RATE_OFDM = 0,
+ HTT_RX_LEGACY_RATE_CCK
+};
+
+enum htt_rx_preamble_type {
+ HTT_RX_LEGACY = 0x4,
+ HTT_RX_HT = 0x8,
+ HTT_RX_HT_WITH_TXBF = 0x9,
+ HTT_RX_VHT = 0xC,
+ HTT_RX_VHT_WITH_TXBF = 0xD,
+};
+
+/*
+ * Fields: phy_err_valid, phy_err_code, tsf,
+ * usec_timestamp, sub_usec_timestamp
+ * ..are valid only if end_valid == 1.
+ *
+ * Fields: rssi_chains, legacy_rate_type,
+ * legacy_rate_cck, preamble_type, service,
+ * vht_sig_*
+ * ..are valid only if start_valid == 1;
+ */
+struct htt_rx_indication_ppdu {
+ u8 combined_rssi;
+ u8 sub_usec_timestamp;
+ u8 phy_err_code;
+ u8 info0; /* HTT_RX_INDICATION_INFO0_ */
+ struct {
+ u8 pri20_db;
+ u8 ext20_db;
+ u8 ext40_db;
+ u8 ext80_db;
+ } __packed rssi_chains[4];
+ __le32 tsf;
+ __le32 usec_timestamp;
+ __le32 info1; /* HTT_RX_INDICATION_INFO1_ */
+ __le32 info2; /* HTT_RX_INDICATION_INFO2_ */
+} __packed;
+
+enum htt_rx_mpdu_status {
+ HTT_RX_IND_MPDU_STATUS_UNKNOWN = 0x0,
+ HTT_RX_IND_MPDU_STATUS_OK,
+ HTT_RX_IND_MPDU_STATUS_ERR_FCS,
+ HTT_RX_IND_MPDU_STATUS_ERR_DUP,
+ HTT_RX_IND_MPDU_STATUS_ERR_REPLAY,
+ HTT_RX_IND_MPDU_STATUS_ERR_INV_PEER,
+ /* only accept EAPOL frames */
+ HTT_RX_IND_MPDU_STATUS_UNAUTH_PEER,
+ HTT_RX_IND_MPDU_STATUS_OUT_OF_SYNC,
+ /* Non-data in promiscuous mode */
+ HTT_RX_IND_MPDU_STATUS_MGMT_CTRL,
+ HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR,
+ HTT_RX_IND_MPDU_STATUS_DECRYPT_ERR,
+ HTT_RX_IND_MPDU_STATUS_MPDU_LENGTH_ERR,
+ HTT_RX_IND_MPDU_STATUS_ENCRYPT_REQUIRED_ERR,
+ HTT_RX_IND_MPDU_STATUS_PRIVACY_ERR,
+
+ /*
+ * MISC: discard for unspecified reasons.
+ * Leave this enum value last.
+ */
+ HTT_RX_IND_MPDU_STATUS_ERR_MISC = 0xFF
+};
+
+struct htt_rx_indication_mpdu_range {
+ u8 mpdu_count;
+ u8 mpdu_range_status; /* %htt_rx_mpdu_status */
+ u8 pad0;
+ u8 pad1;
+} __packed;
+
+struct htt_rx_indication_prefix {
+ __le16 fw_rx_desc_bytes;
+ u8 pad0;
+ u8 pad1;
+} __packed;
+
+struct htt_rx_indication {
+ struct htt_rx_indication_hdr hdr;
+ struct htt_rx_indication_ppdu ppdu;
+ struct htt_rx_indication_prefix prefix;
+
+ /*
+ * the following fields are both dynamically sized, so
+ * take care addressing them
+ */
+
+ /* the size of this is %fw_rx_desc_bytes */
+ struct fw_rx_desc_base fw_desc;
+
+ /*
+ * %mpdu_ranges starts after &%prefix + roundup(%fw_rx_desc_bytes, 4)
+ * and has %num_mpdu_ranges elements.
+ */
+ struct htt_rx_indication_mpdu_range mpdu_ranges[];
+} __packed;
+
+/* High latency version of the RX indication */
+struct htt_rx_indication_hl {
+ struct htt_rx_indication_hdr hdr;
+ struct htt_rx_indication_ppdu ppdu;
+ struct htt_rx_indication_prefix prefix;
+ struct fw_rx_desc_hl fw_desc;
+ struct htt_rx_indication_mpdu_range mpdu_ranges[];
+} __packed;
+
+struct htt_hl_rx_desc {
+ __le32 info;
+ __le32 pn_31_0;
+ union {
+ struct {
+ __le16 pn_47_32;
+ __le16 pn_63_48;
+ } pn16;
+ __le32 pn_63_32;
+ } u0;
+ __le32 pn_95_64;
+ __le32 pn_127_96;
+} __packed;
+
+static inline struct htt_rx_indication_mpdu_range *
+ htt_rx_ind_get_mpdu_ranges(struct htt_rx_indication *rx_ind)
+{
+ void *ptr = rx_ind;
+
+ ptr += sizeof(rx_ind->hdr)
+ + sizeof(rx_ind->ppdu)
+ + sizeof(rx_ind->prefix)
+ + roundup(__le16_to_cpu(rx_ind->prefix.fw_rx_desc_bytes), 4);
+ return ptr;
+}
+
+static inline struct htt_rx_indication_mpdu_range *
+ htt_rx_ind_get_mpdu_ranges_hl(struct htt_rx_indication_hl *rx_ind)
+{
+ void *ptr = rx_ind;
+
+ ptr += sizeof(rx_ind->hdr)
+ + sizeof(rx_ind->ppdu)
+ + sizeof(rx_ind->prefix)
+ + sizeof(rx_ind->fw_desc);
+ return ptr;
+}
+
+enum htt_rx_flush_mpdu_status {
+ HTT_RX_FLUSH_MPDU_DISCARD = 0,
+ HTT_RX_FLUSH_MPDU_REORDER = 1,
+};
+
+/*
+ * htt_rx_flush - discard or reorder given range of mpdus
+ *
+ * Note: host must check if all sequence numbers between
+ * [seq_num_start, seq_num_end-1] are valid.
+ */
+struct htt_rx_flush {
+ __le16 peer_id;
+ u8 tid;
+ u8 rsvd0;
+ u8 mpdu_status; /* %htt_rx_flush_mpdu_status */
+ u8 seq_num_start; /* the 6 LSBs of the 802.11 seq no */
+ u8 seq_num_end; /* the 6 LSBs of the 802.11 seq no */
+};
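+
+/*
+ * Illustrative sketch, not part of this patch: seq_num_start/end are 6-bit
+ * values, so the span covered by a flush wraps modulo 64 (hypothetical
+ * helper, example only).
+ */
+#if 0 /* example only */
+static inline u8 htt_example_rx_flush_span(const struct htt_rx_flush *ev)
+{
+ return (ev->seq_num_end - ev->seq_num_start) & 0x3f;
+}
+#endif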
+
+struct htt_rx_peer_map {
+ u8 vdev_id;
+ __le16 peer_id;
+ u8 addr[6];
+ u8 rsvd0;
+ u8 rsvd1;
+} __packed;
+
+struct htt_rx_peer_unmap {
+ u8 rsvd0;
+ __le16 peer_id;
+} __packed;
+
+enum htt_txrx_sec_cast_type {
+ HTT_TXRX_SEC_MCAST = 0,
+ HTT_TXRX_SEC_UCAST
+};
+
+enum htt_rx_pn_check_type {
+ HTT_RX_NON_PN_CHECK = 0,
+ HTT_RX_PN_CHECK
+};
+
+enum htt_rx_tkip_demic_type {
+ HTT_RX_NON_TKIP_MIC = 0,
+ HTT_RX_TKIP_MIC
+};
+
+enum htt_security_types {
+ HTT_SECURITY_NONE,
+ HTT_SECURITY_WEP128,
+ HTT_SECURITY_WEP104,
+ HTT_SECURITY_WEP40,
+ HTT_SECURITY_TKIP,
+ HTT_SECURITY_TKIP_NOMIC,
+ HTT_SECURITY_AES_CCMP,
+ HTT_SECURITY_WAPI,
+
+ HTT_NUM_SECURITY_TYPES /* keep this last! */
+};
+
+#define ATH10K_HTT_TXRX_PEER_SECURITY_MAX 2
+#define ATH10K_TXRX_NUM_EXT_TIDS 19
+#define ATH10K_TXRX_NON_QOS_TID 16
+
+enum htt_security_flags {
+#define HTT_SECURITY_TYPE_MASK 0x7F
+#define HTT_SECURITY_TYPE_LSB 0
+ HTT_SECURITY_IS_UNICAST = 1 << 7
+};
+
+struct htt_security_indication {
+ union {
+ /* don't use bitfields; undefined behaviour */
+ u8 flags; /* %htt_security_flags */
+ struct {
+ u8 security_type:7, /* %htt_security_types */
+ is_unicast:1;
+ } __packed;
+ } __packed;
+ __le16 peer_id;
+ u8 michael_key[8];
+ u8 wapi_rsc[16];
+} __packed;
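+
+/*
+ * Illustrative sketch, not part of this patch: per the comment above, the
+ * flags byte should be decoded with the mask/shift defines rather than the
+ * bitfield view (hypothetical helpers, example only).
+ */
+#if 0 /* example only */
+static inline enum htt_security_types
+htt_example_sec_ind_type(const struct htt_security_indication *ev)
+{
+ return (ev->flags & HTT_SECURITY_TYPE_MASK) >> HTT_SECURITY_TYPE_LSB;
+}
+
+static inline bool
+htt_example_sec_ind_is_unicast(const struct htt_security_indication *ev)
+{
+ return !!(ev->flags & HTT_SECURITY_IS_UNICAST);
+}
+#endif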
+
+#define HTT_RX_BA_INFO0_TID_MASK 0x000F
+#define HTT_RX_BA_INFO0_TID_LSB 0
+#define HTT_RX_BA_INFO0_PEER_ID_MASK 0xFFF0
+#define HTT_RX_BA_INFO0_PEER_ID_LSB 4
+
+struct htt_rx_addba {
+ u8 window_size;
+ __le16 info0; /* %HTT_RX_BA_INFO0_ */
+} __packed;
+
+struct htt_rx_delba {
+ u8 rsvd0;
+ __le16 info0; /* %HTT_RX_BA_INFO0_ */
+} __packed;
+
+enum htt_data_tx_status {
+ HTT_DATA_TX_STATUS_OK = 0,
+ HTT_DATA_TX_STATUS_DISCARD = 1,
+ HTT_DATA_TX_STATUS_NO_ACK = 2,
+ HTT_DATA_TX_STATUS_POSTPONE = 3 /* HL only */
+};
+
+enum htt_data_tx_flags {
+#define HTT_DATA_TX_STATUS_MASK 0x07
+#define HTT_DATA_TX_STATUS_LSB 0
+#define HTT_DATA_TX_TID_MASK 0x78
+#define HTT_DATA_TX_TID_LSB 3
+ HTT_DATA_TX_TID_INVALID = 1 << 7
+};
+
+#define HTT_TX_COMPL_INV_MSDU_ID 0xFFFF
+
+struct htt_append_retries {
+ __le16 msdu_id;
+ u8 tx_retries;
+ u8 flag;
+} __packed;
+
+struct htt_data_tx_completion_ext {
+ struct htt_append_retries a_retries;
+ __le32 t_stamp;
+ __le16 msdus_rssi[];
+} __packed;
+
+/*
+ * @brief target -> host TX completion indication message definition
+ *
+ * @details
+ * The following diagram shows the format of the TX completion indication sent
+ * from the target to the host
+ *
+ * |31 28|27|26|25|24|23 16| 15 |14 11|10 8|7 0|
+ * |-------------------------------------------------------------|
+ * header: |rsvd |A2|TP|A1|A0| num | t_i| tid |status| msg_type |
+ * |-------------------------------------------------------------|
+ * payload: | MSDU1 ID | MSDU0 ID |
+ * |-------------------------------------------------------------|
+ * : MSDU3 ID : MSDU2 ID :
+ * |-------------------------------------------------------------|
+ * | struct htt_tx_compl_ind_append_retries |
+ * |- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -|
+ * | struct htt_tx_compl_ind_append_tx_tstamp |
+ * |- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -|
+ * | MSDU1 ACK RSSI | MSDU0 ACK RSSI |
+ * |-------------------------------------------------------------|
+ * : MSDU3 ACK RSSI : MSDU2 ACK RSSI :
+ * |- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -|
+ * -msg_type
+ * Bits 7:0
+ * Purpose: identifies this as HTT TX completion indication
+ * -status
+ * Bits 10:8
+ * Purpose: the TX completion status of the payload fragmentation descriptors
+ * Value: could be HTT_TX_COMPL_IND_STAT_OK or HTT_TX_COMPL_IND_STAT_DISCARD
+ * -tid
+ * Bits 14:11
+ * Purpose: the tid associated with those fragmentation descriptors. Whether
+ * it is valid depends on the tid_invalid bit.
+ * Value: 0 to 15
+ * -tid_invalid
+ * Bits 15:15
+ * Purpose: this bit indicates whether the tid field is valid or not
+ * Value: 0 indicates valid, 1 indicates invalid
+ * -num
+ * Bits 23:16
+ * Purpose: the number of payloads (MSDU IDs) in this indication
+ * Value: 1 to 255
+ * -A0 = append
+ * Bits 24:24
+ * Purpose: append the struct htt_tx_compl_ind_append_retries which contains
+ * the number of tx retries for one MSDU at the end of this message
+ * Value: 0 indicates no appending, 1 indicates appending
+ * -A1 = append1
+ * Bits 25:25
+ * Purpose: Append the struct htt_tx_compl_ind_append_tx_tstamp which
+ * contains the timestamp info for each TX msdu id in payload.
+ * Value: 0 indicates no appending, 1 indicates appending
+ * -TP = MSDU tx power presence
+ * Bits 26:26
+ * Purpose: Indicate whether the TX_COMPL_IND includes a tx power report
+ * for each MSDU referenced by the TX_COMPL_IND message.
+ * The order of the per-MSDU tx power reports matches the order
+ * of the MSDU IDs.
+ * Value: 0 indicates not appending, 1 indicates appending
+ * -A2 = append2
+ * Bits 27:27
+ * Purpose: Indicate whether data ACK RSSI is appended for each MSDU in
+ * TX_COMP_IND message. The order of the per-MSDU ACK RSSI report
+ * matches the order of the MSDU IDs.
+ * The ACK RSSI values are valid when status is COMPLETE_OK (and
+ * this append2 bit is set).
+ * Value: 0 indicates not appending, 1 indicates appending
+ */
+
+struct htt_data_tx_completion {
+ union {
+ u8 flags;
+ struct {
+ u8 status:3,
+ tid:4,
+ tid_invalid:1;
+ } __packed;
+ } __packed;
+ u8 num_msdus;
+ u8 flags2; /* HTT_TX_CMPL_FLAG_DATA_RSSI */
+ __le16 msdus[]; /* variable length based on %num_msdus */
+} __packed;
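+
+/*
+ * Illustrative sketch, not part of this patch: num_msdus MSDU IDs follow
+ * the header as in the layout comment above. The HTT_TX_COMPL_INV_MSDU_ID
+ * define suggests 0xFFFF marks an invalid entry; treating such entries as
+ * skippable padding is an assumption of this sketch (hypothetical helper,
+ * example only).
+ */
+#if 0 /* example only */
+static void htt_example_walk_tx_compl(const struct htt_data_tx_completion *ev)
+{
+ int i;
+
+ for (i = 0; i < ev->num_msdus; i++) {
+ u16 msdu_id = __le16_to_cpu(ev->msdus[i]);
+
+ if (msdu_id == HTT_TX_COMPL_INV_MSDU_ID)
+ continue;
+
+ /* look up and complete the skb tracked under msdu_id here */
+ }
+}
+#endif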
+
+#define HTT_TX_PPDU_DUR_INFO0_PEER_ID_MASK GENMASK(15, 0)
+#define HTT_TX_PPDU_DUR_INFO0_TID_MASK GENMASK(20, 16)
+
+struct htt_data_tx_ppdu_dur {
+ __le32 info0; /* HTT_TX_PPDU_DUR_INFO0_ */
+ __le32 tx_duration; /* in usecs */
+} __packed;
+
+#define HTT_TX_COMPL_PPDU_DUR_INFO0_NUM_ENTRIES_MASK GENMASK(7, 0)
+
+struct htt_data_tx_compl_ppdu_dur {
+ __le32 info0; /* HTT_TX_COMPL_PPDU_DUR_INFO0_ */
+ struct htt_data_tx_ppdu_dur ppdu_dur[];
+} __packed;
+
+struct htt_tx_compl_ind_base {
+ u32 hdr;
+ u16 payload[1/*or more*/];
+} __packed;
+
+struct htt_rc_tx_done_params {
+ u32 rate_code;
+ u32 rate_code_flags;
+ u32 flags;
+ u32 num_enqued; /* 1 for non-AMPDU */
+ u32 num_retries;
+ u32 num_failed; /* for AMPDU */
+ u32 ack_rssi;
+ u32 time_stamp;
+ u32 is_probe;
+};
+
+struct htt_rc_update {
+ u8 vdev_id;
+ __le16 peer_id;
+ u8 addr[6];
+ u8 num_elems;
+ u8 rsvd0;
+ struct htt_rc_tx_done_params params[]; /* variable length %num_elems */
+} __packed;
+
+/* see htt_rx_indication for similar fields and descriptions */
+struct htt_rx_fragment_indication {
+ union {
+ u8 info0; /* %HTT_RX_FRAG_IND_INFO0_ */
+ struct {
+ u8 ext_tid:5,
+ flush_valid:1;
+ } __packed;
+ } __packed;
+ __le16 peer_id;
+ __le32 info1; /* %HTT_RX_FRAG_IND_INFO1_ */
+ __le16 fw_rx_desc_bytes;
+ __le16 rsvd0;
+
+ u8 fw_msdu_rx_desc[];
+} __packed;
+
+#define ATH10K_IEEE80211_EXTIV BIT(5)
+#define ATH10K_IEEE80211_TKIP_MICLEN 8 /* trailing MIC */
+
+#define HTT_RX_FRAG_IND_INFO0_HEADER_LEN 16
+
+#define HTT_RX_FRAG_IND_INFO0_EXT_TID_MASK 0x1F
+#define HTT_RX_FRAG_IND_INFO0_EXT_TID_LSB 0
+#define HTT_RX_FRAG_IND_INFO0_FLUSH_VALID_MASK 0x20
+#define HTT_RX_FRAG_IND_INFO0_FLUSH_VALID_LSB 5
+
+#define HTT_RX_FRAG_IND_INFO1_FLUSH_SEQ_NUM_START_MASK 0x0000003F
+#define HTT_RX_FRAG_IND_INFO1_FLUSH_SEQ_NUM_START_LSB 0
+#define HTT_RX_FRAG_IND_INFO1_FLUSH_SEQ_NUM_END_MASK 0x00000FC0
+#define HTT_RX_FRAG_IND_INFO1_FLUSH_SEQ_NUM_END_LSB 6
+
+struct htt_rx_pn_ind {
+ __le16 peer_id;
+ u8 tid;
+ u8 seqno_start;
+ u8 seqno_end;
+ u8 pn_ie_count;
+ u8 reserved;
+ u8 pn_ies[];
+} __packed;
+
+struct htt_rx_offload_msdu {
+ __le16 msdu_len;
+ __le16 peer_id;
+ u8 vdev_id;
+ u8 tid;
+ u8 fw_desc;
+ u8 payload[];
+} __packed;
+
+struct htt_rx_offload_ind {
+ u8 reserved;
+ __le16 msdu_count;
+} __packed;
+
+struct htt_rx_in_ord_msdu_desc {
+ __le32 msdu_paddr;
+ __le16 msdu_len;
+ u8 fw_desc;
+ u8 reserved;
+} __packed;
+
+struct htt_rx_in_ord_msdu_desc_ext {
+ __le64 msdu_paddr;
+ __le16 msdu_len;
+ u8 fw_desc;
+ u8 reserved;
+} __packed;
+
+struct htt_rx_in_ord_ind {
+ u8 info;
+ __le16 peer_id;
+ u8 vdev_id;
+ u8 reserved;
+ __le16 msdu_count;
+ union {
+ DECLARE_FLEX_ARRAY(struct htt_rx_in_ord_msdu_desc,
+ msdu_descs32);
+ DECLARE_FLEX_ARRAY(struct htt_rx_in_ord_msdu_desc_ext,
+ msdu_descs64);
+ } __packed;
+} __packed;
+
+#define HTT_RX_IN_ORD_IND_INFO_TID_MASK 0x0000001f
+#define HTT_RX_IN_ORD_IND_INFO_TID_LSB 0
+#define HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK 0x00000020
+#define HTT_RX_IN_ORD_IND_INFO_OFFLOAD_LSB 5
+#define HTT_RX_IN_ORD_IND_INFO_FRAG_MASK 0x00000040
+#define HTT_RX_IN_ORD_IND_INFO_FRAG_LSB 6
+
+/*
+ * target -> host test message definition
+ *
+ * The following field definitions describe the format of the test
+ * message sent from the target to the host.
+ * The message consists of a 4-octet header, followed by a variable
+ * number of 32-bit integer values, followed by a variable number
+ * of 8-bit character values.
+ *
+ * |31 16|15 8|7 0|
+ * |-----------------------------------------------------------|
+ * | num chars | num ints | msg type |
+ * |-----------------------------------------------------------|
+ * | int 0 |
+ * |-----------------------------------------------------------|
+ * | int 1 |
+ * |-----------------------------------------------------------|
+ * | ... |
+ * |-----------------------------------------------------------|
+ * | char 3 | char 2 | char 1 | char 0 |
+ * |-----------------------------------------------------------|
+ * | | | ... | char 4 |
+ * |-----------------------------------------------------------|
+ * - MSG_TYPE
+ * Bits 7:0
+ * Purpose: identifies this as a test message
+ * Value: HTT_MSG_TYPE_TEST
+ * - NUM_INTS
+ * Bits 15:8
+ * Purpose: indicate how many 32-bit integers follow the message header
+ * - NUM_CHARS
+ * Bits 31:16
+ * Purpose: indicate how many 8-bit characters follow the series of integers
+ */
+struct htt_rx_test {
+ u8 num_ints;
+ __le16 num_chars;
+
+ /* payload consists of 2 lists:
+ * a) num_ints * sizeof(__le32)
+ * b) num_chars * sizeof(u8) aligned to 4 bytes
+ */
+ u8 payload[];
+} __packed;
+
+static inline __le32 *htt_rx_test_get_ints(struct htt_rx_test *rx_test)
+{
+ return (__le32 *)rx_test->payload;
+}
+
+static inline u8 *htt_rx_test_get_chars(struct htt_rx_test *rx_test)
+{
+ return rx_test->payload + (rx_test->num_ints * sizeof(__le32));
+}
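+
+/*
+ * Illustrative sketch, not part of this patch: walking a test message with
+ * the accessors above (hypothetical helper, example only).
+ */
+#if 0 /* example only */
+static void htt_example_dump_rx_test(struct htt_rx_test *rx_test)
+{
+ __le32 *ints = htt_rx_test_get_ints(rx_test);
+ u8 *chars = htt_rx_test_get_chars(rx_test);
+ int i;
+
+ for (i = 0; i < rx_test->num_ints; i++)
+ pr_debug("int %d: %u\n", i, __le32_to_cpu(ints[i]));
+
+ for (i = 0; i < __le16_to_cpu(rx_test->num_chars); i++)
+ pr_debug("char %d: %c\n", i, chars[i]);
+}
+#endif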
+
+/*
+ * target -> host packet log message
+ *
+ * The following field definitions describe the format of the packet log
+ * message sent from the target to the host.
+ * The message consists of a 4-octet header, followed by a variable number
+ * of 32-bit character values.
+ *
+ * |31 24|23 16|15 8|7 0|
+ * |-----------------------------------------------------------|
+ * | | | | msg type |
+ * |-----------------------------------------------------------|
+ * | payload |
+ * |-----------------------------------------------------------|
+ * - MSG_TYPE
+ * Bits 7:0
+ * Purpose: identifies this as a packet log message
+ * Value: HTT_MSG_TYPE_PACKETLOG
+ */
+struct htt_pktlog_msg {
+ u8 pad[3];
+ u8 payload[];
+} __packed;
+
+struct htt_dbg_stats_rx_reorder_stats {
+ /* Non QoS MPDUs received */
+ __le32 deliver_non_qos;
+
+ /* MPDUs received in-order */
+ __le32 deliver_in_order;
+
+ /* Flush due to reorder timer expired */
+ __le32 deliver_flush_timeout;
+
+ /* Flush due to move out of window */
+ __le32 deliver_flush_oow;
+
+ /* Flush due to DELBA */
+ __le32 deliver_flush_delba;
+
+ /* MPDUs dropped due to FCS error */
+ __le32 fcs_error;
+
+ /* MPDUs dropped due to monitor mode non-data packet */
+ __le32 mgmt_ctrl;
+
+ /* MPDUs dropped due to invalid peer */
+ __le32 invalid_peer;
+
+ /* MPDUs dropped due to duplication (non aggregation) */
+ __le32 dup_non_aggr;
+
+ /* MPDUs dropped due to processed before */
+ __le32 dup_past;
+
+ /* MPDUs dropped due to duplicate in reorder queue */
+ __le32 dup_in_reorder;
+
+ /* Reorder timeout happened */
+ __le32 reorder_timeout;
+
+ /* invalid bar ssn */
+ __le32 invalid_bar_ssn;
+
+ /* reorder reset due to bar ssn */
+ __le32 ssn_reset;
+};
+
+struct htt_dbg_stats_wal_tx_stats {
+ /* Num HTT cookies queued to dispatch list */
+ __le32 comp_queued;
+
+ /* Num HTT cookies dispatched */
+ __le32 comp_delivered;
+
+ /* Num MSDU queued to WAL */
+ __le32 msdu_enqued;
+
+ /* Num MPDU queue to WAL */
+ __le32 mpdu_enqued;
+
+ /* Num MSDUs dropped by WMM limit */
+ __le32 wmm_drop;
+
+ /* Num Local frames queued */
+ __le32 local_enqued;
+
+ /* Num Local frames done */
+ __le32 local_freed;
+
+ /* Num queued to HW */
+ __le32 hw_queued;
+
+ /* Num PPDU reaped from HW */
+ __le32 hw_reaped;
+
+ /* Num underruns */
+ __le32 underrun;
+
+ /* Num PPDUs cleaned up in TX abort */
+ __le32 tx_abort;
+
+ /* Num MPDUs requeued by SW */
+ __le32 mpdus_requeued;
+
+ /* excessive retries */
+ __le32 tx_ko;
+
+ /* data hw rate code */
+ __le32 data_rc;
+
+ /* Scheduler self triggers */
+ __le32 self_triggers;
+
+ /* frames dropped due to excessive sw retries */
+ __le32 sw_retry_failure;
+
+ /* illegal rate phy errors */
+ __le32 illgl_rate_phy_err;
+
+ /* wal pdev continuous xretry */
+ __le32 pdev_cont_xretry;
+
+ /* wal pdev continuous xretry */
+ __le32 pdev_tx_timeout;
+
+ /* wal pdev resets */
+ __le32 pdev_resets;
+
+ __le32 phy_underrun;
+
+ /* MPDU is more than txop limit */
+ __le32 txop_ovf;
+} __packed;
+
+struct htt_dbg_stats_wal_rx_stats {
+ /* Counts any change in ring routing mid-ppdu */
+ __le32 mid_ppdu_route_change;
+
+ /* Total number of statuses processed */
+ __le32 status_rcvd;
+
+ /* Extra frags on rings 0-3 */
+ __le32 r0_frags;
+ __le32 r1_frags;
+ __le32 r2_frags;
+ __le32 r3_frags;
+
+ /* MSDUs / MPDUs delivered to HTT */
+ __le32 htt_msdus;
+ __le32 htt_mpdus;
+
+ /* MSDUs / MPDUs delivered to local stack */
+ __le32 loc_msdus;
+ __le32 loc_mpdus;
+
+ /* AMSDUs that have more MSDUs than the status ring size */
+ __le32 oversize_amsdu;
+
+ /* Number of PHY errors */
+ __le32 phy_errs;
+
+ /* Number of PHY errors drops */
+ __le32 phy_err_drop;
+
+ /* Number of mpdu errors - FCS, MIC, ENC etc. */
+ __le32 mpdu_errs;
+} __packed;
+
+struct htt_dbg_stats_wal_peer_stats {
+ __le32 dummy; /* REMOVE THIS ONCE REAL PEER STAT COUNTERS ARE ADDED */
+} __packed;
+
+struct htt_dbg_stats_wal_pdev_txrx {
+ struct htt_dbg_stats_wal_tx_stats tx_stats;
+ struct htt_dbg_stats_wal_rx_stats rx_stats;
+ struct htt_dbg_stats_wal_peer_stats peer_stats;
+} __packed;
+
+struct htt_dbg_stats_rx_rate_info {
+ __le32 mcs[10];
+ __le32 sgi[10];
+ __le32 nss[4];
+ __le32 stbc[10];
+ __le32 bw[3];
+ __le32 pream[6];
+ __le32 ldpc;
+ __le32 txbf;
+};
+
+/*
+ * htt_dbg_stats_status -
+ * present - The requested stats have been delivered in full.
+ * This indicates that either the stats information was contained
+ * in its entirety within this message, or else this message
+ * completes the delivery of the requested stats info that was
+ * partially delivered through earlier STATS_CONF messages.
+ * partial - The requested stats have been delivered in part.
+ * One or more subsequent STATS_CONF messages with the same
+ * cookie value will be sent to deliver the remainder of the
+ * information.
+ * error - The requested stats could not be delivered, for example due
+ * to a shortage of memory to construct a message holding the
+ * requested stats.
+ * invalid - The requested stat type is either not recognized, or the
+ * target is configured to not gather the stats type in question.
+ * - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+ * series_done - This special value indicates that no further stats info
+ * elements are present within a series of stats info elems
+ * (within a stats upload confirmation message).
+ */
+enum htt_dbg_stats_status {
+ HTT_DBG_STATS_STATUS_PRESENT = 0,
+ HTT_DBG_STATS_STATUS_PARTIAL = 1,
+ HTT_DBG_STATS_STATUS_ERROR = 2,
+ HTT_DBG_STATS_STATUS_INVALID = 3,
+ HTT_DBG_STATS_STATUS_SERIES_DONE = 7
+};
+
+/*
+ * host -> target FRAG DESCRIPTOR/MSDU_EXT DESC bank
+ *
+ * The following field definitions describe the format of the HTT host
+ * to target frag_desc/msdu_ext bank configuration message.
+ * The message contains the base address and the min and max id of the
+ * MSDU_EXT/FRAG_DESC that will be used by the HTT to map MSDU DESC and
+ * MSDU_EXT/FRAG_DESC.
+ * HTT will use the id in the HTT descriptor instead of sending the
+ * frag_desc_ptr. For QCA988X HW the firmware will use the
+ * fragment_desc_ptr, but in WIFI2.0 the hardware does the
+ * mapping/translation.
+ *
+ * The total number of banks that can be configured is 16.
+ *
+ * This should be called before any TX has been initiated by the HTT
+ *
+ * |31 16|15 8|7 5|4 0|
+ * |------------------------------------------------------------|
+ * | DESC_SIZE | NUM_BANKS | RES |SWP|pdev| msg type |
+ * |------------------------------------------------------------|
+ * | BANK0_BASE_ADDRESS |
+ * |------------------------------------------------------------|
+ * | ... |
+ * |------------------------------------------------------------|
+ * | BANK15_BASE_ADDRESS |
+ * |------------------------------------------------------------|
+ * | BANK0_MAX_ID | BANK0_MIN_ID |
+ * |------------------------------------------------------------|
+ * | ... |
+ * |------------------------------------------------------------|
+ * | BANK15_MAX_ID | BANK15_MIN_ID |
+ * |------------------------------------------------------------|
+ * Header fields:
+ * - MSG_TYPE
+ * Bits 7:0
+ * Value: 0x6
+ * - BANKx_BASE_ADDRESS
+ * Bits 31:0
+ * Purpose: Provide a mechanism to specify the base address of the MSDU_EXT
+ * bank physical/bus address.
+ * - BANKx_MIN_ID
+ * Bits 15:0
+ * Purpose: Provide a mechanism to specify the min index that needs to
+ * be mapped.
+ * - BANKx_MAX_ID
+ * Bits 31:16
+ * Purpose: Provide a mechanism to specify the max index that needs to
+ * be mapped.
+ *
+ */
+struct htt_frag_desc_bank_id {
+ __le16 bank_min_id;
+ __le16 bank_max_id;
+} __packed;
+
+/* the real limit is 16 but that wouldn't fit in the max htt message size
+ * so we use a conservatively safe value for now
+ */
+#define HTT_FRAG_DESC_BANK_MAX 4
+
+#define HTT_FRAG_DESC_BANK_CFG_INFO_PDEV_ID_MASK 0x03
+#define HTT_FRAG_DESC_BANK_CFG_INFO_PDEV_ID_LSB 0
+#define HTT_FRAG_DESC_BANK_CFG_INFO_SWAP BIT(2)
+#define HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_VALID BIT(3)
+#define HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_DEPTH_TYPE_MASK BIT(4)
+#define HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_DEPTH_TYPE_LSB 4
+
+enum htt_q_depth_type {
+ HTT_Q_DEPTH_TYPE_BYTES = 0,
+ HTT_Q_DEPTH_TYPE_MSDUS = 1,
+};
+
+#define HTT_TX_Q_STATE_NUM_PEERS (TARGET_10_4_NUM_QCACHE_PEERS_MAX + \
+ TARGET_10_4_NUM_VDEVS)
+#define HTT_TX_Q_STATE_NUM_TIDS 8
+#define HTT_TX_Q_STATE_ENTRY_SIZE 1
+#define HTT_TX_Q_STATE_ENTRY_MULTIPLIER 0
+
+/**
+ * struct htt_q_state_conf - part of htt_frag_desc_bank_cfg for host q state config
+ *
+ * Defines host q state format and behavior. See htt_q_state.
+ *
+ * @paddr: Queue physical address
+ * @num_peers: Number of supported peers
+ * @num_tids: Number of supported TIDs
+ * @record_size: Defines the size of each host q entry in bytes. In practice,
+ * however, firmware (at least 10.4.3-00191) ignores this host
+ * configuration value and uses a hardcoded value of 1.
+ * @record_multiplier: This is valid only when q depth type is MSDUs. It
+ * defines the exponent for the power of 2 multiplication.
+ * @pad: struct padding for 32-bit alignment
+ */
+struct htt_q_state_conf {
+ __le32 paddr;
+ __le16 num_peers;
+ __le16 num_tids;
+ u8 record_size;
+ u8 record_multiplier;
+ u8 pad[2];
+} __packed;
+
+struct htt_frag_desc_bank_cfg32 {
+ u8 info; /* HTT_FRAG_DESC_BANK_CFG_INFO_ */
+ u8 num_banks;
+ u8 desc_size;
+ __le32 bank_base_addrs[HTT_FRAG_DESC_BANK_MAX];
+ struct htt_frag_desc_bank_id bank_id[HTT_FRAG_DESC_BANK_MAX];
+ struct htt_q_state_conf q_state;
+} __packed;
+
+struct htt_frag_desc_bank_cfg64 {
+ u8 info; /* HTT_FRAG_DESC_BANK_CFG_INFO_ */
+ u8 num_banks;
+ u8 desc_size;
+ __le64 bank_base_addrs[HTT_FRAG_DESC_BANK_MAX];
+ struct htt_frag_desc_bank_id bank_id[HTT_FRAG_DESC_BANK_MAX];
+ struct htt_q_state_conf q_state;
+} __packed;
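+
+/* Illustrative sketch, not part of the original patch: a hypothetical helper
+ * showing how the info byte of a frag desc bank config message could be
+ * packed using the HTT_FRAG_DESC_BANK_CFG_INFO_* masks defined above.
+ */
+static inline u8 ath10k_htt_frag_desc_bank_cfg_info(u8 pdev_id, bool swap,
+ bool q_state_valid,
+ enum htt_q_depth_type type)
+{
+ u8 info = 0;
+
+ info |= (pdev_id << HTT_FRAG_DESC_BANK_CFG_INFO_PDEV_ID_LSB) &
+ HTT_FRAG_DESC_BANK_CFG_INFO_PDEV_ID_MASK;
+ if (swap)
+ info |= HTT_FRAG_DESC_BANK_CFG_INFO_SWAP;
+ if (q_state_valid)
+ info |= HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_VALID;
+ info |= (type << HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_DEPTH_TYPE_LSB) &
+ HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_DEPTH_TYPE_MASK;
+
+ return info;
+}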
+
+#define HTT_TX_Q_STATE_ENTRY_COEFFICIENT 128
+#define HTT_TX_Q_STATE_ENTRY_FACTOR_MASK 0x3f
+#define HTT_TX_Q_STATE_ENTRY_FACTOR_LSB 0
+#define HTT_TX_Q_STATE_ENTRY_EXP_MASK 0xc0
+#define HTT_TX_Q_STATE_ENTRY_EXP_LSB 6
+
+/**
+ * struct htt_q_state - shared between host and firmware via DMA
+ *
+ * This structure is used for the host to expose its software queue state to
+ * firmware so that its rate control can schedule fetch requests for optimized
+ * performance. This is most notably used for MU-MIMO aggregation when multiple
+ * MU clients are connected.
+ *
+ * @count: Each element defines the host queue depth. When q depth type was
+ * configured as HTT_Q_DEPTH_TYPE_BYTES then each entry is defined as:
+ * FACTOR * 128 * 8^EXP (see HTT_TX_Q_STATE_ENTRY_FACTOR_MASK and
+ * HTT_TX_Q_STATE_ENTRY_EXP_MASK). When q depth type was configured as
+ * HTT_Q_DEPTH_TYPE_MSDUS the number of packets is scaled by 2 **
+ * record_multiplier (see htt_q_state_conf).
+ * @map: Used by firmware to quickly check which host queues are not empty.
+ * It is a bitmap with one bit per host queue; a set bit means the
+ * corresponding queue is non-empty.
+ * @seq: Used by firmware to quickly check if the host queues were updated
+ * since it last checked.
+ *
+ * FIXME: Is the q_state map[] size calculation really correct?
+ */
+struct htt_q_state {
+ u8 count[HTT_TX_Q_STATE_NUM_TIDS][HTT_TX_Q_STATE_NUM_PEERS];
+ u32 map[HTT_TX_Q_STATE_NUM_TIDS][(HTT_TX_Q_STATE_NUM_PEERS + 31) / 32];
+ __le32 seq;
+} __packed;
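+
+/* Illustrative sketch, not part of the original patch: a hypothetical helper
+ * decoding one count[] entry into a byte count when the q depth type is
+ * HTT_Q_DEPTH_TYPE_BYTES, following the FACTOR * 128 * 8^EXP encoding
+ * described above.
+ */
+static inline u32 ath10k_htt_q_state_count_to_bytes(u8 count)
+{
+ u32 factor = (count & HTT_TX_Q_STATE_ENTRY_FACTOR_MASK) >>
+ HTT_TX_Q_STATE_ENTRY_FACTOR_LSB;
+ u32 exp = (count & HTT_TX_Q_STATE_ENTRY_EXP_MASK) >>
+ HTT_TX_Q_STATE_ENTRY_EXP_LSB;
+
+ /* 8^EXP == 1 << (3 * EXP) */
+ return (factor * HTT_TX_Q_STATE_ENTRY_COEFFICIENT) << (3 * exp);
+}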
+
+#define HTT_TX_FETCH_RECORD_INFO_PEER_ID_MASK 0x0fff
+#define HTT_TX_FETCH_RECORD_INFO_PEER_ID_LSB 0
+#define HTT_TX_FETCH_RECORD_INFO_TID_MASK 0xf000
+#define HTT_TX_FETCH_RECORD_INFO_TID_LSB 12
+
+struct htt_tx_fetch_record {
+ __le16 info; /* HTT_TX_FETCH_RECORD_INFO_ */
+ __le16 num_msdus;
+ __le32 num_bytes;
+} __packed;
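+
+/* Illustrative sketch, not part of the original patch: hypothetical accessors
+ * extracting the peer id and TID from a fetch record's info word using the
+ * masks above.
+ */
+static inline u16
+ath10k_htt_tx_fetch_record_peer_id(const struct htt_tx_fetch_record *rec)
+{
+ return (le16_to_cpu(rec->info) & HTT_TX_FETCH_RECORD_INFO_PEER_ID_MASK) >>
+ HTT_TX_FETCH_RECORD_INFO_PEER_ID_LSB;
+}
+
+static inline u8
+ath10k_htt_tx_fetch_record_tid(const struct htt_tx_fetch_record *rec)
+{
+ return (le16_to_cpu(rec->info) & HTT_TX_FETCH_RECORD_INFO_TID_MASK) >>
+ HTT_TX_FETCH_RECORD_INFO_TID_LSB;
+}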
+
+struct htt_tx_fetch_ind {
+ u8 pad0;
+ __le16 fetch_seq_num;
+ __le32 token;
+ __le16 num_resp_ids;
+ __le16 num_records;
+ union {
+ /* ath10k_htt_get_tx_fetch_ind_resp_ids() */
+ DECLARE_FLEX_ARRAY(__le32, resp_ids);
+ DECLARE_FLEX_ARRAY(struct htt_tx_fetch_record, records);
+ } __packed;
+} __packed;
+
+static inline void *
+ath10k_htt_get_tx_fetch_ind_resp_ids(struct htt_tx_fetch_ind *ind)
+{
+ return (void *)&ind->records[le16_to_cpu(ind->num_records)];
+}
+
+struct htt_tx_fetch_resp {
+ u8 pad0;
+ __le16 resp_id;
+ __le16 fetch_seq_num;
+ __le16 num_records;
+ __le32 token;
+ struct htt_tx_fetch_record records[];
+} __packed;
+
+struct htt_tx_fetch_confirm {
+ u8 pad0;
+ __le16 num_resp_ids;
+ __le32 resp_ids[];
+} __packed;
+
+enum htt_tx_mode_switch_mode {
+ HTT_TX_MODE_SWITCH_PUSH = 0,
+ HTT_TX_MODE_SWITCH_PUSH_PULL = 1,
+};
+
+#define HTT_TX_MODE_SWITCH_IND_INFO0_ENABLE BIT(0)
+#define HTT_TX_MODE_SWITCH_IND_INFO0_NUM_RECORDS_MASK 0xfffe
+#define HTT_TX_MODE_SWITCH_IND_INFO0_NUM_RECORDS_LSB 1
+
+#define HTT_TX_MODE_SWITCH_IND_INFO1_MODE_MASK 0x0003
+#define HTT_TX_MODE_SWITCH_IND_INFO1_MODE_LSB 0
+#define HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD_MASK 0xfffc
+#define HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD_LSB 2
+
+#define HTT_TX_MODE_SWITCH_RECORD_INFO0_PEER_ID_MASK 0x0fff
+#define HTT_TX_MODE_SWITCH_RECORD_INFO0_PEER_ID_LSB 0
+#define HTT_TX_MODE_SWITCH_RECORD_INFO0_TID_MASK 0xf000
+#define HTT_TX_MODE_SWITCH_RECORD_INFO0_TID_LSB 12
+
+struct htt_tx_mode_switch_record {
+ __le16 info0; /* HTT_TX_MODE_SWITCH_RECORD_INFO0_ */
+ __le16 num_max_msdus;
+} __packed;
+
+struct htt_tx_mode_switch_ind {
+ u8 pad0;
+ __le16 info0; /* HTT_TX_MODE_SWITCH_IND_INFO0_ */
+ __le16 info1; /* HTT_TX_MODE_SWITCH_IND_INFO1_ */
+ u8 pad1[2];
+ struct htt_tx_mode_switch_record records[];
+} __packed;
+
+struct htt_channel_change {
+ u8 pad[3];
+ __le32 freq;
+ __le32 center_freq1;
+ __le32 center_freq2;
+ __le32 phymode;
+} __packed;
+
+struct htt_per_peer_tx_stats_ind {
+ __le32 succ_bytes;
+ __le32 retry_bytes;
+ __le32 failed_bytes;
+ u8 ratecode;
+ u8 flags;
+ __le16 peer_id;
+ __le16 succ_pkts;
+ __le16 retry_pkts;
+ __le16 failed_pkts;
+ __le16 tx_duration;
+ __le32 reserved1;
+ __le32 reserved2;
+} __packed;
+
+struct htt_peer_tx_stats {
+ u8 num_ppdu;
+ u8 ppdu_len;
+ u8 version;
+ u8 payload[];
+} __packed;
+
+#define ATH10K_10_2_TX_STATS_OFFSET 136
+#define PEER_STATS_FOR_NO_OF_PPDUS 4
+
+struct ath10k_10_2_peer_tx_stats {
+ u8 ratecode[PEER_STATS_FOR_NO_OF_PPDUS];
+ u8 success_pkts[PEER_STATS_FOR_NO_OF_PPDUS];
+ __le16 success_bytes[PEER_STATS_FOR_NO_OF_PPDUS];
+ u8 retry_pkts[PEER_STATS_FOR_NO_OF_PPDUS];
+ __le16 retry_bytes[PEER_STATS_FOR_NO_OF_PPDUS];
+ u8 failed_pkts[PEER_STATS_FOR_NO_OF_PPDUS];
+ __le16 failed_bytes[PEER_STATS_FOR_NO_OF_PPDUS];
+ u8 flags[PEER_STATS_FOR_NO_OF_PPDUS];
+ __le32 tx_duration;
+ u8 tx_ppdu_cnt;
+ u8 peer_id;
+} __packed;
+
+union htt_rx_pn_t {
+ /* WEP: 24-bit PN */
+ u32 pn24;
+
+ /* TKIP or CCMP: 48-bit PN */
+ u64 pn48;
+
+ /* WAPI: 128-bit PN */
+ u64 pn128[2];
+};
+
+struct htt_cmd {
+ struct htt_cmd_hdr hdr;
+ union {
+ struct htt_ver_req ver_req;
+ struct htt_mgmt_tx_desc mgmt_tx;
+ struct htt_data_tx_desc data_tx;
+ struct htt_rx_ring_setup_32 rx_setup_32;
+ struct htt_rx_ring_setup_64 rx_setup_64;
+ struct htt_stats_req stats_req;
+ struct htt_oob_sync_req oob_sync_req;
+ struct htt_aggr_conf aggr_conf;
+ struct htt_aggr_conf_v2 aggr_conf_v2;
+ struct htt_frag_desc_bank_cfg32 frag_desc_bank_cfg32;
+ struct htt_frag_desc_bank_cfg64 frag_desc_bank_cfg64;
+ struct htt_tx_fetch_resp tx_fetch_resp;
+ };
+} __packed;
+
+struct htt_resp {
+ struct htt_resp_hdr hdr;
+ union {
+ struct htt_ver_resp ver_resp;
+ struct htt_mgmt_tx_completion mgmt_tx_completion;
+ struct htt_data_tx_completion data_tx_completion;
+ struct htt_rx_indication rx_ind;
+ struct htt_rx_indication_hl rx_ind_hl;
+ struct htt_rx_fragment_indication rx_frag_ind;
+ struct htt_rx_peer_map peer_map;
+ struct htt_rx_peer_unmap peer_unmap;
+ struct htt_rx_flush rx_flush;
+ struct htt_rx_addba rx_addba;
+ struct htt_rx_delba rx_delba;
+ struct htt_security_indication security_indication;
+ struct htt_rc_update rc_update;
+ struct htt_rx_test rx_test;
+ struct htt_pktlog_msg pktlog_msg;
+ struct htt_rx_pn_ind rx_pn_ind;
+ struct htt_rx_offload_ind rx_offload_ind;
+ struct htt_rx_in_ord_ind rx_in_ord_ind;
+ struct htt_tx_fetch_ind tx_fetch_ind;
+ struct htt_tx_fetch_confirm tx_fetch_confirm;
+ struct htt_tx_mode_switch_ind tx_mode_switch_ind;
+ struct htt_channel_change chan_change;
+ struct htt_peer_tx_stats peer_tx_stats;
+ } __packed;
+} __packed;
+
+/*** host side structures follow ***/
+
+struct htt_tx_done {
+ u16 msdu_id;
+ u16 status;
+ u8 ack_rssi;
+};
+
+enum htt_tx_compl_state {
+ HTT_TX_COMPL_STATE_NONE,
+ HTT_TX_COMPL_STATE_ACK,
+ HTT_TX_COMPL_STATE_NOACK,
+ HTT_TX_COMPL_STATE_DISCARD,
+};
+
+struct htt_peer_map_event {
+ u8 vdev_id;
+ u16 peer_id;
+ u8 addr[ETH_ALEN];
+};
+
+struct htt_peer_unmap_event {
+ u16 peer_id;
+};
+
+struct ath10k_htt_txbuf_32 {
+ struct htt_data_tx_desc_frag frags[2];
+ struct ath10k_htc_hdr htc_hdr;
+ struct htt_cmd_hdr cmd_hdr;
+ struct htt_data_tx_desc cmd_tx;
+} __packed __aligned(4);
+
+struct ath10k_htt_txbuf_64 {
+ struct htt_data_tx_desc_frag frags[2];
+ struct ath10k_htc_hdr htc_hdr;
+ struct htt_cmd_hdr cmd_hdr;
+ struct htt_data_tx_desc_64 cmd_tx;
+} __packed __aligned(4);
+
+struct ath10k_htt {
+ struct ath10k *ar;
+ enum ath10k_htc_ep_id eid;
+
+ struct sk_buff_head rx_indication_head;
+
+ u8 target_version_major;
+ u8 target_version_minor;
+ struct completion target_version_received;
+ u8 max_num_amsdu;
+ u8 max_num_ampdu;
+
+ const enum htt_t2h_msg_type *t2h_msg_types;
+ u32 t2h_msg_types_max;
+
+ struct {
+ /*
+ * Ring of network buffer objects - This ring is
+ * used exclusively by the host SW. This ring
+ * mirrors the dev_addrs_ring that is shared
+ * between the host SW and the MAC HW. The host SW
+ * uses this netbufs ring to locate the network
+ * buffer objects whose data buffers the HW has
+ * filled.
+ */
+ struct sk_buff **netbufs_ring;
+
+ /* This is used only with firmware supporting IN_ORD_IND.
+ *
+ * With Full Rx Reorder the HTT Rx Ring is more of a temporary
+ * buffer ring from which buffer addresses are copied by the
+ * firmware to MAC Rx ring. Firmware then delivers IN_ORD_IND
+ * pointing to specific (re-ordered) buffers.
+ *
+ * FIXME: With kernel generic hashing functions there are a lot
+ * of hash collisions for sk_buffs.
+ */
+ bool in_ord_rx;
+ DECLARE_HASHTABLE(skb_table, 4);
+
+ /*
+ * Ring of buffer addresses -
+ * This ring holds the "physical" device address of the
+ * rx buffers the host SW provides for the MAC HW to
+ * fill.
+ */
+ union {
+ __le64 *paddrs_ring_64;
+ __le32 *paddrs_ring_32;
+ };
+
+ /*
+ * Base address of ring, as a "physical" device address
+ * rather than a CPU address.
+ */
+ dma_addr_t base_paddr;
+
+ /* how many elems in the ring (power of 2) */
+ int size;
+
+ /* size - 1 */
+ unsigned int size_mask;
+
+ /* how many rx buffers to keep in the ring */
+ int fill_level;
+
+ /* how many rx buffers (full+empty) are in the ring */
+ int fill_cnt;
+
+ /*
+ * alloc_idx - where HTT SW has deposited empty buffers
+ * This is allocated in consistent mem, so that the FW can
+ * read this variable, and program the HW's FW_IDX reg with
+ * the value of this shadow register.
+ */
+ struct {
+ __le32 *vaddr;
+ dma_addr_t paddr;
+ } alloc_idx;
+
+ /* where HTT SW has processed bufs filled by rx MAC DMA */
+ struct {
+ unsigned int msdu_payld;
+ } sw_rd_idx;
+
+ /*
+ * refill_retry_timer - timer triggered when the ring is
+ * not refilled to the level expected
+ */
+ struct timer_list refill_retry_timer;
+
+ /* Protects access to all rx ring buffer state variables */
+ spinlock_t lock;
+ } rx_ring;
+
+ unsigned int prefetch_len;
+
+ /* Protects access to pending_tx, num_pending_tx */
+ spinlock_t tx_lock;
+ int max_num_pending_tx;
+ int num_pending_tx;
+ int num_pending_mgmt_tx;
+ struct idr pending_tx;
+ wait_queue_head_t empty_tx_wq;
+
+ /* FIFO for storing tx done status {ack, no-ack, discard} and msdu id */
+ DECLARE_KFIFO_PTR(txdone_fifo, struct htt_tx_done);
+
+ /* set if host-fw communication goes haywire
+ * used to avoid further failures
+ */
+ bool rx_confused;
+ atomic_t num_mpdus_ready;
+
+ /* This is used to group tx/rx completions separately and process them
+ * in batches to reduce cache stalls
+ */
+ struct sk_buff_head rx_msdus_q;
+ struct sk_buff_head rx_in_ord_compl_q;
+ struct sk_buff_head tx_fetch_ind_q;
+
+ /* rx_status template */
+ struct ieee80211_rx_status rx_status;
+
+ struct {
+ dma_addr_t paddr;
+ union {
+ struct htt_msdu_ext_desc *vaddr_desc_32;
+ struct htt_msdu_ext_desc_64 *vaddr_desc_64;
+ };
+ size_t size;
+ } frag_desc;
+
+ struct {
+ dma_addr_t paddr;
+ union {
+ struct ath10k_htt_txbuf_32 *vaddr_txbuff_32;
+ struct ath10k_htt_txbuf_64 *vaddr_txbuff_64;
+ };
+ size_t size;
+ } txbuf;
+
+ struct {
+ bool enabled;
+ struct htt_q_state *vaddr;
+ dma_addr_t paddr;
+ u16 num_push_allowed;
+ u16 num_peers;
+ u16 num_tids;
+ enum htt_tx_mode_switch_mode mode;
+ enum htt_q_depth_type type;
+ } tx_q_state;
+
+ bool tx_mem_allocated;
+ const struct ath10k_htt_tx_ops *tx_ops;
+ const struct ath10k_htt_rx_ops *rx_ops;
+ bool disable_tx_comp;
+ bool bundle_tx;
+ struct sk_buff_head tx_req_head;
+ struct sk_buff_head tx_complete_head;
+};
+
+struct ath10k_htt_tx_ops {
+ int (*htt_send_rx_ring_cfg)(struct ath10k_htt *htt);
+ int (*htt_send_frag_desc_bank_cfg)(struct ath10k_htt *htt);
+ int (*htt_alloc_frag_desc)(struct ath10k_htt *htt);
+ void (*htt_free_frag_desc)(struct ath10k_htt *htt);
+ int (*htt_tx)(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode,
+ struct sk_buff *msdu);
+ int (*htt_alloc_txbuff)(struct ath10k_htt *htt);
+ void (*htt_free_txbuff)(struct ath10k_htt *htt);
+ int (*htt_h2t_aggr_cfg_msg)(struct ath10k_htt *htt,
+ u8 max_subfrms_ampdu,
+ u8 max_subfrms_amsdu);
+ void (*htt_flush_tx)(struct ath10k_htt *htt);
+};
+
+static inline int ath10k_htt_send_rx_ring_cfg(struct ath10k_htt *htt)
+{
+ if (!htt->tx_ops->htt_send_rx_ring_cfg)
+ return -EOPNOTSUPP;
+
+ return htt->tx_ops->htt_send_rx_ring_cfg(htt);
+}
+
+static inline int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt)
+{
+ if (!htt->tx_ops->htt_send_frag_desc_bank_cfg)
+ return -EOPNOTSUPP;
+
+ return htt->tx_ops->htt_send_frag_desc_bank_cfg(htt);
+}
+
+static inline int ath10k_htt_alloc_frag_desc(struct ath10k_htt *htt)
+{
+ if (!htt->tx_ops->htt_alloc_frag_desc)
+ return -EOPNOTSUPP;
+
+ return htt->tx_ops->htt_alloc_frag_desc(htt);
+}
+
+static inline void ath10k_htt_free_frag_desc(struct ath10k_htt *htt)
+{
+ if (htt->tx_ops->htt_free_frag_desc)
+ htt->tx_ops->htt_free_frag_desc(htt);
+}
+
+static inline int ath10k_htt_tx(struct ath10k_htt *htt,
+ enum ath10k_hw_txrx_mode txmode,
+ struct sk_buff *msdu)
+{
+ return htt->tx_ops->htt_tx(htt, txmode, msdu);
+}
+
+static inline void ath10k_htt_flush_tx(struct ath10k_htt *htt)
+{
+ if (htt->tx_ops->htt_flush_tx)
+ htt->tx_ops->htt_flush_tx(htt);
+}
+
+static inline int ath10k_htt_alloc_txbuff(struct ath10k_htt *htt)
+{
+ if (!htt->tx_ops->htt_alloc_txbuff)
+ return -EOPNOTSUPP;
+
+ return htt->tx_ops->htt_alloc_txbuff(htt);
+}
+
+static inline void ath10k_htt_free_txbuff(struct ath10k_htt *htt)
+{
+ if (htt->tx_ops->htt_free_txbuff)
+ htt->tx_ops->htt_free_txbuff(htt);
+}
+
+static inline int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt,
+ u8 max_subfrms_ampdu,
+ u8 max_subfrms_amsdu)
+
+{
+ if (!htt->tx_ops->htt_h2t_aggr_cfg_msg)
+ return -EOPNOTSUPP;
+
+ return htt->tx_ops->htt_h2t_aggr_cfg_msg(htt,
+ max_subfrms_ampdu,
+ max_subfrms_amsdu);
+}
+
+struct ath10k_htt_rx_ops {
+ size_t (*htt_get_rx_ring_size)(struct ath10k_htt *htt);
+ void (*htt_config_paddrs_ring)(struct ath10k_htt *htt, void *vaddr);
+ void (*htt_set_paddrs_ring)(struct ath10k_htt *htt, dma_addr_t paddr,
+ int idx);
+ void* (*htt_get_vaddr_ring)(struct ath10k_htt *htt);
+ void (*htt_reset_paddrs_ring)(struct ath10k_htt *htt, int idx);
+ bool (*htt_rx_proc_rx_frag_ind)(struct ath10k_htt *htt,
+ struct htt_rx_fragment_indication *rx,
+ struct sk_buff *skb);
+};
+
+static inline size_t ath10k_htt_get_rx_ring_size(struct ath10k_htt *htt)
+{
+ if (!htt->rx_ops->htt_get_rx_ring_size)
+ return 0;
+
+ return htt->rx_ops->htt_get_rx_ring_size(htt);
+}
+
+static inline void ath10k_htt_config_paddrs_ring(struct ath10k_htt *htt,
+ void *vaddr)
+{
+ if (htt->rx_ops->htt_config_paddrs_ring)
+ htt->rx_ops->htt_config_paddrs_ring(htt, vaddr);
+}
+
+static inline void ath10k_htt_set_paddrs_ring(struct ath10k_htt *htt,
+ dma_addr_t paddr,
+ int idx)
+{
+ if (htt->rx_ops->htt_set_paddrs_ring)
+ htt->rx_ops->htt_set_paddrs_ring(htt, paddr, idx);
+}
+
+static inline void *ath10k_htt_get_vaddr_ring(struct ath10k_htt *htt)
+{
+ if (!htt->rx_ops->htt_get_vaddr_ring)
+ return NULL;
+
+ return htt->rx_ops->htt_get_vaddr_ring(htt);
+}
+
+static inline void ath10k_htt_reset_paddrs_ring(struct ath10k_htt *htt, int idx)
+{
+ if (htt->rx_ops->htt_reset_paddrs_ring)
+ htt->rx_ops->htt_reset_paddrs_ring(htt, idx);
+}
+
+static inline bool ath10k_htt_rx_proc_rx_frag_ind(struct ath10k_htt *htt,
+ struct htt_rx_fragment_indication *rx,
+ struct sk_buff *skb)
+{
+ if (!htt->rx_ops->htt_rx_proc_rx_frag_ind)
+ return true;
+
+ return htt->rx_ops->htt_rx_proc_rx_frag_ind(htt, rx, skb);
+}
+
+/* the driver strongly assumes that the rx header status is 64 bytes long,
+ * so all possible rx_desc structures must respect this assumption.
+ */
+#define RX_HTT_HDR_STATUS_LEN 64
+
+/* The rx descriptor structure layout is programmed via rx ring setup
+ * so that FW knows how to transfer the rx descriptor to the host.
+ * Unfortunately, though, QCA6174's firmware doesn't currently behave correctly
+ * when the structure layout of the rx descriptor is modified beyond what it
+ * expects (even if it is correctly programmed during the rx ring setup).
+ * Therefore we must keep two different memory layouts, abstract the rx
+ * descriptor representation and use ath10k_rx_desc_ops to access rx
+ * descriptor data correctly.
+ */
+
+/* base struct used for abstracting the rx descriptor representation */
+struct htt_rx_desc {
+ union {
+ /* This field is filled on the host using the msdu buffer
+ * from htt_rx_indication
+ */
+ struct fw_rx_desc_base fw_desc;
+ u32 pad;
+ } __packed;
+} __packed;
+
+/* rx descriptor for wcn3990, possibly extensible to newer cards.
+ * Buffers like this are placed on the rx ring.
+ */
+struct htt_rx_desc_v2 {
+ struct htt_rx_desc base;
+ struct {
+ struct rx_attention attention;
+ struct rx_frag_info frag_info;
+ struct rx_mpdu_start mpdu_start;
+ struct rx_msdu_start msdu_start;
+ struct rx_msdu_end msdu_end;
+ struct rx_mpdu_end mpdu_end;
+ struct rx_ppdu_start ppdu_start;
+ struct rx_ppdu_end ppdu_end;
+ } __packed;
+ u8 rx_hdr_status[RX_HTT_HDR_STATUS_LEN];
+ u8 msdu_payload[];
+};
+
+/* Dedicated rx descriptor for QCA6174, QCA988x and QCA99x0 to make sure their
+ * firmware works correctly. We keep a single rx descriptor for all three
+ * families of cards because testing showed it to be the most stable solution,
+ * e.g. a rx descriptor dedicated only to QCA6174 occasionally caused firmware
+ * crashes during some tests.
+ * Buffers like this are placed on the rx ring.
+ */
+struct htt_rx_desc_v1 {
+ struct htt_rx_desc base;
+ struct {
+ struct rx_attention attention;
+ struct rx_frag_info_v1 frag_info;
+ struct rx_mpdu_start mpdu_start;
+ struct rx_msdu_start_v1 msdu_start;
+ struct rx_msdu_end_v1 msdu_end;
+ struct rx_mpdu_end mpdu_end;
+ struct rx_ppdu_start ppdu_start;
+ struct rx_ppdu_end_v1 ppdu_end;
+ } __packed;
+ u8 rx_hdr_status[RX_HTT_HDR_STATUS_LEN];
+ u8 msdu_payload[];
+};
+
+/* rx_desc abstraction */
+struct ath10k_htt_rx_desc_ops {
+ /* These fields are mandatory, they must be specified in any instance */
+
+ /* sizeof() of the rx_desc structure used by this hw */
+ size_t rx_desc_size;
+
+ /* offset of msdu_payload inside the rx_desc structure used by this hw */
+ size_t rx_desc_msdu_payload_offset;
+
+ /* These fields are optional.
+ * When a field is not provided the default implementation gets used
+ * (see the ath10k_rx_desc_* operations below for more info about the defaults)
+ */
+ bool (*rx_desc_get_msdu_limit_error)(struct htt_rx_desc *rxd);
+ int (*rx_desc_get_l3_pad_bytes)(struct htt_rx_desc *rxd);
+
+ /* Safely cast from a void* buffer containing an rx descriptor
+ * to the proper rx_desc structure
+ */
+ struct htt_rx_desc *(*rx_desc_from_raw_buffer)(void *buff);
+
+ void (*rx_desc_get_offsets)(struct htt_rx_ring_rx_desc_offsets *offs);
+ struct rx_attention *(*rx_desc_get_attention)(struct htt_rx_desc *rxd);
+ struct rx_frag_info_common *(*rx_desc_get_frag_info)(struct htt_rx_desc *rxd);
+ struct rx_mpdu_start *(*rx_desc_get_mpdu_start)(struct htt_rx_desc *rxd);
+ struct rx_mpdu_end *(*rx_desc_get_mpdu_end)(struct htt_rx_desc *rxd);
+ struct rx_msdu_start_common *(*rx_desc_get_msdu_start)(struct htt_rx_desc *rxd);
+ struct rx_msdu_end_common *(*rx_desc_get_msdu_end)(struct htt_rx_desc *rxd);
+ struct rx_ppdu_start *(*rx_desc_get_ppdu_start)(struct htt_rx_desc *rxd);
+ struct rx_ppdu_end_common *(*rx_desc_get_ppdu_end)(struct htt_rx_desc *rxd);
+ u8 *(*rx_desc_get_rx_hdr_status)(struct htt_rx_desc *rxd);
+ u8 *(*rx_desc_get_msdu_payload)(struct htt_rx_desc *rxd);
+};
+
+extern const struct ath10k_htt_rx_desc_ops qca988x_rx_desc_ops;
+extern const struct ath10k_htt_rx_desc_ops qca99x0_rx_desc_ops;
+extern const struct ath10k_htt_rx_desc_ops wcn3990_rx_desc_ops;
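+
+/* Hypothetical sketch, not part of the original patch: a device whose
+ * firmware tolerates the v1 layout could provide only the two mandatory
+ * fields and rely on the v1-based default getters below, e.g.:
+ *
+ * static const struct ath10k_htt_rx_desc_ops example_rx_desc_ops = {
+ *	.rx_desc_size = sizeof(struct htt_rx_desc_v1),
+ *	.rx_desc_msdu_payload_offset =
+ *		offsetof(struct htt_rx_desc_v1, msdu_payload),
+ * };
+ */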
+
+static inline int
+ath10k_htt_rx_desc_get_l3_pad_bytes(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
+{
+ if (hw->rx_desc_ops->rx_desc_get_l3_pad_bytes)
+ return hw->rx_desc_ops->rx_desc_get_l3_pad_bytes(rxd);
+ return 0;
+}
+
+static inline bool
+ath10k_htt_rx_desc_msdu_limit_error(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
+{
+ if (hw->rx_desc_ops->rx_desc_get_msdu_limit_error)
+ return hw->rx_desc_ops->rx_desc_get_msdu_limit_error(rxd);
+ return false;
+}
+
+/* The default implementation of all these getters uses the old rx_desc,
+ * so that it is easier to define the ath10k_htt_rx_desc_ops instances.
+ * But probably, if new wireless cards must be supported, it would be better
+ * to switch the default implementation to the new rx_desc, since this would
+ * make the extension easier.
+ */
+static inline struct htt_rx_desc *
+ath10k_htt_rx_desc_from_raw_buffer(struct ath10k_hw_params *hw, void *buff)
+{
+ if (hw->rx_desc_ops->rx_desc_from_raw_buffer)
+ return hw->rx_desc_ops->rx_desc_from_raw_buffer(buff);
+ return &((struct htt_rx_desc_v1 *)buff)->base;
+}
+
+static inline void
+ath10k_htt_rx_desc_get_offsets(struct ath10k_hw_params *hw,
+ struct htt_rx_ring_rx_desc_offsets *off)
+{
+ if (hw->rx_desc_ops->rx_desc_get_offsets) {
+ hw->rx_desc_ops->rx_desc_get_offsets(off);
+ } else {
+#define desc_offset(x) (offsetof(struct htt_rx_desc_v1, x) / 4)
+ off->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status));
+ off->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload));
+ off->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start));
+ off->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end));
+ off->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start));
+ off->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end));
+ off->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start));
+ off->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end));
+ off->rx_attention_offset = __cpu_to_le16(desc_offset(attention));
+ off->frag_info_offset = __cpu_to_le16(desc_offset(frag_info));
+#undef desc_offset
+ }
+}
+
+static inline struct rx_attention *
+ath10k_htt_rx_desc_get_attention(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v1 *rx_desc;
+
+ if (hw->rx_desc_ops->rx_desc_get_attention)
+ return hw->rx_desc_ops->rx_desc_get_attention(rxd);
+
+ rx_desc = container_of(rxd, struct htt_rx_desc_v1, base);
+ return &rx_desc->attention;
+}
+
+static inline struct rx_frag_info_common *
+ath10k_htt_rx_desc_get_frag_info(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v1 *rx_desc;
+
+ if (hw->rx_desc_ops->rx_desc_get_frag_info)
+ return hw->rx_desc_ops->rx_desc_get_frag_info(rxd);
+
+ rx_desc = container_of(rxd, struct htt_rx_desc_v1, base);
+ return &rx_desc->frag_info.common;
+}
+
+static inline struct rx_mpdu_start *
+ath10k_htt_rx_desc_get_mpdu_start(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v1 *rx_desc;
+
+ if (hw->rx_desc_ops->rx_desc_get_mpdu_start)
+ return hw->rx_desc_ops->rx_desc_get_mpdu_start(rxd);
+
+ rx_desc = container_of(rxd, struct htt_rx_desc_v1, base);
+ return &rx_desc->mpdu_start;
+}
+
+static inline struct rx_mpdu_end *
+ath10k_htt_rx_desc_get_mpdu_end(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v1 *rx_desc;
+
+ if (hw->rx_desc_ops->rx_desc_get_mpdu_end)
+ return hw->rx_desc_ops->rx_desc_get_mpdu_end(rxd);
+
+ rx_desc = container_of(rxd, struct htt_rx_desc_v1, base);
+ return &rx_desc->mpdu_end;
+}
+
+static inline struct rx_msdu_start_common *
+ath10k_htt_rx_desc_get_msdu_start(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v1 *rx_desc;
+
+ if (hw->rx_desc_ops->rx_desc_get_msdu_start)
+ return hw->rx_desc_ops->rx_desc_get_msdu_start(rxd);
+
+ rx_desc = container_of(rxd, struct htt_rx_desc_v1, base);
+ return &rx_desc->msdu_start.common;
+}
+
+static inline struct rx_msdu_end_common *
+ath10k_htt_rx_desc_get_msdu_end(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v1 *rx_desc;
+
+ if (hw->rx_desc_ops->rx_desc_get_msdu_end)
+ return hw->rx_desc_ops->rx_desc_get_msdu_end(rxd);
+
+ rx_desc = container_of(rxd, struct htt_rx_desc_v1, base);
+ return &rx_desc->msdu_end.common;
+}
+
+static inline struct rx_ppdu_start *
+ath10k_htt_rx_desc_get_ppdu_start(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v1 *rx_desc;
+
+ if (hw->rx_desc_ops->rx_desc_get_ppdu_start)
+ return hw->rx_desc_ops->rx_desc_get_ppdu_start(rxd);
+
+ rx_desc = container_of(rxd, struct htt_rx_desc_v1, base);
+ return &rx_desc->ppdu_start;
+}
+
+static inline struct rx_ppdu_end_common *
+ath10k_htt_rx_desc_get_ppdu_end(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v1 *rx_desc;
+
+ if (hw->rx_desc_ops->rx_desc_get_ppdu_end)
+ return hw->rx_desc_ops->rx_desc_get_ppdu_end(rxd);
+
+ rx_desc = container_of(rxd, struct htt_rx_desc_v1, base);
+ return &rx_desc->ppdu_end.common;
+}
+
+static inline u8 *
+ath10k_htt_rx_desc_get_rx_hdr_status(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v1 *rx_desc;
+
+ if (hw->rx_desc_ops->rx_desc_get_rx_hdr_status)
+ return hw->rx_desc_ops->rx_desc_get_rx_hdr_status(rxd);
+
+ rx_desc = container_of(rxd, struct htt_rx_desc_v1, base);
+ return rx_desc->rx_hdr_status;
+}
+
+static inline u8 *
+ath10k_htt_rx_desc_get_msdu_payload(struct ath10k_hw_params *hw, struct htt_rx_desc *rxd)
+{
+ struct htt_rx_desc_v1 *rx_desc;
+
+ if (hw->rx_desc_ops->rx_desc_get_msdu_payload)
+ return hw->rx_desc_ops->rx_desc_get_msdu_payload(rxd);
+
+ rx_desc = container_of(rxd, struct htt_rx_desc_v1, base);
+ return rx_desc->msdu_payload;
+}
+
+#define HTT_RX_DESC_HL_INFO_SEQ_NUM_MASK 0x00000fff
+#define HTT_RX_DESC_HL_INFO_SEQ_NUM_LSB 0
+#define HTT_RX_DESC_HL_INFO_ENCRYPTED_MASK 0x00001000
+#define HTT_RX_DESC_HL_INFO_ENCRYPTED_LSB 12
+#define HTT_RX_DESC_HL_INFO_CHAN_INFO_PRESENT_MASK 0x00002000
+#define HTT_RX_DESC_HL_INFO_CHAN_INFO_PRESENT_LSB 13
+#define HTT_RX_DESC_HL_INFO_MCAST_BCAST_MASK 0x00010000
+#define HTT_RX_DESC_HL_INFO_MCAST_BCAST_LSB 16
+#define HTT_RX_DESC_HL_INFO_KEY_ID_OCT_MASK 0x01fe0000
+#define HTT_RX_DESC_HL_INFO_KEY_ID_OCT_LSB 17
+
+struct htt_rx_desc_base_hl {
+ __le32 info; /* HTT_RX_DESC_HL_INFO_ */
+};
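+
+/* Illustrative sketch, not part of the original patch: a hypothetical helper
+ * extracting the sequence number from an HL rx descriptor info word with the
+ * HTT_RX_DESC_HL_INFO_* masks above.
+ */
+static inline u16
+ath10k_htt_rx_desc_hl_seq_num(const struct htt_rx_desc_base_hl *desc)
+{
+ return (__le32_to_cpu(desc->info) & HTT_RX_DESC_HL_INFO_SEQ_NUM_MASK) >>
+ HTT_RX_DESC_HL_INFO_SEQ_NUM_LSB;
+}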
+
+struct htt_rx_chan_info {
+ __le16 primary_chan_center_freq_mhz;
+ __le16 contig_chan1_center_freq_mhz;
+ __le16 contig_chan2_center_freq_mhz;
+ u8 phy_mode;
+ u8 reserved;
+} __packed;
+
+#define HTT_RX_DESC_ALIGN 8
+
+#define HTT_MAC_ADDR_LEN 6
+
+/*
+ * FIX THIS
+ * Should be: sizeof(struct htt_host_rx_desc) + max rx MSDU size,
+ * rounded up to a cache line size.
+ */
+#define HTT_RX_BUF_SIZE 2048
+
+/* The HTT_RX_MSDU_SIZE can't be statically computed anymore,
+ * because it depends on the underlying device rx_desc representation
+ */
+static inline int ath10k_htt_rx_msdu_size(struct ath10k_hw_params *hw)
+{
+ return HTT_RX_BUF_SIZE - (int)hw->rx_desc_ops->rx_desc_size;
+}
+
+/* Refill a bunch of RX buffers for each refill round so that FW/HW can handle
+ * aggregated traffic more nicely.
+ */
+#define ATH10K_HTT_MAX_NUM_REFILL 100
+
+/*
+ * DMA_MAP expects the buffer to be an integral number of cache lines.
+ * Rather than checking the actual cache line size, this code makes a
+ * conservative estimate of what the cache line size could be.
+ */
+#define HTT_LOG2_MAX_CACHE_LINE_SIZE 7 /* 2^7 = 128 */
+#define HTT_MAX_CACHE_LINE_SIZE_MASK ((1 << HTT_LOG2_MAX_CACHE_LINE_SIZE) - 1)
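+
+/* Illustrative sketch, not part of the original patch: a hypothetical macro
+ * rounding a buffer length up to the conservative cache line estimate above.
+ */
+#define HTT_CACHE_LINE_ALIGN(len) \
+ (((len) + HTT_MAX_CACHE_LINE_SIZE_MASK) & ~HTT_MAX_CACHE_LINE_SIZE_MASK)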
+
+/* These values are default in most firmware revisions and apparently are a
+ * sweet spot performance wise.
+ */
+#define ATH10K_HTT_MAX_NUM_AMSDU_DEFAULT 3
+#define ATH10K_HTT_MAX_NUM_AMPDU_DEFAULT 64
+
+int ath10k_htt_connect(struct ath10k_htt *htt);
+int ath10k_htt_init(struct ath10k *ar);
+int ath10k_htt_setup(struct ath10k_htt *htt);
+
+int ath10k_htt_tx_start(struct ath10k_htt *htt);
+void ath10k_htt_tx_stop(struct ath10k_htt *htt);
+void ath10k_htt_tx_destroy(struct ath10k_htt *htt);
+void ath10k_htt_tx_free(struct ath10k_htt *htt);
+
+int ath10k_htt_rx_alloc(struct ath10k_htt *htt);
+int ath10k_htt_rx_ring_refill(struct ath10k *ar);
+void ath10k_htt_rx_free(struct ath10k_htt *htt);
+
+void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_htt_htc_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb);
+bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb);
+int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt);
+int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u32 mask, u32 reset_mask,
+ u64 cookie);
+void ath10k_htt_hif_tx_complete(struct ath10k *ar, struct sk_buff *skb);
+int ath10k_htt_tx_fetch_resp(struct ath10k *ar,
+ __le32 token,
+ __le16 fetch_seq_num,
+ struct htt_tx_fetch_record *records,
+ size_t num_records);
+void ath10k_htt_op_ep_tx_credits(struct ath10k *ar);
+
+void ath10k_htt_tx_txq_update(struct ieee80211_hw *hw,
+ struct ieee80211_txq *txq);
+void ath10k_htt_tx_txq_recalc(struct ieee80211_hw *hw,
+ struct ieee80211_txq *txq);
+void ath10k_htt_tx_txq_sync(struct ath10k *ar);
+void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt);
+int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt);
+void ath10k_htt_tx_mgmt_dec_pending(struct ath10k_htt *htt);
+int ath10k_htt_tx_mgmt_inc_pending(struct ath10k_htt *htt, bool is_mgmt,
+ bool is_presp);
+
+int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb);
+void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id);
+int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu);
+void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar,
+ struct sk_buff *skb);
+int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget);
+int ath10k_htt_rx_hl_indication(struct ath10k *ar, int budget);
+void ath10k_htt_set_tx_ops(struct ath10k_htt *htt);
+void ath10k_htt_set_rx_ops(struct ath10k_htt *htt);
+#endif
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
new file mode 100644
index 000000000000..d7e429041065
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -0,0 +1,4474 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#include <linux/export.h>
+
+#include "core.h"
+#include "htc.h"
+#include "htt.h"
+#include "txrx.h"
+#include "debug.h"
+#include "trace.h"
+#include "mac.h"
+
+#include <linux/log2.h>
+#include <linux/bitfield.h>
+
+/* when under memory pressure rx ring refill may fail and needs a retry */
+#define HTT_RX_RING_REFILL_RETRY_MS 50
+
+#define HTT_RX_RING_REFILL_RESCHED_MS 5
+
+/* shortcut to interpret a raw memory buffer as a rx descriptor */
+#define HTT_RX_BUF_TO_RX_DESC(hw, buf) ath10k_htt_rx_desc_from_raw_buffer(hw, buf)
+
+static int ath10k_htt_rx_get_csum_state(struct ath10k_hw_params *hw, struct sk_buff *skb);
+
+static struct sk_buff *
+ath10k_htt_rx_find_skb_paddr(struct ath10k *ar, u64 paddr)
+{
+ struct ath10k_skb_rxcb *rxcb;
+
+ hash_for_each_possible(ar->htt.rx_ring.skb_table, rxcb, hlist, paddr)
+ if (rxcb->paddr == paddr)
+ return ATH10K_RXCB_SKB(rxcb);
+
+ WARN_ON_ONCE(1);
+ return NULL;
+}
+
+static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt)
+{
+ struct sk_buff *skb;
+ struct ath10k_skb_rxcb *rxcb;
+ struct hlist_node *n;
+ int i;
+
+ if (htt->rx_ring.in_ord_rx) {
+ hash_for_each_safe(htt->rx_ring.skb_table, i, n, rxcb, hlist) {
+ skb = ATH10K_RXCB_SKB(rxcb);
+ dma_unmap_single(htt->ar->dev, rxcb->paddr,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+ hash_del(&rxcb->hlist);
+ dev_kfree_skb_any(skb);
+ }
+ } else {
+ for (i = 0; i < htt->rx_ring.size; i++) {
+ skb = htt->rx_ring.netbufs_ring[i];
+ if (!skb)
+ continue;
+
+ rxcb = ATH10K_SKB_RXCB(skb);
+ dma_unmap_single(htt->ar->dev, rxcb->paddr,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
+ }
+ }
+
+ htt->rx_ring.fill_cnt = 0;
+ hash_init(htt->rx_ring.skb_table);
+ memset(htt->rx_ring.netbufs_ring, 0,
+ htt->rx_ring.size * sizeof(htt->rx_ring.netbufs_ring[0]));
+}
+
+static size_t ath10k_htt_get_rx_ring_size_32(struct ath10k_htt *htt)
+{
+ return htt->rx_ring.size * sizeof(*htt->rx_ring.paddrs_ring_32);
+}
+
+static size_t ath10k_htt_get_rx_ring_size_64(struct ath10k_htt *htt)
+{
+ return htt->rx_ring.size * sizeof(*htt->rx_ring.paddrs_ring_64);
+}
+
+static void ath10k_htt_config_paddrs_ring_32(struct ath10k_htt *htt,
+ void *vaddr)
+{
+ htt->rx_ring.paddrs_ring_32 = vaddr;
+}
+
+static void ath10k_htt_config_paddrs_ring_64(struct ath10k_htt *htt,
+ void *vaddr)
+{
+ htt->rx_ring.paddrs_ring_64 = vaddr;
+}
+
+static void ath10k_htt_set_paddrs_ring_32(struct ath10k_htt *htt,
+ dma_addr_t paddr, int idx)
+{
+ htt->rx_ring.paddrs_ring_32[idx] = __cpu_to_le32(paddr);
+}
+
+static void ath10k_htt_set_paddrs_ring_64(struct ath10k_htt *htt,
+ dma_addr_t paddr, int idx)
+{
+ htt->rx_ring.paddrs_ring_64[idx] = __cpu_to_le64(paddr);
+}
+
+static void ath10k_htt_reset_paddrs_ring_32(struct ath10k_htt *htt, int idx)
+{
+ htt->rx_ring.paddrs_ring_32[idx] = 0;
+}
+
+static void ath10k_htt_reset_paddrs_ring_64(struct ath10k_htt *htt, int idx)
+{
+ htt->rx_ring.paddrs_ring_64[idx] = 0;
+}
+
+static void *ath10k_htt_get_vaddr_ring_32(struct ath10k_htt *htt)
+{
+ return (void *)htt->rx_ring.paddrs_ring_32;
+}
+
+static void *ath10k_htt_get_vaddr_ring_64(struct ath10k_htt *htt)
+{
+ return (void *)htt->rx_ring.paddrs_ring_64;
+}
+
+static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
+{
+ struct ath10k_hw_params *hw = &htt->ar->hw_params;
+ struct htt_rx_desc *rx_desc;
+ struct ath10k_skb_rxcb *rxcb;
+ struct sk_buff *skb;
+ dma_addr_t paddr;
+ int ret = 0, idx;
+
+ /* The Full Rx Reorder firmware has no way of telling the host
+ * implicitly when it has copied HTT Rx Ring buffers to the MAC Rx Ring.
+ * To keep things simple make sure the ring is always half empty. This
+ * guarantees that no replenishment overruns are possible.
+ */
+ BUILD_BUG_ON(HTT_RX_RING_FILL_LEVEL >= HTT_RX_RING_SIZE / 2);
+
+ idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);
+
+ if (idx < 0 || idx >= htt->rx_ring.size) {
+ ath10k_err(htt->ar, "rx ring index is not valid, firmware malfunctioning?\n");
+ idx &= htt->rx_ring.size_mask;
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ while (num > 0) {
+ skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN);
+ if (!skb) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ if (!IS_ALIGNED((unsigned long)skb->data, HTT_RX_DESC_ALIGN))
+ skb_pull(skb,
+ PTR_ALIGN(skb->data, HTT_RX_DESC_ALIGN) -
+ skb->data);
+
+ /* Clear rx_desc attention word before posting to Rx ring */
+ rx_desc = HTT_RX_BUF_TO_RX_DESC(hw, skb->data);
+ ath10k_htt_rx_desc_get_attention(hw, rx_desc)->flags = __cpu_to_le32(0);
+
+ paddr = dma_map_single(htt->ar->dev, skb->data,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+
+ if (unlikely(dma_mapping_error(htt->ar->dev, paddr))) {
+ dev_kfree_skb_any(skb);
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ rxcb = ATH10K_SKB_RXCB(skb);
+ rxcb->paddr = paddr;
+ htt->rx_ring.netbufs_ring[idx] = skb;
+ ath10k_htt_set_paddrs_ring(htt, paddr, idx);
+ htt->rx_ring.fill_cnt++;
+
+ if (htt->rx_ring.in_ord_rx) {
+ hash_add(htt->rx_ring.skb_table,
+ &ATH10K_SKB_RXCB(skb)->hlist,
+ paddr);
+ }
+
+ num--;
+ idx++;
+ idx &= htt->rx_ring.size_mask;
+ }
+
+fail:
+ /*
+ * Make sure the rx buffer updates are visible before the available
+ * buffer index is updated, to avoid any potential rx ring corruption.
+ */
+ mb();
+ *htt->rx_ring.alloc_idx.vaddr = __cpu_to_le32(idx);
+ return ret;
+}
+
+static int ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
+{
+ lockdep_assert_held(&htt->rx_ring.lock);
+ return __ath10k_htt_rx_ring_fill_n(htt, num);
+}
+
+static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
+{
+ int ret, num_deficit, num_to_fill;
+
+ /* Refilling the whole RX ring buffer proves to be a bad idea. The
+ * reason is RX may take up a significant amount of CPU cycles and
+ * starve other tasks, e.g. TX on an ethernet device while acting as
+ * a bridge with the ath10k wlan interface. This ended up with very
+ * poor performance once the host system's CPU was overwhelmed with
+ * RX on ath10k.
+ *
+ * By limiting the number of refills the replenishing occurs
+ * progressively. This in turn makes use of the fact that tasklets are
+ * processed in FIFO order. This means actual RX processing can starve
+ * out refilling. If there are not enough buffers on the RX ring the FW
+ * will not report RX until it is refilled with enough buffers. This
+ * automatically balances load with respect to CPU power.
+ *
+ * This probably comes at a cost of lower maximum throughput but
+ * improves the average and stability.
+ */
+ spin_lock_bh(&htt->rx_ring.lock);
+ num_deficit = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
+ num_to_fill = min(ATH10K_HTT_MAX_NUM_REFILL, num_deficit);
+ num_deficit -= num_to_fill;
+ ret = ath10k_htt_rx_ring_fill_n(htt, num_to_fill);
+ if (ret == -ENOMEM) {
+ /*
+ * Failed to fill it to the desired level -
+ * we'll start a timer and try again next time.
+ * As long as enough buffers are left in the ring for
+ * another A-MPDU rx, no special recovery is needed.
+ */
+ mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
+ msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS));
+ } else if (num_deficit > 0) {
+ mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
+ msecs_to_jiffies(HTT_RX_RING_REFILL_RESCHED_MS));
+ }
+ spin_unlock_bh(&htt->rx_ring.lock);
+}
+
+static void ath10k_htt_rx_ring_refill_retry(struct timer_list *t)
+{
+ struct ath10k_htt *htt = timer_container_of(htt, t,
+ rx_ring.refill_retry_timer);
+
+ ath10k_htt_rx_msdu_buff_replenish(htt);
+}
+
+int ath10k_htt_rx_ring_refill(struct ath10k *ar)
+{
+ struct ath10k_htt *htt = &ar->htt;
+ int ret;
+
+ if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
+ return 0;
+
+ spin_lock_bh(&htt->rx_ring.lock);
+ ret = ath10k_htt_rx_ring_fill_n(htt, (htt->rx_ring.fill_level -
+ htt->rx_ring.fill_cnt));
+
+ if (ret)
+ ath10k_htt_rx_ring_free(htt);
+
+ spin_unlock_bh(&htt->rx_ring.lock);
+
+ return ret;
+}
+
+void ath10k_htt_rx_free(struct ath10k_htt *htt)
+{
+ if (htt->ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
+ return;
+
+ timer_delete_sync(&htt->rx_ring.refill_retry_timer);
+
+ skb_queue_purge(&htt->rx_msdus_q);
+ skb_queue_purge(&htt->rx_in_ord_compl_q);
+ skb_queue_purge(&htt->tx_fetch_ind_q);
+
+ spin_lock_bh(&htt->rx_ring.lock);
+ ath10k_htt_rx_ring_free(htt);
+ spin_unlock_bh(&htt->rx_ring.lock);
+
+ dma_free_coherent(htt->ar->dev,
+ ath10k_htt_get_rx_ring_size(htt),
+ ath10k_htt_get_vaddr_ring(htt),
+ htt->rx_ring.base_paddr);
+
+ ath10k_htt_config_paddrs_ring(htt, NULL);
+
+ dma_free_coherent(htt->ar->dev,
+ sizeof(*htt->rx_ring.alloc_idx.vaddr),
+ htt->rx_ring.alloc_idx.vaddr,
+ htt->rx_ring.alloc_idx.paddr);
+ htt->rx_ring.alloc_idx.vaddr = NULL;
+
+ kfree(htt->rx_ring.netbufs_ring);
+ htt->rx_ring.netbufs_ring = NULL;
+}
+
+static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
+{
+ struct ath10k *ar = htt->ar;
+ int idx;
+ struct sk_buff *msdu;
+
+ lockdep_assert_held(&htt->rx_ring.lock);
+
+ if (htt->rx_ring.fill_cnt == 0) {
+ ath10k_warn(ar, "tried to pop sk_buff from an empty rx ring\n");
+ return NULL;
+ }
+
+ idx = htt->rx_ring.sw_rd_idx.msdu_payld;
+ msdu = htt->rx_ring.netbufs_ring[idx];
+ htt->rx_ring.netbufs_ring[idx] = NULL;
+ ath10k_htt_reset_paddrs_ring(htt, idx);
+
+ idx++;
+ idx &= htt->rx_ring.size_mask;
+ htt->rx_ring.sw_rd_idx.msdu_payld = idx;
+ htt->rx_ring.fill_cnt--;
+
+ dma_unmap_single(htt->ar->dev,
+ ATH10K_SKB_RXCB(msdu)->paddr,
+ msdu->len + skb_tailroom(msdu),
+ DMA_FROM_DEVICE);
+ ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
+ msdu->data, msdu->len + skb_tailroom(msdu));
+
+ return msdu;
+}
+
+/* return: < 0 fatal error, 0 - non chained msdu, 1 chained msdu */
+static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
+ struct sk_buff_head *amsdu)
+{
+ struct ath10k *ar = htt->ar;
+ struct ath10k_hw_params *hw = &ar->hw_params;
+ int msdu_len, msdu_chaining = 0;
+ struct sk_buff *msdu;
+ struct htt_rx_desc *rx_desc;
+ struct rx_attention *rx_desc_attention;
+ struct rx_frag_info_common *rx_desc_frag_info_common;
+ struct rx_msdu_start_common *rx_desc_msdu_start_common;
+ struct rx_msdu_end_common *rx_desc_msdu_end_common;
+
+ lockdep_assert_held(&htt->rx_ring.lock);
+
+ for (;;) {
+ int last_msdu, msdu_len_invalid, msdu_chained;
+
+ msdu = ath10k_htt_rx_netbuf_pop(htt);
+ if (!msdu) {
+ __skb_queue_purge(amsdu);
+ return -ENOENT;
+ }
+
+ __skb_queue_tail(amsdu, msdu);
+
+ rx_desc = HTT_RX_BUF_TO_RX_DESC(hw, msdu->data);
+ rx_desc_attention = ath10k_htt_rx_desc_get_attention(hw, rx_desc);
+ rx_desc_msdu_start_common = ath10k_htt_rx_desc_get_msdu_start(hw,
+ rx_desc);
+ rx_desc_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rx_desc);
+ rx_desc_frag_info_common = ath10k_htt_rx_desc_get_frag_info(hw, rx_desc);
+
+ /* FIXME: we must report msdu payload since this is what caller
+ * expects now
+ */
+ skb_put(msdu, hw->rx_desc_ops->rx_desc_msdu_payload_offset);
+ skb_pull(msdu, hw->rx_desc_ops->rx_desc_msdu_payload_offset);
+
+ /*
+ * Sanity check - confirm the HW is finished filling in the
+ * rx data.
+ * If the HW and SW are working correctly, then it's guaranteed
+ * that the HW's MAC DMA is done before this point in the SW.
+ * To prevent the case that we handle a stale Rx descriptor,
+ * just assert for now until we have a way to recover.
+ */
+ if (!(__le32_to_cpu(rx_desc_attention->flags)
+ & RX_ATTENTION_FLAGS_MSDU_DONE)) {
+ __skb_queue_purge(amsdu);
+ return -EIO;
+ }
+
+ msdu_len_invalid = !!(__le32_to_cpu(rx_desc_attention->flags)
+ & (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR |
+ RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR));
+ msdu_len = MS(__le32_to_cpu(rx_desc_msdu_start_common->info0),
+ RX_MSDU_START_INFO0_MSDU_LENGTH);
+ msdu_chained = rx_desc_frag_info_common->ring2_more_count;
+
+ if (msdu_len_invalid)
+ msdu_len = 0;
+
+ skb_trim(msdu, 0);
+ skb_put(msdu, min(msdu_len, ath10k_htt_rx_msdu_size(hw)));
+ msdu_len -= msdu->len;
+
+ /* Note: Chained buffers do not contain rx descriptor */
+ while (msdu_chained--) {
+ msdu = ath10k_htt_rx_netbuf_pop(htt);
+ if (!msdu) {
+ __skb_queue_purge(amsdu);
+ return -ENOENT;
+ }
+
+ __skb_queue_tail(amsdu, msdu);
+ skb_trim(msdu, 0);
+ skb_put(msdu, min(msdu_len, HTT_RX_BUF_SIZE));
+ msdu_len -= msdu->len;
+ msdu_chaining = 1;
+ }
+
+ last_msdu = __le32_to_cpu(rx_desc_msdu_end_common->info0) &
+ RX_MSDU_END_INFO0_LAST_MSDU;
+
+ /* FIXME: why are we skipping the first part of the rx_desc? */
+ trace_ath10k_htt_rx_desc(ar, (void *)rx_desc + sizeof(u32),
+ hw->rx_desc_ops->rx_desc_size - sizeof(u32));
+
+ if (last_msdu)
+ break;
+ }
+
+ if (skb_queue_empty(amsdu))
+ msdu_chaining = -1;
+
+ /*
+ * Don't refill the ring yet.
+ *
+ * First, the elements popped here are still in use - it is not
+ * safe to overwrite them until the matching call to
+ * mpdu_desc_list_next. Second, for efficiency it is preferable to
+ * refill the rx ring with 1 PPDU's worth of rx buffers (something
+ * like 32 x 3 buffers), rather than one MPDU's worth of rx buffers
+ * (something like 3 buffers). Consequently, we'll rely on the txrx
+ * SW to tell us when it is done pulling all the PPDU's rx buffers
+ * out of the rx ring, and then refill it just once.
+ */
+
+ return msdu_chaining;
+}
+
+static struct sk_buff *ath10k_htt_rx_pop_paddr(struct ath10k_htt *htt,
+ u64 paddr)
+{
+ struct ath10k *ar = htt->ar;
+ struct ath10k_skb_rxcb *rxcb;
+ struct sk_buff *msdu;
+
+ lockdep_assert_held(&htt->rx_ring.lock);
+
+ msdu = ath10k_htt_rx_find_skb_paddr(ar, paddr);
+ if (!msdu)
+ return NULL;
+
+ rxcb = ATH10K_SKB_RXCB(msdu);
+ hash_del(&rxcb->hlist);
+ htt->rx_ring.fill_cnt--;
+
+ dma_unmap_single(htt->ar->dev, rxcb->paddr,
+ msdu->len + skb_tailroom(msdu),
+ DMA_FROM_DEVICE);
+ ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
+ msdu->data, msdu->len + skb_tailroom(msdu));
+
+ return msdu;
+}
+
+static inline void ath10k_htt_append_frag_list(struct sk_buff *skb_head,
+ struct sk_buff *frag_list,
+ unsigned int frag_len)
+{
+ skb_shinfo(skb_head)->frag_list = frag_list;
+ skb_head->data_len = frag_len;
+ skb_head->len += skb_head->data_len;
+}
+
+static int ath10k_htt_rx_handle_amsdu_mon_32(struct ath10k_htt *htt,
+ struct sk_buff *msdu,
+ struct htt_rx_in_ord_msdu_desc **msdu_desc)
+{
+ struct ath10k *ar = htt->ar;
+ struct ath10k_hw_params *hw = &ar->hw_params;
+ u32 paddr;
+ struct sk_buff *frag_buf;
+ struct sk_buff *prev_frag_buf;
+ u8 last_frag;
+ struct htt_rx_in_ord_msdu_desc *ind_desc = *msdu_desc;
+ struct htt_rx_desc *rxd;
+ int amsdu_len = __le16_to_cpu(ind_desc->msdu_len);
+
+ rxd = HTT_RX_BUF_TO_RX_DESC(hw, msdu->data);
+ trace_ath10k_htt_rx_desc(ar, rxd, hw->rx_desc_ops->rx_desc_size);
+
+ skb_put(msdu, hw->rx_desc_ops->rx_desc_size);
+ skb_pull(msdu, hw->rx_desc_ops->rx_desc_size);
+ skb_put(msdu, min(amsdu_len, ath10k_htt_rx_msdu_size(hw)));
+ amsdu_len -= msdu->len;
+
+ last_frag = ind_desc->reserved;
+ if (last_frag) {
+ if (amsdu_len) {
+ ath10k_warn(ar, "invalid amsdu len %u, left %d",
+ __le16_to_cpu(ind_desc->msdu_len),
+ amsdu_len);
+ }
+ return 0;
+ }
+
+ ind_desc++;
+ paddr = __le32_to_cpu(ind_desc->msdu_paddr);
+ frag_buf = ath10k_htt_rx_pop_paddr(htt, paddr);
+ if (!frag_buf) {
+ ath10k_warn(ar, "failed to pop frag-1 paddr: 0x%x", paddr);
+ return -ENOENT;
+ }
+
+ skb_put(frag_buf, min(amsdu_len, HTT_RX_BUF_SIZE));
+ ath10k_htt_append_frag_list(msdu, frag_buf, amsdu_len);
+
+ amsdu_len -= frag_buf->len;
+ prev_frag_buf = frag_buf;
+ last_frag = ind_desc->reserved;
+ while (!last_frag) {
+ ind_desc++;
+ paddr = __le32_to_cpu(ind_desc->msdu_paddr);
+ frag_buf = ath10k_htt_rx_pop_paddr(htt, paddr);
+ if (!frag_buf) {
+ ath10k_warn(ar, "failed to pop frag-n paddr: 0x%x",
+ paddr);
+ prev_frag_buf->next = NULL;
+ return -ENOENT;
+ }
+
+ skb_put(frag_buf, min(amsdu_len, HTT_RX_BUF_SIZE));
+ last_frag = ind_desc->reserved;
+ amsdu_len -= frag_buf->len;
+
+ prev_frag_buf->next = frag_buf;
+ prev_frag_buf = frag_buf;
+ }
+
+ if (amsdu_len) {
+ ath10k_warn(ar, "invalid amsdu len %u, left %d",
+ __le16_to_cpu(ind_desc->msdu_len), amsdu_len);
+ }
+
+ *msdu_desc = ind_desc;
+
+ prev_frag_buf->next = NULL;
+ return 0;
+}
+
+static int
+ath10k_htt_rx_handle_amsdu_mon_64(struct ath10k_htt *htt,
+ struct sk_buff *msdu,
+ struct htt_rx_in_ord_msdu_desc_ext **msdu_desc)
+{
+ struct ath10k *ar = htt->ar;
+ struct ath10k_hw_params *hw = &ar->hw_params;
+ u64 paddr;
+ struct sk_buff *frag_buf;
+ struct sk_buff *prev_frag_buf;
+ u8 last_frag;
+ struct htt_rx_in_ord_msdu_desc_ext *ind_desc = *msdu_desc;
+ struct htt_rx_desc *rxd;
+ int amsdu_len = __le16_to_cpu(ind_desc->msdu_len);
+
+ rxd = HTT_RX_BUF_TO_RX_DESC(hw, msdu->data);
+ trace_ath10k_htt_rx_desc(ar, rxd, hw->rx_desc_ops->rx_desc_size);
+
+ skb_put(msdu, hw->rx_desc_ops->rx_desc_size);
+ skb_pull(msdu, hw->rx_desc_ops->rx_desc_size);
+ skb_put(msdu, min(amsdu_len, ath10k_htt_rx_msdu_size(hw)));
+ amsdu_len -= msdu->len;
+
+ last_frag = ind_desc->reserved;
+ if (last_frag) {
+ if (amsdu_len) {
+ ath10k_warn(ar, "invalid amsdu len %u, left %d",
+ __le16_to_cpu(ind_desc->msdu_len),
+ amsdu_len);
+ }
+ return 0;
+ }
+
+ ind_desc++;
+ paddr = __le64_to_cpu(ind_desc->msdu_paddr);
+ frag_buf = ath10k_htt_rx_pop_paddr(htt, paddr);
+ if (!frag_buf) {
+ ath10k_warn(ar, "failed to pop frag-1 paddr: 0x%llx", paddr);
+ return -ENOENT;
+ }
+
+ skb_put(frag_buf, min(amsdu_len, HTT_RX_BUF_SIZE));
+ ath10k_htt_append_frag_list(msdu, frag_buf, amsdu_len);
+
+ amsdu_len -= frag_buf->len;
+ prev_frag_buf = frag_buf;
+ last_frag = ind_desc->reserved;
+ while (!last_frag) {
+ ind_desc++;
+ paddr = __le64_to_cpu(ind_desc->msdu_paddr);
+ frag_buf = ath10k_htt_rx_pop_paddr(htt, paddr);
+ if (!frag_buf) {
+ ath10k_warn(ar, "failed to pop frag-n paddr: 0x%llx",
+ paddr);
+ prev_frag_buf->next = NULL;
+ return -ENOENT;
+ }
+
+ skb_put(frag_buf, min(amsdu_len, HTT_RX_BUF_SIZE));
+ last_frag = ind_desc->reserved;
+ amsdu_len -= frag_buf->len;
+
+ prev_frag_buf->next = frag_buf;
+ prev_frag_buf = frag_buf;
+ }
+
+ if (amsdu_len) {
+ ath10k_warn(ar, "invalid amsdu len %u, left %d",
+ __le16_to_cpu(ind_desc->msdu_len), amsdu_len);
+ }
+
+ *msdu_desc = ind_desc;
+
+ prev_frag_buf->next = NULL;
+ return 0;
+}
+
+static int ath10k_htt_rx_pop_paddr32_list(struct ath10k_htt *htt,
+ struct htt_rx_in_ord_ind *ev,
+ struct sk_buff_head *list)
+{
+ struct ath10k *ar = htt->ar;
+ struct ath10k_hw_params *hw = &ar->hw_params;
+ struct htt_rx_in_ord_msdu_desc *msdu_desc = ev->msdu_descs32;
+ struct htt_rx_desc *rxd;
+ struct rx_attention *rxd_attention;
+ struct sk_buff *msdu;
+ int msdu_count, ret;
+ bool is_offload;
+ u32 paddr;
+
+ lockdep_assert_held(&htt->rx_ring.lock);
+
+ msdu_count = __le16_to_cpu(ev->msdu_count);
+ is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);
+
+ while (msdu_count--) {
+ paddr = __le32_to_cpu(msdu_desc->msdu_paddr);
+
+ msdu = ath10k_htt_rx_pop_paddr(htt, paddr);
+ if (!msdu) {
+ __skb_queue_purge(list);
+ return -ENOENT;
+ }
+
+ if (!is_offload && ar->monitor_arvif) {
+ ret = ath10k_htt_rx_handle_amsdu_mon_32(htt, msdu,
+ &msdu_desc);
+ if (ret) {
+ __skb_queue_purge(list);
+ return ret;
+ }
+ __skb_queue_tail(list, msdu);
+ msdu_desc++;
+ continue;
+ }
+
+ __skb_queue_tail(list, msdu);
+
+ if (!is_offload) {
+ rxd = HTT_RX_BUF_TO_RX_DESC(hw, msdu->data);
+ rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);
+
+ trace_ath10k_htt_rx_desc(ar, rxd, hw->rx_desc_ops->rx_desc_size);
+
+ skb_put(msdu, hw->rx_desc_ops->rx_desc_size);
+ skb_pull(msdu, hw->rx_desc_ops->rx_desc_size);
+ skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len));
+
+ if (!(__le32_to_cpu(rxd_attention->flags) &
+ RX_ATTENTION_FLAGS_MSDU_DONE)) {
+ ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n");
+ return -EIO;
+ }
+ }
+
+ msdu_desc++;
+ }
+
+ return 0;
+}
+
+static int ath10k_htt_rx_pop_paddr64_list(struct ath10k_htt *htt,
+ struct htt_rx_in_ord_ind *ev,
+ struct sk_buff_head *list)
+{
+ struct ath10k *ar = htt->ar;
+ struct ath10k_hw_params *hw = &ar->hw_params;
+ struct htt_rx_in_ord_msdu_desc_ext *msdu_desc = ev->msdu_descs64;
+ struct htt_rx_desc *rxd;
+ struct rx_attention *rxd_attention;
+ struct sk_buff *msdu;
+ int msdu_count, ret;
+ bool is_offload;
+ u64 paddr;
+
+ lockdep_assert_held(&htt->rx_ring.lock);
+
+ msdu_count = __le16_to_cpu(ev->msdu_count);
+ is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);
+
+ while (msdu_count--) {
+ paddr = __le64_to_cpu(msdu_desc->msdu_paddr);
+ msdu = ath10k_htt_rx_pop_paddr(htt, paddr);
+ if (!msdu) {
+ __skb_queue_purge(list);
+ return -ENOENT;
+ }
+
+ if (!is_offload && ar->monitor_arvif) {
+ ret = ath10k_htt_rx_handle_amsdu_mon_64(htt, msdu,
+ &msdu_desc);
+ if (ret) {
+ __skb_queue_purge(list);
+ return ret;
+ }
+ __skb_queue_tail(list, msdu);
+ msdu_desc++;
+ continue;
+ }
+
+ __skb_queue_tail(list, msdu);
+
+ if (!is_offload) {
+ rxd = HTT_RX_BUF_TO_RX_DESC(hw, msdu->data);
+ rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);
+
+ trace_ath10k_htt_rx_desc(ar, rxd, hw->rx_desc_ops->rx_desc_size);
+
+ skb_put(msdu, hw->rx_desc_ops->rx_desc_size);
+ skb_pull(msdu, hw->rx_desc_ops->rx_desc_size);
+ skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len));
+
+ if (!(__le32_to_cpu(rxd_attention->flags) &
+ RX_ATTENTION_FLAGS_MSDU_DONE)) {
+ ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n");
+ return -EIO;
+ }
+ }
+
+ msdu_desc++;
+ }
+
+ return 0;
+}
+
+int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
+{
+ struct ath10k *ar = htt->ar;
+ dma_addr_t paddr;
+ void *vaddr, *vaddr_ring;
+ size_t size;
+ struct timer_list *timer = &htt->rx_ring.refill_retry_timer;
+
+ if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
+ return 0;
+
+ htt->rx_confused = false;
+
+ /* XXX: The fill level could be changed during runtime in response to
+ * the host processing latency. Is this really worth it?
+ */
+ htt->rx_ring.size = HTT_RX_RING_SIZE;
+ htt->rx_ring.size_mask = htt->rx_ring.size - 1;
+ htt->rx_ring.fill_level = ar->hw_params.rx_ring_fill_level;
+
+ if (!is_power_of_2(htt->rx_ring.size)) {
+ ath10k_warn(ar, "htt rx ring size is not power of 2\n");
+ return -EINVAL;
+ }
+
+ htt->rx_ring.netbufs_ring =
+ kcalloc(htt->rx_ring.size, sizeof(struct sk_buff *),
+ GFP_KERNEL);
+ if (!htt->rx_ring.netbufs_ring)
+ goto err_netbuf;
+
+ size = ath10k_htt_get_rx_ring_size(htt);
+
+ vaddr_ring = dma_alloc_coherent(htt->ar->dev, size, &paddr, GFP_KERNEL);
+ if (!vaddr_ring)
+ goto err_dma_ring;
+
+ ath10k_htt_config_paddrs_ring(htt, vaddr_ring);
+ htt->rx_ring.base_paddr = paddr;
+
+ vaddr = dma_alloc_coherent(htt->ar->dev,
+ sizeof(*htt->rx_ring.alloc_idx.vaddr),
+ &paddr, GFP_KERNEL);
+ if (!vaddr)
+ goto err_dma_idx;
+
+ htt->rx_ring.alloc_idx.vaddr = vaddr;
+ htt->rx_ring.alloc_idx.paddr = paddr;
+ htt->rx_ring.sw_rd_idx.msdu_payld = htt->rx_ring.size_mask;
+ *htt->rx_ring.alloc_idx.vaddr = 0;
+
+ /* Initialize the Rx refill retry timer */
+ timer_setup(timer, ath10k_htt_rx_ring_refill_retry, 0);
+
+ spin_lock_init(&htt->rx_ring.lock);
+
+ htt->rx_ring.fill_cnt = 0;
+ htt->rx_ring.sw_rd_idx.msdu_payld = 0;
+ hash_init(htt->rx_ring.skb_table);
+
+ skb_queue_head_init(&htt->rx_msdus_q);
+ skb_queue_head_init(&htt->rx_in_ord_compl_q);
+ skb_queue_head_init(&htt->tx_fetch_ind_q);
+ atomic_set(&htt->num_mpdus_ready, 0);
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n",
+ htt->rx_ring.size, htt->rx_ring.fill_level);
+ return 0;
+
+err_dma_idx:
+ dma_free_coherent(htt->ar->dev,
+ ath10k_htt_get_rx_ring_size(htt),
+ vaddr_ring,
+ htt->rx_ring.base_paddr);
+ ath10k_htt_config_paddrs_ring(htt, NULL);
+err_dma_ring:
+ kfree(htt->rx_ring.netbufs_ring);
+ htt->rx_ring.netbufs_ring = NULL;
+err_netbuf:
+ return -ENOMEM;
+}
+
+static int ath10k_htt_rx_crypto_param_len(struct ath10k *ar,
+ enum htt_rx_mpdu_encrypt_type type)
+{
+ switch (type) {
+ case HTT_RX_MPDU_ENCRYPT_NONE:
+ return 0;
+ case HTT_RX_MPDU_ENCRYPT_WEP40:
+ case HTT_RX_MPDU_ENCRYPT_WEP104:
+ return IEEE80211_WEP_IV_LEN;
+ case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
+ case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
+ return IEEE80211_TKIP_IV_LEN;
+ case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
+ return IEEE80211_CCMP_HDR_LEN;
+ case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
+ return IEEE80211_CCMP_256_HDR_LEN;
+ case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
+ case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
+ return IEEE80211_GCMP_HDR_LEN;
+ case HTT_RX_MPDU_ENCRYPT_WEP128:
+ case HTT_RX_MPDU_ENCRYPT_WAPI:
+ break;
+ }
+
+ ath10k_warn(ar, "unsupported encryption type %d\n", type);
+ return 0;
+}
+
+#define MICHAEL_MIC_LEN 8
+
+static int ath10k_htt_rx_crypto_mic_len(struct ath10k *ar,
+ enum htt_rx_mpdu_encrypt_type type)
+{
+ switch (type) {
+ case HTT_RX_MPDU_ENCRYPT_NONE:
+ case HTT_RX_MPDU_ENCRYPT_WEP40:
+ case HTT_RX_MPDU_ENCRYPT_WEP104:
+ case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
+ case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
+ return 0;
+ case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
+ return IEEE80211_CCMP_MIC_LEN;
+ case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
+ return IEEE80211_CCMP_256_MIC_LEN;
+ case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
+ case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
+ return IEEE80211_GCMP_MIC_LEN;
+ case HTT_RX_MPDU_ENCRYPT_WEP128:
+ case HTT_RX_MPDU_ENCRYPT_WAPI:
+ break;
+ }
+
+ ath10k_warn(ar, "unsupported encryption type %d\n", type);
+ return 0;
+}
+
+static int ath10k_htt_rx_crypto_icv_len(struct ath10k *ar,
+ enum htt_rx_mpdu_encrypt_type type)
+{
+ switch (type) {
+ case HTT_RX_MPDU_ENCRYPT_NONE:
+ case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
+ case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
+ case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
+ case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
+ return 0;
+ case HTT_RX_MPDU_ENCRYPT_WEP40:
+ case HTT_RX_MPDU_ENCRYPT_WEP104:
+ return IEEE80211_WEP_ICV_LEN;
+ case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
+ case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
+ return IEEE80211_TKIP_ICV_LEN;
+ case HTT_RX_MPDU_ENCRYPT_WEP128:
+ case HTT_RX_MPDU_ENCRYPT_WAPI:
+ break;
+ }
+
+ ath10k_warn(ar, "unsupported encryption type %d\n", type);
+ return 0;
+}
+
+struct amsdu_subframe_hdr {
+ u8 dst[ETH_ALEN];
+ u8 src[ETH_ALEN];
+ __be16 len;
+} __packed;
+
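+/* Per IEEE 802.11ac (VHT-SIG-A1) a Group ID of 0 or 63 denotes a
+ * single-user (SU) transmission; all other values indicate MU-MIMO.
+ */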
+#define GROUP_ID_IS_SU_MIMO(x) ((x) == 0 || (x) == 63)
+
+static inline u8 ath10k_bw_to_mac80211_bw(u8 bw)
+{
+ u8 ret = 0;
+
+ switch (bw) {
+ case 0:
+ ret = RATE_INFO_BW_20;
+ break;
+ case 1:
+ ret = RATE_INFO_BW_40;
+ break;
+ case 2:
+ ret = RATE_INFO_BW_80;
+ break;
+ case 3:
+ ret = RATE_INFO_BW_160;
+ break;
+ }
+
+ return ret;
+}
+
+static void ath10k_htt_rx_h_rates(struct ath10k *ar,
+ struct ieee80211_rx_status *status,
+ struct htt_rx_desc *rxd)
+{
+ struct ath10k_hw_params *hw = &ar->hw_params;
+ struct rx_attention *rxd_attention;
+ struct rx_mpdu_start *rxd_mpdu_start;
+ struct rx_mpdu_end *rxd_mpdu_end;
+ struct rx_msdu_start_common *rxd_msdu_start_common;
+ struct rx_msdu_end_common *rxd_msdu_end_common;
+ struct rx_ppdu_start *rxd_ppdu_start;
+ struct ieee80211_supported_band *sband;
+ u8 cck, rate, bw, sgi, mcs, nss;
+ u8 *rxd_msdu_payload;
+ u8 preamble = 0;
+ u8 group_id;
+ u32 info1, info2, info3;
+ u32 stbc, nsts_su;
+
+ rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);
+ rxd_mpdu_start = ath10k_htt_rx_desc_get_mpdu_start(hw, rxd);
+ rxd_mpdu_end = ath10k_htt_rx_desc_get_mpdu_end(hw, rxd);
+ rxd_msdu_start_common = ath10k_htt_rx_desc_get_msdu_start(hw, rxd);
+ rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
+ rxd_ppdu_start = ath10k_htt_rx_desc_get_ppdu_start(hw, rxd);
+ rxd_msdu_payload = ath10k_htt_rx_desc_get_msdu_payload(hw, rxd);
+
+ info1 = __le32_to_cpu(rxd_ppdu_start->info1);
+ info2 = __le32_to_cpu(rxd_ppdu_start->info2);
+ info3 = __le32_to_cpu(rxd_ppdu_start->info3);
+
+ preamble = MS(info1, RX_PPDU_START_INFO1_PREAMBLE_TYPE);
+
+ switch (preamble) {
+ case HTT_RX_LEGACY:
+ /* The band is required to look up the legacy rate index. Since
+ * the band can't be undefined, check that freq is non-zero.
+ */
+ if (!status->freq)
+ return;
+
+ cck = info1 & RX_PPDU_START_INFO1_L_SIG_RATE_SELECT;
+ rate = MS(info1, RX_PPDU_START_INFO1_L_SIG_RATE);
+ rate &= ~RX_PPDU_START_RATE_FLAG;
+
+ sband = &ar->mac.sbands[status->band];
+ status->rate_idx = ath10k_mac_hw_rate_to_idx(sband, rate, cck);
+ break;
+ case HTT_RX_HT:
+ case HTT_RX_HT_WITH_TXBF:
+ /* HT-SIG - Table 20-11 in info2 and info3 */
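+ /* In HT the spatial stream count is implied by the MCS index:
+ * MCS 0-7 use one stream, 8-15 two, and so on, hence nss = mcs >> 3.
+ */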
+ mcs = info2 & 0x1F;
+ nss = mcs >> 3;
+ bw = (info2 >> 7) & 1;
+ sgi = (info3 >> 7) & 1;
+
+ status->rate_idx = mcs;
+ status->encoding = RX_ENC_HT;
+ if (sgi)
+ status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+ if (bw)
+ status->bw = RATE_INFO_BW_40;
+ break;
+ case HTT_RX_VHT:
+ case HTT_RX_VHT_WITH_TXBF:
+ /* VHT-SIG-A1 in info2, VHT-SIG-A2 in info3
+ * TODO check this
+ */
+ bw = info2 & 3;
+ sgi = info3 & 1;
+ stbc = (info2 >> 3) & 1;
+ group_id = (info2 >> 4) & 0x3F;
+
+ if (GROUP_ID_IS_SU_MIMO(group_id)) {
+ mcs = (info3 >> 4) & 0x0F;
+ nsts_su = ((info2 >> 10) & 0x07);
+ if (stbc)
+ nss = (nsts_su >> 2) + 1;
+ else
+ nss = (nsts_su + 1);
+ } else {
+ /* Hardware doesn't decode VHT-SIG-B into the Rx descriptor
+ * so it's impossible to decode the MCS. Also, since
+ * firmware consumes Group ID Management frames, the host
+ * has no knowledge regarding the group/user position
+ * mapping so it's impossible to pick the correct Nsts
+ * from VHT-SIG-A1.
+ *
+ * Bandwidth and SGI are valid so report the rateinfo
+ * on a best-effort basis.
+ */
+ mcs = 0;
+ nss = 1;
+ }
+
+ if (mcs > 0x09) {
+ ath10k_warn(ar, "invalid MCS received %u\n", mcs);
+ ath10k_warn(ar, "rxd %08x mpdu start %08x %08x msdu start %08x %08x ppdu start %08x %08x %08x %08x %08x\n",
+ __le32_to_cpu(rxd_attention->flags),
+ __le32_to_cpu(rxd_mpdu_start->info0),
+ __le32_to_cpu(rxd_mpdu_start->info1),
+ __le32_to_cpu(rxd_msdu_start_common->info0),
+ __le32_to_cpu(rxd_msdu_start_common->info1),
+ rxd_ppdu_start->info0,
+ __le32_to_cpu(rxd_ppdu_start->info1),
+ __le32_to_cpu(rxd_ppdu_start->info2),
+ __le32_to_cpu(rxd_ppdu_start->info3),
+ __le32_to_cpu(rxd_ppdu_start->info4));
+
+ ath10k_warn(ar, "msdu end %08x mpdu end %08x\n",
+ __le32_to_cpu(rxd_msdu_end_common->info0),
+ __le32_to_cpu(rxd_mpdu_end->info0));
+
+ ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL,
+ "rx desc msdu payload: ",
+ rxd_msdu_payload, 50);
+ }
+
+ status->rate_idx = mcs;
+ status->nss = nss;
+
+ if (sgi)
+ status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+
+ status->bw = ath10k_bw_to_mac80211_bw(bw);
+ status->encoding = RX_ENC_VHT;
+ break;
+ default:
+ break;
+ }
+}
+
+static struct ieee80211_channel *
+ath10k_htt_rx_h_peer_channel(struct ath10k *ar, struct htt_rx_desc *rxd)
+{
+ struct ath10k_hw_params *hw = &ar->hw_params;
+ struct rx_attention *rxd_attention;
+ struct rx_msdu_end_common *rxd_msdu_end_common;
+ struct rx_mpdu_start *rxd_mpdu_start;
+ struct ath10k_peer *peer;
+ struct ath10k_vif *arvif;
+ struct cfg80211_chan_def def;
+ u16 peer_id;
+
+ lockdep_assert_held(&ar->data_lock);
+
+ if (!rxd)
+ return NULL;
+
+ rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);
+ rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
+ rxd_mpdu_start = ath10k_htt_rx_desc_get_mpdu_start(hw, rxd);
+
+ if (rxd_attention->flags &
+ __cpu_to_le32(RX_ATTENTION_FLAGS_PEER_IDX_INVALID))
+ return NULL;
+
+ if (!(rxd_msdu_end_common->info0 &
+ __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU)))
+ return NULL;
+
+ peer_id = MS(__le32_to_cpu(rxd_mpdu_start->info0),
+ RX_MPDU_START_INFO0_PEER_IDX);
+
+ peer = ath10k_peer_find_by_id(ar, peer_id);
+ if (!peer)
+ return NULL;
+
+ arvif = ath10k_get_arvif(ar, peer->vdev_id);
+ if (WARN_ON_ONCE(!arvif))
+ return NULL;
+
+ if (ath10k_mac_vif_chan(arvif->vif, &def))
+ return NULL;
+
+ return def.chan;
+}
+
+static struct ieee80211_channel *
+ath10k_htt_rx_h_vdev_channel(struct ath10k *ar, u32 vdev_id)
+{
+ struct ath10k_vif *arvif;
+ struct cfg80211_chan_def def;
+
+ lockdep_assert_held(&ar->data_lock);
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ if (arvif->vdev_id == vdev_id &&
+ ath10k_mac_vif_chan(arvif->vif, &def) == 0)
+ return def.chan;
+ }
+
+ return NULL;
+}
+
+static void
+ath10k_htt_rx_h_any_chan_iter(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *conf,
+ void *data)
+{
+ struct cfg80211_chan_def *def = data;
+
+ *def = conf->def;
+}
+
+static struct ieee80211_channel *
+ath10k_htt_rx_h_any_channel(struct ath10k *ar)
+{
+ struct cfg80211_chan_def def = {};
+
+ ieee80211_iter_chan_contexts_atomic(ar->hw,
+ ath10k_htt_rx_h_any_chan_iter,
+ &def);
+
+ return def.chan;
+}
+
+static bool ath10k_htt_rx_h_channel(struct ath10k *ar,
+ struct ieee80211_rx_status *status,
+ struct htt_rx_desc *rxd,
+ u32 vdev_id)
+{
+ struct ieee80211_channel *ch;
+
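+ /* Resolve the channel by decreasing specificity: scan channel,
+ * current rx channel, the peer's channel (from the rx descriptor),
+ * the vdev's channel, any active channel context and finally the
+ * target's operating channel.
+ */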
+ spin_lock_bh(&ar->data_lock);
+ ch = ar->scan_channel;
+ if (!ch)
+ ch = ar->rx_channel;
+ if (!ch)
+ ch = ath10k_htt_rx_h_peer_channel(ar, rxd);
+ if (!ch)
+ ch = ath10k_htt_rx_h_vdev_channel(ar, vdev_id);
+ if (!ch)
+ ch = ath10k_htt_rx_h_any_channel(ar);
+ if (!ch)
+ ch = ar->tgt_oper_chan;
+ spin_unlock_bh(&ar->data_lock);
+
+ if (!ch)
+ return false;
+
+ status->band = ch->band;
+ status->freq = ch->center_freq;
+
+ return true;
+}
+
+static void ath10k_htt_rx_h_signal(struct ath10k *ar,
+ struct ieee80211_rx_status *status,
+ struct htt_rx_desc *rxd)
+{
+ struct ath10k_hw_params *hw = &ar->hw_params;
+ struct rx_ppdu_start *rxd_ppdu_start = ath10k_htt_rx_desc_get_ppdu_start(hw, rxd);
+ int i;
+
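+ /* A pri20 RSSI of 0x80 appears to mark an unused chain; valid
+ * values are reported relative to the assumed noise floor.
+ */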
+ for (i = 0; i < IEEE80211_MAX_CHAINS; i++) {
+ status->chains &= ~BIT(i);
+
+ if (rxd_ppdu_start->rssi_chains[i].pri20_mhz != 0x80) {
+ status->chain_signal[i] = ATH10K_DEFAULT_NOISE_FLOOR +
+ rxd_ppdu_start->rssi_chains[i].pri20_mhz;
+
+ status->chains |= BIT(i);
+ }
+ }
+
+ /* FIXME: Get real NF */
+ status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
+ rxd_ppdu_start->rssi_comb;
+ status->flag &= ~RX_FLAG_NO_SIGNAL_VAL;
+}
+
+static void ath10k_htt_rx_h_mactime(struct ath10k *ar,
+ struct ieee80211_rx_status *status,
+ struct htt_rx_desc *rxd)
+{
+ struct ath10k_hw_params *hw = &ar->hw_params;
+ struct rx_ppdu_end_common *rxd_ppdu_end_common;
+
+ rxd_ppdu_end_common = ath10k_htt_rx_desc_get_ppdu_end(hw, rxd);
+
+ /* FIXME: TSF is known only at the end of PPDU, in the last MPDU. This
+ * means all prior MSDUs in a PPDU are reported to mac80211 without the
+ * TSF. Is it worth holding frames until end of PPDU is known?
+ *
+ * FIXME: Can we get/compute 64bit TSF?
+ */
+ status->mactime = __le32_to_cpu(rxd_ppdu_end_common->tsf_timestamp);
+ status->flag |= RX_FLAG_MACTIME_END;
+}
+
+static void ath10k_htt_rx_h_ppdu(struct ath10k *ar,
+ struct sk_buff_head *amsdu,
+ struct ieee80211_rx_status *status,
+ u32 vdev_id)
+{
+ struct sk_buff *first;
+ struct ath10k_hw_params *hw = &ar->hw_params;
+ struct htt_rx_desc *rxd;
+ struct rx_attention *rxd_attention;
+ bool is_first_ppdu;
+ bool is_last_ppdu;
+
+ if (skb_queue_empty(amsdu))
+ return;
+
+ first = skb_peek(amsdu);
+ rxd = HTT_RX_BUF_TO_RX_DESC(hw,
+ (void *)first->data - hw->rx_desc_ops->rx_desc_size);
+
+ rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);
+
+ is_first_ppdu = !!(rxd_attention->flags &
+ __cpu_to_le32(RX_ATTENTION_FLAGS_FIRST_MPDU));
+ is_last_ppdu = !!(rxd_attention->flags &
+ __cpu_to_le32(RX_ATTENTION_FLAGS_LAST_MPDU));
+
+ if (is_first_ppdu) {
+ /* New PPDU starts so clear out the old per-PPDU status. */
+ status->freq = 0;
+ status->rate_idx = 0;
+ status->nss = 0;
+ status->encoding = RX_ENC_LEGACY;
+ status->bw = RATE_INFO_BW_20;
+
+ status->flag &= ~RX_FLAG_MACTIME;
+ status->flag |= RX_FLAG_NO_SIGNAL_VAL;
+
+ status->flag &= ~(RX_FLAG_AMPDU_IS_LAST);
+ status->flag |= RX_FLAG_AMPDU_DETAILS | RX_FLAG_AMPDU_LAST_KNOWN;
+ status->ampdu_reference = ar->ampdu_reference;
+
+ ath10k_htt_rx_h_signal(ar, status, rxd);
+ ath10k_htt_rx_h_channel(ar, status, rxd, vdev_id);
+ ath10k_htt_rx_h_rates(ar, status, rxd);
+ }
+
+ if (is_last_ppdu) {
+ ath10k_htt_rx_h_mactime(ar, status, rxd);
+
+ /* set ampdu last segment flag */
+ status->flag |= RX_FLAG_AMPDU_IS_LAST;
+ ar->ampdu_reference++;
+ }
+}
+
+static const char * const tid_to_ac[] = {
+ "BE",
+ "BK",
+ "BK",
+ "BE",
+ "VI",
+ "VI",
+ "VO",
+ "VO",
+};
+
+static char *ath10k_get_tid(struct ieee80211_hdr *hdr, char *out, size_t size)
+{
+ u8 *qc;
+ int tid;
+
+ if (!ieee80211_is_data_qos(hdr->frame_control))
+ return "";
+
+ qc = ieee80211_get_qos_ctl(hdr);
+ tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
+ if (tid < 8)
+ snprintf(out, size, "tid %d (%s)", tid, tid_to_ac[tid]);
+ else
+ snprintf(out, size, "tid %d", tid);
+
+ return out;
+}
+
+static void ath10k_htt_rx_h_queue_msdu(struct ath10k *ar,
+ struct ieee80211_rx_status *rx_status,
+ struct sk_buff *skb)
+{
+ struct ieee80211_rx_status *status;
+
+ status = IEEE80211_SKB_RXCB(skb);
+ *status = *rx_status;
+
+ skb_queue_tail(&ar->htt.rx_msdus_q, skb);
+}
+
+static void ath10k_process_rx(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct ieee80211_rx_status *status;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ char tid[32];
+
+ status = IEEE80211_SKB_RXCB(skb);
+
+ if (!(ar->filter_flags & FIF_FCSFAIL) &&
+ status->flag & RX_FLAG_FAILED_FCS_CRC) {
+ ar->stats.rx_crc_err_drop++;
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_DATA,
+ "rx skb %p len %u peer %pM %s %s sn %u %s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
+ skb,
+ skb->len,
+ ieee80211_get_SA(hdr),
+ ath10k_get_tid(hdr, tid, sizeof(tid)),
+ is_multicast_ether_addr(ieee80211_get_DA(hdr)) ?
+ "mcast" : "ucast",
+ IEEE80211_SEQ_TO_SN(__le16_to_cpu(hdr->seq_ctrl)),
+ (status->encoding == RX_ENC_LEGACY) ? "legacy" : "",
+ (status->encoding == RX_ENC_HT) ? "ht" : "",
+ (status->encoding == RX_ENC_VHT) ? "vht" : "",
+ (status->bw == RATE_INFO_BW_40) ? "40" : "",
+ (status->bw == RATE_INFO_BW_80) ? "80" : "",
+ (status->bw == RATE_INFO_BW_160) ? "160" : "",
+ status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "",
+ status->rate_idx,
+ status->nss,
+ status->freq,
+ status->band, status->flag,
+ !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
+ !!(status->flag & RX_FLAG_MMIC_ERROR),
+ !!(status->flag & RX_FLAG_AMSDU_MORE));
+ ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
+ skb->data, skb->len);
+ trace_ath10k_rx_hdr(ar, skb->data, skb->len);
+ trace_ath10k_rx_payload(ar, skb->data, skb->len);
+
+ ieee80211_rx_napi(ar->hw, NULL, skb, &ar->napi);
+}
+
+static int ath10k_htt_rx_nwifi_hdrlen(struct ath10k *ar,
+ struct ieee80211_hdr *hdr)
+{
+ int len = ieee80211_hdrlen(hdr->frame_control);
+
+ if (!test_bit(ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING,
+ ar->running_fw->fw_file.fw_features))
+ len = round_up(len, 4);
+
+ return len;
+}
+
+static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
+ struct sk_buff *msdu,
+ struct ieee80211_rx_status *status,
+ enum htt_rx_mpdu_encrypt_type enctype,
+ bool is_decrypted,
+ const u8 first_hdr[64])
+{
+ struct ieee80211_hdr *hdr;
+ struct ath10k_hw_params *hw = &ar->hw_params;
+ struct htt_rx_desc *rxd;
+ struct rx_msdu_end_common *rxd_msdu_end_common;
+ size_t hdr_len;
+ size_t crypto_len;
+ bool is_first;
+ bool is_last;
+ bool msdu_limit_err;
+ int bytes_aligned = ar->hw_params.decap_align_bytes;
+ u8 *qos;
+
+ rxd = HTT_RX_BUF_TO_RX_DESC(hw,
+ (void *)msdu->data - hw->rx_desc_ops->rx_desc_size);
+
+ rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
+ is_first = !!(rxd_msdu_end_common->info0 &
+ __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
+ is_last = !!(rxd_msdu_end_common->info0 &
+ __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
+
+ /* Delivered decapped frame:
+ * [802.11 header]
+ * [crypto param] <-- can be trimmed if !fcs_err &&
+ * !decrypt_err && !peer_idx_invalid
+ * [amsdu header] <-- only if A-MSDU
+ * [rfc1042/llc]
+ * [payload]
+ * [FCS] <-- at end, needs to be trimmed
+ */
+
+ /* Some hardware (QCA99x0 variants) limits the number of msdus per
+ * a-msdu when deaggregating, so that unwanted MSDU-deaggregation is
+ * avoided for error packets. If the limit is exceeded, hw sends all
+ * remaining MSDUs as a single last MSDU with the msdu limit error set.
+ */
+ msdu_limit_err = ath10k_htt_rx_desc_msdu_limit_error(hw, rxd);
+
+ /* If an MSDU limit error occurred, don't warn; a partial raw MSDU
+ * without the first MSDU is expected in that case and is handled below.
+ */
+ /* This probably shouldn't happen but warn just in case */
+ if (WARN_ON_ONCE(!is_first && !msdu_limit_err))
+ return;
+
+ /* This probably shouldn't happen but warn just in case */
+ if (WARN_ON_ONCE(!(is_first && is_last) && !msdu_limit_err))
+ return;
+
+ skb_trim(msdu, msdu->len - FCS_LEN);
+
+ /* Push original 80211 header */
+ if (unlikely(msdu_limit_err)) {
+ hdr = (struct ieee80211_hdr *)first_hdr;
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+ crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);
+
+ if (ieee80211_is_data_qos(hdr->frame_control)) {
+ qos = ieee80211_get_qos_ctl(hdr);
+ qos[0] |= IEEE80211_QOS_CTL_A_MSDU_PRESENT;
+ }
+
+ if (crypto_len)
+ memcpy(skb_push(msdu, crypto_len),
+ (void *)hdr + round_up(hdr_len, bytes_aligned),
+ crypto_len);
+
+ memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
+ }
+
+ /* In most cases this will be true for sniffed frames. It makes sense
+ * to deliver them as-is without stripping the crypto param. This is
+ * necessary for software based decryption.
+ *
+ * If there's no error then the frame is decrypted. At least that is
+ * the case for frames that come in via fragmented rx indication.
+ */
+ if (!is_decrypted)
+ return;
+
+ /* The payload is decrypted so strip crypto params. Start from tail
+ * since hdr is used to compute some stuff.
+ */
+
+ hdr = (void *)msdu->data;
+
+ /* Tail */
+ if (status->flag & RX_FLAG_IV_STRIPPED) {
+ skb_trim(msdu, msdu->len -
+ ath10k_htt_rx_crypto_mic_len(ar, enctype));
+
+ skb_trim(msdu, msdu->len -
+ ath10k_htt_rx_crypto_icv_len(ar, enctype));
+ } else {
+ /* MIC */
+ if (status->flag & RX_FLAG_MIC_STRIPPED)
+ skb_trim(msdu, msdu->len -
+ ath10k_htt_rx_crypto_mic_len(ar, enctype));
+
+ /* ICV */
+ if (status->flag & RX_FLAG_ICV_STRIPPED)
+ skb_trim(msdu, msdu->len -
+ ath10k_htt_rx_crypto_icv_len(ar, enctype));
+ }
+
+ /* MMIC */
+ if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
+ !ieee80211_has_morefrags(hdr->frame_control) &&
+ enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
+ skb_trim(msdu, msdu->len - MICHAEL_MIC_LEN);
+
+ /* Head */
+ if (status->flag & RX_FLAG_IV_STRIPPED) {
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+ crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);
+
+ memmove((void *)msdu->data + crypto_len,
+ (void *)msdu->data, hdr_len);
+ skb_pull(msdu, crypto_len);
+ }
+}
+
+static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
+ struct sk_buff *msdu,
+ struct ieee80211_rx_status *status,
+ const u8 first_hdr[64],
+ enum htt_rx_mpdu_encrypt_type enctype)
+{
+ struct ath10k_hw_params *hw = &ar->hw_params;
+ struct ieee80211_hdr *hdr;
+ struct htt_rx_desc *rxd;
+ size_t hdr_len;
+ u8 da[ETH_ALEN];
+ u8 sa[ETH_ALEN];
+ int l3_pad_bytes;
+ int bytes_aligned = ar->hw_params.decap_align_bytes;
+
+ /* Delivered decapped frame:
+ * [nwifi 802.11 header] <-- replaced with 802.11 hdr
+ * [rfc1042/llc]
+ *
+ * Note: The nwifi header doesn't have QoS Control and is
+ * (always?) a 3addr frame.
+ *
+ * Note2: There's no A-MSDU subframe header, even if the frame is
+ * part of an A-MSDU.
+ */
+
+ /* pull decapped header and copy SA & DA */
+ rxd = HTT_RX_BUF_TO_RX_DESC(hw, (void *)msdu->data -
+ hw->rx_desc_ops->rx_desc_size);
+
+ l3_pad_bytes = ath10k_htt_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
+ skb_put(msdu, l3_pad_bytes);
+
+ hdr = (struct ieee80211_hdr *)(msdu->data + l3_pad_bytes);
+
+ hdr_len = ath10k_htt_rx_nwifi_hdrlen(ar, hdr);
+ ether_addr_copy(da, ieee80211_get_DA(hdr));
+ ether_addr_copy(sa, ieee80211_get_SA(hdr));
+ skb_pull(msdu, hdr_len);
+
+ /* push original 802.11 header */
+ hdr = (struct ieee80211_hdr *)first_hdr;
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+
+ if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
+ memcpy(skb_push(msdu,
+ ath10k_htt_rx_crypto_param_len(ar, enctype)),
+ (void *)hdr + round_up(hdr_len, bytes_aligned),
+ ath10k_htt_rx_crypto_param_len(ar, enctype));
+ }
+
+ memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
+
+ /* original 802.11 header has a different DA and in
+ * case of 4addr it may also have different SA
+ */
+ hdr = (struct ieee80211_hdr *)msdu->data;
+ ether_addr_copy(ieee80211_get_DA(hdr), da);
+ ether_addr_copy(ieee80211_get_SA(hdr), sa);
+}
+
+static void *ath10k_htt_rx_h_find_rfc1042(struct ath10k *ar,
+ struct sk_buff *msdu,
+ enum htt_rx_mpdu_encrypt_type enctype)
+{
+ struct ieee80211_hdr *hdr;
+ struct ath10k_hw_params *hw = &ar->hw_params;
+ struct htt_rx_desc *rxd;
+ struct rx_msdu_end_common *rxd_msdu_end_common;
+ u8 *rxd_rx_hdr_status;
+ size_t hdr_len, crypto_len;
+ void *rfc1042;
+ bool is_first, is_last, is_amsdu;
+ int bytes_aligned = ar->hw_params.decap_align_bytes;
+
+ rxd = HTT_RX_BUF_TO_RX_DESC(hw,
+ (void *)msdu->data - hw->rx_desc_ops->rx_desc_size);
+
+ rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
+ rxd_rx_hdr_status = ath10k_htt_rx_desc_get_rx_hdr_status(hw, rxd);
+ hdr = (void *)rxd_rx_hdr_status;
+
+ is_first = !!(rxd_msdu_end_common->info0 &
+ __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
+ is_last = !!(rxd_msdu_end_common->info0 &
+ __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
+ is_amsdu = !(is_first && is_last);
+
+ rfc1042 = hdr;
+
+ if (is_first) {
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+ crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);
+
+ rfc1042 += round_up(hdr_len, bytes_aligned) +
+ round_up(crypto_len, bytes_aligned);
+ }
+
+ if (is_amsdu)
+ rfc1042 += sizeof(struct amsdu_subframe_hdr);
+
+ return rfc1042;
+}
+
+static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar,
+ struct sk_buff *msdu,
+ struct ieee80211_rx_status *status,
+ const u8 first_hdr[64],
+ enum htt_rx_mpdu_encrypt_type enctype)
+{
+ struct ath10k_hw_params *hw = &ar->hw_params;
+ struct ieee80211_hdr *hdr;
+ struct ethhdr *eth;
+ size_t hdr_len;
+ void *rfc1042;
+ u8 da[ETH_ALEN];
+ u8 sa[ETH_ALEN];
+ int l3_pad_bytes;
+ struct htt_rx_desc *rxd;
+ int bytes_aligned = ar->hw_params.decap_align_bytes;
+
+ /* Delivered decapped frame:
+ * [eth header] <-- replaced with 802.11 hdr & rfc1042/llc
+ * [payload]
+ */
+
+ rfc1042 = ath10k_htt_rx_h_find_rfc1042(ar, msdu, enctype);
+ if (WARN_ON_ONCE(!rfc1042))
+ return;
+
+ rxd = HTT_RX_BUF_TO_RX_DESC(hw,
+ (void *)msdu->data - hw->rx_desc_ops->rx_desc_size);
+
+ l3_pad_bytes = ath10k_htt_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
+ skb_put(msdu, l3_pad_bytes);
+ skb_pull(msdu, l3_pad_bytes);
+
+ /* pull decapped header and copy SA & DA */
+ eth = (struct ethhdr *)msdu->data;
+ ether_addr_copy(da, eth->h_dest);
+ ether_addr_copy(sa, eth->h_source);
+ skb_pull(msdu, sizeof(struct ethhdr));
+
+ /* push rfc1042/llc/snap */
+ memcpy(skb_push(msdu, sizeof(struct rfc1042_hdr)), rfc1042,
+ sizeof(struct rfc1042_hdr));
+
+ /* push original 802.11 header */
+ hdr = (struct ieee80211_hdr *)first_hdr;
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+
+ if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
+ memcpy(skb_push(msdu,
+ ath10k_htt_rx_crypto_param_len(ar, enctype)),
+ (void *)hdr + round_up(hdr_len, bytes_aligned),
+ ath10k_htt_rx_crypto_param_len(ar, enctype));
+ }
+
+ memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
+
+ /* original 802.11 header has a different DA and in
+ * case of 4addr it may also have different SA
+ */
+ hdr = (struct ieee80211_hdr *)msdu->data;
+ ether_addr_copy(ieee80211_get_DA(hdr), da);
+ ether_addr_copy(ieee80211_get_SA(hdr), sa);
+}
+
+static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar,
+ struct sk_buff *msdu,
+ struct ieee80211_rx_status *status,
+ const u8 first_hdr[64],
+ enum htt_rx_mpdu_encrypt_type enctype)
+{
+ struct ath10k_hw_params *hw = &ar->hw_params;
+ struct ieee80211_hdr *hdr;
+ size_t hdr_len;
+ int l3_pad_bytes;
+ struct htt_rx_desc *rxd;
+ int bytes_aligned = ar->hw_params.decap_align_bytes;
+
+ /* Delivered decapped frame:
+ * [amsdu header] <-- replaced with 802.11 hdr
+ * [rfc1042/llc]
+ * [payload]
+ */
+
+ rxd = HTT_RX_BUF_TO_RX_DESC(hw,
+ (void *)msdu->data - hw->rx_desc_ops->rx_desc_size);
+
+ l3_pad_bytes = ath10k_htt_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
+
+ skb_put(msdu, l3_pad_bytes);
+ skb_pull(msdu, sizeof(struct amsdu_subframe_hdr) + l3_pad_bytes);
+
+ hdr = (struct ieee80211_hdr *)first_hdr;
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+
+ if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
+ memcpy(skb_push(msdu,
+ ath10k_htt_rx_crypto_param_len(ar, enctype)),
+ (void *)hdr + round_up(hdr_len, bytes_aligned),
+ ath10k_htt_rx_crypto_param_len(ar, enctype));
+ }
+
+ memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
+}
+
+static void ath10k_htt_rx_h_undecap(struct ath10k *ar,
+ struct sk_buff *msdu,
+ struct ieee80211_rx_status *status,
+ u8 first_hdr[64],
+ enum htt_rx_mpdu_encrypt_type enctype,
+ bool is_decrypted)
+{
+ struct ath10k_hw_params *hw = &ar->hw_params;
+ struct htt_rx_desc *rxd;
+ struct rx_msdu_start_common *rxd_msdu_start_common;
+ enum rx_msdu_decap_format decap;
+
+ /* First msdu's decapped header:
+ * [802.11 header] <-- padded to 4 bytes long
+ * [crypto param] <-- padded to 4 bytes long
+ * [amsdu header] <-- only if A-MSDU
+ * [rfc1042/llc]
+ *
+ * Other (2nd, 3rd, ..) msdu's decapped header:
+ * [amsdu header] <-- only if A-MSDU
+ * [rfc1042/llc]
+ */
+
+ rxd = HTT_RX_BUF_TO_RX_DESC(hw,
+ (void *)msdu->data - hw->rx_desc_ops->rx_desc_size);
+
+ rxd_msdu_start_common = ath10k_htt_rx_desc_get_msdu_start(hw, rxd);
+ decap = MS(__le32_to_cpu(rxd_msdu_start_common->info1),
+ RX_MSDU_START_INFO1_DECAP_FORMAT);
+
+ switch (decap) {
+ case RX_MSDU_DECAP_RAW:
+ ath10k_htt_rx_h_undecap_raw(ar, msdu, status, enctype,
+ is_decrypted, first_hdr);
+ break;
+ case RX_MSDU_DECAP_NATIVE_WIFI:
+ ath10k_htt_rx_h_undecap_nwifi(ar, msdu, status, first_hdr,
+ enctype);
+ break;
+ case RX_MSDU_DECAP_ETHERNET2_DIX:
+ ath10k_htt_rx_h_undecap_eth(ar, msdu, status, first_hdr, enctype);
+ break;
+ case RX_MSDU_DECAP_8023_SNAP_LLC:
+ ath10k_htt_rx_h_undecap_snap(ar, msdu, status, first_hdr,
+ enctype);
+ break;
+ }
+}
+
+static int ath10k_htt_rx_get_csum_state(struct ath10k_hw_params *hw, struct sk_buff *skb)
+{
+ struct htt_rx_desc *rxd;
+ struct rx_attention *rxd_attention;
+ struct rx_msdu_start_common *rxd_msdu_start_common;
+ u32 flags, info;
+ bool is_ip4, is_ip6;
+ bool is_tcp, is_udp;
+ bool ip_csum_ok, tcpudp_csum_ok;
+
+ rxd = HTT_RX_BUF_TO_RX_DESC(hw,
+ (void *)skb->data - hw->rx_desc_ops->rx_desc_size);
+
+ rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);
+ rxd_msdu_start_common = ath10k_htt_rx_desc_get_msdu_start(hw, rxd);
+ flags = __le32_to_cpu(rxd_attention->flags);
+ info = __le32_to_cpu(rxd_msdu_start_common->info1);
+
+ is_ip4 = !!(info & RX_MSDU_START_INFO1_IPV4_PROTO);
+ is_ip6 = !!(info & RX_MSDU_START_INFO1_IPV6_PROTO);
+ is_tcp = !!(info & RX_MSDU_START_INFO1_TCP_PROTO);
+ is_udp = !!(info & RX_MSDU_START_INFO1_UDP_PROTO);
+ ip_csum_ok = !(flags & RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL);
+ tcpudp_csum_ok = !(flags & RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL);
+
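+ /* Claim CHECKSUM_UNNECESSARY only when the frame is IPv4/IPv6
+ * TCP or UDP and hw verified both the IP and TCP/UDP checksums;
+ * otherwise report CHECKSUM_NONE and let the stack verify.
+ */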
+ if (!is_ip4 && !is_ip6)
+ return CHECKSUM_NONE;
+ if (!is_tcp && !is_udp)
+ return CHECKSUM_NONE;
+ if (!ip_csum_ok)
+ return CHECKSUM_NONE;
+ if (!tcpudp_csum_ok)
+ return CHECKSUM_NONE;
+
+ return CHECKSUM_UNNECESSARY;
+}
+
+static void ath10k_htt_rx_h_csum_offload(struct ath10k_hw_params *hw,
+ struct sk_buff *msdu)
+{
+ msdu->ip_summed = ath10k_htt_rx_get_csum_state(hw, msdu);
+}
+
+static u64 ath10k_htt_rx_h_get_pn(struct ath10k *ar, struct sk_buff *skb,
+ enum htt_rx_mpdu_encrypt_type enctype)
+{
+ struct ieee80211_hdr *hdr;
+ u64 pn = 0;
+ u8 *ehdr;
+
+ hdr = (struct ieee80211_hdr *)skb->data;
+ ehdr = skb->data + ieee80211_hdrlen(hdr->frame_control);
+
+ if (enctype == HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2) {
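+ /* CCMP header layout: PN0 PN1 <rsvd> <KeyID/ExtIV> PN2 PN3 PN4
+ * PN5 - bytes 2 and 3 are skipped when assembling the 48-bit PN.
+ */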
+ pn = ehdr[0];
+ pn |= (u64)ehdr[1] << 8;
+ pn |= (u64)ehdr[4] << 16;
+ pn |= (u64)ehdr[5] << 24;
+ pn |= (u64)ehdr[6] << 32;
+ pn |= (u64)ehdr[7] << 40;
+ }
+ return pn;
+}
+
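+/* A fragment is only acceptable when its DA is unicast; fragmented
+ * multicast/broadcast frames are invalid and get dropped.
+ */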
+static bool ath10k_htt_rx_h_frag_multicast_check(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr;
+
+ hdr = (struct ieee80211_hdr *)skb->data;
+ return !is_multicast_ether_addr(hdr->addr1);
+}
+
+static bool ath10k_htt_rx_h_frag_pn_check(struct ath10k *ar,
+ struct sk_buff *skb,
+ u16 peer_id,
+ enum htt_rx_mpdu_encrypt_type enctype)
+{
+ struct ath10k_peer *peer;
+ union htt_rx_pn_t *last_pn, new_pn = {};
+ struct ieee80211_hdr *hdr;
+ u8 tid, frag_number;
+ u32 seq;
+
+ peer = ath10k_peer_find_by_id(ar, peer_id);
+ if (!peer) {
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "invalid peer for frag pn check\n");
+ return false;
+ }
+
+ hdr = (struct ieee80211_hdr *)skb->data;
+ if (ieee80211_is_data_qos(hdr->frame_control))
+ tid = ieee80211_get_tid(hdr);
+ else
+ tid = ATH10K_TXRX_NON_QOS_TID;
+
+ last_pn = &peer->frag_tids_last_pn[tid];
+ new_pn.pn48 = ath10k_htt_rx_h_get_pn(ar, skb, enctype);
+ frag_number = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
+ seq = IEEE80211_SEQ_TO_SN(__le16_to_cpu(hdr->seq_ctrl));
+
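+ /* The first fragment records the PN and sequence number; each
+ * later fragment must carry the same sequence number and a PN
+ * exactly one greater than its predecessor's.
+ */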
+ if (frag_number == 0) {
+ last_pn->pn48 = new_pn.pn48;
+ peer->frag_tids_seq[tid] = seq;
+ } else {
+ if (seq != peer->frag_tids_seq[tid])
+ return false;
+
+ if (new_pn.pn48 != last_pn->pn48 + 1)
+ return false;
+
+ last_pn->pn48 = new_pn.pn48;
+ }
+
+ return true;
+}
+
+static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
+ struct sk_buff_head *amsdu,
+ struct ieee80211_rx_status *status,
+ bool fill_crypt_header,
+ u8 *rx_hdr,
+ enum ath10k_pkt_rx_err *err,
+ u16 peer_id,
+ bool frag)
+{
+ struct sk_buff *first;
+ struct sk_buff *last;
+ struct sk_buff *msdu, *temp;
+ struct ath10k_hw_params *hw = &ar->hw_params;
+ struct htt_rx_desc *rxd;
+ struct rx_attention *rxd_attention;
+ struct rx_mpdu_start *rxd_mpdu_start;
+
+ struct ieee80211_hdr *hdr;
+ enum htt_rx_mpdu_encrypt_type enctype;
+ u8 first_hdr[64];
+ u8 *qos;
+ bool has_fcs_err;
+ bool has_crypto_err;
+ bool has_tkip_err;
+ bool has_peer_idx_invalid;
+ bool is_decrypted;
+ bool is_mgmt;
+ u32 attention;
+ bool frag_pn_check = true, multicast_check = true;
+
+ if (skb_queue_empty(amsdu))
+ return;
+
+ first = skb_peek(amsdu);
+ rxd = HTT_RX_BUF_TO_RX_DESC(hw,
+ (void *)first->data - hw->rx_desc_ops->rx_desc_size);
+
+ rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);
+ rxd_mpdu_start = ath10k_htt_rx_desc_get_mpdu_start(hw, rxd);
+
+ is_mgmt = !!(rxd_attention->flags &
+ __cpu_to_le32(RX_ATTENTION_FLAGS_MGMT_TYPE));
+
+ enctype = MS(__le32_to_cpu(rxd_mpdu_start->info0),
+ RX_MPDU_START_INFO0_ENCRYPT_TYPE);
+
+ /* The first MSDU's Rx descriptor in an A-MSDU contains the full
+ * decapped 802.11 header. It'll be used for undecapping each MSDU.
+ */
+ hdr = (void *)ath10k_htt_rx_desc_get_rx_hdr_status(hw, rxd);
+ memcpy(first_hdr, hdr, RX_HTT_HDR_STATUS_LEN);
+
+ if (rx_hdr)
+ memcpy(rx_hdr, hdr, RX_HTT_HDR_STATUS_LEN);
+
+ /* Each A-MSDU subframe will use the original header as the base and be
+ * reported as a separate MSDU so strip the A-MSDU bit from QoS Ctl.
+ */
+ hdr = (void *)first_hdr;
+
+ if (ieee80211_is_data_qos(hdr->frame_control)) {
+ qos = ieee80211_get_qos_ctl(hdr);
+ qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
+ }
+
+ /* Some attention flags are valid only in the last MSDU. */
+ last = skb_peek_tail(amsdu);
+ rxd = HTT_RX_BUF_TO_RX_DESC(hw,
+ (void *)last->data - hw->rx_desc_ops->rx_desc_size);
+
+ rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);
+ attention = __le32_to_cpu(rxd_attention->flags);
+
+ has_fcs_err = !!(attention & RX_ATTENTION_FLAGS_FCS_ERR);
+ has_crypto_err = !!(attention & RX_ATTENTION_FLAGS_DECRYPT_ERR);
+ has_tkip_err = !!(attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR);
+ has_peer_idx_invalid = !!(attention & RX_ATTENTION_FLAGS_PEER_IDX_INVALID);
+
+ /* Note: If hardware captures an encrypted frame that it can't decrypt,
+ * e.g. due to fcs error, missing peer or invalid key data it will
+ * report the frame as raw.
+ */
+ is_decrypted = (enctype != HTT_RX_MPDU_ENCRYPT_NONE &&
+ !has_fcs_err &&
+ !has_crypto_err &&
+ !has_peer_idx_invalid);
+
+ /* Clear per-MPDU flags while leaving per-PPDU flags intact. */
+ status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
+ RX_FLAG_MMIC_ERROR |
+ RX_FLAG_DECRYPTED |
+ RX_FLAG_IV_STRIPPED |
+ RX_FLAG_ONLY_MONITOR |
+ RX_FLAG_MMIC_STRIPPED);
+
+ if (has_fcs_err)
+ status->flag |= RX_FLAG_FAILED_FCS_CRC;
+
+ if (has_tkip_err)
+ status->flag |= RX_FLAG_MMIC_ERROR;
+
+ if (err) {
+ if (has_fcs_err)
+ *err = ATH10K_PKT_RX_ERR_FCS;
+ else if (has_tkip_err)
+ *err = ATH10K_PKT_RX_ERR_TKIP;
+ else if (has_crypto_err)
+ *err = ATH10K_PKT_RX_ERR_CRYPT;
+ else if (has_peer_idx_invalid)
+ *err = ATH10K_PKT_RX_ERR_PEER_IDX_INVAL;
+ }
+
+ /* Firmware reports all necessary management frames via WMI already.
+ * They are not reported to monitor interfaces at all so pass the ones
+ * coming via HTT to monitor interfaces instead. This simplifies
+ * matters a lot.
+ */
+ if (is_mgmt)
+ status->flag |= RX_FLAG_ONLY_MONITOR;
+
+ if (is_decrypted) {
+ status->flag |= RX_FLAG_DECRYPTED;
+
+ if (likely(!is_mgmt))
+ status->flag |= RX_FLAG_MMIC_STRIPPED;
+
+ if (fill_crypt_header)
+ status->flag |= RX_FLAG_MIC_STRIPPED |
+ RX_FLAG_ICV_STRIPPED;
+ else
+ status->flag |= RX_FLAG_IV_STRIPPED;
+ }
+
+ skb_queue_walk(amsdu, msdu) {
+ if (frag && !fill_crypt_header && is_decrypted &&
+ enctype == HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2)
+ frag_pn_check = ath10k_htt_rx_h_frag_pn_check(ar,
+ msdu,
+ peer_id,
+ enctype);
+
+ if (frag)
+ multicast_check = ath10k_htt_rx_h_frag_multicast_check(ar,
+ msdu);
+
+ if (!frag_pn_check || !multicast_check) {
+ /* Discard the fragment with invalid PN or multicast DA
+ */
+ temp = msdu->prev;
+ __skb_unlink(msdu, amsdu);
+ dev_kfree_skb_any(msdu);
+ msdu = temp;
+ frag_pn_check = true;
+ multicast_check = true;
+ continue;
+ }
+
+ ath10k_htt_rx_h_csum_offload(&ar->hw_params, msdu);
+
+ if (frag && !fill_crypt_header &&
+ enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
+ status->flag &= ~RX_FLAG_MMIC_STRIPPED;
+
+ ath10k_htt_rx_h_undecap(ar, msdu, status, first_hdr, enctype,
+ is_decrypted);
+
+ /* Undecapping involves copying the original 802.11 header back
+ * to sk_buff. If frame is protected and hardware has decrypted
+ * it then remove the protected bit.
+ */
+ if (!is_decrypted)
+ continue;
+ if (is_mgmt)
+ continue;
+
+ if (fill_crypt_header)
+ continue;
+
+ hdr = (void *)msdu->data;
+ hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
+
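+ /* The combined mask (~IV_STRIPPED & ~MMIC_STRIPPED) clears both
+ * flags in one step so mac80211 performs the TKIP Michael MIC
+ * verification on defragmented frames itself.
+ */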
+ if (frag && !fill_crypt_header &&
+ enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
+ status->flag &= ~RX_FLAG_IV_STRIPPED &
+ ~RX_FLAG_MMIC_STRIPPED;
+ }
+}
+
+static void ath10k_htt_rx_h_enqueue(struct ath10k *ar,
+ struct sk_buff_head *amsdu,
+ struct ieee80211_rx_status *status)
+{
+ struct sk_buff *msdu;
+ struct sk_buff *first_subframe;
+
+ first_subframe = skb_peek(amsdu);
+
+ while ((msdu = __skb_dequeue(amsdu))) {
+ /* Setup per-MSDU flags */
+ if (skb_queue_empty(amsdu))
+ status->flag &= ~RX_FLAG_AMSDU_MORE;
+ else
+ status->flag |= RX_FLAG_AMSDU_MORE;
+
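+ /* All subframes of an A-MSDU share the MPDU's PN, so allow
+ * duplicate PNs on everything but the first subframe to keep
+ * mac80211's replay detection from dropping them.
+ */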
+ if (msdu == first_subframe) {
+ first_subframe = NULL;
+ status->flag &= ~RX_FLAG_ALLOW_SAME_PN;
+ } else {
+ status->flag |= RX_FLAG_ALLOW_SAME_PN;
+ }
+
+ ath10k_htt_rx_h_queue_msdu(ar, status, msdu);
+ }
+}
+
+static int ath10k_unchain_msdu(struct sk_buff_head *amsdu,
+ unsigned long *unchain_cnt)
+{
+ struct sk_buff *skb, *first;
+ int space;
+ int total_len = 0;
+ int amsdu_len = skb_queue_len(amsdu);
+
+ /* TODO: We might be able to optimize this by using
+ * skb_try_coalesce or a similar method to
+ * reduce copying, or maybe get mac80211 to
+ * provide a way to just receive a list of
+ * skbs?
+ */
+
+ first = __skb_dequeue(amsdu);
+
+ /* Allocate total length all at once. */
+ skb_queue_walk(amsdu, skb)
+ total_len += skb->len;
+
+ space = total_len - skb_tailroom(first);
+ if ((space > 0) &&
+ (pskb_expand_head(first, 0, space, GFP_ATOMIC) < 0)) {
+ /* TODO: bump some rx-oom error stat */
+ /* put it back together so we can free the
+ * whole list at once.
+ */
+ __skb_queue_head(amsdu, first);
+ return -1;
+ }
+
+ /* Walk list again, copying contents into
+ * msdu_head
+ */
+ while ((skb = __skb_dequeue(amsdu))) {
+ skb_copy_from_linear_data(skb, skb_put(first, skb->len),
+ skb->len);
+ dev_kfree_skb_any(skb);
+ }
+
+ __skb_queue_head(amsdu, first);
+
+ *unchain_cnt += amsdu_len - 1;
+
+ return 0;
+}
+
+static void ath10k_htt_rx_h_unchain(struct ath10k *ar,
+ struct sk_buff_head *amsdu,
+ unsigned long *drop_cnt,
+ unsigned long *unchain_cnt)
+{
+ struct sk_buff *first;
+ struct ath10k_hw_params *hw = &ar->hw_params;
+ struct htt_rx_desc *rxd;
+ struct rx_msdu_start_common *rxd_msdu_start_common;
+ struct rx_frag_info_common *rxd_frag_info;
+ enum rx_msdu_decap_format decap;
+
+ first = skb_peek(amsdu);
+ rxd = HTT_RX_BUF_TO_RX_DESC(hw,
+ (void *)first->data - hw->rx_desc_ops->rx_desc_size);
+
+ rxd_msdu_start_common = ath10k_htt_rx_desc_get_msdu_start(hw, rxd);
+ rxd_frag_info = ath10k_htt_rx_desc_get_frag_info(hw, rxd);
+ decap = MS(__le32_to_cpu(rxd_msdu_start_common->info1),
+ RX_MSDU_START_INFO1_DECAP_FORMAT);
+
+ /* FIXME: Current unchaining logic can only handle simple case of raw
+ * msdu chaining. If decapping is other than raw the chaining may be
+ * more complex and this isn't handled by the current code. Don't even
+ * try re-constructing such frames - it'll be pretty much garbage.
+ */
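+ /* ring2_more_count is the number of continuation buffers that
+ * follow the first one, so a sane chain has 1 + that many skbs.
+ */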
+ if (decap != RX_MSDU_DECAP_RAW ||
+ skb_queue_len(amsdu) != 1 + rxd_frag_info->ring2_more_count) {
+ *drop_cnt += skb_queue_len(amsdu);
+ __skb_queue_purge(amsdu);
+ return;
+ }
+
+ ath10k_unchain_msdu(amsdu, unchain_cnt);
+}
+
+static bool ath10k_htt_rx_validate_amsdu(struct ath10k *ar,
+ struct sk_buff_head *amsdu)
+{
+ u8 *subframe_hdr;
+ struct sk_buff *first;
+ bool is_first, is_last;
+ struct ath10k_hw_params *hw = &ar->hw_params;
+ struct htt_rx_desc *rxd;
+ struct rx_msdu_end_common *rxd_msdu_end_common;
+ struct rx_mpdu_start *rxd_mpdu_start;
+ struct ieee80211_hdr *hdr;
+ size_t hdr_len, crypto_len;
+ enum htt_rx_mpdu_encrypt_type enctype;
+ int bytes_aligned = ar->hw_params.decap_align_bytes;
+
+ first = skb_peek(amsdu);
+
+ rxd = HTT_RX_BUF_TO_RX_DESC(hw,
+ (void *)first->data - hw->rx_desc_ops->rx_desc_size);
+
+ rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
+ rxd_mpdu_start = ath10k_htt_rx_desc_get_mpdu_start(hw, rxd);
+ hdr = (void *)ath10k_htt_rx_desc_get_rx_hdr_status(hw, rxd);
+
+ is_first = !!(rxd_msdu_end_common->info0 &
+ __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
+ is_last = !!(rxd_msdu_end_common->info0 &
+ __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
+
+ /* Return in case of non-aggregated msdu */
+ if (is_first && is_last)
+ return true;
+
+ /* First msdu flag is not set for the first msdu of the list */
+ if (!is_first)
+ return false;
+
+ enctype = MS(__le32_to_cpu(rxd_mpdu_start->info0),
+ RX_MPDU_START_INFO0_ENCRYPT_TYPE);
+
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+ crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);
+
+ subframe_hdr = (u8 *)hdr + round_up(hdr_len, bytes_aligned) +
+ crypto_len;
+
+ /* Validate that the amsdu has a proper first subframe.
+ * A single msdu may be received as an amsdu when the
+ * unauthenticated amsdu flag of a QoS header gets flipped
+ * in non-SPP AMSDUs; in such cases the first subframe
+ * carries an llc/snap header in place of a valid da.
+ * Return false if the da matches the rfc1042 pattern.
+ */
+ if (ether_addr_equal(subframe_hdr, rfc1042_header))
+ return false;
+
+ return true;
+}
+
+static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar,
+ struct sk_buff_head *amsdu,
+ struct ieee80211_rx_status *rx_status)
+{
+ if (!rx_status->freq) {
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "no channel configured; ignoring frame(s)!\n");
+ return false;
+ }
+
+ if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) {
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx cac running\n");
+ return false;
+ }
+
+ if (!ath10k_htt_rx_validate_amsdu(ar, amsdu)) {
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "invalid amsdu received\n");
+ return false;
+ }
+
+ return true;
+}
+
+static void ath10k_htt_rx_h_filter(struct ath10k *ar,
+ struct sk_buff_head *amsdu,
+ struct ieee80211_rx_status *rx_status,
+ unsigned long *drop_cnt)
+{
+ if (skb_queue_empty(amsdu))
+ return;
+
+ if (ath10k_htt_rx_amsdu_allowed(ar, amsdu, rx_status))
+ return;
+
+ if (drop_cnt)
+ *drop_cnt += skb_queue_len(amsdu);
+
+ __skb_queue_purge(amsdu);
+}
+
+static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
+{
+ struct ath10k *ar = htt->ar;
+ struct ieee80211_rx_status *rx_status = &htt->rx_status;
+ struct sk_buff_head amsdu;
+ int ret;
+ unsigned long drop_cnt = 0;
+ unsigned long unchain_cnt = 0;
+ unsigned long drop_cnt_filter = 0;
+ unsigned long msdus_to_queue, num_msdus;
+ enum ath10k_pkt_rx_err err = ATH10K_PKT_RX_ERR_MAX;
+ u8 first_hdr[RX_HTT_HDR_STATUS_LEN];
+
+ __skb_queue_head_init(&amsdu);
+
+ spin_lock_bh(&htt->rx_ring.lock);
+ if (htt->rx_confused) {
+ spin_unlock_bh(&htt->rx_ring.lock);
+ return -EIO;
+ }
+ ret = ath10k_htt_rx_amsdu_pop(htt, &amsdu);
+ spin_unlock_bh(&htt->rx_ring.lock);
+
+ if (ret < 0) {
+ ath10k_warn(ar, "rx ring became corrupted: %d\n", ret);
+ __skb_queue_purge(&amsdu);
+ /* FIXME: It's probably a good idea to reboot the
+ * device instead of leaving it inoperable.
+ */
+ htt->rx_confused = true;
+ return ret;
+ }
+
+ num_msdus = skb_queue_len(&amsdu);
+
+ ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);
+
+ /* a positive return value (ret == 1) indicates chained msdus */
+ if (ret > 0)
+ ath10k_htt_rx_h_unchain(ar, &amsdu, &drop_cnt, &unchain_cnt);
+
+ ath10k_htt_rx_h_filter(ar, &amsdu, rx_status, &drop_cnt_filter);
+ ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status, true, first_hdr, &err, 0,
+ false);
+ msdus_to_queue = skb_queue_len(&amsdu);
+ ath10k_htt_rx_h_enqueue(ar, &amsdu, rx_status);
+
+ ath10k_sta_update_rx_tid_stats(ar, first_hdr, num_msdus, err,
+ unchain_cnt, drop_cnt, drop_cnt_filter,
+ msdus_to_queue);
+
+ return 0;
+}
+
+static void ath10k_htt_rx_mpdu_desc_pn_hl(struct htt_hl_rx_desc *rx_desc,
+ union htt_rx_pn_t *pn,
+ int pn_len_bits)
+{
+ switch (pn_len_bits) {
+ case 48:
+ pn->pn48 = __le32_to_cpu(rx_desc->pn_31_0) +
+ ((u64)(__le32_to_cpu(rx_desc->u0.pn_63_32) & 0xFFFF) << 32);
+ break;
+ case 24:
+ pn->pn24 = __le32_to_cpu(rx_desc->pn_31_0);
+ break;
+ }
+}
+
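+/* Returns true (i.e. a replay) when the new 48-bit PN does not strictly
+ * increase over the last PN seen.
+ */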
+static bool ath10k_htt_rx_pn_cmp48(union htt_rx_pn_t *new_pn,
+ union htt_rx_pn_t *old_pn)
+{
+ return ((new_pn->pn48 & 0xffffffffffffULL) <=
+ (old_pn->pn48 & 0xffffffffffffULL));
+}
+
+static bool ath10k_htt_rx_pn_check_replay_hl(struct ath10k *ar,
+ struct ath10k_peer *peer,
+ struct htt_rx_indication_hl *rx)
+{
+ bool last_pn_valid, pn_invalid = false;
+ enum htt_txrx_sec_cast_type sec_index;
+ enum htt_security_types sec_type;
+ union htt_rx_pn_t new_pn = {};
+ struct htt_hl_rx_desc *rx_desc;
+ union htt_rx_pn_t *last_pn;
+ u32 rx_desc_info, tid;
+ int num_mpdu_ranges;
+
+ lockdep_assert_held(&ar->data_lock);
+
+ if (!peer)
+ return false;
+
+ if (!(rx->fw_desc.flags & FW_RX_DESC_FLAGS_FIRST_MSDU))
+ return false;
+
+ num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
+ HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
+
+ rx_desc = (struct htt_hl_rx_desc *)&rx->mpdu_ranges[num_mpdu_ranges];
+ rx_desc_info = __le32_to_cpu(rx_desc->info);
+
+ if (!MS(rx_desc_info, HTT_RX_DESC_HL_INFO_ENCRYPTED))
+ return false;
+
+ tid = MS(rx->hdr.info0, HTT_RX_INDICATION_INFO0_EXT_TID);
+ last_pn_valid = peer->tids_last_pn_valid[tid];
+ last_pn = &peer->tids_last_pn[tid];
+
+ if (MS(rx_desc_info, HTT_RX_DESC_HL_INFO_MCAST_BCAST))
+ sec_index = HTT_TXRX_SEC_MCAST;
+ else
+ sec_index = HTT_TXRX_SEC_UCAST;
+
+ sec_type = peer->rx_pn[sec_index].sec_type;
+ ath10k_htt_rx_mpdu_desc_pn_hl(rx_desc, &new_pn, peer->rx_pn[sec_index].pn_len);
+
+ if (sec_type != HTT_SECURITY_AES_CCMP &&
+ sec_type != HTT_SECURITY_TKIP &&
+ sec_type != HTT_SECURITY_TKIP_NOMIC)
+ return false;
+
+ if (last_pn_valid)
+ pn_invalid = ath10k_htt_rx_pn_cmp48(&new_pn, last_pn);
+ else
+ peer->tids_last_pn_valid[tid] = true;
+
+ if (!pn_invalid)
+ last_pn->pn48 = new_pn.pn48;
+
+ return pn_invalid;
+}
+
+static bool ath10k_htt_rx_proc_rx_ind_hl(struct ath10k_htt *htt,
+ struct htt_rx_indication_hl *rx,
+ struct sk_buff *skb,
+ enum htt_rx_pn_check_type check_pn_type,
+ enum htt_rx_tkip_demic_type tkip_mic_type)
+{
+ struct ath10k *ar = htt->ar;
+ struct ath10k_peer *peer;
+ struct htt_rx_indication_mpdu_range *mpdu_ranges;
+ struct fw_rx_desc_hl *fw_desc;
+ enum htt_txrx_sec_cast_type sec_index;
+ enum htt_security_types sec_type;
+ union htt_rx_pn_t new_pn = {};
+ struct htt_hl_rx_desc *rx_desc;
+ struct ieee80211_hdr *hdr;
+ struct ieee80211_rx_status *rx_status;
+ u16 peer_id;
+ u8 rx_desc_len;
+ int num_mpdu_ranges;
+ size_t tot_hdr_len;
+ struct ieee80211_channel *ch;
+ bool pn_invalid, qos, first_msdu;
+ u32 tid, rx_desc_info;
+
+ peer_id = __le16_to_cpu(rx->hdr.peer_id);
+ tid = MS(rx->hdr.info0, HTT_RX_INDICATION_INFO0_EXT_TID);
+
+ spin_lock_bh(&ar->data_lock);
+ peer = ath10k_peer_find_by_id(ar, peer_id);
+ spin_unlock_bh(&ar->data_lock);
+ if (!peer && peer_id != HTT_INVALID_PEERID)
+ ath10k_warn(ar, "Got RX ind from invalid peer: %u\n", peer_id);
+
+ if (!peer)
+ return true;
+
+ num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
+ HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
+ mpdu_ranges = htt_rx_ind_get_mpdu_ranges_hl(rx);
+ fw_desc = &rx->fw_desc;
+ rx_desc_len = fw_desc->len;
+
+ if (fw_desc->u.bits.discard) {
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt discard mpdu\n");
+ goto err;
+ }
+
+ /* I have not yet seen any case where num_mpdu_ranges > 1.
+ * qcacld does not seem to handle that case either, so we adopt the
+ * same limitation here as well.
+ */
+ if (num_mpdu_ranges > 1)
+ ath10k_warn(ar,
+ "Unsupported number of MPDU ranges: %d, ignoring all but the first\n",
+ num_mpdu_ranges);
+
+ if (mpdu_ranges->mpdu_range_status !=
+ HTT_RX_IND_MPDU_STATUS_OK &&
+ mpdu_ranges->mpdu_range_status !=
+ HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR) {
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt mpdu_range_status %d\n",
+ mpdu_ranges->mpdu_range_status);
+ goto err;
+ }
+
+ rx_desc = (struct htt_hl_rx_desc *)&rx->mpdu_ranges[num_mpdu_ranges];
+ rx_desc_info = __le32_to_cpu(rx_desc->info);
+
+ if (MS(rx_desc_info, HTT_RX_DESC_HL_INFO_MCAST_BCAST))
+ sec_index = HTT_TXRX_SEC_MCAST;
+ else
+ sec_index = HTT_TXRX_SEC_UCAST;
+
+ sec_type = peer->rx_pn[sec_index].sec_type;
+ first_msdu = rx->fw_desc.flags & FW_RX_DESC_FLAGS_FIRST_MSDU;
+
+ ath10k_htt_rx_mpdu_desc_pn_hl(rx_desc, &new_pn, peer->rx_pn[sec_index].pn_len);
+
+ if (check_pn_type == HTT_RX_PN_CHECK && tid >= IEEE80211_NUM_TIDS) {
+ spin_lock_bh(&ar->data_lock);
+ pn_invalid = ath10k_htt_rx_pn_check_replay_hl(ar, peer, rx);
+ spin_unlock_bh(&ar->data_lock);
+
+ if (pn_invalid)
+ goto err;
+ }
+
+ /* Strip off all headers before the MAC header before delivery to
+ * mac80211
+ */
+ tot_hdr_len = sizeof(struct htt_resp_hdr) + sizeof(rx->hdr) +
+ sizeof(rx->ppdu) + sizeof(rx->prefix) +
+ sizeof(rx->fw_desc) +
+ sizeof(*mpdu_ranges) * num_mpdu_ranges + rx_desc_len;
+
+ skb_pull(skb, tot_hdr_len);
+
+ hdr = (struct ieee80211_hdr *)skb->data;
+ qos = ieee80211_is_data_qos(hdr->frame_control);
+
+ rx_status = IEEE80211_SKB_RXCB(skb);
+ memset(rx_status, 0, sizeof(*rx_status));
+
+ if (rx->ppdu.combined_rssi == 0) {
+ /* SDIO firmware does not provide signal */
+ rx_status->signal = 0;
+ rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
+ } else {
+ rx_status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
+ rx->ppdu.combined_rssi;
+ rx_status->flag &= ~RX_FLAG_NO_SIGNAL_VAL;
+ }
+
+ spin_lock_bh(&ar->data_lock);
+ ch = ar->scan_channel;
+ if (!ch)
+ ch = ar->rx_channel;
+ if (!ch)
+ ch = ath10k_htt_rx_h_any_channel(ar);
+ if (!ch)
+ ch = ar->tgt_oper_chan;
+ spin_unlock_bh(&ar->data_lock);
+
+ if (ch) {
+ rx_status->band = ch->band;
+ rx_status->freq = ch->center_freq;
+ }
+ if (rx->fw_desc.flags & FW_RX_DESC_FLAGS_LAST_MSDU)
+ rx_status->flag &= ~RX_FLAG_AMSDU_MORE;
+ else
+ rx_status->flag |= RX_FLAG_AMSDU_MORE;
+
+ /* Not entirely sure about this, but all frames from the chipset
+ * have the protected flag set even though they have already been
+ * decrypted. Clearing this flag is necessary in order for mac80211
+ * not to drop the frame.
+ * TODO: Verify this is always the case or find a way to check
+ * whether hw decryption has taken place.
+ */
+ if (ieee80211_has_protected(hdr->frame_control)) {
+ hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
+ rx_status->flag |= RX_FLAG_DECRYPTED |
+ RX_FLAG_IV_STRIPPED |
+ RX_FLAG_MMIC_STRIPPED;
+
+ if (tid < IEEE80211_NUM_TIDS &&
+ first_msdu &&
+ check_pn_type == HTT_RX_PN_CHECK &&
+ (sec_type == HTT_SECURITY_AES_CCMP ||
+ sec_type == HTT_SECURITY_TKIP ||
+ sec_type == HTT_SECURITY_TKIP_NOMIC)) {
+ u8 offset, *ivp, i;
+ s8 keyidx = 0;
+ __le64 pn48 = cpu_to_le64(new_pn.pn48);
+
+ hdr = (struct ieee80211_hdr *)skb->data;
+ offset = ieee80211_hdrlen(hdr->frame_control);
+ hdr->frame_control |= __cpu_to_le16(IEEE80211_FCTL_PROTECTED);
+ rx_status->flag &= ~RX_FLAG_IV_STRIPPED;
+
+ memmove(skb->data - IEEE80211_CCMP_HDR_LEN,
+ skb->data, offset);
+ skb_push(skb, IEEE80211_CCMP_HDR_LEN);
+ ivp = skb->data + offset;
+ memset(skb->data + offset, 0, IEEE80211_CCMP_HDR_LEN);
+ /* Ext IV */
+ ivp[IEEE80211_WEP_IV_LEN - 1] |= ATH10K_IEEE80211_EXTIV;
+
+ for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
+ if (peer->keys[i] &&
+ peer->keys[i]->flags & IEEE80211_KEY_FLAG_PAIRWISE)
+ keyidx = peer->keys[i]->keyidx;
+ }
+
+ /* Key ID */
+ ivp[IEEE80211_WEP_IV_LEN - 1] |= keyidx << 6;
+
+ if (sec_type == HTT_SECURITY_AES_CCMP) {
+ rx_status->flag |= RX_FLAG_MIC_STRIPPED;
+ /* pn 0, pn 1 */
+ memcpy(skb->data + offset, &pn48, 2);
+ /* pn 2, pn 3, pn 4, pn 5 */
+ memcpy(skb->data + offset + 4, ((u8 *)&pn48) + 2, 4);
+ } else {
+ rx_status->flag |= RX_FLAG_ICV_STRIPPED;
+ /* TSC 0 */
+ memcpy(skb->data + offset + 2, &pn48, 1);
+ /* TSC 1 */
+ memcpy(skb->data + offset, ((u8 *)&pn48) + 1, 1);
+ /* TSC 2, TSC 3, TSC 4, TSC 5 */
+ memcpy(skb->data + offset + 4, ((u8 *)&pn48) + 2, 4);
+ }
+ }
+ }
+
+ if (tkip_mic_type == HTT_RX_TKIP_MIC)
+ rx_status->flag &= ~RX_FLAG_IV_STRIPPED &
+ ~RX_FLAG_MMIC_STRIPPED;
+
+ if (mpdu_ranges->mpdu_range_status == HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR)
+ rx_status->flag |= RX_FLAG_MMIC_ERROR;
+
+ if (!qos && tid < IEEE80211_NUM_TIDS) {
+ u8 offset;
+ __le16 qos_ctrl = 0;
+
+ hdr = (struct ieee80211_hdr *)skb->data;
+ offset = ieee80211_hdrlen(hdr->frame_control);
+
+ hdr->frame_control |= cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
+ memmove(skb->data - IEEE80211_QOS_CTL_LEN, skb->data, offset);
+ skb_push(skb, IEEE80211_QOS_CTL_LEN);
+ qos_ctrl = cpu_to_le16(tid);
+ memcpy(skb->data + offset, &qos_ctrl, IEEE80211_QOS_CTL_LEN);
+ }
+
+ if (ar->napi.dev)
+ ieee80211_rx_napi(ar->hw, NULL, skb, &ar->napi);
+ else
+ ieee80211_rx_ni(ar->hw, skb);
+
+ /* We have delivered the skb to the upper layers (mac80211) so we
+ * must not free it.
+ */
+ return false;
+err:
+ /* Tell the caller that it must free the skb since we have not
+ * consumed it
+ */
+ return true;
+}
+
+static int ath10k_htt_rx_frag_tkip_decap_nomic(struct sk_buff *skb,
+ u16 head_len,
+ u16 hdr_len)
+{
+ u8 *ivp, *orig_hdr;
+
+ orig_hdr = skb->data;
+ ivp = orig_hdr + hdr_len + head_len;
+
+ /* the ExtIV bit is always set to 1 for TKIP */
+ if (!(ivp[IEEE80211_WEP_IV_LEN - 1] & ATH10K_IEEE80211_EXTIV))
+ return -EINVAL;
+
+ memmove(orig_hdr + IEEE80211_TKIP_IV_LEN, orig_hdr, head_len + hdr_len);
+ skb_pull(skb, IEEE80211_TKIP_IV_LEN);
+ skb_trim(skb, skb->len - ATH10K_IEEE80211_TKIP_MICLEN);
+ return 0;
+}
+
+static int ath10k_htt_rx_frag_tkip_decap_withmic(struct sk_buff *skb,
+ u16 head_len,
+ u16 hdr_len)
+{
+ u8 *ivp, *orig_hdr;
+
+ orig_hdr = skb->data;
+ ivp = orig_hdr + hdr_len + head_len;
+
+ /* the ExtIV bit is always set to 1 for TKIP */
+ if (!(ivp[IEEE80211_WEP_IV_LEN - 1] & ATH10K_IEEE80211_EXTIV))
+ return -EINVAL;
+
+ memmove(orig_hdr + IEEE80211_TKIP_IV_LEN, orig_hdr, head_len + hdr_len);
+ skb_pull(skb, IEEE80211_TKIP_IV_LEN);
+ skb_trim(skb, skb->len - IEEE80211_TKIP_ICV_LEN);
+ return 0;
+}
+
+static int ath10k_htt_rx_frag_ccmp_decap(struct sk_buff *skb,
+ u16 head_len,
+ u16 hdr_len)
+{
+ u8 *ivp, *orig_hdr;
+
+ orig_hdr = skb->data;
+ ivp = orig_hdr + hdr_len + head_len;
+
+ /* the ExtIV bit is always set to 1 for CCMP */
+ if (!(ivp[IEEE80211_WEP_IV_LEN - 1] & ATH10K_IEEE80211_EXTIV))
+ return -EINVAL;
+
+ skb_trim(skb, skb->len - IEEE80211_CCMP_MIC_LEN);
+ memmove(orig_hdr + IEEE80211_CCMP_HDR_LEN, orig_hdr, head_len + hdr_len);
+ skb_pull(skb, IEEE80211_CCMP_HDR_LEN);
+ return 0;
+}
+
+static int ath10k_htt_rx_frag_wep_decap(struct sk_buff *skb,
+ u16 head_len,
+ u16 hdr_len)
+{
+ u8 *orig_hdr;
+
+ orig_hdr = skb->data;
+
+ memmove(orig_hdr + IEEE80211_WEP_IV_LEN,
+ orig_hdr, head_len + hdr_len);
+ skb_pull(skb, IEEE80211_WEP_IV_LEN);
+ skb_trim(skb, skb->len - IEEE80211_WEP_ICV_LEN);
+ return 0;
+}
+
+static bool ath10k_htt_rx_proc_rx_frag_ind_hl(struct ath10k_htt *htt,
+ struct htt_rx_fragment_indication *rx,
+ struct sk_buff *skb)
+{
+ struct ath10k *ar = htt->ar;
+ enum htt_rx_tkip_demic_type tkip_mic = HTT_RX_NON_TKIP_MIC;
+ enum htt_txrx_sec_cast_type sec_index;
+ struct htt_rx_indication_hl *rx_hl;
+ enum htt_security_types sec_type;
+ u32 tid, frag, seq, rx_desc_info;
+ union htt_rx_pn_t new_pn = {};
+ struct htt_hl_rx_desc *rx_desc;
+ u16 peer_id, sc, hdr_space;
+ union htt_rx_pn_t *last_pn;
+ struct ieee80211_hdr *hdr;
+ int ret, num_mpdu_ranges;
+ struct ath10k_peer *peer;
+ struct htt_resp *resp;
+ size_t tot_hdr_len;
+
+ resp = (struct htt_resp *)(skb->data + HTT_RX_FRAG_IND_INFO0_HEADER_LEN);
+ skb_pull(skb, HTT_RX_FRAG_IND_INFO0_HEADER_LEN);
+ skb_trim(skb, skb->len - FCS_LEN);
+
+ peer_id = __le16_to_cpu(rx->peer_id);
+ rx_hl = (struct htt_rx_indication_hl *)(&resp->rx_ind_hl);
+
+ spin_lock_bh(&ar->data_lock);
+ peer = ath10k_peer_find_by_id(ar, peer_id);
+ if (!peer) {
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "invalid peer: %u\n", peer_id);
+ goto err;
+ }
+
+ num_mpdu_ranges = MS(__le32_to_cpu(rx_hl->hdr.info1),
+ HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
+
+ tot_hdr_len = sizeof(struct htt_resp_hdr) +
+ sizeof(rx_hl->hdr) +
+ sizeof(rx_hl->ppdu) +
+ sizeof(rx_hl->prefix) +
+ sizeof(rx_hl->fw_desc) +
+ sizeof(struct htt_rx_indication_mpdu_range) * num_mpdu_ranges;
+
+ tid = MS(rx_hl->hdr.info0, HTT_RX_INDICATION_INFO0_EXT_TID);
+ rx_desc = (struct htt_hl_rx_desc *)(skb->data + tot_hdr_len);
+ rx_desc_info = __le32_to_cpu(rx_desc->info);
+
+ hdr = (struct ieee80211_hdr *)((u8 *)rx_desc + rx_hl->fw_desc.len);
+
+ if (is_multicast_ether_addr(hdr->addr1)) {
+ /* Discard the fragment with multicast DA */
+ goto err;
+ }
+
+ if (!MS(rx_desc_info, HTT_RX_DESC_HL_INFO_ENCRYPTED)) {
+ spin_unlock_bh(&ar->data_lock);
+ return ath10k_htt_rx_proc_rx_ind_hl(htt, &resp->rx_ind_hl, skb,
+ HTT_RX_NON_PN_CHECK,
+ HTT_RX_NON_TKIP_MIC);
+ }
+
+ if (ieee80211_has_retry(hdr->frame_control))
+ goto err;
+
+ hdr_space = ieee80211_hdrlen(hdr->frame_control);
+ sc = __le16_to_cpu(hdr->seq_ctrl);
+ seq = IEEE80211_SEQ_TO_SN(sc);
+ frag = sc & IEEE80211_SCTL_FRAG;
+
+ sec_index = MS(rx_desc_info, HTT_RX_DESC_HL_INFO_MCAST_BCAST) ?
+ HTT_TXRX_SEC_MCAST : HTT_TXRX_SEC_UCAST;
+ sec_type = peer->rx_pn[sec_index].sec_type;
+ ath10k_htt_rx_mpdu_desc_pn_hl(rx_desc, &new_pn, peer->rx_pn[sec_index].pn_len);
+
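+ /* Strip the per-cipher security header (and trailing MIC/ICV)
+ * so the reassembled fragment is handed to mac80211 as if it
+ * had been received in the clear.
+ */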
+ switch (sec_type) {
+ case HTT_SECURITY_TKIP:
+ tkip_mic = HTT_RX_TKIP_MIC;
+ ret = ath10k_htt_rx_frag_tkip_decap_withmic(skb,
+ tot_hdr_len +
+ rx_hl->fw_desc.len,
+ hdr_space);
+ if (ret)
+ goto err;
+ break;
+ case HTT_SECURITY_TKIP_NOMIC:
+ ret = ath10k_htt_rx_frag_tkip_decap_nomic(skb,
+ tot_hdr_len +
+ rx_hl->fw_desc.len,
+ hdr_space);
+ if (ret)
+ goto err;
+ break;
+ case HTT_SECURITY_AES_CCMP:
+ ret = ath10k_htt_rx_frag_ccmp_decap(skb,
+ tot_hdr_len + rx_hl->fw_desc.len,
+ hdr_space);
+ if (ret)
+ goto err;
+ break;
+ case HTT_SECURITY_WEP128:
+ case HTT_SECURITY_WEP104:
+ case HTT_SECURITY_WEP40:
+ ret = ath10k_htt_rx_frag_wep_decap(skb,
+ tot_hdr_len + rx_hl->fw_desc.len,
+ hdr_space);
+ if (ret)
+ goto err;
+ break;
+ default:
+ break;
+ }
+
+ resp = (struct htt_resp *)(skb->data);
+
+ if (sec_type != HTT_SECURITY_AES_CCMP &&
+ sec_type != HTT_SECURITY_TKIP &&
+ sec_type != HTT_SECURITY_TKIP_NOMIC) {
+ spin_unlock_bh(&ar->data_lock);
+ return ath10k_htt_rx_proc_rx_ind_hl(htt, &resp->rx_ind_hl, skb,
+ HTT_RX_NON_PN_CHECK,
+ HTT_RX_NON_TKIP_MIC);
+ }
+
+ last_pn = &peer->frag_tids_last_pn[tid];
+
+ if (frag == 0) {
+ if (ath10k_htt_rx_pn_check_replay_hl(ar, peer, &resp->rx_ind_hl))
+ goto err;
+
+ last_pn->pn48 = new_pn.pn48;
+ peer->frag_tids_seq[tid] = seq;
+ } else if (sec_type == HTT_SECURITY_AES_CCMP) {
+ if (seq != peer->frag_tids_seq[tid])
+ goto err;
+
+ if (new_pn.pn48 != last_pn->pn48 + 1)
+ goto err;
+
+ last_pn->pn48 = new_pn.pn48;
+ last_pn = &peer->tids_last_pn[tid];
+ last_pn->pn48 = new_pn.pn48;
+ }
+
+ spin_unlock_bh(&ar->data_lock);
+
+ return ath10k_htt_rx_proc_rx_ind_hl(htt, &resp->rx_ind_hl, skb,
+ HTT_RX_NON_PN_CHECK, tkip_mic);
+
+err:
+ spin_unlock_bh(&ar->data_lock);
+
+ /* Tell the caller that it must free the skb since we have not
+ * consumed it
+ */
+ return true;
+}
+
+static void ath10k_htt_rx_proc_rx_ind_ll(struct ath10k_htt *htt,
+ struct htt_rx_indication *rx)
+{
+ struct ath10k *ar = htt->ar;
+ struct htt_rx_indication_mpdu_range *mpdu_ranges;
+ int num_mpdu_ranges;
+ int i, mpdu_count = 0;
+ u16 peer_id;
+ u8 tid;
+
+ num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
+ HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
+ peer_id = __le16_to_cpu(rx->hdr.peer_id);
+ tid = MS(rx->hdr.info0, HTT_RX_INDICATION_INFO0_EXT_TID);
+
+ mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx);
+
+ ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
+ rx, struct_size(rx, mpdu_ranges, num_mpdu_ranges));
+
+ for (i = 0; i < num_mpdu_ranges; i++)
+ mpdu_count += mpdu_ranges[i].mpdu_count;
+
+ atomic_add(mpdu_count, &htt->num_mpdus_ready);
+
+ ath10k_sta_update_rx_tid_stats_ampdu(ar, peer_id, tid, mpdu_ranges,
+ num_mpdu_ranges);
+}
+
+static void ath10k_htt_rx_tx_compl_ind(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ struct ath10k_htt *htt = &ar->htt;
+ struct htt_resp *resp = (struct htt_resp *)skb->data;
+ struct htt_tx_done tx_done = {};
+ int status = MS(resp->data_tx_completion.flags, HTT_DATA_TX_STATUS);
+ __le16 msdu_id, *msdus;
+ bool rssi_enabled = false;
+ u8 msdu_count = 0, num_airtime_records, tid;
+ int i, htt_pad = 0;
+ struct htt_data_tx_compl_ppdu_dur *ppdu_info;
+ struct ath10k_peer *peer;
+ u16 ppdu_info_offset = 0, peer_id;
+ u32 tx_duration;
+
+ switch (status) {
+ case HTT_DATA_TX_STATUS_NO_ACK:
+ tx_done.status = HTT_TX_COMPL_STATE_NOACK;
+ break;
+ case HTT_DATA_TX_STATUS_OK:
+ tx_done.status = HTT_TX_COMPL_STATE_ACK;
+ break;
+ case HTT_DATA_TX_STATUS_DISCARD:
+ case HTT_DATA_TX_STATUS_POSTPONE:
+ tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
+ break;
+ default:
+ ath10k_warn(ar, "unhandled tx completion status %d\n", status);
+ tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
+ break;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n",
+ resp->data_tx_completion.num_msdus);
+
+ msdu_count = resp->data_tx_completion.num_msdus;
+ msdus = resp->data_tx_completion.msdus;
+ rssi_enabled = ath10k_is_rssi_enable(&ar->hw_params, resp);
+
+ if (rssi_enabled)
+ htt_pad = ath10k_tx_data_rssi_get_pad_bytes(&ar->hw_params,
+ resp);
+
+ for (i = 0; i < msdu_count; i++) {
+ msdu_id = msdus[i];
+ tx_done.msdu_id = __le16_to_cpu(msdu_id);
+
+ if (rssi_enabled) {
+ /* The total number of MSDUs should be even;
+ * if an odd number of MSDUs is sent, firmware
+ * pads the list with a trailing 0xffff msdu id
+ */
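+ /* Judging by the indexing below, msdus[] holds msdu_count ids,
+ * padded to an even count, followed by one ack RSSI word per
+ * MSDU; e.g. for msdu_count 3 and htt_pad 0 the ids sit at
+ * msdus[0..2], msdus[3] is the 0xffff filler and the RSSI for
+ * id i is at msdus[4 + i].
+ */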
+ if (msdu_count & 0x01) {
+ msdu_id = msdus[msdu_count + i + 1 + htt_pad];
+ tx_done.ack_rssi = __le16_to_cpu(msdu_id);
+ } else {
+ msdu_id = msdus[msdu_count + i + htt_pad];
+ tx_done.ack_rssi = __le16_to_cpu(msdu_id);
+ }
+ }
+
+ /* kfifo_put: In practice firmware shouldn't fire off per-CE
+ * interrupt and main interrupt (MSI/MSI-X) for the same
+ * HTC service so it should be safe to use kfifo_put w/o lock.
+ *
+ * From kfifo_put() documentation:
+ * Note that with only one concurrent reader and one concurrent
+ * writer, you don't need extra locking to use these macros.
+ */
+ if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL) {
+ ath10k_txrx_tx_unref(htt, &tx_done);
+ } else if (!kfifo_put(&htt->txdone_fifo, tx_done)) {
+ ath10k_warn(ar, "txdone fifo overrun, msdu_id %d status %d\n",
+ tx_done.msdu_id, tx_done.status);
+ ath10k_txrx_tx_unref(htt, &tx_done);
+ }
+ }
+
+ if (!(resp->data_tx_completion.flags2 & HTT_TX_CMPL_FLAG_PPDU_DURATION_PRESENT))
+ return;
+
+ ppdu_info_offset = (msdu_count & 0x01) ? msdu_count + 1 : msdu_count;
+
+ if (rssi_enabled)
+ ppdu_info_offset += ppdu_info_offset;
+
+ if (resp->data_tx_completion.flags2 &
+ (HTT_TX_CMPL_FLAG_PPID_PRESENT | HTT_TX_CMPL_FLAG_PA_PRESENT))
+ ppdu_info_offset += 2;
+
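+ /* ppdu_info_offset counts __le16 words: round msdu_count up to
+ * even, double it when per-MSDU RSSI words follow the ids, and
+ * skip two more words when PPID/PA fields are present; e.g.
+ * msdu_count 3 with RSSI enabled gives (3 + 1) * 2 = 8.
+ */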
+ ppdu_info = (struct htt_data_tx_compl_ppdu_dur *)&msdus[ppdu_info_offset];
+ num_airtime_records = FIELD_GET(HTT_TX_COMPL_PPDU_DUR_INFO0_NUM_ENTRIES_MASK,
+ __le32_to_cpu(ppdu_info->info0));
+
+ for (i = 0; i < num_airtime_records; i++) {
+ struct htt_data_tx_ppdu_dur *ppdu_dur;
+ u32 info0;
+
+ ppdu_dur = &ppdu_info->ppdu_dur[i];
+ info0 = __le32_to_cpu(ppdu_dur->info0);
+
+ peer_id = FIELD_GET(HTT_TX_PPDU_DUR_INFO0_PEER_ID_MASK,
+ info0);
+ rcu_read_lock();
+ spin_lock_bh(&ar->data_lock);
+
+ peer = ath10k_peer_find_by_id(ar, peer_id);
+ if (!peer || !peer->sta) {
+ spin_unlock_bh(&ar->data_lock);
+ rcu_read_unlock();
+ continue;
+ }
+
+ tid = FIELD_GET(HTT_TX_PPDU_DUR_INFO0_TID_MASK, info0) &
+ IEEE80211_QOS_CTL_TID_MASK;
+ tx_duration = __le32_to_cpu(ppdu_dur->tx_duration);
+
+ ieee80211_sta_register_airtime(peer->sta, tid, tx_duration, 0);
+
+ spin_unlock_bh(&ar->data_lock);
+ rcu_read_unlock();
+ }
+}
+
+static void ath10k_htt_rx_addba(struct ath10k *ar, struct htt_resp *resp)
+{
+ struct htt_rx_addba *ev = &resp->rx_addba;
+ struct ath10k_peer *peer;
+ struct ath10k_vif *arvif;
+ u16 info0, tid, peer_id;
+
+ info0 = __le16_to_cpu(ev->info0);
+ tid = MS(info0, HTT_RX_BA_INFO0_TID);
+ peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT,
+ "htt rx addba tid %u peer_id %u size %u\n",
+ tid, peer_id, ev->window_size);
+
+ spin_lock_bh(&ar->data_lock);
+ peer = ath10k_peer_find_by_id(ar, peer_id);
+ if (!peer) {
+ ath10k_warn(ar, "received addba event for invalid peer_id: %u\n",
+ peer_id);
+ spin_unlock_bh(&ar->data_lock);
+ return;
+ }
+
+ arvif = ath10k_get_arvif(ar, peer->vdev_id);
+ if (!arvif) {
+ ath10k_warn(ar, "received addba event for invalid vdev_id: %u\n",
+ peer->vdev_id);
+ spin_unlock_bh(&ar->data_lock);
+ return;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT,
+ "htt rx start rx ba session sta %pM tid %u size %u\n",
+ peer->addr, tid, ev->window_size);
+
+ ieee80211_start_rx_ba_session_offl(arvif->vif, peer->addr, tid);
+ spin_unlock_bh(&ar->data_lock);
+}
+
+static void ath10k_htt_rx_delba(struct ath10k *ar, struct htt_resp *resp)
+{
+ struct htt_rx_delba *ev = &resp->rx_delba;
+ struct ath10k_peer *peer;
+ struct ath10k_vif *arvif;
+ u16 info0, tid, peer_id;
+
+ info0 = __le16_to_cpu(ev->info0);
+ tid = MS(info0, HTT_RX_BA_INFO0_TID);
+ peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT,
+ "htt rx delba tid %u peer_id %u\n",
+ tid, peer_id);
+
+ spin_lock_bh(&ar->data_lock);
+ peer = ath10k_peer_find_by_id(ar, peer_id);
+ if (!peer) {
+ ath10k_warn(ar, "received addba event for invalid peer_id: %u\n",
+ peer_id);
+ spin_unlock_bh(&ar->data_lock);
+ return;
+ }
+
+ arvif = ath10k_get_arvif(ar, peer->vdev_id);
+ if (!arvif) {
+ ath10k_warn(ar, "received addba event for invalid vdev_id: %u\n",
+ peer->vdev_id);
+ spin_unlock_bh(&ar->data_lock);
+ return;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT,
+ "htt rx stop rx ba session sta %pM tid %u\n",
+ peer->addr, tid);
+
+ ieee80211_stop_rx_ba_session_offl(arvif->vif, peer->addr, tid);
+ spin_unlock_bh(&ar->data_lock);
+}
+
+static int ath10k_htt_rx_extract_amsdu(struct ath10k_hw_params *hw,
+ struct sk_buff_head *list,
+ struct sk_buff_head *amsdu)
+{
+ struct sk_buff *msdu;
+ struct htt_rx_desc *rxd;
+ struct rx_msdu_end_common *rxd_msdu_end_common;
+
+ if (skb_queue_empty(list))
+ return -ENOBUFS;
+
+ if (WARN_ON(!skb_queue_empty(amsdu)))
+ return -EINVAL;
+
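+ /* Move MSDUs from the list into amsdu until a descriptor with
+ * LAST_MSDU set closes the A-MSDU; if the list runs out before
+ * that, the tail check below splices everything back and reports
+ * -EAGAIN.
+ */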
+ while ((msdu = __skb_dequeue(list))) {
+ __skb_queue_tail(amsdu, msdu);
+
+ rxd = HTT_RX_BUF_TO_RX_DESC(hw,
+ (void *)msdu->data -
+ hw->rx_desc_ops->rx_desc_size);
+
+ rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
+ if (rxd_msdu_end_common->info0 &
+ __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))
+ break;
+ }
+
+ msdu = skb_peek_tail(amsdu);
+ rxd = HTT_RX_BUF_TO_RX_DESC(hw,
+ (void *)msdu->data - hw->rx_desc_ops->rx_desc_size);
+
+ rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
+ if (!(rxd_msdu_end_common->info0 &
+ __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))) {
+ skb_queue_splice_init(amsdu, list);
+ return -EAGAIN;
+ }
+
+ return 0;
+}
+
+static void ath10k_htt_rx_h_rx_offload_prot(struct ieee80211_rx_status *status,
+ struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+
+ if (!ieee80211_has_protected(hdr->frame_control))
+ return;
+
+ /* Offloaded frames are already decrypted but firmware insists they are
+ * protected in the 802.11 header. Strip the flag. Otherwise mac80211
+ * will drop the frame.
+ */
+
+ hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
+ status->flag |= RX_FLAG_DECRYPTED |
+ RX_FLAG_IV_STRIPPED |
+ RX_FLAG_MMIC_STRIPPED;
+}
+
+static void ath10k_htt_rx_h_rx_offload(struct ath10k *ar,
+ struct sk_buff_head *list)
+{
+ struct ath10k_htt *htt = &ar->htt;
+ struct ieee80211_rx_status *status = &htt->rx_status;
+ struct htt_rx_offload_msdu *rx;
+ struct sk_buff *msdu;
+ size_t offset;
+
+ while ((msdu = __skb_dequeue(list))) {
+ /* Offloaded frames don't have Rx descriptor. Instead they have
+ * a short meta information header.
+ */
+
+ rx = (void *)msdu->data;
+
+ skb_put(msdu, sizeof(*rx));
+ skb_pull(msdu, sizeof(*rx));
+
+ if (skb_tailroom(msdu) < __le16_to_cpu(rx->msdu_len)) {
+ ath10k_warn(ar, "dropping frame: offloaded rx msdu is too long!\n");
+ dev_kfree_skb_any(msdu);
+ continue;
+ }
+
+ skb_put(msdu, __le16_to_cpu(rx->msdu_len));
+
+ /* The offloaded rx header length isn't a multiple of 2 or 4, so
+ * the actual payload is unaligned. Align the frame; otherwise
+ * mac80211 complains. This shouldn't reduce performance much
+ * because these offloaded frames are rare.
+ */
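+ /* e.g. a payload at an address with (data & 3) == 2 gets offset 2:
+ * the tail is extended, the payload is shifted forward by two
+ * bytes and the data pointer then advances onto a 4-byte boundary.
+ */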
+ offset = 4 - ((unsigned long)msdu->data & 3);
+ skb_put(msdu, offset);
+ memmove(msdu->data + offset, msdu->data, msdu->len);
+ skb_pull(msdu, offset);
+
+ /* FIXME: The frame is NWifi. Re-construct QoS Control
+ * if possible later.
+ */
+
+ memset(status, 0, sizeof(*status));
+ status->flag |= RX_FLAG_NO_SIGNAL_VAL;
+
+ ath10k_htt_rx_h_rx_offload_prot(status, msdu);
+ ath10k_htt_rx_h_channel(ar, status, NULL, rx->vdev_id);
+ ath10k_htt_rx_h_queue_msdu(ar, status, msdu);
+ }
+}
+
+static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct ath10k_htt *htt = &ar->htt;
+ struct htt_resp *resp = (void *)skb->data;
+ struct ieee80211_rx_status *status = &htt->rx_status;
+ struct sk_buff_head list;
+ struct sk_buff_head amsdu;
+ u16 peer_id;
+ u16 msdu_count;
+ u8 vdev_id;
+ u8 tid;
+ bool offload;
+ bool frag;
+ int ret;
+
+ lockdep_assert_held(&htt->rx_ring.lock);
+
+ if (htt->rx_confused)
+ return -EIO;
+
+ skb_pull(skb, sizeof(resp->hdr));
+ skb_pull(skb, sizeof(resp->rx_in_ord_ind));
+
+ peer_id = __le16_to_cpu(resp->rx_in_ord_ind.peer_id);
+ msdu_count = __le16_to_cpu(resp->rx_in_ord_ind.msdu_count);
+ vdev_id = resp->rx_in_ord_ind.vdev_id;
+ tid = SM(resp->rx_in_ord_ind.info, HTT_RX_IN_ORD_IND_INFO_TID);
+ offload = !!(resp->rx_in_ord_ind.info &
+ HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);
+ frag = !!(resp->rx_in_ord_ind.info & HTT_RX_IN_ORD_IND_INFO_FRAG_MASK);
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT,
+ "htt rx in ord vdev %i peer %i tid %i offload %i frag %i msdu count %i\n",
+ vdev_id, peer_id, tid, offload, frag, msdu_count);
+
+ if (skb->len < msdu_count * sizeof(*resp->rx_in_ord_ind.msdu_descs32)) {
+ ath10k_warn(ar, "dropping invalid in order rx indication\n");
+ return -EINVAL;
+ }
+
+ /* The event can deliver more than 1 A-MSDU. Each A-MSDU is later
+ * extracted and processed.
+ */
+ __skb_queue_head_init(&list);
+ if (ar->hw_params.target_64bit)
+ ret = ath10k_htt_rx_pop_paddr64_list(htt, &resp->rx_in_ord_ind,
+ &list);
+ else
+ ret = ath10k_htt_rx_pop_paddr32_list(htt, &resp->rx_in_ord_ind,
+ &list);
+
+ if (ret < 0) {
+ ath10k_warn(ar, "failed to pop paddr list: %d\n", ret);
+ htt->rx_confused = true;
+ return -EIO;
+ }
+
+ /* Offloaded frames are very different and need to be handled
+ * separately.
+ */
+ if (offload)
+ ath10k_htt_rx_h_rx_offload(ar, &list);
+
+ while (!skb_queue_empty(&list)) {
+ __skb_queue_head_init(&amsdu);
+ ret = ath10k_htt_rx_extract_amsdu(&ar->hw_params, &list, &amsdu);
+ switch (ret) {
+ case 0:
+ /* Note: The in-order indication may report interleaved
+ * frames from different PPDUs meaning reported rx rate
+ * to mac80211 isn't accurate/reliable. It's still
+ * better to report something than nothing though. This
+ * should still give an idea about rx rate to the user.
+ */
+ ath10k_htt_rx_h_ppdu(ar, &amsdu, status, vdev_id);
+ ath10k_htt_rx_h_filter(ar, &amsdu, status, NULL);
+ ath10k_htt_rx_h_mpdu(ar, &amsdu, status, false, NULL,
+ NULL, peer_id, frag);
+ ath10k_htt_rx_h_enqueue(ar, &amsdu, status);
+ break;
+ case -EAGAIN:
+ fallthrough;
+ default:
+ /* Should not happen. */
+ ath10k_warn(ar, "failed to extract amsdu: %d\n", ret);
+ htt->rx_confused = true;
+ __skb_queue_purge(&list);
+ return -EIO;
+ }
+ }
+ return ret;
+}
+
+static void ath10k_htt_rx_tx_fetch_resp_id_confirm(struct ath10k *ar,
+ const __le32 *resp_ids,
+ int num_resp_ids)
+{
+ int i;
+ u32 resp_id;
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm num_resp_ids %d\n",
+ num_resp_ids);
+
+ for (i = 0; i < num_resp_ids; i++) {
+ resp_id = le32_to_cpu(resp_ids[i]);
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm resp_id %u\n",
+ resp_id);
+
+ /* TODO: free resp_id */
+ }
+}
+
+static void ath10k_htt_rx_tx_fetch_ind(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct ieee80211_hw *hw = ar->hw;
+ struct ieee80211_txq *txq;
+ struct htt_resp *resp = (struct htt_resp *)skb->data;
+ struct htt_tx_fetch_record *record;
+ size_t len;
+ size_t max_num_bytes;
+ size_t max_num_msdus;
+ size_t num_bytes;
+ size_t num_msdus;
+ const __le32 *resp_ids;
+ u16 num_records;
+ u16 num_resp_ids;
+ u16 peer_id;
+ u8 tid;
+ int ret;
+ int i;
+ bool may_tx;
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind\n");
+
+ len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_ind);
+ if (unlikely(skb->len < len)) {
+ ath10k_warn(ar, "received corrupted tx_fetch_ind event: buffer too short\n");
+ return;
+ }
+
+ num_records = le16_to_cpu(resp->tx_fetch_ind.num_records);
+ num_resp_ids = le16_to_cpu(resp->tx_fetch_ind.num_resp_ids);
+
+ len += sizeof(resp->tx_fetch_ind.records[0]) * num_records;
+ len += sizeof(resp->tx_fetch_ind.resp_ids[0]) * num_resp_ids;
+
+ if (unlikely(skb->len < len)) {
+ ath10k_warn(ar, "received corrupted tx_fetch_ind event: too many records/resp_ids\n");
+ return;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind num records %u num resps %u seq %u\n",
+ num_records, num_resp_ids,
+ le16_to_cpu(resp->tx_fetch_ind.fetch_seq_num));
+
+ if (!ar->htt.tx_q_state.enabled) {
+ ath10k_warn(ar, "received unexpected tx_fetch_ind event: not enabled\n");
+ return;
+ }
+
+ if (ar->htt.tx_q_state.mode == HTT_TX_MODE_SWITCH_PUSH) {
+ ath10k_warn(ar, "received unexpected tx_fetch_ind event: in push mode\n");
+ return;
+ }
+
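+ /* In pull mode each record caps how many MSDUs/bytes the host may
+ * push for one (peer_id, tid) queue; the amounts actually pushed
+ * are written back into the record before the fetch response is
+ * sent.
+ */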
+ rcu_read_lock();
+
+ for (i = 0; i < num_records; i++) {
+ record = &resp->tx_fetch_ind.records[i];
+ peer_id = MS(le16_to_cpu(record->info),
+ HTT_TX_FETCH_RECORD_INFO_PEER_ID);
+ tid = MS(le16_to_cpu(record->info),
+ HTT_TX_FETCH_RECORD_INFO_TID);
+ max_num_msdus = le16_to_cpu(record->num_msdus);
+ max_num_bytes = le32_to_cpu(record->num_bytes);
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch record %i peer_id %u tid %u msdus %zu bytes %zu\n",
+ i, peer_id, tid, max_num_msdus, max_num_bytes);
+
+ if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
+ unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
+ ath10k_warn(ar, "received out of range peer_id %u tid %u\n",
+ peer_id, tid);
+ continue;
+ }
+
+ spin_lock_bh(&ar->data_lock);
+ txq = ath10k_mac_txq_lookup(ar, peer_id, tid);
+ spin_unlock_bh(&ar->data_lock);
+
+ /* It is okay to release the lock and use txq because RCU read
+ * lock is held.
+ */
+
+ if (unlikely(!txq)) {
+ ath10k_warn(ar, "failed to lookup txq for peer_id %u tid %u\n",
+ peer_id, tid);
+ continue;
+ }
+
+ num_msdus = 0;
+ num_bytes = 0;
+
+ ieee80211_txq_schedule_start(hw, txq->ac);
+ may_tx = ieee80211_txq_may_transmit(hw, txq);
+ while (num_msdus < max_num_msdus &&
+ num_bytes < max_num_bytes) {
+ if (!may_tx)
+ break;
+
+ ret = ath10k_mac_tx_push_txq(hw, txq);
+ if (ret < 0)
+ break;
+
+ num_msdus++;
+ num_bytes += ret;
+ }
+ ieee80211_return_txq(hw, txq, false);
+ ieee80211_txq_schedule_end(hw, txq->ac);
+
+ record->num_msdus = cpu_to_le16(num_msdus);
+ record->num_bytes = cpu_to_le32(num_bytes);
+
+ ath10k_htt_tx_txq_recalc(hw, txq);
+ }
+
+ rcu_read_unlock();
+
+ resp_ids = ath10k_htt_get_tx_fetch_ind_resp_ids(&resp->tx_fetch_ind);
+ ath10k_htt_rx_tx_fetch_resp_id_confirm(ar, resp_ids, num_resp_ids);
+
+ ret = ath10k_htt_tx_fetch_resp(ar,
+ resp->tx_fetch_ind.token,
+ resp->tx_fetch_ind.fetch_seq_num,
+ resp->tx_fetch_ind.records,
+ num_records);
+ if (unlikely(ret)) {
+ ath10k_warn(ar, "failed to submit tx fetch resp for token 0x%08x: %d\n",
+ le32_to_cpu(resp->tx_fetch_ind.token), ret);
+ /* FIXME: request fw restart */
+ }
+
+ ath10k_htt_tx_txq_sync(ar);
+}
+
+static void ath10k_htt_rx_tx_fetch_confirm(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ const struct htt_resp *resp = (void *)skb->data;
+ size_t len;
+ int num_resp_ids;
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm\n");
+
+ len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_confirm);
+ if (unlikely(skb->len < len)) {
+ ath10k_warn(ar, "received corrupted tx_fetch_confirm event: buffer too short\n");
+ return;
+ }
+
+ num_resp_ids = le16_to_cpu(resp->tx_fetch_confirm.num_resp_ids);
+ len += sizeof(resp->tx_fetch_confirm.resp_ids[0]) * num_resp_ids;
+
+ if (unlikely(skb->len < len)) {
+ ath10k_warn(ar, "received corrupted tx_fetch_confirm event: resp_ids buffer overflow\n");
+ return;
+ }
+
+ ath10k_htt_rx_tx_fetch_resp_id_confirm(ar,
+ resp->tx_fetch_confirm.resp_ids,
+ num_resp_ids);
+}
+
+static void ath10k_htt_rx_tx_mode_switch_ind(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ const struct htt_resp *resp = (void *)skb->data;
+ const struct htt_tx_mode_switch_record *record;
+ struct ieee80211_txq *txq;
+ struct ath10k_txq *artxq;
+ size_t len;
+ size_t num_records;
+ enum htt_tx_mode_switch_mode mode;
+ bool enable;
+ u16 info0;
+ u16 info1;
+ u16 threshold;
+ u16 peer_id;
+ u8 tid;
+ int i;
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx mode switch ind\n");
+
+ len = sizeof(resp->hdr) + sizeof(resp->tx_mode_switch_ind);
+ if (unlikely(skb->len < len)) {
+ ath10k_warn(ar, "received corrupted tx_mode_switch_ind event: buffer too short\n");
+ return;
+ }
+
+ info0 = le16_to_cpu(resp->tx_mode_switch_ind.info0);
+ info1 = le16_to_cpu(resp->tx_mode_switch_ind.info1);
+
+ enable = !!(info0 & HTT_TX_MODE_SWITCH_IND_INFO0_ENABLE);
+ num_records = MS(info0, HTT_TX_MODE_SWITCH_IND_INFO0_NUM_RECORDS);
+ mode = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_MODE);
+ threshold = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD);
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT,
+ "htt rx tx mode switch ind info0 0x%04x info1 0x%04x enable %d num records %zd mode %d threshold %u\n",
+ info0, info1, enable, num_records, mode, threshold);
+
+ len += sizeof(resp->tx_mode_switch_ind.records[0]) * num_records;
+
+ if (unlikely(skb->len < len)) {
+ ath10k_warn(ar, "received corrupted tx_mode_switch_mode_ind event: too many records\n");
+ return;
+ }
+
+ switch (mode) {
+ case HTT_TX_MODE_SWITCH_PUSH:
+ case HTT_TX_MODE_SWITCH_PUSH_PULL:
+ break;
+ default:
+ ath10k_warn(ar, "received invalid tx_mode_switch_mode_ind mode %d, ignoring\n",
+ mode);
+ return;
+ }
+
+ if (!enable)
+ return;
+
+ ar->htt.tx_q_state.enabled = enable;
+ ar->htt.tx_q_state.mode = mode;
+ ar->htt.tx_q_state.num_push_allowed = threshold;
+
+ rcu_read_lock();
+
+ for (i = 0; i < num_records; i++) {
+ record = &resp->tx_mode_switch_ind.records[i];
+ info0 = le16_to_cpu(record->info0);
+ peer_id = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_PEER_ID);
+ tid = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_TID);
+
+ if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
+ unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
+ ath10k_warn(ar, "received out of range peer_id %u tid %u\n",
+ peer_id, tid);
+ continue;
+ }
+
+ spin_lock_bh(&ar->data_lock);
+ txq = ath10k_mac_txq_lookup(ar, peer_id, tid);
+ spin_unlock_bh(&ar->data_lock);
+
+ /* It is okay to release the lock and use txq because RCU read
+ * lock is held.
+ */
+
+ if (unlikely(!txq)) {
+ ath10k_warn(ar, "failed to lookup txq for peer_id %u tid %u\n",
+ peer_id, tid);
+ continue;
+ }
+
+ spin_lock_bh(&ar->htt.tx_lock);
+ artxq = (void *)txq->drv_priv;
+ artxq->num_push_allowed = le16_to_cpu(record->num_max_msdus);
+ spin_unlock_bh(&ar->htt.tx_lock);
+ }
+
+ rcu_read_unlock();
+
+ ath10k_mac_tx_push_pending(ar);
+}
+
+void ath10k_htt_htc_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
+{
+ bool release;
+
+ release = ath10k_htt_t2h_msg_handler(ar, skb);
+
+ /* Free the indication buffer */
+ if (release)
+ dev_kfree_skb_any(skb);
+}
+
+static inline s8 ath10k_get_legacy_rate_idx(struct ath10k *ar, u8 rate)
+{
+ static const u8 legacy_rates[] = {1, 2, 5, 11, 6, 9, 12,
+ 18, 24, 36, 48, 54};
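+ /* Indices 0-3 are the CCK rates (1, 2, 5.5, 11 Mbps, with 5.5
+ * reported as 5) and indices 4-11 the OFDM rates, matching the
+ * usual 2 GHz legacy bitrate ordering.
+ */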
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(legacy_rates); i++) {
+ if (rate == legacy_rates[i])
+ return i;
+ }
+
+ ath10k_warn(ar, "Invalid legacy rate %d peer stats", rate);
+ return -EINVAL;
+}
+
+static void
+ath10k_accumulate_per_peer_tx_stats(struct ath10k *ar,
+ struct ath10k_sta *arsta,
+ struct ath10k_per_peer_tx_stats *pstats,
+ s8 legacy_rate_idx)
+{
+ struct rate_info *txrate = &arsta->txrate;
+ struct ath10k_htt_tx_stats *tx_stats;
+ int idx, ht_idx, gi, mcs, bw, nss;
+ unsigned long flags;
+
+ if (!arsta->tx_stats)
+ return;
+
+ tx_stats = arsta->tx_stats;
+ flags = txrate->flags;
+ gi = test_bit(ATH10K_RATE_INFO_FLAGS_SGI_BIT, &flags);
+ mcs = ATH10K_HW_MCS_RATE(pstats->ratecode);
+ bw = txrate->bw;
+ nss = txrate->nss;
+ ht_idx = mcs + (nss - 1) * 8;
+ idx = mcs * 8 + 8 * 10 * (nss - 1);
+ idx += bw * 2 + gi;
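+ /* The rate_table index packs (nss, mcs, bw, gi) into one
+ * dimension; e.g. nss 2, mcs 7, bw 1 (40 MHz), sgi gives
+ * 7 * 8 + 80 + 2 + 1 = 139.
+ */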
+
+#define STATS_OP_FMT(name) tx_stats->stats[ATH10K_STATS_TYPE_##name]
+
+ if (txrate->flags & RATE_INFO_FLAGS_VHT_MCS) {
+ STATS_OP_FMT(SUCC).vht[0][mcs] += pstats->succ_bytes;
+ STATS_OP_FMT(SUCC).vht[1][mcs] += pstats->succ_pkts;
+ STATS_OP_FMT(FAIL).vht[0][mcs] += pstats->failed_bytes;
+ STATS_OP_FMT(FAIL).vht[1][mcs] += pstats->failed_pkts;
+ STATS_OP_FMT(RETRY).vht[0][mcs] += pstats->retry_bytes;
+ STATS_OP_FMT(RETRY).vht[1][mcs] += pstats->retry_pkts;
+ } else if (txrate->flags & RATE_INFO_FLAGS_MCS) {
+ STATS_OP_FMT(SUCC).ht[0][ht_idx] += pstats->succ_bytes;
+ STATS_OP_FMT(SUCC).ht[1][ht_idx] += pstats->succ_pkts;
+ STATS_OP_FMT(FAIL).ht[0][ht_idx] += pstats->failed_bytes;
+ STATS_OP_FMT(FAIL).ht[1][ht_idx] += pstats->failed_pkts;
+ STATS_OP_FMT(RETRY).ht[0][ht_idx] += pstats->retry_bytes;
+ STATS_OP_FMT(RETRY).ht[1][ht_idx] += pstats->retry_pkts;
+ } else {
+ mcs = legacy_rate_idx;
+
+ STATS_OP_FMT(SUCC).legacy[0][mcs] += pstats->succ_bytes;
+ STATS_OP_FMT(SUCC).legacy[1][mcs] += pstats->succ_pkts;
+ STATS_OP_FMT(FAIL).legacy[0][mcs] += pstats->failed_bytes;
+ STATS_OP_FMT(FAIL).legacy[1][mcs] += pstats->failed_pkts;
+ STATS_OP_FMT(RETRY).legacy[0][mcs] += pstats->retry_bytes;
+ STATS_OP_FMT(RETRY).legacy[1][mcs] += pstats->retry_pkts;
+ }
+
+ if (ATH10K_HW_AMPDU(pstats->flags)) {
+ tx_stats->ba_fails += ATH10K_HW_BA_FAIL(pstats->flags);
+
+ if (txrate->flags & RATE_INFO_FLAGS_MCS) {
+ STATS_OP_FMT(AMPDU).ht[0][ht_idx] +=
+ pstats->succ_bytes + pstats->retry_bytes;
+ STATS_OP_FMT(AMPDU).ht[1][ht_idx] +=
+ pstats->succ_pkts + pstats->retry_pkts;
+ } else {
+ STATS_OP_FMT(AMPDU).vht[0][mcs] +=
+ pstats->succ_bytes + pstats->retry_bytes;
+ STATS_OP_FMT(AMPDU).vht[1][mcs] +=
+ pstats->succ_pkts + pstats->retry_pkts;
+ }
+ STATS_OP_FMT(AMPDU).bw[0][bw] +=
+ pstats->succ_bytes + pstats->retry_bytes;
+ STATS_OP_FMT(AMPDU).nss[0][nss - 1] +=
+ pstats->succ_bytes + pstats->retry_bytes;
+ STATS_OP_FMT(AMPDU).gi[0][gi] +=
+ pstats->succ_bytes + pstats->retry_bytes;
+ STATS_OP_FMT(AMPDU).rate_table[0][idx] +=
+ pstats->succ_bytes + pstats->retry_bytes;
+ STATS_OP_FMT(AMPDU).bw[1][bw] +=
+ pstats->succ_pkts + pstats->retry_pkts;
+ STATS_OP_FMT(AMPDU).nss[1][nss - 1] +=
+ pstats->succ_pkts + pstats->retry_pkts;
+ STATS_OP_FMT(AMPDU).gi[1][gi] +=
+ pstats->succ_pkts + pstats->retry_pkts;
+ STATS_OP_FMT(AMPDU).rate_table[1][idx] +=
+ pstats->succ_pkts + pstats->retry_pkts;
+ } else {
+ tx_stats->ack_fails +=
+ ATH10K_HW_BA_FAIL(pstats->flags);
+ }
+
+ STATS_OP_FMT(SUCC).bw[0][bw] += pstats->succ_bytes;
+ STATS_OP_FMT(SUCC).nss[0][nss - 1] += pstats->succ_bytes;
+ STATS_OP_FMT(SUCC).gi[0][gi] += pstats->succ_bytes;
+
+ STATS_OP_FMT(SUCC).bw[1][bw] += pstats->succ_pkts;
+ STATS_OP_FMT(SUCC).nss[1][nss - 1] += pstats->succ_pkts;
+ STATS_OP_FMT(SUCC).gi[1][gi] += pstats->succ_pkts;
+
+ STATS_OP_FMT(FAIL).bw[0][bw] += pstats->failed_bytes;
+ STATS_OP_FMT(FAIL).nss[0][nss - 1] += pstats->failed_bytes;
+ STATS_OP_FMT(FAIL).gi[0][gi] += pstats->failed_bytes;
+
+ STATS_OP_FMT(FAIL).bw[1][bw] += pstats->failed_pkts;
+ STATS_OP_FMT(FAIL).nss[1][nss - 1] += pstats->failed_pkts;
+ STATS_OP_FMT(FAIL).gi[1][gi] += pstats->failed_pkts;
+
+ STATS_OP_FMT(RETRY).bw[0][bw] += pstats->retry_bytes;
+ STATS_OP_FMT(RETRY).nss[0][nss - 1] += pstats->retry_bytes;
+ STATS_OP_FMT(RETRY).gi[0][gi] += pstats->retry_bytes;
+
+ STATS_OP_FMT(RETRY).bw[1][bw] += pstats->retry_pkts;
+ STATS_OP_FMT(RETRY).nss[1][nss - 1] += pstats->retry_pkts;
+ STATS_OP_FMT(RETRY).gi[1][gi] += pstats->retry_pkts;
+
+ if (txrate->flags >= RATE_INFO_FLAGS_MCS) {
+ STATS_OP_FMT(SUCC).rate_table[0][idx] += pstats->succ_bytes;
+ STATS_OP_FMT(SUCC).rate_table[1][idx] += pstats->succ_pkts;
+ STATS_OP_FMT(FAIL).rate_table[0][idx] += pstats->failed_bytes;
+ STATS_OP_FMT(FAIL).rate_table[1][idx] += pstats->failed_pkts;
+ STATS_OP_FMT(RETRY).rate_table[0][idx] += pstats->retry_bytes;
+ STATS_OP_FMT(RETRY).rate_table[1][idx] += pstats->retry_pkts;
+ }
+
+ tx_stats->tx_duration += pstats->duration;
+}
+
+static void
+ath10k_update_per_peer_tx_stats(struct ath10k *ar,
+ struct ieee80211_sta *sta,
+ struct ath10k_per_peer_tx_stats *peer_stats)
+{
+ struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+ struct ieee80211_chanctx_conf *conf = NULL;
+ u8 rate = 0, sgi;
+ s8 rate_idx = 0;
+ bool skip_auto_rate;
+ struct rate_info txrate;
+
+ lockdep_assert_held(&ar->data_lock);
+
+ txrate.flags = ATH10K_HW_PREAMBLE(peer_stats->ratecode);
+ txrate.bw = ATH10K_HW_BW(peer_stats->flags);
+ txrate.nss = ATH10K_HW_NSS(peer_stats->ratecode);
+ txrate.mcs = ATH10K_HW_MCS_RATE(peer_stats->ratecode);
+ sgi = ATH10K_HW_GI(peer_stats->flags);
+ skip_auto_rate = ATH10K_FW_SKIPPED_RATE_CTRL(peer_stats->flags);
+
+ /* Firmware's rate control skips broadcast/management frames,
+ * frames for which the host has configured fixed rates, and some
+ * other special cases.
+ */
+ if (skip_auto_rate)
+ return;
+
+ if (txrate.flags == WMI_RATE_PREAMBLE_VHT && txrate.mcs > 9) {
+ ath10k_warn(ar, "Invalid VHT mcs %d peer stats", txrate.mcs);
+ return;
+ }
+
+ if (txrate.flags == WMI_RATE_PREAMBLE_HT &&
+ (txrate.mcs > 7 || txrate.nss < 1)) {
+ ath10k_warn(ar, "Invalid HT mcs %d nss %d peer stats",
+ txrate.mcs, txrate.nss);
+ return;
+ }
+
+ memset(&arsta->txrate, 0, sizeof(arsta->txrate));
+ memset(&arsta->tx_info.status, 0, sizeof(arsta->tx_info.status));
+ if (txrate.flags == WMI_RATE_PREAMBLE_CCK ||
+ txrate.flags == WMI_RATE_PREAMBLE_OFDM) {
+ rate = ATH10K_HW_LEGACY_RATE(peer_stats->ratecode);
+ /* This is hacky; FW reports the CCK rate 5.5 Mbps as 6 */
+ if (rate == 6 && txrate.flags == WMI_RATE_PREAMBLE_CCK)
+ rate = 5;
+ rate_idx = ath10k_get_legacy_rate_idx(ar, rate);
+ if (rate_idx < 0)
+ return;
+ arsta->txrate.legacy = rate;
+ } else if (txrate.flags == WMI_RATE_PREAMBLE_HT) {
+ arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
+ arsta->txrate.mcs = txrate.mcs + 8 * (txrate.nss - 1);
+ } else {
+ arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
+ arsta->txrate.mcs = txrate.mcs;
+ }
+
+ switch (txrate.flags) {
+ case WMI_RATE_PREAMBLE_OFDM:
+ if (arsta->arvif && arsta->arvif->vif)
+ conf = rcu_dereference(arsta->arvif->vif->bss_conf.chanctx_conf);
+ if (conf && conf->def.chan->band == NL80211_BAND_5GHZ)
+ arsta->tx_info.status.rates[0].idx = rate_idx - 4;
+ break;
+ case WMI_RATE_PREAMBLE_CCK:
+ arsta->tx_info.status.rates[0].idx = rate_idx;
+ if (sgi)
+ arsta->tx_info.status.rates[0].flags |=
+ (IEEE80211_TX_RC_USE_SHORT_PREAMBLE |
+ IEEE80211_TX_RC_SHORT_GI);
+ break;
+ case WMI_RATE_PREAMBLE_HT:
+ arsta->tx_info.status.rates[0].idx =
+ txrate.mcs + ((txrate.nss - 1) * 8);
+ if (sgi)
+ arsta->tx_info.status.rates[0].flags |=
+ IEEE80211_TX_RC_SHORT_GI;
+ arsta->tx_info.status.rates[0].flags |= IEEE80211_TX_RC_MCS;
+ break;
+ case WMI_RATE_PREAMBLE_VHT:
+ ieee80211_rate_set_vht(&arsta->tx_info.status.rates[0],
+ txrate.mcs, txrate.nss);
+ if (sgi)
+ arsta->tx_info.status.rates[0].flags |=
+ IEEE80211_TX_RC_SHORT_GI;
+ arsta->tx_info.status.rates[0].flags |= IEEE80211_TX_RC_VHT_MCS;
+ break;
+ }
+
+ arsta->txrate.nss = txrate.nss;
+ arsta->txrate.bw = ath10k_bw_to_mac80211_bw(txrate.bw);
+ arsta->last_tx_bitrate = cfg80211_calculate_bitrate(&arsta->txrate);
+ if (sgi)
+ arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
+
+ switch (arsta->txrate.bw) {
+ case RATE_INFO_BW_40:
+ arsta->tx_info.status.rates[0].flags |=
+ IEEE80211_TX_RC_40_MHZ_WIDTH;
+ break;
+ case RATE_INFO_BW_80:
+ arsta->tx_info.status.rates[0].flags |=
+ IEEE80211_TX_RC_80_MHZ_WIDTH;
+ break;
+ case RATE_INFO_BW_160:
+ arsta->tx_info.status.rates[0].flags |=
+ IEEE80211_TX_RC_160_MHZ_WIDTH;
+ break;
+ }
+
+ if (peer_stats->succ_pkts) {
+ arsta->tx_info.flags = IEEE80211_TX_STAT_ACK;
+ arsta->tx_info.status.rates[0].count = 1;
+ ieee80211_tx_rate_update(ar->hw, sta, &arsta->tx_info);
+ }
+
+ if (ar->htt.disable_tx_comp) {
+ arsta->tx_failed += peer_stats->failed_pkts;
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "tx failed %d\n",
+ arsta->tx_failed);
+ }
+
+ arsta->tx_retries += peer_stats->retry_pkts;
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx retries %d", arsta->tx_retries);
+
+ if (ath10k_debug_is_extd_tx_stats_enabled(ar))
+ ath10k_accumulate_per_peer_tx_stats(ar, arsta, peer_stats,
+ rate_idx);
+}
+
+static void ath10k_htt_fetch_peer_stats(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ struct htt_resp *resp = (struct htt_resp *)skb->data;
+ struct ath10k_per_peer_tx_stats *p_tx_stats = &ar->peer_tx_stats;
+ struct htt_per_peer_tx_stats_ind *tx_stats;
+ struct ieee80211_sta *sta;
+ struct ath10k_peer *peer;
+ int peer_id, i;
+ u8 ppdu_len, num_ppdu;
+
+ num_ppdu = resp->peer_tx_stats.num_ppdu;
+ ppdu_len = resp->peer_tx_stats.ppdu_len * sizeof(__le32);
+
+ if (skb->len < sizeof(struct htt_resp_hdr) + num_ppdu * ppdu_len) {
+ ath10k_warn(ar, "Invalid peer stats buf length %d\n", skb->len);
+ return;
+ }
+
+ tx_stats = (struct htt_per_peer_tx_stats_ind *)
+ (resp->peer_tx_stats.payload);
+ peer_id = __le16_to_cpu(tx_stats->peer_id);
+
+ rcu_read_lock();
+ spin_lock_bh(&ar->data_lock);
+ peer = ath10k_peer_find_by_id(ar, peer_id);
+ if (!peer || !peer->sta) {
+ ath10k_warn(ar, "Invalid peer id %d peer stats buffer\n",
+ peer_id);
+ goto out;
+ }
+
+ sta = peer->sta;
+ for (i = 0; i < num_ppdu; i++) {
+ tx_stats = (struct htt_per_peer_tx_stats_ind *)
+ (resp->peer_tx_stats.payload + i * ppdu_len);
+
+ p_tx_stats->succ_bytes = __le32_to_cpu(tx_stats->succ_bytes);
+ p_tx_stats->retry_bytes = __le32_to_cpu(tx_stats->retry_bytes);
+ p_tx_stats->failed_bytes =
+ __le32_to_cpu(tx_stats->failed_bytes);
+ p_tx_stats->ratecode = tx_stats->ratecode;
+ p_tx_stats->flags = tx_stats->flags;
+ p_tx_stats->succ_pkts = __le16_to_cpu(tx_stats->succ_pkts);
+ p_tx_stats->retry_pkts = __le16_to_cpu(tx_stats->retry_pkts);
+ p_tx_stats->failed_pkts = __le16_to_cpu(tx_stats->failed_pkts);
+ p_tx_stats->duration = __le16_to_cpu(tx_stats->tx_duration);
+
+ ath10k_update_per_peer_tx_stats(ar, sta, p_tx_stats);
+ }
+
+out:
+ spin_unlock_bh(&ar->data_lock);
+ rcu_read_unlock();
+}
+
+static void ath10k_fetch_10_2_tx_stats(struct ath10k *ar, u8 *data)
+{
+ struct ath10k_pktlog_hdr *hdr = (struct ath10k_pktlog_hdr *)data;
+ struct ath10k_per_peer_tx_stats *p_tx_stats = &ar->peer_tx_stats;
+ struct ath10k_10_2_peer_tx_stats *tx_stats;
+ struct ieee80211_sta *sta;
+ struct ath10k_peer *peer;
+ u16 log_type = __le16_to_cpu(hdr->log_type);
+ u32 peer_id = 0, i;
+
+ if (log_type != ATH_PKTLOG_TYPE_TX_STAT)
+ return;
+
+ tx_stats = (struct ath10k_10_2_peer_tx_stats *)((hdr->payload) +
+ ATH10K_10_2_TX_STATS_OFFSET);
+
+ if (!tx_stats->tx_ppdu_cnt)
+ return;
+
+ peer_id = tx_stats->peer_id;
+
+ rcu_read_lock();
+ spin_lock_bh(&ar->data_lock);
+ peer = ath10k_peer_find_by_id(ar, peer_id);
+ if (!peer || !peer->sta) {
+ ath10k_warn(ar, "Invalid peer id %d in peer stats buffer\n",
+ peer_id);
+ goto out;
+ }
+
+ sta = peer->sta;
+ for (i = 0; i < tx_stats->tx_ppdu_cnt; i++) {
+ p_tx_stats->succ_bytes =
+ __le16_to_cpu(tx_stats->success_bytes[i]);
+ p_tx_stats->retry_bytes =
+ __le16_to_cpu(tx_stats->retry_bytes[i]);
+ p_tx_stats->failed_bytes =
+ __le16_to_cpu(tx_stats->failed_bytes[i]);
+ p_tx_stats->ratecode = tx_stats->ratecode[i];
+ p_tx_stats->flags = tx_stats->flags[i];
+ p_tx_stats->succ_pkts = tx_stats->success_pkts[i];
+ p_tx_stats->retry_pkts = tx_stats->retry_pkts[i];
+ p_tx_stats->failed_pkts = tx_stats->failed_pkts[i];
+
+ ath10k_update_per_peer_tx_stats(ar, sta, p_tx_stats);
+ }
+ spin_unlock_bh(&ar->data_lock);
+ rcu_read_unlock();
+
+ return;
+
+out:
+ spin_unlock_bh(&ar->data_lock);
+ rcu_read_unlock();
+}
+
+static int ath10k_htt_rx_pn_len(enum htt_security_types sec_type)
+{
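+ /* Return the packet number length in bits; TKIP and CCMP use a
+ * 48-bit PN, other cipher types get no PN check here.
+ */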
+ switch (sec_type) {
+ case HTT_SECURITY_TKIP:
+ case HTT_SECURITY_TKIP_NOMIC:
+ case HTT_SECURITY_AES_CCMP:
+ return 48;
+ default:
+ return 0;
+ }
+}
+
+static void ath10k_htt_rx_sec_ind_handler(struct ath10k *ar,
+ struct htt_security_indication *ev)
+{
+ enum htt_txrx_sec_cast_type sec_index;
+ enum htt_security_types sec_type;
+ struct ath10k_peer *peer;
+
+ spin_lock_bh(&ar->data_lock);
+
+ peer = ath10k_peer_find_by_id(ar, __le16_to_cpu(ev->peer_id));
+ if (!peer) {
+ ath10k_warn(ar, "failed to find peer id %d for security indication",
+ __le16_to_cpu(ev->peer_id));
+ goto out;
+ }
+
+ sec_type = MS(ev->flags, HTT_SECURITY_TYPE);
+
+ if (ev->flags & HTT_SECURITY_IS_UNICAST)
+ sec_index = HTT_TXRX_SEC_UCAST;
+ else
+ sec_index = HTT_TXRX_SEC_MCAST;
+
+ peer->rx_pn[sec_index].sec_type = sec_type;
+ peer->rx_pn[sec_index].pn_len = ath10k_htt_rx_pn_len(sec_type);
+
+ memset(peer->tids_last_pn_valid, 0, sizeof(peer->tids_last_pn_valid));
+ memset(peer->tids_last_pn, 0, sizeof(peer->tids_last_pn));
+
+out:
+ spin_unlock_bh(&ar->data_lock);
+}
+
+bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct ath10k_htt *htt = &ar->htt;
+ struct htt_resp *resp = (struct htt_resp *)skb->data;
+ enum htt_t2h_msg_type type;
+
+ /* confirm alignment */
+ if (!IS_ALIGNED((unsigned long)skb->data, 4))
+ ath10k_warn(ar, "unaligned htt message, expect trouble\n");
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, msg_type: 0x%0X\n",
+ resp->hdr.msg_type);
+
+ if (resp->hdr.msg_type >= ar->htt.t2h_msg_types_max) {
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, unsupported msg_type: 0x%0X\n max: 0x%0X",
+ resp->hdr.msg_type, ar->htt.t2h_msg_types_max);
+ return true;
+ }
+ type = ar->htt.t2h_msg_types[resp->hdr.msg_type];
+
+ switch (type) {
+ case HTT_T2H_MSG_TYPE_VERSION_CONF: {
+ htt->target_version_major = resp->ver_resp.major;
+ htt->target_version_minor = resp->ver_resp.minor;
+ complete(&htt->target_version_received);
+ break;
+ }
+ case HTT_T2H_MSG_TYPE_RX_IND:
+ if (ar->bus_param.dev_type != ATH10K_DEV_TYPE_HL) {
+ ath10k_htt_rx_proc_rx_ind_ll(htt, &resp->rx_ind);
+ } else {
+ skb_queue_tail(&htt->rx_indication_head, skb);
+ return false;
+ }
+ break;
+ case HTT_T2H_MSG_TYPE_PEER_MAP: {
+ struct htt_peer_map_event ev = {
+ .vdev_id = resp->peer_map.vdev_id,
+ .peer_id = __le16_to_cpu(resp->peer_map.peer_id),
+ };
+ memcpy(ev.addr, resp->peer_map.addr, sizeof(ev.addr));
+ ath10k_peer_map_event(htt, &ev);
+ break;
+ }
+ case HTT_T2H_MSG_TYPE_PEER_UNMAP: {
+ struct htt_peer_unmap_event ev = {
+ .peer_id = __le16_to_cpu(resp->peer_unmap.peer_id),
+ };
+ ath10k_peer_unmap_event(htt, &ev);
+ break;
+ }
+ case HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION: {
+ struct htt_tx_done tx_done = {};
+ struct ath10k_htt *htt = &ar->htt;
+ struct ath10k_htc *htc = &ar->htc;
+ struct ath10k_htc_ep *ep = &ar->htc.endpoint[htt->eid];
+ int status = __le32_to_cpu(resp->mgmt_tx_completion.status);
+ int info = __le32_to_cpu(resp->mgmt_tx_completion.info);
+
+ tx_done.msdu_id = __le32_to_cpu(resp->mgmt_tx_completion.desc_id);
+
+ switch (status) {
+ case HTT_MGMT_TX_STATUS_OK:
+ tx_done.status = HTT_TX_COMPL_STATE_ACK;
+ if (test_bit(WMI_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS,
+ ar->wmi.svc_map) &&
+ (resp->mgmt_tx_completion.flags &
+ HTT_MGMT_TX_CMPL_FLAG_ACK_RSSI)) {
+ tx_done.ack_rssi =
+ FIELD_GET(HTT_MGMT_TX_CMPL_INFO_ACK_RSSI_MASK,
+ info);
+ }
+ break;
+ case HTT_MGMT_TX_STATUS_RETRY:
+ tx_done.status = HTT_TX_COMPL_STATE_NOACK;
+ break;
+ case HTT_MGMT_TX_STATUS_DROP:
+ tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
+ break;
+ }
+
+ if (htt->disable_tx_comp) {
+ spin_lock_bh(&htc->tx_lock);
+ ep->tx_credits++;
+ spin_unlock_bh(&htc->tx_lock);
+ }
+
+ status = ath10k_txrx_tx_unref(htt, &tx_done);
+ if (!status) {
+ spin_lock_bh(&htt->tx_lock);
+ ath10k_htt_tx_mgmt_dec_pending(htt);
+ spin_unlock_bh(&htt->tx_lock);
+ }
+ break;
+ }
+ case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
+ ath10k_htt_rx_tx_compl_ind(htt->ar, skb);
+ break;
+ case HTT_T2H_MSG_TYPE_SEC_IND: {
+ struct ath10k *ar = htt->ar;
+ struct htt_security_indication *ev = &resp->security_indication;
+
+ ath10k_htt_rx_sec_ind_handler(ar, ev);
+ ath10k_dbg(ar, ATH10K_DBG_HTT,
+ "sec ind peer_id %d unicast %d type %d\n",
+ __le16_to_cpu(ev->peer_id),
+ !!(ev->flags & HTT_SECURITY_IS_UNICAST),
+ MS(ev->flags, HTT_SECURITY_TYPE));
+ complete(&ar->install_key_done);
+ break;
+ }
+ case HTT_T2H_MSG_TYPE_RX_FRAG_IND: {
+ ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
+ skb->data, skb->len);
+ atomic_inc(&htt->num_mpdus_ready);
+
+ return ath10k_htt_rx_proc_rx_frag_ind(htt,
+ &resp->rx_frag_ind,
+ skb);
+ }
+ case HTT_T2H_MSG_TYPE_TEST:
+ break;
+ case HTT_T2H_MSG_TYPE_STATS_CONF:
+ trace_ath10k_htt_stats(ar, skb->data, skb->len);
+ break;
+ case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
+ /* Firmware can return tx frames if it's unable to fully
+ * process them and suspects host may be able to fix it. ath10k
+ * sends all tx frames as already inspected so this shouldn't
+ * happen unless fw has a bug.
+ */
+ ath10k_warn(ar, "received an unexpected htt tx inspect event\n");
+ break;
+ case HTT_T2H_MSG_TYPE_RX_ADDBA:
+ ath10k_htt_rx_addba(ar, resp);
+ break;
+ case HTT_T2H_MSG_TYPE_RX_DELBA:
+ ath10k_htt_rx_delba(ar, resp);
+ break;
+ case HTT_T2H_MSG_TYPE_PKTLOG: {
+ trace_ath10k_htt_pktlog(ar, resp->pktlog_msg.payload,
+ skb->len -
+ offsetof(struct htt_resp,
+ pktlog_msg.payload));
+
+ if (ath10k_peer_stats_enabled(ar))
+ ath10k_fetch_10_2_tx_stats(ar,
+ resp->pktlog_msg.payload);
+ break;
+ }
+ case HTT_T2H_MSG_TYPE_RX_FLUSH: {
+ /* Ignore this event because mac80211 takes care of Rx
+ * aggregation reordering.
+ */
+ break;
+ }
+ case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND: {
+ skb_queue_tail(&htt->rx_in_ord_compl_q, skb);
+ return false;
+ }
+ case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND: {
+ struct ath10k_htt *htt = &ar->htt;
+ struct ath10k_htc *htc = &ar->htc;
+ struct ath10k_htc_ep *ep = &ar->htc.endpoint[htt->eid];
+ u32 msg_word = __le32_to_cpu(*(__le32 *)resp);
+ int htt_credit_delta;
+
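+ /* The credit delta is sign-magnitude encoded: a set sign bit
+ * means credits were consumed, e.g. magnitude 3 with the sign bit
+ * set yields a delta of -3.
+ */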
+ htt_credit_delta = HTT_TX_CREDIT_DELTA_ABS_GET(msg_word);
+ if (HTT_TX_CREDIT_SIGN_BIT_GET(msg_word))
+ htt_credit_delta = -htt_credit_delta;
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT,
+ "htt credit update delta %d\n",
+ htt_credit_delta);
+
+ if (htt->disable_tx_comp) {
+ spin_lock_bh(&htc->tx_lock);
+ ep->tx_credits += htt_credit_delta;
+ spin_unlock_bh(&htc->tx_lock);
+ ath10k_dbg(ar, ATH10K_DBG_HTT,
+ "htt credit total %d\n",
+ ep->tx_credits);
+ ep->ep_ops.ep_tx_credits(htc->ar);
+ }
+ break;
+ }
+ case HTT_T2H_MSG_TYPE_CHAN_CHANGE: {
+ u32 phymode = __le32_to_cpu(resp->chan_change.phymode);
+ u32 freq = __le32_to_cpu(resp->chan_change.freq);
+
+ ar->tgt_oper_chan = ieee80211_get_channel(ar->hw->wiphy, freq);
+ ath10k_dbg(ar, ATH10K_DBG_HTT,
+ "htt chan change freq %u phymode %s\n",
+ freq, ath10k_wmi_phymode_str(phymode));
+ break;
+ }
+ case HTT_T2H_MSG_TYPE_AGGR_CONF:
+ break;
+ case HTT_T2H_MSG_TYPE_TX_FETCH_IND: {
+ struct sk_buff *tx_fetch_ind = skb_copy(skb, GFP_ATOMIC);
+
+ if (!tx_fetch_ind) {
+ ath10k_warn(ar, "failed to copy htt tx fetch ind\n");
+ break;
+ }
+ skb_queue_tail(&htt->tx_fetch_ind_q, tx_fetch_ind);
+ break;
+ }
+ case HTT_T2H_MSG_TYPE_TX_FETCH_CONFIRM:
+ ath10k_htt_rx_tx_fetch_confirm(ar, skb);
+ break;
+ case HTT_T2H_MSG_TYPE_TX_MODE_SWITCH_IND:
+ ath10k_htt_rx_tx_mode_switch_ind(ar, skb);
+ break;
+ case HTT_T2H_MSG_TYPE_PEER_STATS:
+ ath10k_htt_fetch_peer_stats(ar, skb);
+ break;
+ case HTT_T2H_MSG_TYPE_EN_STATS:
+ default:
+ ath10k_warn(ar, "htt event (%d) not handled\n",
+ resp->hdr.msg_type);
+ ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
+ skb->data, skb->len);
+ break;
+ }
+ return true;
+}
+EXPORT_SYMBOL(ath10k_htt_t2h_msg_handler);
+
+void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ trace_ath10k_htt_pktlog(ar, skb->data, skb->len);
+ dev_kfree_skb_any(skb);
+}
+EXPORT_SYMBOL(ath10k_htt_rx_pktlog_completion_handler);
+
+static int ath10k_htt_rx_deliver_msdu(struct ath10k *ar, int quota, int budget)
+{
+ struct sk_buff *skb;
+
+ while (quota < budget) {
+ if (skb_queue_empty(&ar->htt.rx_msdus_q))
+ break;
+
+ skb = skb_dequeue(&ar->htt.rx_msdus_q);
+ if (!skb)
+ break;
+ ath10k_process_rx(ar, skb);
+ quota++;
+ }
+
+ return quota;
+}
+
+int ath10k_htt_rx_hl_indication(struct ath10k *ar, int budget)
+{
+ struct htt_resp *resp;
+ struct ath10k_htt *htt = &ar->htt;
+ struct sk_buff *skb;
+ bool release;
+ int quota;
+
+ for (quota = 0; quota < budget; quota++) {
+ skb = skb_dequeue(&htt->rx_indication_head);
+ if (!skb)
+ break;
+
+ resp = (struct htt_resp *)skb->data;
+
+ release = ath10k_htt_rx_proc_rx_ind_hl(htt,
+ &resp->rx_ind_hl,
+ skb,
+ HTT_RX_PN_CHECK,
+ HTT_RX_NON_TKIP_MIC);
+
+ if (release)
+ dev_kfree_skb_any(skb);
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "rx indication poll pending count:%d\n",
+ skb_queue_len(&htt->rx_indication_head));
+ }
+ return quota;
+}
+EXPORT_SYMBOL(ath10k_htt_rx_hl_indication);
+
+int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget)
+{
+ struct ath10k_htt *htt = &ar->htt;
+ struct htt_tx_done tx_done = {};
+ struct sk_buff_head tx_ind_q;
+ struct sk_buff *skb;
+ unsigned long flags;
+ int quota = 0, done, ret;
+ bool resched_napi = false;
+
+ __skb_queue_head_init(&tx_ind_q);
+
+ /* Process pending frames before dequeuing more data
+ * from hardware.
+ */
+ quota = ath10k_htt_rx_deliver_msdu(ar, quota, budget);
+ if (quota == budget) {
+ resched_napi = true;
+ goto exit;
+ }
+
+ while ((skb = skb_dequeue(&htt->rx_in_ord_compl_q))) {
+ spin_lock_bh(&htt->rx_ring.lock);
+ ret = ath10k_htt_rx_in_ord_ind(ar, skb);
+ spin_unlock_bh(&htt->rx_ring.lock);
+
+ dev_kfree_skb_any(skb);
+ if (ret == -EIO) {
+ resched_napi = true;
+ goto exit;
+ }
+ }
+
+ while (atomic_read(&htt->num_mpdus_ready)) {
+ ret = ath10k_htt_rx_handle_amsdu(htt);
+ if (ret == -EIO) {
+ resched_napi = true;
+ goto exit;
+ }
+ atomic_dec(&htt->num_mpdus_ready);
+ }
+
+ /* Deliver received data after processing data from hardware */
+ quota = ath10k_htt_rx_deliver_msdu(ar, quota, budget);
+
+ /* From NAPI documentation:
+ * The napi poll() function may also process TX completions, in which
+ * case if it processes the entire TX ring then it should count that
+ * work as the rest of the budget.
+ */
+ if ((quota < budget) && !kfifo_is_empty(&htt->txdone_fifo))
+ quota = budget;
+
+ /* kfifo_get: called only within this NAPI poll context, so it is
+ * neatly serialized.
+ * From kfifo_get() documentation:
+ * Note that with only one concurrent reader and one concurrent writer,
+ * you don't need extra locking to use these macros.
+ */
+ while (kfifo_get(&htt->txdone_fifo, &tx_done))
+ ath10k_txrx_tx_unref(htt, &tx_done);
+
+ ath10k_mac_tx_push_pending(ar);
+
+ spin_lock_irqsave(&htt->tx_fetch_ind_q.lock, flags);
+ skb_queue_splice_init(&htt->tx_fetch_ind_q, &tx_ind_q);
+ spin_unlock_irqrestore(&htt->tx_fetch_ind_q.lock, flags);
+
+ while ((skb = __skb_dequeue(&tx_ind_q))) {
+ ath10k_htt_rx_tx_fetch_ind(ar, skb);
+ dev_kfree_skb_any(skb);
+ }
+
+exit:
+ ath10k_htt_rx_msdu_buff_replenish(htt);
+ /* In case of rx failure or more data to read, report budget
+ * to reschedule NAPI poll
+ */
+ done = resched_napi ? budget : quota;
+
+ return done;
+}
+EXPORT_SYMBOL(ath10k_htt_txrx_compl_task);
+
+static const struct ath10k_htt_rx_ops htt_rx_ops_32 = {
+ .htt_get_rx_ring_size = ath10k_htt_get_rx_ring_size_32,
+ .htt_config_paddrs_ring = ath10k_htt_config_paddrs_ring_32,
+ .htt_set_paddrs_ring = ath10k_htt_set_paddrs_ring_32,
+ .htt_get_vaddr_ring = ath10k_htt_get_vaddr_ring_32,
+ .htt_reset_paddrs_ring = ath10k_htt_reset_paddrs_ring_32,
+};
+
+static const struct ath10k_htt_rx_ops htt_rx_ops_64 = {
+ .htt_get_rx_ring_size = ath10k_htt_get_rx_ring_size_64,
+ .htt_config_paddrs_ring = ath10k_htt_config_paddrs_ring_64,
+ .htt_set_paddrs_ring = ath10k_htt_set_paddrs_ring_64,
+ .htt_get_vaddr_ring = ath10k_htt_get_vaddr_ring_64,
+ .htt_reset_paddrs_ring = ath10k_htt_reset_paddrs_ring_64,
+};
+
+static const struct ath10k_htt_rx_ops htt_rx_ops_hl = {
+ .htt_rx_proc_rx_frag_ind = ath10k_htt_rx_proc_rx_frag_ind_hl,
+};
+
+void ath10k_htt_set_rx_ops(struct ath10k_htt *htt)
+{
+ struct ath10k *ar = htt->ar;
+
+ if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
+ htt->rx_ops = &htt_rx_ops_hl;
+ else if (ar->hw_params.target_64bit)
+ htt->rx_ops = &htt_rx_ops_64;
+ else
+ htt->rx_ops = &htt_rx_ops_32;
+}
diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c
new file mode 100644
index 000000000000..d6f1d85ba871
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
@@ -0,0 +1,1842 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#include <linux/export.h>
+#include <linux/etherdevice.h>
+#include "htt.h"
+#include "mac.h"
+#include "hif.h"
+#include "txrx.h"
+#include "debug.h"
+
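+ /* Encode a queue depth in bytes as a compact (exponent, factor)
+ * pair: the depth is expressed in 128-byte units and every
+ * exponent step scales the factor by 8, so e.g. 40960 bytes ->
+ * factor 320 -> exp 1, factor 40 (40 << (7 + 3 * 1) == 40960).
+ */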
+static u8 ath10k_htt_tx_txq_calc_size(size_t count)
+{
+ int exp;
+ int factor;
+
+ exp = 0;
+ factor = count >> 7;
+
+ while (factor >= 64 && exp < 4) {
+ factor >>= 3;
+ exp++;
+ }
+
+ if (exp == 4)
+ return 0xff;
+
+ if (count > 0)
+ factor = max(1, factor);
+
+ return SM(exp, HTT_TX_Q_STATE_ENTRY_EXP) |
+ SM(factor, HTT_TX_Q_STATE_ENTRY_FACTOR);
+}
+
+static void __ath10k_htt_tx_txq_recalc(struct ieee80211_hw *hw,
+ struct ieee80211_txq *txq)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_sta *arsta;
+ struct ath10k_vif *arvif = (void *)txq->vif->drv_priv;
+ unsigned long byte_cnt;
+ int idx;
+ u32 bit;
+ u16 peer_id;
+ u8 tid;
+ u8 count;
+
+ lockdep_assert_held(&ar->htt.tx_lock);
+
+ if (!ar->htt.tx_q_state.enabled)
+ return;
+
+ if (ar->htt.tx_q_state.mode != HTT_TX_MODE_SWITCH_PUSH_PULL)
+ return;
+
+ if (txq->sta) {
+ arsta = (void *)txq->sta->drv_priv;
+ peer_id = arsta->peer_id;
+ } else {
+ peer_id = arvif->peer_id;
+ }
+
+ tid = txq->tid;
+ bit = BIT(peer_id % 32);
+ idx = peer_id / 32;
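+ /* map[tid] is a per-TID bitmap of 32-bit words with one bit per
+ * peer marking a non-empty queue, e.g. peer_id 70 lands in word 2,
+ * bit 6.
+ */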
+
+ ieee80211_txq_get_depth(txq, NULL, &byte_cnt);
+ count = ath10k_htt_tx_txq_calc_size(byte_cnt);
+
+ if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
+ unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
+ ath10k_warn(ar, "refusing to update txq for peer_id %u tid %u due to out of bounds\n",
+ peer_id, tid);
+ return;
+ }
+
+ ar->htt.tx_q_state.vaddr->count[tid][peer_id] = count;
+ ar->htt.tx_q_state.vaddr->map[tid][idx] &= ~bit;
+ ar->htt.tx_q_state.vaddr->map[tid][idx] |= count ? bit : 0;
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx txq state update peer_id %u tid %u count %u\n",
+ peer_id, tid, count);
+}
+
+static void __ath10k_htt_tx_txq_sync(struct ath10k *ar)
+{
+ u32 seq;
+ size_t size;
+
+ lockdep_assert_held(&ar->htt.tx_lock);
+
+ if (!ar->htt.tx_q_state.enabled)
+ return;
+
+ if (ar->htt.tx_q_state.mode != HTT_TX_MODE_SWITCH_PUSH_PULL)
+ return;
+
+ seq = le32_to_cpu(ar->htt.tx_q_state.vaddr->seq);
+ seq++;
+ ar->htt.tx_q_state.vaddr->seq = cpu_to_le32(seq);
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx txq state update commit seq %u\n",
+ seq);
+
+ size = sizeof(*ar->htt.tx_q_state.vaddr);
+ dma_sync_single_for_device(ar->dev,
+ ar->htt.tx_q_state.paddr,
+ size,
+ DMA_TO_DEVICE);
+}
+
+void ath10k_htt_tx_txq_recalc(struct ieee80211_hw *hw,
+ struct ieee80211_txq *txq)
+{
+ struct ath10k *ar = hw->priv;
+
+ spin_lock_bh(&ar->htt.tx_lock);
+ __ath10k_htt_tx_txq_recalc(hw, txq);
+ spin_unlock_bh(&ar->htt.tx_lock);
+}
+
+void ath10k_htt_tx_txq_sync(struct ath10k *ar)
+{
+ spin_lock_bh(&ar->htt.tx_lock);
+ __ath10k_htt_tx_txq_sync(ar);
+ spin_unlock_bh(&ar->htt.tx_lock);
+}
+
+void ath10k_htt_tx_txq_update(struct ieee80211_hw *hw,
+ struct ieee80211_txq *txq)
+{
+ struct ath10k *ar = hw->priv;
+
+ spin_lock_bh(&ar->htt.tx_lock);
+ __ath10k_htt_tx_txq_recalc(hw, txq);
+ __ath10k_htt_tx_txq_sync(ar);
+ spin_unlock_bh(&ar->htt.tx_lock);
+}
+
+void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt)
+{
+ lockdep_assert_held(&htt->tx_lock);
+
+ htt->num_pending_tx--;
+ if (htt->num_pending_tx == htt->max_num_pending_tx - 1)
+ ath10k_mac_tx_unlock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);
+
+ if (htt->num_pending_tx == 0)
+ wake_up(&htt->empty_tx_wq);
+}
+
+int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt)
+{
+ lockdep_assert_held(&htt->tx_lock);
+
+ if (htt->num_pending_tx >= htt->max_num_pending_tx)
+ return -EBUSY;
+
+ htt->num_pending_tx++;
+ if (htt->num_pending_tx == htt->max_num_pending_tx)
+ ath10k_mac_tx_lock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);
+
+ return 0;
+}
+
+int ath10k_htt_tx_mgmt_inc_pending(struct ath10k_htt *htt, bool is_mgmt,
+ bool is_presp)
+{
+ struct ath10k *ar = htt->ar;
+
+ lockdep_assert_held(&htt->tx_lock);
+
+ if (!is_mgmt || !ar->hw_params.max_probe_resp_desc_thres)
+ return 0;
+
+ if (is_presp &&
+ ar->hw_params.max_probe_resp_desc_thres < htt->num_pending_mgmt_tx)
+ return -EBUSY;
+
+ htt->num_pending_mgmt_tx++;
+
+ return 0;
+}
+
+void ath10k_htt_tx_mgmt_dec_pending(struct ath10k_htt *htt)
+{
+ lockdep_assert_held(&htt->tx_lock);
+
+ if (!htt->ar->hw_params.max_probe_resp_desc_thres)
+ return;
+
+ htt->num_pending_mgmt_tx--;
+}
+
+int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb)
+{
+ struct ath10k *ar = htt->ar;
+ int ret;
+
+ spin_lock_bh(&htt->tx_lock);
+ ret = idr_alloc(&htt->pending_tx, skb, 0,
+ htt->max_num_pending_tx, GFP_ATOMIC);
+ spin_unlock_bh(&htt->tx_lock);
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", ret);
+
+ return ret;
+}
+
+void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
+{
+ struct ath10k *ar = htt->ar;
+
+ lockdep_assert_held(&htt->tx_lock);
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx free msdu_id %u\n", msdu_id);
+
+ idr_remove(&htt->pending_tx, msdu_id);
+}
+
+static void ath10k_htt_tx_free_cont_txbuf_32(struct ath10k_htt *htt)
+{
+ struct ath10k *ar = htt->ar;
+ size_t size;
+
+ if (!htt->txbuf.vaddr_txbuff_32)
+ return;
+
+ size = htt->txbuf.size;
+ dma_free_coherent(ar->dev, size, htt->txbuf.vaddr_txbuff_32,
+ htt->txbuf.paddr);
+ htt->txbuf.vaddr_txbuff_32 = NULL;
+}
+
+static int ath10k_htt_tx_alloc_cont_txbuf_32(struct ath10k_htt *htt)
+{
+ struct ath10k *ar = htt->ar;
+ size_t size;
+
+ size = htt->max_num_pending_tx *
+ sizeof(struct ath10k_htt_txbuf_32);
+
+ htt->txbuf.vaddr_txbuff_32 = dma_alloc_coherent(ar->dev, size,
+ &htt->txbuf.paddr,
+ GFP_KERNEL);
+ if (!htt->txbuf.vaddr_txbuff_32)
+ return -ENOMEM;
+
+ htt->txbuf.size = size;
+
+ return 0;
+}
+
+static void ath10k_htt_tx_free_cont_txbuf_64(struct ath10k_htt *htt)
+{
+ struct ath10k *ar = htt->ar;
+ size_t size;
+
+ if (!htt->txbuf.vaddr_txbuff_64)
+ return;
+
+ size = htt->txbuf.size;
+ dma_free_coherent(ar->dev, size, htt->txbuf.vaddr_txbuff_64,
+ htt->txbuf.paddr);
+ htt->txbuf.vaddr_txbuff_64 = NULL;
+}
+
+static int ath10k_htt_tx_alloc_cont_txbuf_64(struct ath10k_htt *htt)
+{
+ struct ath10k *ar = htt->ar;
+ size_t size;
+
+ size = htt->max_num_pending_tx *
+ sizeof(struct ath10k_htt_txbuf_64);
+
+ htt->txbuf.vaddr_txbuff_64 = dma_alloc_coherent(ar->dev, size,
+ &htt->txbuf.paddr,
+ GFP_KERNEL);
+ if (!htt->txbuf.vaddr_txbuff_64)
+ return -ENOMEM;
+
+ htt->txbuf.size = size;
+
+ return 0;
+}
+
+static void ath10k_htt_tx_free_cont_frag_desc_32(struct ath10k_htt *htt)
+{
+ size_t size;
+
+ if (!htt->frag_desc.vaddr_desc_32)
+ return;
+
+ size = htt->max_num_pending_tx *
+ sizeof(struct htt_msdu_ext_desc);
+
+ dma_free_coherent(htt->ar->dev,
+ size,
+ htt->frag_desc.vaddr_desc_32,
+ htt->frag_desc.paddr);
+
+ htt->frag_desc.vaddr_desc_32 = NULL;
+}
+
+static int ath10k_htt_tx_alloc_cont_frag_desc_32(struct ath10k_htt *htt)
+{
+ struct ath10k *ar = htt->ar;
+ size_t size;
+
+ if (!ar->hw_params.continuous_frag_desc)
+ return 0;
+
+ size = htt->max_num_pending_tx *
+ sizeof(struct htt_msdu_ext_desc);
+ htt->frag_desc.vaddr_desc_32 = dma_alloc_coherent(ar->dev, size,
+ &htt->frag_desc.paddr,
+ GFP_KERNEL);
+ if (!htt->frag_desc.vaddr_desc_32) {
+ ath10k_err(ar, "failed to alloc fragment desc memory\n");
+ return -ENOMEM;
+ }
+ htt->frag_desc.size = size;
+
+ return 0;
+}
+
+static void ath10k_htt_tx_free_cont_frag_desc_64(struct ath10k_htt *htt)
+{
+ size_t size;
+
+ if (!htt->frag_desc.vaddr_desc_64)
+ return;
+
+ size = htt->max_num_pending_tx *
+ sizeof(struct htt_msdu_ext_desc_64);
+
+ dma_free_coherent(htt->ar->dev,
+ size,
+ htt->frag_desc.vaddr_desc_64,
+ htt->frag_desc.paddr);
+
+ htt->frag_desc.vaddr_desc_64 = NULL;
+}
+
+static int ath10k_htt_tx_alloc_cont_frag_desc_64(struct ath10k_htt *htt)
+{
+ struct ath10k *ar = htt->ar;
+ size_t size;
+
+ if (!ar->hw_params.continuous_frag_desc)
+ return 0;
+
+ size = htt->max_num_pending_tx *
+ sizeof(struct htt_msdu_ext_desc_64);
+
+ htt->frag_desc.vaddr_desc_64 = dma_alloc_coherent(ar->dev, size,
+ &htt->frag_desc.paddr,
+ GFP_KERNEL);
+ if (!htt->frag_desc.vaddr_desc_64) {
+ ath10k_err(ar, "failed to alloc fragment desc memory\n");
+ return -ENOMEM;
+ }
+ htt->frag_desc.size = size;
+
+ return 0;
+}
+
+static void ath10k_htt_tx_free_txq(struct ath10k_htt *htt)
+{
+ struct ath10k *ar = htt->ar;
+ size_t size;
+
+ if (!test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
+ ar->running_fw->fw_file.fw_features))
+ return;
+
+ size = sizeof(*htt->tx_q_state.vaddr);
+
+ dma_unmap_single(ar->dev, htt->tx_q_state.paddr, size, DMA_TO_DEVICE);
+ kfree(htt->tx_q_state.vaddr);
+}
+
+static int ath10k_htt_tx_alloc_txq(struct ath10k_htt *htt)
+{
+ struct ath10k *ar = htt->ar;
+ size_t size;
+ int ret;
+
+ if (!test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
+ ar->running_fw->fw_file.fw_features))
+ return 0;
+
+ htt->tx_q_state.num_peers = HTT_TX_Q_STATE_NUM_PEERS;
+ htt->tx_q_state.num_tids = HTT_TX_Q_STATE_NUM_TIDS;
+ htt->tx_q_state.type = HTT_Q_DEPTH_TYPE_BYTES;
+
+ size = sizeof(*htt->tx_q_state.vaddr);
+ htt->tx_q_state.vaddr = kzalloc(size, GFP_KERNEL);
+ if (!htt->tx_q_state.vaddr)
+ return -ENOMEM;
+
+ htt->tx_q_state.paddr = dma_map_single(ar->dev, htt->tx_q_state.vaddr,
+ size, DMA_TO_DEVICE);
+ ret = dma_mapping_error(ar->dev, htt->tx_q_state.paddr);
+ if (ret) {
+ ath10k_warn(ar, "failed to dma map tx_q_state: %d\n", ret);
+ kfree(htt->tx_q_state.vaddr);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void ath10k_htt_tx_free_txdone_fifo(struct ath10k_htt *htt)
+{
+ WARN_ON(!kfifo_is_empty(&htt->txdone_fifo));
+ kfifo_free(&htt->txdone_fifo);
+}
+
+static int ath10k_htt_tx_alloc_txdone_fifo(struct ath10k_htt *htt)
+{
+ int ret;
+ size_t size;
+
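+ /* kfifo rounds sizes to a power of two; round up explicitly so
+ * the fifo is guaranteed to hold one tx completion per possible
+ * in-flight msdu.
+ */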
+ size = roundup_pow_of_two(htt->max_num_pending_tx);
+ ret = kfifo_alloc(&htt->txdone_fifo, size, GFP_KERNEL);
+ return ret;
+}
+
+static int ath10k_htt_tx_alloc_buf(struct ath10k_htt *htt)
+{
+ struct ath10k *ar = htt->ar;
+ int ret;
+
+ ret = ath10k_htt_alloc_txbuff(htt);
+ if (ret) {
+ ath10k_err(ar, "failed to alloc cont tx buffer: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath10k_htt_alloc_frag_desc(htt);
+ if (ret) {
+ ath10k_err(ar, "failed to alloc cont frag desc: %d\n", ret);
+ goto free_txbuf;
+ }
+
+ ret = ath10k_htt_tx_alloc_txq(htt);
+ if (ret) {
+ ath10k_err(ar, "failed to alloc txq: %d\n", ret);
+ goto free_frag_desc;
+ }
+
+ ret = ath10k_htt_tx_alloc_txdone_fifo(htt);
+ if (ret) {
+ ath10k_err(ar, "failed to alloc txdone fifo: %d\n", ret);
+ goto free_txq;
+ }
+
+ return 0;
+
+free_txq:
+ ath10k_htt_tx_free_txq(htt);
+
+free_frag_desc:
+ ath10k_htt_free_frag_desc(htt);
+
+free_txbuf:
+ ath10k_htt_free_txbuff(htt);
+
+ return ret;
+}
+
+int ath10k_htt_tx_start(struct ath10k_htt *htt)
+{
+ struct ath10k *ar = htt->ar;
+ int ret;
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
+ htt->max_num_pending_tx);
+
+ spin_lock_init(&htt->tx_lock);
+ idr_init(&htt->pending_tx);
+
+ if (htt->tx_mem_allocated)
+ return 0;
+
+ if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
+ return 0;
+
+ ret = ath10k_htt_tx_alloc_buf(htt);
+ if (ret)
+ goto free_idr_pending_tx;
+
+ htt->tx_mem_allocated = true;
+
+ return 0;
+
+free_idr_pending_tx:
+ idr_destroy(&htt->pending_tx);
+
+ return ret;
+}
+
+static int ath10k_htt_tx_clean_up_pending(int msdu_id, void *skb, void *ctx)
+{
+ struct ath10k *ar = ctx;
+ struct ath10k_htt *htt = &ar->htt;
+ struct htt_tx_done tx_done = {};
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "force cleanup msdu_id %u\n", msdu_id);
+
+ tx_done.msdu_id = msdu_id;
+ tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
+
+ ath10k_txrx_tx_unref(htt, &tx_done);
+
+ return 0;
+}
+
+void ath10k_htt_tx_destroy(struct ath10k_htt *htt)
+{
+ if (!htt->tx_mem_allocated)
+ return;
+
+ ath10k_htt_free_txbuff(htt);
+ ath10k_htt_tx_free_txq(htt);
+ ath10k_htt_free_frag_desc(htt);
+ ath10k_htt_tx_free_txdone_fifo(htt);
+ htt->tx_mem_allocated = false;
+}
+
+static void ath10k_htt_flush_tx_queue(struct ath10k_htt *htt)
+{
+ ath10k_htc_stop_hl(htt->ar);
+ idr_for_each(&htt->pending_tx, ath10k_htt_tx_clean_up_pending, htt->ar);
+}
+
+void ath10k_htt_tx_stop(struct ath10k_htt *htt)
+{
+ ath10k_htt_flush_tx_queue(htt);
+ idr_destroy(&htt->pending_tx);
+}
+
+void ath10k_htt_tx_free(struct ath10k_htt *htt)
+{
+ ath10k_htt_tx_stop(htt);
+ ath10k_htt_tx_destroy(htt);
+}
+
+void ath10k_htt_op_ep_tx_credits(struct ath10k *ar)
+{
+ queue_work(ar->workqueue, &ar->bundle_tx_work);
+}
+
+void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct ath10k_htt *htt = &ar->htt;
+ struct htt_tx_done tx_done = {};
+ struct htt_cmd_hdr *htt_hdr;
+ struct htt_data_tx_desc *desc_hdr = NULL;
+ u16 msdu_id = 0;
+ u16 flags1 = 0;
+ u8 msg_type = 0;
+
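+ /* When firmware tx completions are disabled, the tx status has to be
+ * derived here from the HTC send completion of the TX_FRM command
+ * itself.
+ */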
+ if (htt->disable_tx_comp) {
+ htt_hdr = (struct htt_cmd_hdr *)skb->data;
+ msg_type = htt_hdr->msg_type;
+
+ if (msg_type == HTT_H2T_MSG_TYPE_TX_FRM) {
+ desc_hdr = (struct htt_data_tx_desc *)
+ (skb->data + sizeof(*htt_hdr));
+ flags1 = __le16_to_cpu(desc_hdr->flags1);
+ /* Read the msdu id here, before the skb is freed below. */
+ msdu_id = __le16_to_cpu(desc_hdr->id);
+ skb_pull(skb, sizeof(struct htt_cmd_hdr));
+ skb_pull(skb, sizeof(struct htt_data_tx_desc));
+ }
+ }
+
+ dev_kfree_skb_any(skb);
+
+ if ((!htt->disable_tx_comp) || (msg_type != HTT_H2T_MSG_TYPE_TX_FRM))
+ return;
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT,
+ "htt tx complete msdu id %u flags1 0x%x\n",
+ msdu_id, flags1);
+
+ if (flags1 & HTT_DATA_TX_DESC_FLAGS1_TX_COMPLETE)
+ return;
+
+ tx_done.status = HTT_TX_COMPL_STATE_ACK;
+ tx_done.msdu_id = msdu_id;
+ ath10k_txrx_tx_unref(&ar->htt, &tx_done);
+}
+
+void ath10k_htt_hif_tx_complete(struct ath10k *ar, struct sk_buff *skb)
+{
+ dev_kfree_skb_any(skb);
+}
+EXPORT_SYMBOL(ath10k_htt_hif_tx_complete);
+
+int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt)
+{
+ struct ath10k *ar = htt->ar;
+ struct sk_buff *skb;
+ struct htt_cmd *cmd;
+ int len = 0;
+ int ret;
+
+ len += sizeof(cmd->hdr);
+ len += sizeof(cmd->ver_req);
+
+ skb = ath10k_htc_alloc_skb(ar, len);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put(skb, len);
+ cmd = (struct htt_cmd *)skb->data;
+ cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_VERSION_REQ;
+
+ ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+
+ return 0;
+}
+
+int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u32 mask, u32 reset_mask,
+ u64 cookie)
+{
+ struct ath10k *ar = htt->ar;
+ struct htt_stats_req *req;
+ struct sk_buff *skb;
+ struct htt_cmd *cmd;
+ int len = 0, ret;
+
+ len += sizeof(cmd->hdr);
+ len += sizeof(cmd->stats_req);
+
+ skb = ath10k_htc_alloc_skb(ar, len);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put(skb, len);
+ cmd = (struct htt_cmd *)skb->data;
+ cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_STATS_REQ;
+
+ req = &cmd->stats_req;
+
+ memset(req, 0, sizeof(*req));
+
+ /* currently only masks of up to 24 bits are supported, so there is
+ * no need to worry about endianness here
+ */
+ memcpy(req->upload_types, &mask, 3);
+ memcpy(req->reset_types, &reset_mask, 3);
+ req->stat_type = HTT_STATS_REQ_CFG_STAT_TYPE_INVALID;
+ req->cookie_lsb = cpu_to_le32(cookie & 0xffffffff);
+ req->cookie_msb = cpu_to_le32((cookie & 0xffffffff00000000ULL) >> 32);
+
+ ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
+ if (ret) {
+ ath10k_warn(ar, "failed to send htt type stats request: %d",
+ ret);
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath10k_htt_send_frag_desc_bank_cfg_32(struct ath10k_htt *htt)
+{
+ struct ath10k *ar = htt->ar;
+ struct sk_buff *skb;
+ struct htt_cmd *cmd;
+ struct htt_frag_desc_bank_cfg32 *cfg;
+ int ret, size;
+ u8 info;
+
+ if (!ar->hw_params.continuous_frag_desc)
+ return 0;
+
+ if (!htt->frag_desc.paddr) {
+ ath10k_warn(ar, "invalid frag desc memory\n");
+ return -EINVAL;
+ }
+
+ size = sizeof(cmd->hdr) + sizeof(cmd->frag_desc_bank_cfg32);
+ skb = ath10k_htc_alloc_skb(ar, size);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put(skb, size);
+ cmd = (struct htt_cmd *)skb->data;
+ cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG;
+
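+ /* SM() shifts the value into place and masks it to the width of the
+ * named register field.
+ */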
+ info = 0;
+ info |= SM(htt->tx_q_state.type,
+ HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_DEPTH_TYPE);
+
+ if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
+ ar->running_fw->fw_file.fw_features))
+ info |= HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_VALID;
+
+ cfg = &cmd->frag_desc_bank_cfg32;
+ cfg->info = info;
+ cfg->num_banks = 1;
+ cfg->desc_size = sizeof(struct htt_msdu_ext_desc);
+ cfg->bank_base_addrs[0] = __cpu_to_le32(htt->frag_desc.paddr);
+ cfg->bank_id[0].bank_min_id = 0;
+ cfg->bank_id[0].bank_max_id = __cpu_to_le16(htt->max_num_pending_tx -
+ 1);
+
+ cfg->q_state.paddr = cpu_to_le32(htt->tx_q_state.paddr);
+ cfg->q_state.num_peers = cpu_to_le16(htt->tx_q_state.num_peers);
+ cfg->q_state.num_tids = cpu_to_le16(htt->tx_q_state.num_tids);
+ cfg->q_state.record_size = HTT_TX_Q_STATE_ENTRY_SIZE;
+ cfg->q_state.record_multiplier = HTT_TX_Q_STATE_ENTRY_MULTIPLIER;
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt frag desc bank cmd\n");
+
+ ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
+ if (ret) {
+ ath10k_warn(ar, "failed to send frag desc bank cfg request: %d\n",
+ ret);
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath10k_htt_send_frag_desc_bank_cfg_64(struct ath10k_htt *htt)
+{
+ struct ath10k *ar = htt->ar;
+ struct sk_buff *skb;
+ struct htt_cmd *cmd;
+ struct htt_frag_desc_bank_cfg64 *cfg;
+ int ret, size;
+ u8 info;
+
+ if (!ar->hw_params.continuous_frag_desc)
+ return 0;
+
+ if (!htt->frag_desc.paddr) {
+ ath10k_warn(ar, "invalid frag desc memory\n");
+ return -EINVAL;
+ }
+
+ size = sizeof(cmd->hdr) + sizeof(cmd->frag_desc_bank_cfg64);
+ skb = ath10k_htc_alloc_skb(ar, size);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put(skb, size);
+ cmd = (struct htt_cmd *)skb->data;
+ cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG;
+
+ info = 0;
+ info |= SM(htt->tx_q_state.type,
+ HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_DEPTH_TYPE);
+
+ if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
+ ar->running_fw->fw_file.fw_features))
+ info |= HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_VALID;
+
+ cfg = &cmd->frag_desc_bank_cfg64;
+ cfg->info = info;
+ cfg->num_banks = 1;
+ cfg->desc_size = sizeof(struct htt_msdu_ext_desc_64);
+ cfg->bank_base_addrs[0] = __cpu_to_le64(htt->frag_desc.paddr);
+ cfg->bank_id[0].bank_min_id = 0;
+ cfg->bank_id[0].bank_max_id = __cpu_to_le16(htt->max_num_pending_tx -
+ 1);
+
+ cfg->q_state.paddr = cpu_to_le32(htt->tx_q_state.paddr);
+ cfg->q_state.num_peers = cpu_to_le16(htt->tx_q_state.num_peers);
+ cfg->q_state.num_tids = cpu_to_le16(htt->tx_q_state.num_tids);
+ cfg->q_state.record_size = HTT_TX_Q_STATE_ENTRY_SIZE;
+ cfg->q_state.record_multiplier = HTT_TX_Q_STATE_ENTRY_MULTIPLIER;
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt frag desc bank cmd\n");
+
+ ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
+ if (ret) {
+ ath10k_warn(ar, "failed to send frag desc bank cfg request: %d\n",
+ ret);
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void ath10k_htt_fill_rx_desc_offset_32(struct ath10k_hw_params *hw,
+ struct htt_rx_ring_setup_ring32 *rx_ring)
+{
+ ath10k_htt_rx_desc_get_offsets(hw, &rx_ring->offsets);
+}
+
+static void ath10k_htt_fill_rx_desc_offset_64(struct ath10k_hw_params *hw,
+ struct htt_rx_ring_setup_ring64 *rx_ring)
+{
+ ath10k_htt_rx_desc_get_offsets(hw, &rx_ring->offsets);
+}
+
+static int ath10k_htt_send_rx_ring_cfg_32(struct ath10k_htt *htt)
+{
+ struct ath10k *ar = htt->ar;
+ struct ath10k_hw_params *hw = &ar->hw_params;
+ struct sk_buff *skb;
+ struct htt_cmd *cmd;
+ struct htt_rx_ring_setup_ring32 *ring;
+ const int num_rx_ring = 1;
+ u16 flags;
+ u32 fw_idx;
+ int len;
+ int ret;
+
+ /*
+ * the HW expects the buffer to be an integral number of 4-byte
+ * "words"
+ */
+ BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4));
+ BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0);
+
+ len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup_32.hdr)
+ + (sizeof(*ring) * num_rx_ring);
+ skb = ath10k_htc_alloc_skb(ar, len);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put(skb, len);
+
+ cmd = (struct htt_cmd *)skb->data;
+ ring = &cmd->rx_setup_32.rings[0];
+
+ cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG;
+ cmd->rx_setup_32.hdr.num_rings = 1;
+
+ /* FIXME: do we need all of this? */
+ flags = 0;
+ flags |= HTT_RX_RING_FLAGS_MAC80211_HDR;
+ flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD;
+ flags |= HTT_RX_RING_FLAGS_PPDU_START;
+ flags |= HTT_RX_RING_FLAGS_PPDU_END;
+ flags |= HTT_RX_RING_FLAGS_MPDU_START;
+ flags |= HTT_RX_RING_FLAGS_MPDU_END;
+ flags |= HTT_RX_RING_FLAGS_MSDU_START;
+ flags |= HTT_RX_RING_FLAGS_MSDU_END;
+ flags |= HTT_RX_RING_FLAGS_RX_ATTENTION;
+ flags |= HTT_RX_RING_FLAGS_FRAG_INFO;
+ flags |= HTT_RX_RING_FLAGS_UNICAST_RX;
+ flags |= HTT_RX_RING_FLAGS_MULTICAST_RX;
+ flags |= HTT_RX_RING_FLAGS_CTRL_RX;
+ flags |= HTT_RX_RING_FLAGS_MGMT_RX;
+ flags |= HTT_RX_RING_FLAGS_NULL_RX;
+ flags |= HTT_RX_RING_FLAGS_PHY_DATA_RX;
+
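+ /* Seed the firmware's initial ring index from the current value of the
+ * host's alloc index shadow.
+ */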
+ fw_idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);
+
+ ring->fw_idx_shadow_reg_paddr =
+ __cpu_to_le32(htt->rx_ring.alloc_idx.paddr);
+ ring->rx_ring_base_paddr = __cpu_to_le32(htt->rx_ring.base_paddr);
+ ring->rx_ring_len = __cpu_to_le16(htt->rx_ring.size);
+ ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE);
+ ring->flags = __cpu_to_le16(flags);
+ ring->fw_idx_init_val = __cpu_to_le16(fw_idx);
+
+ ath10k_htt_fill_rx_desc_offset_32(hw, ring);
+ ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath10k_htt_send_rx_ring_cfg_64(struct ath10k_htt *htt)
+{
+ struct ath10k *ar = htt->ar;
+ struct ath10k_hw_params *hw = &ar->hw_params;
+ struct sk_buff *skb;
+ struct htt_cmd *cmd;
+ struct htt_rx_ring_setup_ring64 *ring;
+ const int num_rx_ring = 1;
+ u16 flags;
+ u32 fw_idx;
+ int len;
+ int ret;
+
+ /* HW expects the buffer to be an integral number of 4-byte
+ * "words"
+ */
+ BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4));
+ BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0);
+
+ len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup_64.hdr)
+ + (sizeof(*ring) * num_rx_ring);
+ skb = ath10k_htc_alloc_skb(ar, len);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put(skb, len);
+
+ cmd = (struct htt_cmd *)skb->data;
+ ring = &cmd->rx_setup_64.rings[0];
+
+ cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG;
+ cmd->rx_setup_64.hdr.num_rings = 1;
+
+ flags = 0;
+ flags |= HTT_RX_RING_FLAGS_MAC80211_HDR;
+ flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD;
+ flags |= HTT_RX_RING_FLAGS_PPDU_START;
+ flags |= HTT_RX_RING_FLAGS_PPDU_END;
+ flags |= HTT_RX_RING_FLAGS_MPDU_START;
+ flags |= HTT_RX_RING_FLAGS_MPDU_END;
+ flags |= HTT_RX_RING_FLAGS_MSDU_START;
+ flags |= HTT_RX_RING_FLAGS_MSDU_END;
+ flags |= HTT_RX_RING_FLAGS_RX_ATTENTION;
+ flags |= HTT_RX_RING_FLAGS_FRAG_INFO;
+ flags |= HTT_RX_RING_FLAGS_UNICAST_RX;
+ flags |= HTT_RX_RING_FLAGS_MULTICAST_RX;
+ flags |= HTT_RX_RING_FLAGS_CTRL_RX;
+ flags |= HTT_RX_RING_FLAGS_MGMT_RX;
+ flags |= HTT_RX_RING_FLAGS_NULL_RX;
+ flags |= HTT_RX_RING_FLAGS_PHY_DATA_RX;
+
+ fw_idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);
+
+ ring->fw_idx_shadow_reg_paddr = __cpu_to_le64(htt->rx_ring.alloc_idx.paddr);
+ ring->rx_ring_base_paddr = __cpu_to_le64(htt->rx_ring.base_paddr);
+ ring->rx_ring_len = __cpu_to_le16(htt->rx_ring.size);
+ ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE);
+ ring->flags = __cpu_to_le16(flags);
+ ring->fw_idx_init_val = __cpu_to_le16(fw_idx);
+
+ ath10k_htt_fill_rx_desc_offset_64(hw, ring);
+ ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath10k_htt_send_rx_ring_cfg_hl(struct ath10k_htt *htt)
+{
+ struct ath10k *ar = htt->ar;
+ struct sk_buff *skb;
+ struct htt_cmd *cmd;
+ struct htt_rx_ring_setup_ring32 *ring;
+ const int num_rx_ring = 1;
+ u16 flags;
+ int len;
+ int ret;
+
+ /*
+ * the HW expects the buffer to be an integral number of 4-byte
+ * "words"
+ */
+ BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4));
+ BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0);
+
+ len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup_32.hdr)
+ + (sizeof(*ring) * num_rx_ring);
+ skb = ath10k_htc_alloc_skb(ar, len);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put(skb, len);
+
+ cmd = (struct htt_cmd *)skb->data;
+ ring = &cmd->rx_setup_32.rings[0];
+
+ cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG;
+ cmd->rx_setup_32.hdr.num_rings = 1;
+
+ flags = 0;
+ flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD;
+ flags |= HTT_RX_RING_FLAGS_UNICAST_RX;
+ flags |= HTT_RX_RING_FLAGS_MULTICAST_RX;
+
+ memset(ring, 0, sizeof(*ring));
+ ring->rx_ring_len = __cpu_to_le16(HTT_RX_RING_SIZE_MIN);
+ ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE);
+ ring->flags = __cpu_to_le16(flags);
+
+ ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath10k_htt_h2t_aggr_cfg_msg_32(struct ath10k_htt *htt,
+ u8 max_subfrms_ampdu,
+ u8 max_subfrms_amsdu)
+{
+ struct ath10k *ar = htt->ar;
+ struct htt_aggr_conf *aggr_conf;
+ struct sk_buff *skb;
+ struct htt_cmd *cmd;
+ int len;
+ int ret;
+
+ /* Firmware defaults are: amsdu = 3 and ampdu = 64 */
+
+ if (max_subfrms_ampdu == 0 || max_subfrms_ampdu > 64)
+ return -EINVAL;
+
+ if (max_subfrms_amsdu == 0 || max_subfrms_amsdu > 31)
+ return -EINVAL;
+
+ len = sizeof(cmd->hdr);
+ len += sizeof(cmd->aggr_conf);
+
+ skb = ath10k_htc_alloc_skb(ar, len);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put(skb, len);
+ cmd = (struct htt_cmd *)skb->data;
+ cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_AGGR_CFG;
+
+ aggr_conf = &cmd->aggr_conf;
+ aggr_conf->max_num_ampdu_subframes = max_subfrms_ampdu;
+ aggr_conf->max_num_amsdu_subframes = max_subfrms_amsdu;
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt h2t aggr cfg msg amsdu %d ampdu %d",
+ aggr_conf->max_num_amsdu_subframes,
+ aggr_conf->max_num_ampdu_subframes);
+
+ ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath10k_htt_h2t_aggr_cfg_msg_v2(struct ath10k_htt *htt,
+ u8 max_subfrms_ampdu,
+ u8 max_subfrms_amsdu)
+{
+ struct ath10k *ar = htt->ar;
+ struct htt_aggr_conf_v2 *aggr_conf;
+ struct sk_buff *skb;
+ struct htt_cmd *cmd;
+ int len;
+ int ret;
+
+ /* Firmware defaults are: amsdu = 3 and ampdu = 64 */
+
+ if (max_subfrms_ampdu == 0 || max_subfrms_ampdu > 64)
+ return -EINVAL;
+
+ if (max_subfrms_amsdu == 0 || max_subfrms_amsdu > 31)
+ return -EINVAL;
+
+ len = sizeof(cmd->hdr);
+ len += sizeof(cmd->aggr_conf_v2);
+
+ skb = ath10k_htc_alloc_skb(ar, len);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put(skb, len);
+ cmd = (struct htt_cmd *)skb->data;
+ cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_AGGR_CFG;
+
+ aggr_conf = &cmd->aggr_conf_v2;
+ aggr_conf->max_num_ampdu_subframes = max_subfrms_ampdu;
+ aggr_conf->max_num_amsdu_subframes = max_subfrms_amsdu;
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt h2t aggr cfg msg amsdu %d ampdu %d",
+ aggr_conf->max_num_amsdu_subframes,
+ aggr_conf->max_num_ampdu_subframes);
+
+ ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+
+ return 0;
+}
+
+int ath10k_htt_tx_fetch_resp(struct ath10k *ar,
+ __le32 token,
+ __le16 fetch_seq_num,
+ struct htt_tx_fetch_record *records,
+ size_t num_records)
+{
+ struct sk_buff *skb;
+ struct htt_cmd *cmd;
+ const u16 resp_id = 0;
+ int len = 0;
+ int ret;
+
+ /* Response IDs are echoed back only for host driver convenience. They
+ * aren't used for anything in the driver yet, so use 0.
+ */
+
+ len += sizeof(cmd->hdr);
+ len += sizeof(cmd->tx_fetch_resp);
+ len += sizeof(cmd->tx_fetch_resp.records[0]) * num_records;
+
+ skb = ath10k_htc_alloc_skb(ar, len);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put(skb, len);
+ cmd = (struct htt_cmd *)skb->data;
+ cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FETCH_RESP;
+ cmd->tx_fetch_resp.resp_id = cpu_to_le16(resp_id);
+ cmd->tx_fetch_resp.fetch_seq_num = fetch_seq_num;
+ cmd->tx_fetch_resp.num_records = cpu_to_le16(num_records);
+ cmd->tx_fetch_resp.token = token;
+
+ memcpy(cmd->tx_fetch_resp.records, records,
+ sizeof(records[0]) * num_records);
+
+ ret = ath10k_htc_send(&ar->htc, ar->htt.eid, skb);
+ if (ret) {
+ ath10k_warn(ar, "failed to submit htc command: %d\n", ret);
+ goto err_free_skb;
+ }
+
+ return 0;
+
+err_free_skb:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
+static u8 ath10k_htt_tx_get_vdev_id(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
+ struct ath10k_vif *arvif;
+
+ if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
+ return ar->scan.vdev_id;
+ } else if (cb->vif) {
+ arvif = (void *)cb->vif->drv_priv;
+ return arvif->vdev_id;
+ } else if (ar->monitor_started) {
+ return ar->monitor_vdev_id;
+ } else {
+ return 0;
+ }
+}
+
+static u8 ath10k_htt_tx_get_tid(struct sk_buff *skb, bool is_eth)
+{
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+ struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
+
+ if (!is_eth && ieee80211_is_mgmt(hdr->frame_control))
+ return HTT_DATA_TX_EXT_TID_MGMT;
+ else if (cb->flags & ATH10K_SKB_F_QOS)
+ return skb->priority & IEEE80211_QOS_CTL_TID_MASK;
+ else
+ return HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST;
+}
+
+int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
+{
+ struct ath10k *ar = htt->ar;
+ struct device *dev = ar->dev;
+ struct sk_buff *txdesc = NULL;
+ struct htt_cmd *cmd;
+ struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
+ u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu);
+ int len = 0;
+ int msdu_id = -1;
+ int res;
+ const u8 *peer_addr;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
+
+ len += sizeof(cmd->hdr);
+ len += sizeof(cmd->mgmt_tx);
+
+ res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
+ if (res < 0)
+ goto err;
+
+ msdu_id = res;
+
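+ /* For protected robust management frames the firmware is expected to
+ * add the MIC when encrypting, so reserve tail room for it here.
+ */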
+ if ((ieee80211_is_action(hdr->frame_control) ||
+ ieee80211_is_deauth(hdr->frame_control) ||
+ ieee80211_is_disassoc(hdr->frame_control)) &&
+ ieee80211_has_protected(hdr->frame_control)) {
+ peer_addr = hdr->addr1;
+ if (is_multicast_ether_addr(peer_addr)) {
+ skb_put(msdu, sizeof(struct ieee80211_mmie_16));
+ } else {
+ if (skb_cb->ucast_cipher == WLAN_CIPHER_SUITE_GCMP ||
+ skb_cb->ucast_cipher == WLAN_CIPHER_SUITE_GCMP_256)
+ skb_put(msdu, IEEE80211_GCMP_MIC_LEN);
+ else
+ skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
+ }
+ }
+
+ txdesc = ath10k_htc_alloc_skb(ar, len);
+ if (!txdesc) {
+ res = -ENOMEM;
+ goto err_free_msdu_id;
+ }
+
+ skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
+ DMA_TO_DEVICE);
+ res = dma_mapping_error(dev, skb_cb->paddr);
+ if (res) {
+ res = -EIO;
+ goto err_free_txdesc;
+ }
+
+ skb_put(txdesc, len);
+ cmd = (struct htt_cmd *)txdesc->data;
+ memset(cmd, 0, len);
+
+ cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_MGMT_TX;
+ cmd->mgmt_tx.msdu_paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr);
+ cmd->mgmt_tx.len = __cpu_to_le32(msdu->len);
+ cmd->mgmt_tx.desc_id = __cpu_to_le32(msdu_id);
+ cmd->mgmt_tx.vdev_id = __cpu_to_le32(vdev_id);
+ memcpy(cmd->mgmt_tx.hdr, msdu->data,
+ min_t(int, msdu->len, HTT_MGMT_FRM_HDR_DOWNLOAD_LEN));
+
+ res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
+ if (res)
+ goto err_unmap_msdu;
+
+ return 0;
+
+err_unmap_msdu:
+ if (ar->bus_param.dev_type != ATH10K_DEV_TYPE_HL)
+ dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
+err_free_txdesc:
+ dev_kfree_skb_any(txdesc);
+err_free_msdu_id:
+ spin_lock_bh(&htt->tx_lock);
+ ath10k_htt_tx_free_msdu_id(htt, msdu_id);
+ spin_unlock_bh(&htt->tx_lock);
+err:
+ return res;
+}
+
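+/* Headroom needed to push the HTC header, HTT command header and HTT tx
+ * descriptor in front of a high-latency tx frame.
+ */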
+#define HTT_TX_HL_NEEDED_HEADROOM \
+ (unsigned int)(sizeof(struct htt_cmd_hdr) + \
+ sizeof(struct htt_data_tx_desc) + \
+ sizeof(struct ath10k_htc_hdr))
+
+static int ath10k_htt_tx_hl(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode,
+ struct sk_buff *msdu)
+{
+ struct ath10k *ar = htt->ar;
+ int res, data_len;
+ struct htt_cmd_hdr *cmd_hdr;
+ struct htt_data_tx_desc *tx_desc;
+ struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
+ struct sk_buff *tmp_skb;
+ bool is_eth = (txmode == ATH10K_HW_TXRX_ETHERNET);
+ u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu);
+ u8 tid = ath10k_htt_tx_get_tid(msdu, is_eth);
+ u8 flags0 = 0;
+ u16 flags1 = 0;
+ u16 msdu_id = 0;
+
+ if (!is_eth) {
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
+
+ if ((ieee80211_is_action(hdr->frame_control) ||
+ ieee80211_is_deauth(hdr->frame_control) ||
+ ieee80211_is_disassoc(hdr->frame_control)) &&
+ ieee80211_has_protected(hdr->frame_control)) {
+ skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
+ }
+ }
+
+ data_len = msdu->len;
+
+ switch (txmode) {
+ case ATH10K_HW_TXRX_RAW:
+ case ATH10K_HW_TXRX_NATIVE_WIFI:
+ flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
+ fallthrough;
+ case ATH10K_HW_TXRX_ETHERNET:
+ flags0 |= SM(txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
+ break;
+ case ATH10K_HW_TXRX_MGMT:
+ flags0 |= SM(ATH10K_HW_TXRX_MGMT,
+ HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
+ flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
+
+ if (htt->disable_tx_comp)
+ flags1 |= HTT_DATA_TX_DESC_FLAGS1_TX_COMPLETE;
+ break;
+ }
+
+ if (skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT)
+ flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;
+
+ flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
+ flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
+ if (msdu->ip_summed == CHECKSUM_PARTIAL &&
+ !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
+ flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
+ flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
+ }
+
+ /* Prepend the HTT header and TX desc struct to the data message
+ * and realloc the skb if it does not have enough headroom.
+ */
+ if (skb_headroom(msdu) < HTT_TX_HL_NEEDED_HEADROOM) {
+ tmp_skb = msdu;
+
+ ath10k_dbg(htt->ar, ATH10K_DBG_HTT,
+ "Not enough headroom in skb. Current headroom: %u, needed: %u. Reallocating...\n",
+ skb_headroom(msdu), HTT_TX_HL_NEEDED_HEADROOM);
+ msdu = skb_realloc_headroom(msdu, HTT_TX_HL_NEEDED_HEADROOM);
+ kfree_skb(tmp_skb);
+ if (!msdu) {
+ ath10k_warn(htt->ar, "htt hl tx: Unable to realloc skb!\n");
+ res = -ENOMEM;
+ goto out;
+ }
+ }
+
+ if (ar->bus_param.hl_msdu_ids) {
+ flags1 |= HTT_DATA_TX_DESC_FLAGS1_POSTPONED;
+ res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
+ if (res < 0) {
+ ath10k_err(ar, "msdu_id allocation failed %d\n", res);
+ goto out;
+ }
+ msdu_id = res;
+ }
+
+ /* As the msdu is freed both by mac80211 (in ieee80211_tx_status()) and
+ * by ath10k (in ath10k_htt_htc_tx_complete()), the reference count has
+ * to be increased by one to avoid a use-after-free and a double free.
+ */
+ skb_get(msdu);
+
+ skb_push(msdu, sizeof(*cmd_hdr));
+ skb_push(msdu, sizeof(*tx_desc));
+ cmd_hdr = (struct htt_cmd_hdr *)msdu->data;
+ tx_desc = (struct htt_data_tx_desc *)(msdu->data + sizeof(*cmd_hdr));
+
+ cmd_hdr->msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
+ tx_desc->flags0 = flags0;
+ tx_desc->flags1 = __cpu_to_le16(flags1);
+ tx_desc->len = __cpu_to_le16(data_len);
+ tx_desc->id = __cpu_to_le16(msdu_id);
+ tx_desc->frags_paddr = 0; /* always zero */
+ /* Initialize peer_id to HTT_INVALID_PEERID because this is not the
+ * reinjection path.
+ */
+ tx_desc->peerid = __cpu_to_le32(HTT_INVALID_PEERID);
+
+ res = ath10k_htc_send_hl(&htt->ar->htc, htt->eid, msdu);
+
+out:
+ return res;
+}
+
+static int ath10k_htt_tx_32(struct ath10k_htt *htt,
+ enum ath10k_hw_txrx_mode txmode,
+ struct sk_buff *msdu)
+{
+ struct ath10k *ar = htt->ar;
+ struct device *dev = ar->dev;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
+ struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
+ struct ath10k_hif_sg_item sg_items[2];
+ struct ath10k_htt_txbuf_32 *txbuf;
+ struct htt_data_tx_desc_frag *frags;
+ bool is_eth = (txmode == ATH10K_HW_TXRX_ETHERNET);
+ u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu);
+ u8 tid = ath10k_htt_tx_get_tid(msdu, is_eth);
+ int prefetch_len;
+ int res;
+ u8 flags0 = 0;
+ u16 msdu_id, flags1 = 0;
+ u16 freq = 0;
+ u32 frags_paddr = 0;
+ u32 txbuf_paddr;
+ struct htt_msdu_ext_desc *ext_desc = NULL;
+ struct htt_msdu_ext_desc *ext_desc_t = NULL;
+
+ res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
+ if (res < 0)
+ goto err;
+
+ msdu_id = res;
+
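+ /* Only the first prefetch_len bytes of the frame are sent along with
+ * the tx descriptor; the length is rounded up to the 4-byte alignment
+ * the hardware expects.
+ */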
+ prefetch_len = min(htt->prefetch_len, msdu->len);
+ prefetch_len = roundup(prefetch_len, 4);
+
+ txbuf = htt->txbuf.vaddr_txbuff_32 + msdu_id;
+ txbuf_paddr = htt->txbuf.paddr +
+ (sizeof(struct ath10k_htt_txbuf_32) * msdu_id);
+
+ if (!is_eth) {
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
+
+ if ((ieee80211_is_action(hdr->frame_control) ||
+ ieee80211_is_deauth(hdr->frame_control) ||
+ ieee80211_is_disassoc(hdr->frame_control)) &&
+ ieee80211_has_protected(hdr->frame_control)) {
+ skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
+ } else if (!(skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT) &&
+ txmode == ATH10K_HW_TXRX_RAW &&
+ ieee80211_has_protected(hdr->frame_control)) {
+ skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
+ }
+ }
+
+ skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
+ DMA_TO_DEVICE);
+ res = dma_mapping_error(dev, skb_cb->paddr);
+ if (res) {
+ res = -EIO;
+ goto err_free_msdu_id;
+ }
+
+ if (unlikely(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN))
+ freq = ar->scan.roc_freq;
+
+ switch (txmode) {
+ case ATH10K_HW_TXRX_RAW:
+ case ATH10K_HW_TXRX_NATIVE_WIFI:
+ flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
+ fallthrough;
+ case ATH10K_HW_TXRX_ETHERNET:
+ if (ar->hw_params.continuous_frag_desc) {
+ ext_desc_t = htt->frag_desc.vaddr_desc_32;
+ memset(&ext_desc_t[msdu_id], 0,
+ sizeof(struct htt_msdu_ext_desc));
+ frags = (struct htt_data_tx_desc_frag *)
+ &ext_desc_t[msdu_id].frags;
+ ext_desc = &ext_desc_t[msdu_id];
+ frags[0].tword_addr.paddr_lo =
+ __cpu_to_le32(skb_cb->paddr);
+ frags[0].tword_addr.paddr_hi = 0;
+ frags[0].tword_addr.len_16 = __cpu_to_le16(msdu->len);
+
+ frags_paddr = htt->frag_desc.paddr +
+ (sizeof(struct htt_msdu_ext_desc) * msdu_id);
+ } else {
+ frags = txbuf->frags;
+ frags[0].dword_addr.paddr =
+ __cpu_to_le32(skb_cb->paddr);
+ frags[0].dword_addr.len = __cpu_to_le32(msdu->len);
+ frags[1].dword_addr.paddr = 0;
+ frags[1].dword_addr.len = 0;
+
+ frags_paddr = txbuf_paddr;
+ }
+ flags0 |= SM(txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
+ break;
+ case ATH10K_HW_TXRX_MGMT:
+ flags0 |= SM(ATH10K_HW_TXRX_MGMT,
+ HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
+ flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
+
+ frags_paddr = skb_cb->paddr;
+ break;
+ }
+
+ /* Normally all commands go through HTC which manages tx credits for
+ * each endpoint and notifies when tx is completed.
+ *
+ * HTT endpoint is creditless so there's no need to care about HTC
+ * flags. In that case it is trivial to fill the HTC header here.
+ *
+ * MSDU transmission is considered completed upon HTT event. This
+ * implies no relevant resources can be freed until after the event is
+ * received. That's why the HTC tx completion handler itself is ignored
+ * by setting transfer_context to NULL for all sg items.
+ *
+ * There is simply no point in pushing HTT TX_FRM through the HTC tx
+ * path as it's a waste of resources. By bypassing HTC it is possible to
+ * avoid extra memory allocations, compress data structures and thus
+ * improve performance.
+ */
+
+ txbuf->htc_hdr.eid = htt->eid;
+ txbuf->htc_hdr.len = __cpu_to_le16(sizeof(txbuf->cmd_hdr) +
+ sizeof(txbuf->cmd_tx) +
+ prefetch_len);
+ txbuf->htc_hdr.flags = 0;
+
+ if (skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT)
+ flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;
+
+ flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
+ flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
+ if (msdu->ip_summed == CHECKSUM_PARTIAL &&
+ !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
+ flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
+ flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
+ if (ar->hw_params.continuous_frag_desc)
+ ext_desc->flags |= HTT_MSDU_CHECKSUM_ENABLE;
+ }
+
+ /* Prevent firmware from sending up tx inspection requests. There's
+ * nothing ath10k can do with frames requested for inspection, so force
+ * it to simply rely on a regular tx completion with discard status.
+ */
+ flags1 |= HTT_DATA_TX_DESC_FLAGS1_POSTPONED;
+
+ txbuf->cmd_hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
+ txbuf->cmd_tx.flags0 = flags0;
+ txbuf->cmd_tx.flags1 = __cpu_to_le16(flags1);
+ txbuf->cmd_tx.len = __cpu_to_le16(msdu->len);
+ txbuf->cmd_tx.id = __cpu_to_le16(msdu_id);
+ txbuf->cmd_tx.frags_paddr = __cpu_to_le32(frags_paddr);
+ if (ath10k_mac_tx_frm_has_freq(ar)) {
+ txbuf->cmd_tx.offchan_tx.peerid =
+ __cpu_to_le16(HTT_INVALID_PEERID);
+ txbuf->cmd_tx.offchan_tx.freq =
+ __cpu_to_le16(freq);
+ } else {
+ txbuf->cmd_tx.peerid =
+ __cpu_to_le32(HTT_INVALID_PEERID);
+ }
+
+ trace_ath10k_htt_tx(ar, msdu_id, msdu->len, vdev_id, tid);
+ ath10k_dbg(ar, ATH10K_DBG_HTT,
+ "htt tx flags0 %u flags1 %u len %d id %u frags_paddr %pad, msdu_paddr %pad vdev %u tid %u freq %u\n",
+ flags0, flags1, msdu->len, msdu_id, &frags_paddr,
+ &skb_cb->paddr, vdev_id, tid, freq);
+ ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ",
+ msdu->data, msdu->len);
+ trace_ath10k_tx_hdr(ar, msdu->data, msdu->len);
+ trace_ath10k_tx_payload(ar, msdu->data, msdu->len);
+
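+ /* frags[] is the first member of the txbuf, so the DMA address of the
+ * HTC header starts sizeof(txbuf->frags) bytes into the buffer.
+ */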
+ sg_items[0].transfer_id = 0;
+ sg_items[0].transfer_context = NULL;
+ sg_items[0].vaddr = &txbuf->htc_hdr;
+ sg_items[0].paddr = txbuf_paddr +
+ sizeof(txbuf->frags);
+ sg_items[0].len = sizeof(txbuf->htc_hdr) +
+ sizeof(txbuf->cmd_hdr) +
+ sizeof(txbuf->cmd_tx);
+
+ sg_items[1].transfer_id = 0;
+ sg_items[1].transfer_context = NULL;
+ sg_items[1].vaddr = msdu->data;
+ sg_items[1].paddr = skb_cb->paddr;
+ sg_items[1].len = prefetch_len;
+
+ res = ath10k_hif_tx_sg(htt->ar,
+ htt->ar->htc.endpoint[htt->eid].ul_pipe_id,
+ sg_items, ARRAY_SIZE(sg_items));
+ if (res)
+ goto err_unmap_msdu;
+
+ return 0;
+
+err_unmap_msdu:
+ dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
+err_free_msdu_id:
+ spin_lock_bh(&htt->tx_lock);
+ ath10k_htt_tx_free_msdu_id(htt, msdu_id);
+ spin_unlock_bh(&htt->tx_lock);
+err:
+ return res;
+}
+
+static int ath10k_htt_tx_64(struct ath10k_htt *htt,
+ enum ath10k_hw_txrx_mode txmode,
+ struct sk_buff *msdu)
+{
+ struct ath10k *ar = htt->ar;
+ struct device *dev = ar->dev;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
+ struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
+ struct ath10k_hif_sg_item sg_items[2];
+ struct ath10k_htt_txbuf_64 *txbuf;
+ struct htt_data_tx_desc_frag *frags;
+ bool is_eth = (txmode == ATH10K_HW_TXRX_ETHERNET);
+ u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu);
+ u8 tid = ath10k_htt_tx_get_tid(msdu, is_eth);
+ int prefetch_len;
+ int res;
+ u8 flags0 = 0;
+ u16 msdu_id, flags1 = 0;
+ u16 freq = 0;
+ dma_addr_t frags_paddr = 0;
+ dma_addr_t txbuf_paddr;
+ struct htt_msdu_ext_desc_64 *ext_desc = NULL;
+ struct htt_msdu_ext_desc_64 *ext_desc_t = NULL;
+
+ res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
+ if (res < 0)
+ goto err;
+
+ msdu_id = res;
+
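+ /* Only the first prefetch_len bytes of the frame are sent along with
+ * the tx descriptor; the length is rounded up to the 4-byte alignment
+ * the hardware expects.
+ */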
+ prefetch_len = min(htt->prefetch_len, msdu->len);
+ prefetch_len = roundup(prefetch_len, 4);
+
+ txbuf = htt->txbuf.vaddr_txbuff_64 + msdu_id;
+ txbuf_paddr = htt->txbuf.paddr +
+ (sizeof(struct ath10k_htt_txbuf_64) * msdu_id);
+
+ if (!is_eth) {
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
+
+ if ((ieee80211_is_action(hdr->frame_control) ||
+ ieee80211_is_deauth(hdr->frame_control) ||
+ ieee80211_is_disassoc(hdr->frame_control)) &&
+ ieee80211_has_protected(hdr->frame_control)) {
+ skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
+ } else if (!(skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT) &&
+ txmode == ATH10K_HW_TXRX_RAW &&
+ ieee80211_has_protected(hdr->frame_control)) {
+ skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
+ }
+ }
+
+ skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
+ DMA_TO_DEVICE);
+ res = dma_mapping_error(dev, skb_cb->paddr);
+ if (res) {
+ res = -EIO;
+ goto err_free_msdu_id;
+ }
+
+ if (unlikely(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN))
+ freq = ar->scan.roc_freq;
+
+ switch (txmode) {
+ case ATH10K_HW_TXRX_RAW:
+ case ATH10K_HW_TXRX_NATIVE_WIFI:
+ flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
+ fallthrough;
+ case ATH10K_HW_TXRX_ETHERNET:
+ if (ar->hw_params.continuous_frag_desc) {
+ ext_desc_t = htt->frag_desc.vaddr_desc_64;
+ memset(&ext_desc_t[msdu_id], 0,
+ sizeof(struct htt_msdu_ext_desc_64));
+ frags = (struct htt_data_tx_desc_frag *)
+ &ext_desc_t[msdu_id].frags;
+ ext_desc = &ext_desc_t[msdu_id];
+ frags[0].tword_addr.paddr_lo =
+ __cpu_to_le32(skb_cb->paddr);
+ frags[0].tword_addr.paddr_hi =
+ __cpu_to_le16(upper_32_bits(skb_cb->paddr));
+ frags[0].tword_addr.len_16 = __cpu_to_le16(msdu->len);
+
+ frags_paddr = htt->frag_desc.paddr +
+ (sizeof(struct htt_msdu_ext_desc_64) * msdu_id);
+ } else {
+ frags = txbuf->frags;
+ frags[0].tword_addr.paddr_lo =
+ __cpu_to_le32(skb_cb->paddr);
+ frags[0].tword_addr.paddr_hi =
+ __cpu_to_le16(upper_32_bits(skb_cb->paddr));
+ frags[0].tword_addr.len_16 = __cpu_to_le16(msdu->len);
+ frags[1].tword_addr.paddr_lo = 0;
+ frags[1].tword_addr.paddr_hi = 0;
+ frags[1].tword_addr.len_16 = 0;
+ }
+ flags0 |= SM(txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
+ break;
+ case ATH10K_HW_TXRX_MGMT:
+ flags0 |= SM(ATH10K_HW_TXRX_MGMT,
+ HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
+ flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
+
+ frags_paddr = skb_cb->paddr;
+ break;
+ }
+
+ /* Normally all commands go through HTC which manages tx credits for
+ * each endpoint and notifies when tx is completed.
+ *
+ * HTT endpoint is creditless so there's no need to care about HTC
+ * flags. In that case it is trivial to fill the HTC header here.
+ *
+ * MSDU transmission is considered completed upon HTT event. This
+ * implies no relevant resources can be freed until after the event is
+ * received. That's why the HTC tx completion handler itself is ignored
+ * by setting transfer_context to NULL for all sg items.
+ *
+ * There is simply no point in pushing HTT TX_FRM through the HTC tx
+ * path as it's a waste of resources. By bypassing HTC it is possible to
+ * avoid extra memory allocations, compress data structures and thus
+ * improve performance.
+ */
+
+ txbuf->htc_hdr.eid = htt->eid;
+ txbuf->htc_hdr.len = __cpu_to_le16(sizeof(txbuf->cmd_hdr) +
+ sizeof(txbuf->cmd_tx) +
+ prefetch_len);
+ txbuf->htc_hdr.flags = 0;
+
+ if (skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT)
+ flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;
+
+ flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
+ flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
+ if (msdu->ip_summed == CHECKSUM_PARTIAL &&
+ !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
+ flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
+ flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
+ if (ar->hw_params.continuous_frag_desc) {
+ memset(ext_desc->tso_flag, 0, sizeof(ext_desc->tso_flag));
+ ext_desc->tso_flag[3] |=
+ __cpu_to_le32(HTT_MSDU_CHECKSUM_ENABLE_64);
+ }
+ }
+
+ /* Prevent firmware from sending up tx inspection requests. There's
+ * nothing ath10k can do with frames requested for inspection, so force
+ * it to simply rely on a regular tx completion with discard status.
+ */
+ flags1 |= HTT_DATA_TX_DESC_FLAGS1_POSTPONED;
+
+ txbuf->cmd_hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
+ txbuf->cmd_tx.flags0 = flags0;
+ txbuf->cmd_tx.flags1 = __cpu_to_le16(flags1);
+ txbuf->cmd_tx.len = __cpu_to_le16(msdu->len);
+ txbuf->cmd_tx.id = __cpu_to_le16(msdu_id);
+
+ /* fill fragment descriptor */
+ txbuf->cmd_tx.frags_paddr = __cpu_to_le64(frags_paddr);
+ if (ath10k_mac_tx_frm_has_freq(ar)) {
+ txbuf->cmd_tx.offchan_tx.peerid =
+ __cpu_to_le16(HTT_INVALID_PEERID);
+ txbuf->cmd_tx.offchan_tx.freq =
+ __cpu_to_le16(freq);
+ } else {
+ txbuf->cmd_tx.peerid =
+ __cpu_to_le32(HTT_INVALID_PEERID);
+ }
+
+ trace_ath10k_htt_tx(ar, msdu_id, msdu->len, vdev_id, tid);
+ ath10k_dbg(ar, ATH10K_DBG_HTT,
+ "htt tx flags0 %u flags1 %u len %d id %u frags_paddr %pad, msdu_paddr %pad vdev %u tid %u freq %u\n",
+ flags0, flags1, msdu->len, msdu_id, &frags_paddr,
+ &skb_cb->paddr, vdev_id, tid, freq);
+ ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ",
+ msdu->data, msdu->len);
+ trace_ath10k_tx_hdr(ar, msdu->data, msdu->len);
+ trace_ath10k_tx_payload(ar, msdu->data, msdu->len);
+
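+ /* frags[] is the first member of the txbuf, so the DMA address of the
+ * HTC header starts sizeof(txbuf->frags) bytes into the buffer.
+ */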
+ sg_items[0].transfer_id = 0;
+ sg_items[0].transfer_context = NULL;
+ sg_items[0].vaddr = &txbuf->htc_hdr;
+ sg_items[0].paddr = txbuf_paddr +
+ sizeof(txbuf->frags);
+ sg_items[0].len = sizeof(txbuf->htc_hdr) +
+ sizeof(txbuf->cmd_hdr) +
+ sizeof(txbuf->cmd_tx);
+
+ sg_items[1].transfer_id = 0;
+ sg_items[1].transfer_context = NULL;
+ sg_items[1].vaddr = msdu->data;
+ sg_items[1].paddr = skb_cb->paddr;
+ sg_items[1].len = prefetch_len;
+
+ res = ath10k_hif_tx_sg(htt->ar,
+ htt->ar->htc.endpoint[htt->eid].ul_pipe_id,
+ sg_items, ARRAY_SIZE(sg_items));
+ if (res)
+ goto err_unmap_msdu;
+
+ return 0;
+
+err_unmap_msdu:
+ dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
+err_free_msdu_id:
+ spin_lock_bh(&htt->tx_lock);
+ ath10k_htt_tx_free_msdu_id(htt, msdu_id);
+ spin_unlock_bh(&htt->tx_lock);
+err:
+ return res;
+}
+
+static const struct ath10k_htt_tx_ops htt_tx_ops_32 = {
+ .htt_send_rx_ring_cfg = ath10k_htt_send_rx_ring_cfg_32,
+ .htt_send_frag_desc_bank_cfg = ath10k_htt_send_frag_desc_bank_cfg_32,
+ .htt_alloc_frag_desc = ath10k_htt_tx_alloc_cont_frag_desc_32,
+ .htt_free_frag_desc = ath10k_htt_tx_free_cont_frag_desc_32,
+ .htt_tx = ath10k_htt_tx_32,
+ .htt_alloc_txbuff = ath10k_htt_tx_alloc_cont_txbuf_32,
+ .htt_free_txbuff = ath10k_htt_tx_free_cont_txbuf_32,
+ .htt_h2t_aggr_cfg_msg = ath10k_htt_h2t_aggr_cfg_msg_32,
+};
+
+static const struct ath10k_htt_tx_ops htt_tx_ops_64 = {
+ .htt_send_rx_ring_cfg = ath10k_htt_send_rx_ring_cfg_64,
+ .htt_send_frag_desc_bank_cfg = ath10k_htt_send_frag_desc_bank_cfg_64,
+ .htt_alloc_frag_desc = ath10k_htt_tx_alloc_cont_frag_desc_64,
+ .htt_free_frag_desc = ath10k_htt_tx_free_cont_frag_desc_64,
+ .htt_tx = ath10k_htt_tx_64,
+ .htt_alloc_txbuff = ath10k_htt_tx_alloc_cont_txbuf_64,
+ .htt_free_txbuff = ath10k_htt_tx_free_cont_txbuf_64,
+ .htt_h2t_aggr_cfg_msg = ath10k_htt_h2t_aggr_cfg_msg_v2,
+};
+
+static const struct ath10k_htt_tx_ops htt_tx_ops_hl = {
+ .htt_send_rx_ring_cfg = ath10k_htt_send_rx_ring_cfg_hl,
+ .htt_send_frag_desc_bank_cfg = ath10k_htt_send_frag_desc_bank_cfg_32,
+ .htt_tx = ath10k_htt_tx_hl,
+ .htt_h2t_aggr_cfg_msg = ath10k_htt_h2t_aggr_cfg_msg_32,
+ .htt_flush_tx = ath10k_htt_flush_tx_queue,
+};
+
+void ath10k_htt_set_tx_ops(struct ath10k_htt *htt)
+{
+ struct ath10k *ar = htt->ar;
+
+ if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
+ htt->tx_ops = &htt_tx_ops_hl;
+ else if (ar->hw_params.target_64bit)
+ htt->tx_ops = &htt_tx_ops_64;
+ else
+ htt->tx_ops = &htt_tx_ops_32;
+}
diff --git a/drivers/net/wireless/ath/ath10k/hw.c b/drivers/net/wireless/ath/ath10k/hw.c
new file mode 100644
index 000000000000..59b6cebfdd8f
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/hw.c
@@ -0,0 +1,1157 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (c) 2014-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/bitfield.h>
+#include "core.h"
+#include "hw.h"
+#include "hif.h"
+#include "wmi-ops.h"
+#include "bmi.h"
+#include "rx_desc.h"
+
+const struct ath10k_hw_regs qca988x_regs = {
+ .rtc_soc_base_address = 0x00004000,
+ .rtc_wmac_base_address = 0x00005000,
+ .soc_core_base_address = 0x00009000,
+ .wlan_mac_base_address = 0x00020000,
+ .ce_wrapper_base_address = 0x00057000,
+ .ce0_base_address = 0x00057400,
+ .ce1_base_address = 0x00057800,
+ .ce2_base_address = 0x00057c00,
+ .ce3_base_address = 0x00058000,
+ .ce4_base_address = 0x00058400,
+ .ce5_base_address = 0x00058800,
+ .ce6_base_address = 0x00058c00,
+ .ce7_base_address = 0x00059000,
+ .soc_reset_control_si0_rst_mask = 0x00000001,
+ .soc_reset_control_ce_rst_mask = 0x00040000,
+ .soc_chip_id_address = 0x000000ec,
+ .scratch_3_address = 0x00000030,
+ .fw_indicator_address = 0x00009030,
+ .pcie_local_base_address = 0x00080000,
+ .ce_wrap_intr_sum_host_msi_lsb = 0x00000008,
+ .ce_wrap_intr_sum_host_msi_mask = 0x0000ff00,
+ .pcie_intr_fw_mask = 0x00000400,
+ .pcie_intr_ce_mask_all = 0x0007f800,
+ .pcie_intr_clr_address = 0x00000014,
+};
+
+const struct ath10k_hw_regs qca6174_regs = {
+ .rtc_soc_base_address = 0x00000800,
+ .rtc_wmac_base_address = 0x00001000,
+ .soc_core_base_address = 0x0003a000,
+ .wlan_mac_base_address = 0x00010000,
+ .ce_wrapper_base_address = 0x00034000,
+ .ce0_base_address = 0x00034400,
+ .ce1_base_address = 0x00034800,
+ .ce2_base_address = 0x00034c00,
+ .ce3_base_address = 0x00035000,
+ .ce4_base_address = 0x00035400,
+ .ce5_base_address = 0x00035800,
+ .ce6_base_address = 0x00035c00,
+ .ce7_base_address = 0x00036000,
+ .soc_reset_control_si0_rst_mask = 0x00000000,
+ .soc_reset_control_ce_rst_mask = 0x00000001,
+ .soc_chip_id_address = 0x000000f0,
+ .scratch_3_address = 0x00000028,
+ .fw_indicator_address = 0x0003a028,
+ .pcie_local_base_address = 0x00080000,
+ .ce_wrap_intr_sum_host_msi_lsb = 0x00000008,
+ .ce_wrap_intr_sum_host_msi_mask = 0x0000ff00,
+ .pcie_intr_fw_mask = 0x00000400,
+ .pcie_intr_ce_mask_all = 0x0007f800,
+ .pcie_intr_clr_address = 0x00000014,
+ .cpu_pll_init_address = 0x00404020,
+ .cpu_speed_address = 0x00404024,
+ .core_clk_div_address = 0x00404028,
+};
+
+const struct ath10k_hw_regs qca99x0_regs = {
+ .rtc_soc_base_address = 0x00080000,
+ .rtc_wmac_base_address = 0x00000000,
+ .soc_core_base_address = 0x00082000,
+ .wlan_mac_base_address = 0x00030000,
+ .ce_wrapper_base_address = 0x0004d000,
+ .ce0_base_address = 0x0004a000,
+ .ce1_base_address = 0x0004a400,
+ .ce2_base_address = 0x0004a800,
+ .ce3_base_address = 0x0004ac00,
+ .ce4_base_address = 0x0004b000,
+ .ce5_base_address = 0x0004b400,
+ .ce6_base_address = 0x0004b800,
+ .ce7_base_address = 0x0004bc00,
+ /* Note: qca99x0 supports up to 12 copy engines. Other than CE0 and
+ * CE1, no copy engine address is referenced directly in the code, so
+ * there is no need to add entries for the newly supported CEs to this
+ * address table.
+ * Copy Engine Address
+ * CE8 0x0004c000
+ * CE9 0x0004c400
+ * CE10 0x0004c800
+ * CE11 0x0004cc00
+ */
+ .soc_reset_control_si0_rst_mask = 0x00000001,
+ .soc_reset_control_ce_rst_mask = 0x00000100,
+ .soc_chip_id_address = 0x000000ec,
+ .scratch_3_address = 0x00040050,
+ .fw_indicator_address = 0x00040050,
+ .pcie_local_base_address = 0x00000000,
+ .ce_wrap_intr_sum_host_msi_lsb = 0x0000000c,
+ .ce_wrap_intr_sum_host_msi_mask = 0x00fff000,
+ .pcie_intr_fw_mask = 0x00100000,
+ .pcie_intr_ce_mask_all = 0x000fff00,
+ .pcie_intr_clr_address = 0x00000010,
+};
+
+const struct ath10k_hw_regs qca4019_regs = {
+ .rtc_soc_base_address = 0x00080000,
+ .soc_core_base_address = 0x00082000,
+ .wlan_mac_base_address = 0x00030000,
+ .ce_wrapper_base_address = 0x0004d000,
+ .ce0_base_address = 0x0004a000,
+ .ce1_base_address = 0x0004a400,
+ .ce2_base_address = 0x0004a800,
+ .ce3_base_address = 0x0004ac00,
+ .ce4_base_address = 0x0004b000,
+ .ce5_base_address = 0x0004b400,
+ .ce6_base_address = 0x0004b800,
+ .ce7_base_address = 0x0004bc00,
+ /* qca4019 supports up to 12 copy engines. Since the base addresses of
+ * CE8 to CE11 are not referenced directly in the code, there is no
+ * need to keep them as separate members in this table.
+ * Copy Engine Address
+ * CE8 0x0004c000
+ * CE9 0x0004c400
+ * CE10 0x0004c800
+ * CE11 0x0004cc00
+ */
+ .soc_reset_control_si0_rst_mask = 0x00000001,
+ .soc_reset_control_ce_rst_mask = 0x00000100,
+ .soc_chip_id_address = 0x000000ec,
+ .fw_indicator_address = 0x0004f00c,
+ .ce_wrap_intr_sum_host_msi_lsb = 0x0000000c,
+ .ce_wrap_intr_sum_host_msi_mask = 0x00fff000,
+ .pcie_intr_fw_mask = 0x00100000,
+ .pcie_intr_ce_mask_all = 0x000fff00,
+ .pcie_intr_clr_address = 0x00000010,
+};
+
+const struct ath10k_hw_values qca988x_values = {
+ .rtc_state_val_on = 3,
+ .ce_count = 8,
+ .msi_assign_ce_max = 7,
+ .num_target_ce_config_wlan = 7,
+ .ce_desc_meta_data_mask = 0xFFFC,
+ .ce_desc_meta_data_lsb = 2,
+};
+
+const struct ath10k_hw_values qca6174_values = {
+ .rtc_state_val_on = 3,
+ .ce_count = 8,
+ .msi_assign_ce_max = 7,
+ .num_target_ce_config_wlan = 7,
+ .ce_desc_meta_data_mask = 0xFFFC,
+ .ce_desc_meta_data_lsb = 2,
+ .rfkill_pin = 16,
+ .rfkill_cfg = 0,
+ .rfkill_on_level = 1,
+};
+
+const struct ath10k_hw_values qca99x0_values = {
+ .rtc_state_val_on = 7,
+ .ce_count = 12,
+ .msi_assign_ce_max = 12,
+ .num_target_ce_config_wlan = 10,
+ .ce_desc_meta_data_mask = 0xFFF0,
+ .ce_desc_meta_data_lsb = 4,
+};
+
+const struct ath10k_hw_values qca9888_values = {
+ .rtc_state_val_on = 3,
+ .ce_count = 12,
+ .msi_assign_ce_max = 12,
+ .num_target_ce_config_wlan = 10,
+ .ce_desc_meta_data_mask = 0xFFF0,
+ .ce_desc_meta_data_lsb = 4,
+};
+
+const struct ath10k_hw_values qca4019_values = {
+ .ce_count = 12,
+ .num_target_ce_config_wlan = 10,
+ .ce_desc_meta_data_mask = 0xFFF0,
+ .ce_desc_meta_data_lsb = 4,
+};
+
+const struct ath10k_hw_regs wcn3990_regs = {
+ .rtc_soc_base_address = 0x00000000,
+ .rtc_wmac_base_address = 0x00000000,
+ .soc_core_base_address = 0x00000000,
+ .ce_wrapper_base_address = 0x0024C000,
+ .ce0_base_address = 0x00240000,
+ .ce1_base_address = 0x00241000,
+ .ce2_base_address = 0x00242000,
+ .ce3_base_address = 0x00243000,
+ .ce4_base_address = 0x00244000,
+ .ce5_base_address = 0x00245000,
+ .ce6_base_address = 0x00246000,
+ .ce7_base_address = 0x00247000,
+ .ce8_base_address = 0x00248000,
+ .ce9_base_address = 0x00249000,
+ .ce10_base_address = 0x0024A000,
+ .ce11_base_address = 0x0024B000,
+ .soc_chip_id_address = 0x000000f0,
+ .soc_reset_control_si0_rst_mask = 0x00000001,
+ .soc_reset_control_ce_rst_mask = 0x00000100,
+ .ce_wrap_intr_sum_host_msi_lsb = 0x0000000c,
+ .ce_wrap_intr_sum_host_msi_mask = 0x00fff000,
+ .pcie_intr_fw_mask = 0x00100000,
+};
+
+static const struct ath10k_hw_ce_regs_addr_map wcn3990_src_ring = {
+ .msb = 0x00000010,
+ .lsb = 0x00000010,
+ .mask = GENMASK(17, 17),
+};
+
+static const struct ath10k_hw_ce_regs_addr_map wcn3990_dst_ring = {
+ .msb = 0x00000012,
+ .lsb = 0x00000012,
+ .mask = GENMASK(18, 18),
+};
+
+static const struct ath10k_hw_ce_regs_addr_map wcn3990_dmax = {
+ .msb = 0x00000000,
+ .lsb = 0x00000000,
+ .mask = GENMASK(15, 0),
+};
+
+static const struct ath10k_hw_ce_ctrl1 wcn3990_ctrl1 = {
+ .addr = 0x00000018,
+ .src_ring = &wcn3990_src_ring,
+ .dst_ring = &wcn3990_dst_ring,
+ .dmax = &wcn3990_dmax,
+};
+
+static const struct ath10k_hw_ce_regs_addr_map wcn3990_host_ie_cc = {
+ .mask = GENMASK(0, 0),
+};
+
+static const struct ath10k_hw_ce_host_ie wcn3990_host_ie = {
+ .copy_complete = &wcn3990_host_ie_cc,
+};
+
+static const struct ath10k_hw_ce_host_wm_regs wcn3990_wm_reg = {
+ .dstr_lmask = 0x00000010,
+ .dstr_hmask = 0x00000008,
+ .srcr_lmask = 0x00000004,
+ .srcr_hmask = 0x00000002,
+ .cc_mask = 0x00000001,
+ .wm_mask = 0x0000001E,
+ .addr = 0x00000030,
+};
+
+static const struct ath10k_hw_ce_misc_regs wcn3990_misc_reg = {
+ .axi_err = 0x00000100,
+ .dstr_add_err = 0x00000200,
+ .srcr_len_err = 0x00000100,
+ .dstr_mlen_vio = 0x00000080,
+ .dstr_overflow = 0x00000040,
+ .srcr_overflow = 0x00000020,
+ .err_mask = 0x000003E0,
+ .addr = 0x00000038,
+};
+
+static const struct ath10k_hw_ce_regs_addr_map wcn3990_src_wm_low = {
+ .msb = 0x00000000,
+ .lsb = 0x00000010,
+ .mask = GENMASK(31, 16),
+};
+
+static const struct ath10k_hw_ce_regs_addr_map wcn3990_src_wm_high = {
+ .msb = 0x0000000f,
+ .lsb = 0x00000000,
+ .mask = GENMASK(15, 0),
+};
+
+static const struct ath10k_hw_ce_dst_src_wm_regs wcn3990_wm_src_ring = {
+ .addr = 0x0000004c,
+ .low_rst = 0x00000000,
+ .high_rst = 0x00000000,
+ .wm_low = &wcn3990_src_wm_low,
+ .wm_high = &wcn3990_src_wm_high,
+};
+
+static const struct ath10k_hw_ce_regs_addr_map wcn3990_dst_wm_low = {
+ .lsb = 0x00000010,
+ .mask = GENMASK(31, 16),
+};
+
+static const struct ath10k_hw_ce_regs_addr_map wcn3990_dst_wm_high = {
+ .msb = 0x0000000f,
+ .lsb = 0x00000000,
+ .mask = GENMASK(15, 0),
+};
+
+static const struct ath10k_hw_ce_dst_src_wm_regs wcn3990_wm_dst_ring = {
+ .addr = 0x00000050,
+ .low_rst = 0x00000000,
+ .high_rst = 0x00000000,
+ .wm_low = &wcn3990_dst_wm_low,
+ .wm_high = &wcn3990_dst_wm_high,
+};
+
+static const struct ath10k_hw_ce_ctrl1_upd wcn3990_ctrl1_upd = {
+ .shift = 19,
+ .mask = 0x00080000,
+ .enable = 0x00000000,
+};
+
+const struct ath10k_hw_ce_regs wcn3990_ce_regs = {
+ .sr_base_addr_lo = 0x00000000,
+ .sr_base_addr_hi = 0x00000004,
+ .sr_size_addr = 0x00000008,
+ .dr_base_addr_lo = 0x0000000c,
+ .dr_base_addr_hi = 0x00000010,
+ .dr_size_addr = 0x00000014,
+ .misc_ie_addr = 0x00000034,
+ .sr_wr_index_addr = 0x0000003c,
+ .dst_wr_index_addr = 0x00000040,
+ .current_srri_addr = 0x00000044,
+ .current_drri_addr = 0x00000048,
+ .ce_rri_low = 0x0024C004,
+ .ce_rri_high = 0x0024C008,
+ .host_ie_addr = 0x0000002c,
+ .ctrl1_regs = &wcn3990_ctrl1,
+ .host_ie = &wcn3990_host_ie,
+ .wm_regs = &wcn3990_wm_reg,
+ .misc_regs = &wcn3990_misc_reg,
+ .wm_srcr = &wcn3990_wm_src_ring,
+ .wm_dstr = &wcn3990_wm_dst_ring,
+ .upd = &wcn3990_ctrl1_upd,
+};
+
+const struct ath10k_hw_values wcn3990_values = {
+ .rtc_state_val_on = 5,
+ .ce_count = 12,
+ .msi_assign_ce_max = 12,
+ .num_target_ce_config_wlan = 12,
+ .ce_desc_meta_data_mask = 0xFFF0,
+ .ce_desc_meta_data_lsb = 4,
+};
+
+static const struct ath10k_hw_ce_regs_addr_map qcax_src_ring = {
+ .msb = 0x00000010,
+ .lsb = 0x00000010,
+ .mask = GENMASK(16, 16),
+};
+
+static const struct ath10k_hw_ce_regs_addr_map qcax_dst_ring = {
+ .msb = 0x00000011,
+ .lsb = 0x00000011,
+ .mask = GENMASK(17, 17),
+};
+
+static const struct ath10k_hw_ce_regs_addr_map qcax_dmax = {
+ .msb = 0x0000000f,
+ .lsb = 0x00000000,
+ .mask = GENMASK(15, 0),
+};
+
+static const struct ath10k_hw_ce_ctrl1 qcax_ctrl1 = {
+ .addr = 0x00000010,
+ .hw_mask = 0x0007ffff,
+ .sw_mask = 0x0007ffff,
+ .hw_wr_mask = 0x00000000,
+ .sw_wr_mask = 0x0007ffff,
+ .reset_mask = 0xffffffff,
+ .reset = 0x00000080,
+ .src_ring = &qcax_src_ring,
+ .dst_ring = &qcax_dst_ring,
+ .dmax = &qcax_dmax,
+};
+
+static const struct ath10k_hw_ce_regs_addr_map qcax_cmd_halt_status = {
+ .msb = 0x00000003,
+ .lsb = 0x00000003,
+ .mask = GENMASK(3, 3),
+};
+
+static const struct ath10k_hw_ce_cmd_halt qcax_cmd_halt = {
+ .msb = 0x00000000,
+ .mask = GENMASK(0, 0),
+ .status_reset = 0x00000000,
+ .status = &qcax_cmd_halt_status,
+};
+
+static const struct ath10k_hw_ce_regs_addr_map qcax_host_ie_cc = {
+ .msb = 0x00000000,
+ .lsb = 0x00000000,
+ .mask = GENMASK(0, 0),
+};
+
+static const struct ath10k_hw_ce_host_ie qcax_host_ie = {
+ .copy_complete_reset = 0x00000000,
+ .copy_complete = &qcax_host_ie_cc,
+};
+
+static const struct ath10k_hw_ce_host_wm_regs qcax_wm_reg = {
+ .dstr_lmask = 0x00000010,
+ .dstr_hmask = 0x00000008,
+ .srcr_lmask = 0x00000004,
+ .srcr_hmask = 0x00000002,
+ .cc_mask = 0x00000001,
+ .wm_mask = 0x0000001E,
+ .addr = 0x00000030,
+};
+
+static const struct ath10k_hw_ce_misc_regs qcax_misc_reg = {
+ .axi_err = 0x00000400,
+ .dstr_add_err = 0x00000200,
+ .srcr_len_err = 0x00000100,
+ .dstr_mlen_vio = 0x00000080,
+ .dstr_overflow = 0x00000040,
+ .srcr_overflow = 0x00000020,
+ .err_mask = 0x000007E0,
+ .addr = 0x00000038,
+};
+
+static const struct ath10k_hw_ce_regs_addr_map qcax_src_wm_low = {
+ .msb = 0x0000001f,
+ .lsb = 0x00000010,
+ .mask = GENMASK(31, 16),
+};
+
+static const struct ath10k_hw_ce_regs_addr_map qcax_src_wm_high = {
+ .msb = 0x0000000f,
+ .lsb = 0x00000000,
+ .mask = GENMASK(15, 0),
+};
+
+static const struct ath10k_hw_ce_dst_src_wm_regs qcax_wm_src_ring = {
+ .addr = 0x0000004c,
+ .low_rst = 0x00000000,
+ .high_rst = 0x00000000,
+ .wm_low = &qcax_src_wm_low,
+ .wm_high = &qcax_src_wm_high,
+};
+
+static const struct ath10k_hw_ce_regs_addr_map qcax_dst_wm_low = {
+ .lsb = 0x00000010,
+ .mask = GENMASK(31, 16),
+};
+
+static const struct ath10k_hw_ce_regs_addr_map qcax_dst_wm_high = {
+ .msb = 0x0000000f,
+ .lsb = 0x00000000,
+ .mask = GENMASK(15, 0),
+};
+
+static const struct ath10k_hw_ce_dst_src_wm_regs qcax_wm_dst_ring = {
+ .addr = 0x00000050,
+ .low_rst = 0x00000000,
+ .high_rst = 0x00000000,
+ .wm_low = &qcax_dst_wm_low,
+ .wm_high = &qcax_dst_wm_high,
+};
+
+const struct ath10k_hw_ce_regs qcax_ce_regs = {
+ .sr_base_addr_lo = 0x00000000,
+ .sr_size_addr = 0x00000004,
+ .dr_base_addr_lo = 0x00000008,
+ .dr_size_addr = 0x0000000c,
+ .ce_cmd_addr = 0x00000018,
+ .misc_ie_addr = 0x00000034,
+ .sr_wr_index_addr = 0x0000003c,
+ .dst_wr_index_addr = 0x00000040,
+ .current_srri_addr = 0x00000044,
+ .current_drri_addr = 0x00000048,
+ .host_ie_addr = 0x0000002c,
+ .ctrl1_regs = &qcax_ctrl1,
+ .cmd_halt = &qcax_cmd_halt,
+ .host_ie = &qcax_host_ie,
+ .wm_regs = &qcax_wm_reg,
+ .misc_regs = &qcax_misc_reg,
+ .wm_srcr = &qcax_wm_src_ring,
+ .wm_dstr = &qcax_wm_dst_ring,
+};
+
+const struct ath10k_hw_clk_params qca6174_clk[ATH10K_HW_REFCLK_COUNT] = {
+ {
+ .refclk = 48000000,
+ .div = 0xe,
+ .rnfrac = 0x2aaa8,
+ .settle_time = 2400,
+ .refdiv = 0,
+ .outdiv = 1,
+ },
+ {
+ .refclk = 19200000,
+ .div = 0x24,
+ .rnfrac = 0x2aaa8,
+ .settle_time = 960,
+ .refdiv = 0,
+ .outdiv = 1,
+ },
+ {
+ .refclk = 24000000,
+ .div = 0x1d,
+ .rnfrac = 0x15551,
+ .settle_time = 1200,
+ .refdiv = 0,
+ .outdiv = 1,
+ },
+ {
+ .refclk = 26000000,
+ .div = 0x1b,
+ .rnfrac = 0x4ec4,
+ .settle_time = 1300,
+ .refdiv = 0,
+ .outdiv = 1,
+ },
+ {
+ .refclk = 37400000,
+ .div = 0x12,
+ .rnfrac = 0x34b49,
+ .settle_time = 1870,
+ .refdiv = 0,
+ .outdiv = 1,
+ },
+ {
+ .refclk = 38400000,
+ .div = 0x12,
+ .rnfrac = 0x15551,
+ .settle_time = 1920,
+ .refdiv = 0,
+ .outdiv = 1,
+ },
+ {
+ .refclk = 40000000,
+ .div = 0x12,
+ .rnfrac = 0x26665,
+ .settle_time = 2000,
+ .refdiv = 0,
+ .outdiv = 1,
+ },
+ {
+ .refclk = 52000000,
+ .div = 0x1b,
+ .rnfrac = 0x4ec4,
+ .settle_time = 2600,
+ .refdiv = 0,
+ .outdiv = 1,
+ },
+};
+
+void ath10k_hw_fill_survey_time(struct ath10k *ar, struct survey_info *survey,
+ u32 cc, u32 rcc, u32 cc_prev, u32 rcc_prev)
+{
+ u32 cc_fix = 0;
+ u32 rcc_fix = 0;
+ enum ath10k_hw_cc_wraparound_type wraparound_type;
+
+ survey->filled |= SURVEY_INFO_TIME |
+ SURVEY_INFO_TIME_BUSY;
+
+ wraparound_type = ar->hw_params.cc_wraparound_type;
+
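+ /* On hardware where the cycle counters are shifted by one bit the
+ * registers wrap around at 0x7fffffff rather than at 32 bits, so add
+ * that offset back once a wraparound is detected.
+ */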
+ if (cc < cc_prev || rcc < rcc_prev) {
+ switch (wraparound_type) {
+ case ATH10K_HW_CC_WRAP_SHIFTED_ALL:
+ if (cc < cc_prev) {
+ cc_fix = 0x7fffffff;
+ survey->filled &= ~SURVEY_INFO_TIME_BUSY;
+ }
+ break;
+ case ATH10K_HW_CC_WRAP_SHIFTED_EACH:
+ if (cc < cc_prev)
+ cc_fix = 0x7fffffff;
+
+ if (rcc < rcc_prev)
+ rcc_fix = 0x7fffffff;
+ break;
+ case ATH10K_HW_CC_WRAP_DISABLED:
+ break;
+ }
+ }
+
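+ /* Report the delta since the previous survey, not the raw counter
+ * values.
+ */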
+ cc -= cc_prev - cc_fix;
+ rcc -= rcc_prev - rcc_fix;
+
+ survey->time = CCNT_TO_MSEC(ar, cc);
+ survey->time_busy = CCNT_TO_MSEC(ar, rcc);
+}
+
+/* The firmware does not support setting the coverage class. Instead this
+ * function monitors and modifies the corresponding MAC registers.
+ */
+static void ath10k_hw_qca988x_set_coverage_class(struct ath10k *ar,
+ int radio_idx,
+ s16 value)
+{
+ u32 slottime_reg;
+ u32 slottime;
+ u32 timeout_reg;
+ u32 ack_timeout;
+ u32 cts_timeout;
+ u32 phyclk_reg;
+ u32 phyclk;
+ u64 fw_dbglog_mask;
+ u32 fw_dbglog_level;
+
+ mutex_lock(&ar->conf_mutex);
+
+ /* Only modify registers if the core is started. */
+ if ((ar->state != ATH10K_STATE_ON) &&
+ (ar->state != ATH10K_STATE_RESTARTED)) {
+ spin_lock_bh(&ar->data_lock);
+ /* Store config value for when radio boots up */
+ ar->fw_coverage.coverage_class = value;
+ spin_unlock_bh(&ar->data_lock);
+ goto unlock;
+ }
+
+ /* Retrieve the current values of the two registers that need to be
+ * adjusted.
+ */
+ slottime_reg = ath10k_hif_read32(ar, WLAN_MAC_BASE_ADDRESS +
+ WAVE1_PCU_GBL_IFS_SLOT);
+ timeout_reg = ath10k_hif_read32(ar, WLAN_MAC_BASE_ADDRESS +
+ WAVE1_PCU_ACK_CTS_TIMEOUT);
+ phyclk_reg = ath10k_hif_read32(ar, WLAN_MAC_BASE_ADDRESS +
+ WAVE1_PHYCLK);
+ phyclk = MS(phyclk_reg, WAVE1_PHYCLK_USEC) + 1;
+
+ if (value < 0)
+ value = ar->fw_coverage.coverage_class;
+
+ /* Break out if the coverage class and registers have the expected
+ * value.
+ */
+ if (value == ar->fw_coverage.coverage_class &&
+ slottime_reg == ar->fw_coverage.reg_slottime_conf &&
+ timeout_reg == ar->fw_coverage.reg_ack_cts_timeout_conf &&
+ phyclk_reg == ar->fw_coverage.reg_phyclk)
+ goto unlock;
+
+ /* Store new initial register values from the firmware. */
+ if (slottime_reg != ar->fw_coverage.reg_slottime_conf)
+ ar->fw_coverage.reg_slottime_orig = slottime_reg;
+ if (timeout_reg != ar->fw_coverage.reg_ack_cts_timeout_conf)
+ ar->fw_coverage.reg_ack_cts_timeout_orig = timeout_reg;
+ ar->fw_coverage.reg_phyclk = phyclk_reg;
+
+ /* Calculate new value based on the (original) firmware calculation. */
+ slottime_reg = ar->fw_coverage.reg_slottime_orig;
+ timeout_reg = ar->fw_coverage.reg_ack_cts_timeout_orig;
+
+ /* Do some sanity checks on the slottime register. */
+ if (slottime_reg % phyclk) {
+ ath10k_warn(ar,
+ "failed to set coverage class: expected integer microsecond value in register\n");
+
+ goto store_regs;
+ }
+
+ slottime = MS(slottime_reg, WAVE1_PCU_GBL_IFS_SLOT);
+ slottime = slottime / phyclk;
+ if (slottime != 9 && slottime != 20) {
+ ath10k_warn(ar,
+ "failed to set coverage class: expected slot time of 9 or 20us in HW register. It is %uus.\n",
+ slottime);
+
+ goto store_regs;
+ }
+
+ /* Recalculate the register values by adding the additional propagation
+ * delay (3us per coverage class).
+ */
+
+ slottime = MS(slottime_reg, WAVE1_PCU_GBL_IFS_SLOT);
+ slottime += value * 3 * phyclk;
+ slottime = min_t(u32, slottime, WAVE1_PCU_GBL_IFS_SLOT_MAX);
+ slottime = SM(slottime, WAVE1_PCU_GBL_IFS_SLOT);
+ slottime_reg = (slottime_reg & ~WAVE1_PCU_GBL_IFS_SLOT_MASK) | slottime;
+
+ /* Update ack timeout (lower halfword). */
+ ack_timeout = MS(timeout_reg, WAVE1_PCU_ACK_CTS_TIMEOUT_ACK);
+ ack_timeout += 3 * value * phyclk;
+ ack_timeout = min_t(u32, ack_timeout, WAVE1_PCU_ACK_CTS_TIMEOUT_MAX);
+ ack_timeout = SM(ack_timeout, WAVE1_PCU_ACK_CTS_TIMEOUT_ACK);
+
+ /* Update cts timeout (upper halfword). */
+ cts_timeout = MS(timeout_reg, WAVE1_PCU_ACK_CTS_TIMEOUT_CTS);
+ cts_timeout += 3 * value * phyclk;
+ cts_timeout = min_t(u32, cts_timeout, WAVE1_PCU_ACK_CTS_TIMEOUT_MAX);
+ cts_timeout = SM(cts_timeout, WAVE1_PCU_ACK_CTS_TIMEOUT_CTS);
+
+ timeout_reg = ack_timeout | cts_timeout;
+
+ ath10k_hif_write32(ar,
+ WLAN_MAC_BASE_ADDRESS + WAVE1_PCU_GBL_IFS_SLOT,
+ slottime_reg);
+ ath10k_hif_write32(ar,
+ WLAN_MAC_BASE_ADDRESS + WAVE1_PCU_ACK_CTS_TIMEOUT,
+ timeout_reg);
+
+ /* Ensure we have a debug level of WARN set in case the coverage
+ * class is larger than 0. This is important as we need to set the
+ * registers again if the firmware does an internal reset, and this
+ * way we will be notified of the event.
+ */
+ fw_dbglog_mask = ath10k_debug_get_fw_dbglog_mask(ar);
+ fw_dbglog_level = ath10k_debug_get_fw_dbglog_level(ar);
+
+ if (value > 0) {
+ if (fw_dbglog_level > ATH10K_DBGLOG_LEVEL_WARN)
+ fw_dbglog_level = ATH10K_DBGLOG_LEVEL_WARN;
+ fw_dbglog_mask = ~0;
+ }
+
+ ath10k_wmi_dbglog_cfg(ar, fw_dbglog_mask, fw_dbglog_level);
+
+store_regs:
+ /* After an error we will not retry setting the coverage class. */
+ spin_lock_bh(&ar->data_lock);
+ ar->fw_coverage.coverage_class = value;
+ spin_unlock_bh(&ar->data_lock);
+
+ ar->fw_coverage.reg_slottime_conf = slottime_reg;
+ ar->fw_coverage.reg_ack_cts_timeout_conf = timeout_reg;
+
+unlock:
+ mutex_unlock(&ar->conf_mutex);
+}
+
+/**
+ * ath10k_hw_qca6174_enable_pll_clock() - enable the qca6174 hw pll clock
+ * @ar: the ath10k device
+ *
+ * This function is very hardware specific: the clock initialization
+ * steps are very sensitive and a wrong ordering could lead to obscure
+ * crashes, so they must be performed in sequence.
+ *
+ * *** Be careful if you plan to refactor this function. ***
+ *
+ * Return: 0 if the pll was enabled successfully, otherwise -EINVAL
+ */
+static int ath10k_hw_qca6174_enable_pll_clock(struct ath10k *ar)
+{
+ int ret, wait_limit;
+ u32 clk_div_addr, pll_init_addr, speed_addr;
+ u32 addr, reg_val, mem_val;
+ struct ath10k_hw_params *hw;
+ const struct ath10k_hw_clk_params *hw_clk;
+
+ hw = &ar->hw_params;
+
+ if (ar->regs->core_clk_div_address == 0 ||
+ ar->regs->cpu_pll_init_address == 0 ||
+ ar->regs->cpu_speed_address == 0)
+ return -EINVAL;
+
+ clk_div_addr = ar->regs->core_clk_div_address;
+ pll_init_addr = ar->regs->cpu_pll_init_address;
+ speed_addr = ar->regs->cpu_speed_address;
+
+ /* Read efuse register to find out the right hw clock configuration */
+ addr = (RTC_SOC_BASE_ADDRESS | EFUSE_OFFSET);
+ ret = ath10k_bmi_read_soc_reg(ar, addr, &reg_val);
+ if (ret)
+ return -EINVAL;
+
+ /* sanitize if the hw refclk index is out of bounds; the clock
+ * table has exactly ATH10K_HW_REFCLK_COUNT entries
+ */
+ if (MS(reg_val, EFUSE_XTAL_SEL) >= ATH10K_HW_REFCLK_COUNT)
+ return -EINVAL;
+
+ hw_clk = &hw->hw_clk[MS(reg_val, EFUSE_XTAL_SEL)];
+
+ /* Set the rnfrac and outdiv params to bb_pll register */
+ addr = (RTC_SOC_BASE_ADDRESS | BB_PLL_CONFIG_OFFSET);
+ ret = ath10k_bmi_read_soc_reg(ar, addr, &reg_val);
+ if (ret)
+ return -EINVAL;
+
+ reg_val &= ~(BB_PLL_CONFIG_FRAC_MASK | BB_PLL_CONFIG_OUTDIV_MASK);
+ reg_val |= (SM(hw_clk->rnfrac, BB_PLL_CONFIG_FRAC) |
+ SM(hw_clk->outdiv, BB_PLL_CONFIG_OUTDIV));
+ ret = ath10k_bmi_write_soc_reg(ar, addr, reg_val);
+ if (ret)
+ return -EINVAL;
+
+ /* Set the correct settle time value to pll_settle register */
+ addr = (RTC_WMAC_BASE_ADDRESS | WLAN_PLL_SETTLE_OFFSET);
+ ret = ath10k_bmi_read_soc_reg(ar, addr, &reg_val);
+ if (ret)
+ return -EINVAL;
+
+ reg_val &= ~WLAN_PLL_SETTLE_TIME_MASK;
+ reg_val |= SM(hw_clk->settle_time, WLAN_PLL_SETTLE_TIME);
+ ret = ath10k_bmi_write_soc_reg(ar, addr, reg_val);
+ if (ret)
+ return -EINVAL;
+
+ /* Set the clock_ctrl div to core_clk_ctrl register */
+ addr = (RTC_SOC_BASE_ADDRESS | SOC_CORE_CLK_CTRL_OFFSET);
+ ret = ath10k_bmi_read_soc_reg(ar, addr, &reg_val);
+ if (ret)
+ return -EINVAL;
+
+ reg_val &= ~SOC_CORE_CLK_CTRL_DIV_MASK;
+ reg_val |= SM(1, SOC_CORE_CLK_CTRL_DIV);
+ ret = ath10k_bmi_write_soc_reg(ar, addr, reg_val);
+ if (ret)
+ return -EINVAL;
+
+ /* Set the clock_div register */
+ mem_val = 1;
+ ret = ath10k_bmi_write_memory(ar, clk_div_addr, &mem_val,
+ sizeof(mem_val));
+ if (ret)
+ return -EINVAL;
+
+ /* Configure the pll_control register */
+ addr = (RTC_WMAC_BASE_ADDRESS | WLAN_PLL_CONTROL_OFFSET);
+ ret = ath10k_bmi_read_soc_reg(ar, addr, &reg_val);
+ if (ret)
+ return -EINVAL;
+
+ reg_val |= (SM(hw_clk->refdiv, WLAN_PLL_CONTROL_REFDIV) |
+ SM(hw_clk->div, WLAN_PLL_CONTROL_DIV) |
+ SM(1, WLAN_PLL_CONTROL_NOPWD));
+ ret = ath10k_bmi_write_soc_reg(ar, addr, reg_val);
+ if (ret)
+ return -EINVAL;
+
+ /* busy wait (max 1s) until the rtc_sync status register indicates ready */
+ wait_limit = 100000;
+ addr = (RTC_WMAC_BASE_ADDRESS | RTC_SYNC_STATUS_OFFSET);
+ do {
+ ret = ath10k_bmi_read_soc_reg(ar, addr, &reg_val);
+ if (ret)
+ return -EINVAL;
+
+ if (!MS(reg_val, RTC_SYNC_STATUS_PLL_CHANGING))
+ break;
+
+ wait_limit--;
+ udelay(10);
+
+ } while (wait_limit > 0);
+
+ if (MS(reg_val, RTC_SYNC_STATUS_PLL_CHANGING))
+ return -EINVAL;
+
+ /* Unset the pll_bypass in pll_control register */
+ addr = (RTC_WMAC_BASE_ADDRESS | WLAN_PLL_CONTROL_OFFSET);
+ ret = ath10k_bmi_read_soc_reg(ar, addr, &reg_val);
+ if (ret)
+ return -EINVAL;
+
+ reg_val &= ~WLAN_PLL_CONTROL_BYPASS_MASK;
+ reg_val |= SM(0, WLAN_PLL_CONTROL_BYPASS);
+ ret = ath10k_bmi_write_soc_reg(ar, addr, reg_val);
+ if (ret)
+ return -EINVAL;
+
+ /* busy wait (max 1s) until the rtc_sync status register indicates ready */
+ wait_limit = 100000;
+ addr = (RTC_WMAC_BASE_ADDRESS | RTC_SYNC_STATUS_OFFSET);
+ do {
+ ret = ath10k_bmi_read_soc_reg(ar, addr, &reg_val);
+ if (ret)
+ return -EINVAL;
+
+ if (!MS(reg_val, RTC_SYNC_STATUS_PLL_CHANGING))
+ break;
+
+ wait_limit--;
+ udelay(10);
+
+ } while (wait_limit > 0);
+
+ if (MS(reg_val, RTC_SYNC_STATUS_PLL_CHANGING))
+ return -EINVAL;
+
+ /* Enable the hardware cpu clock register */
+ addr = (RTC_SOC_BASE_ADDRESS | SOC_CPU_CLOCK_OFFSET);
+ ret = ath10k_bmi_read_soc_reg(ar, addr, &reg_val);
+ if (ret)
+ return -EINVAL;
+
+ reg_val &= ~SOC_CPU_CLOCK_STANDARD_MASK;
+ reg_val |= SM(1, SOC_CPU_CLOCK_STANDARD);
+ ret = ath10k_bmi_write_soc_reg(ar, addr, reg_val);
+ if (ret)
+ return -EINVAL;
+
+ /* unset the nopwd from pll_control register */
+ addr = (RTC_WMAC_BASE_ADDRESS | WLAN_PLL_CONTROL_OFFSET);
+ ret = ath10k_bmi_read_soc_reg(ar, addr, &reg_val);
+ if (ret)
+ return -EINVAL;
+
+ reg_val &= ~WLAN_PLL_CONTROL_NOPWD_MASK;
+ ret = ath10k_bmi_write_soc_reg(ar, addr, reg_val);
+ if (ret)
+ return -EINVAL;
+
+ /* enable the pll_init register */
+ mem_val = 1;
+ ret = ath10k_bmi_write_memory(ar, pll_init_addr, &mem_val,
+ sizeof(mem_val));
+ if (ret)
+ return -EINVAL;
+
+ /* set the target clock frequency to speed register */
+ ret = ath10k_bmi_write_memory(ar, speed_addr, &hw->target_cpu_freq,
+ sizeof(hw->target_cpu_freq));
+ if (ret)
+ return -EINVAL;
+
+ return 0;
+}
+
+/* Program CPU_ADDR_MSB to select which target memory
+ * region is accessed.
+ */
+static void ath10k_hw_map_target_mem(struct ath10k *ar, u32 msb)
+{
+ u32 address = SOC_CORE_BASE_ADDRESS + FW_RAM_CONFIG_ADDRESS;
+
+ ath10k_hif_write32(ar, address, msb);
+}
+
+/* 1. Write to a memory region of the target, such as IRAM and DRAM.
+ * 2. Target addresses (0x0 ~ 0x00100000 and 0x00400000 ~ 0x00500000)
+ *    can be written directly. See ath10k_pci_targ_cpu_to_ce_addr() too.
+ * 3. In order to access regions other than the above, the value of the
+ *    CPU_ADDR_MSB register needs to be set first.
+ * 4. Target memory access space is limited to 1M size. If the size is
+ *    larger than 1M, it needs to be split, with CPU_ADDR_MSB programmed
+ *    accordingly for each chunk.
+ */
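+/* Worked example (illustrative, derived from the code below): writing
+ * 0x30000 bytes to target address 0x005f0000 crosses the 1M region
+ * boundary at 0x00600000. The first 0x10000 bytes are written with
+ * CPU_ADDR_MSB set to region 0x5, the remaining 0x20000 bytes with
+ * CPU_ADDR_MSB set to region 0x6.
+ */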
+static int ath10k_hw_diag_segment_msb_download(struct ath10k *ar,
+ const void *buffer,
+ u32 address,
+ u32 length)
+{
+ u32 addr = address & REGION_ACCESS_SIZE_MASK;
+ int ret, remain_size, size;
+ const u8 *buf;
+
+ ath10k_hw_map_target_mem(ar, CPU_ADDR_MSB_REGION_VAL(address));
+
+ if (addr + length > REGION_ACCESS_SIZE_LIMIT) {
+ size = REGION_ACCESS_SIZE_LIMIT - addr;
+ remain_size = length - size;
+
+ ret = ath10k_hif_diag_write(ar, address, buffer, size);
+ if (ret) {
+ ath10k_warn(ar,
+ "failed to download the first %d bytes segment to address:0x%x: %d\n",
+ size, address, ret);
+ goto done;
+ }
+
+ /* Change msb to the next memory region */
+ ath10k_hw_map_target_mem(ar,
+ CPU_ADDR_MSB_REGION_VAL(address) + 1);
+ buf = buffer + size;
+ ret = ath10k_hif_diag_write(ar,
+ address & ~REGION_ACCESS_SIZE_MASK,
+ buf, remain_size);
+ if (ret) {
+ ath10k_warn(ar,
+ "failed to download the second %d bytes segment to address:0x%x: %d\n",
+ remain_size,
+ address & ~REGION_ACCESS_SIZE_MASK,
+ ret);
+ goto done;
+ }
+ } else {
+ ret = ath10k_hif_diag_write(ar, address, buffer, length);
+ if (ret) {
+ ath10k_warn(ar,
+ "failed to download the only %d bytes segment to address:0x%x: %d\n",
+ length, address, ret);
+ goto done;
+ }
+ }
+
+done:
+ /* Change msb to DRAM */
+ ath10k_hw_map_target_mem(ar,
+ CPU_ADDR_MSB_REGION_VAL(DRAM_BASE_ADDRESS));
+ return ret;
+}
+
+static int ath10k_hw_diag_segment_download(struct ath10k *ar,
+ const void *buffer,
+ u32 address,
+ u32 length)
+{
+ if (address >= DRAM_BASE_ADDRESS + REGION_ACCESS_SIZE_LIMIT)
+ /* Needs to change MSB for memory write */
+ return ath10k_hw_diag_segment_msb_download(ar, buffer,
+ address, length);
+ else
+ return ath10k_hif_diag_write(ar, address, buffer, length);
+}
+
+int ath10k_hw_diag_fast_download(struct ath10k *ar,
+ u32 address,
+ const void *buffer,
+ u32 length)
+{
+ const u8 *buf = buffer;
+ bool sgmt_end = false;
+ u32 base_addr = 0;
+ u32 base_len = 0;
+ u32 left = 0;
+ struct bmi_segmented_file_header *hdr;
+ struct bmi_segmented_metadata *metadata;
+ int ret = 0;
+
+ if (length < sizeof(*hdr))
+ return -EINVAL;
+
+ /* Check the firmware header. If the magic number is wrong or the
+ * image is compressed, return an error.
+ */
+ hdr = (struct bmi_segmented_file_header *)buf;
+ if (__le32_to_cpu(hdr->magic_num) != BMI_SGMTFILE_MAGIC_NUM) {
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "Not a supported firmware, magic_num:0x%x\n",
+ hdr->magic_num);
+ return -EINVAL;
+ }
+
+ if (hdr->file_flags != 0) {
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "Not a supported firmware, file_flags:0x%x\n",
+ hdr->file_flags);
+ return -EINVAL;
+ }
+
+ metadata = (struct bmi_segmented_metadata *)hdr->data;
+ left = length - sizeof(*hdr);
+
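+ /* The segmented image parsed below is laid out as (sketch):
+ *
+ *   [file header][metadata 0][data 0][metadata 1][data 1]...[DONE]
+ *
+ * where each metadata record carries the target address and length of
+ * the data that follows, or one of the special BMI_SGMTFILE_* marker
+ * values in its length field.
+ */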
+ while (left > 0) {
+ if (left < sizeof(*metadata)) {
+ ath10k_warn(ar, "firmware segment is truncated: %d\n",
+ left);
+ ret = -EINVAL;
+ break;
+ }
+ base_addr = __le32_to_cpu(metadata->addr);
+ base_len = __le32_to_cpu(metadata->length);
+ buf = metadata->data;
+ left -= sizeof(*metadata);
+
+ switch (base_len) {
+ case BMI_SGMTFILE_BEGINADDR:
+ /* base_addr is the start address to run */
+ ret = ath10k_bmi_set_start(ar, base_addr);
+ base_len = 0;
+ break;
+ case BMI_SGMTFILE_DONE:
+ /* no more segment */
+ base_len = 0;
+ sgmt_end = true;
+ ret = 0;
+ break;
+ case BMI_SGMTFILE_BDDATA:
+ case BMI_SGMTFILE_EXEC:
+ ath10k_warn(ar,
+ "firmware has unsupported segment:%d\n",
+ base_len);
+ ret = -EINVAL;
+ break;
+ default:
+ if (base_len > left) {
+ /* sanity check */
+ ath10k_warn(ar,
+ "firmware has invalid segment length, %d > %d\n",
+ base_len, left);
+ ret = -EINVAL;
+ break;
+ }
+
+ ret = ath10k_hw_diag_segment_download(ar,
+ buf,
+ base_addr,
+ base_len);
+
+ if (ret)
+ ath10k_warn(ar,
+ "failed to download firmware via diag interface:%d\n",
+ ret);
+ break;
+ }
+
+ if (ret || sgmt_end)
+ break;
+
+ metadata = (struct bmi_segmented_metadata *)(buf + base_len);
+ left -= base_len;
+ }
+
+ if (ret == 0)
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "boot firmware fast diag download successfully.\n");
+ return ret;
+}
+
+static int ath10k_htt_tx_rssi_enable(struct htt_resp *resp)
+{
+ return (resp->data_tx_completion.flags2 & HTT_TX_CMPL_FLAG_DATA_RSSI);
+}
+
+static int ath10k_htt_tx_rssi_enable_wcn3990(struct htt_resp *resp)
+{
+ return (resp->data_tx_completion.flags2 &
+ HTT_TX_DATA_RSSI_ENABLE_WCN3990);
+}
+
+static int ath10k_get_htt_tx_data_rssi_pad(struct htt_resp *resp)
+{
+ struct htt_data_tx_completion_ext extd;
+ int pad_bytes = 0;
+
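+ /* Padding is counted in units of msdus_rssi[] entries: each optional
+ * trailer the firmware appends (retries, timestamp) displaces
+ * sizeof(field) / sizeof(msdus_rssi[0]) RSSI entries.
+ */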
+ if (resp->data_tx_completion.flags2 & HTT_TX_DATA_APPEND_RETRIES)
+ pad_bytes += sizeof(extd.a_retries) /
+ sizeof(extd.msdus_rssi[0]);
+
+ if (resp->data_tx_completion.flags2 & HTT_TX_DATA_APPEND_TIMESTAMP)
+ pad_bytes += sizeof(extd.t_stamp) / sizeof(extd.msdus_rssi[0]);
+
+ return pad_bytes;
+}
+
+const struct ath10k_hw_ops qca988x_ops = {
+ .set_coverage_class = ath10k_hw_qca988x_set_coverage_class,
+ .is_rssi_enable = ath10k_htt_tx_rssi_enable,
+};
+
+const struct ath10k_hw_ops qca99x0_ops = {
+ .is_rssi_enable = ath10k_htt_tx_rssi_enable,
+};
+
+const struct ath10k_hw_ops qca6174_ops = {
+ .set_coverage_class = ath10k_hw_qca988x_set_coverage_class,
+ .enable_pll_clk = ath10k_hw_qca6174_enable_pll_clock,
+ .is_rssi_enable = ath10k_htt_tx_rssi_enable,
+};
+
+const struct ath10k_hw_ops qca6174_sdio_ops = {
+ .enable_pll_clk = ath10k_hw_qca6174_enable_pll_clock,
+};
+
+const struct ath10k_hw_ops wcn3990_ops = {
+ .tx_data_rssi_pad_bytes = ath10k_get_htt_tx_data_rssi_pad,
+ .is_rssi_enable = ath10k_htt_tx_rssi_enable_wcn3990,
+};
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
new file mode 100644
index 000000000000..da71dce9babf
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -0,0 +1,1205 @@
+/* SPDX-License-Identifier: ISC */
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _HW_H_
+#define _HW_H_
+
+#include "targaddrs.h"
+
+enum ath10k_bus {
+ ATH10K_BUS_PCI,
+ ATH10K_BUS_AHB,
+ ATH10K_BUS_SDIO,
+ ATH10K_BUS_USB,
+ ATH10K_BUS_SNOC,
+};
+
+#define ATH10K_FW_DIR "ath10k"
+
+#define QCA988X_2_0_DEVICE_ID_UBNT (0x11ac)
+#define QCA988X_2_0_DEVICE_ID (0x003c)
+#define QCA6164_2_1_DEVICE_ID (0x0041)
+#define QCA6174_2_1_DEVICE_ID (0x003e)
+#define QCA6174_3_2_DEVICE_ID (0x0042)
+#define QCA99X0_2_0_DEVICE_ID (0x0040)
+#define QCA9888_2_0_DEVICE_ID (0x0056)
+#define QCA9984_1_0_DEVICE_ID (0x0046)
+#define QCA9377_1_0_DEVICE_ID (0x0042)
+#define QCA9887_1_0_DEVICE_ID (0x0050)
+
+/* QCA988X 1.0 definitions (unsupported) */
+#define QCA988X_HW_1_0_CHIP_ID_REV 0x0
+
+/* QCA988X 2.0 definitions */
+#define QCA988X_HW_2_0_VERSION 0x4100016c
+#define QCA988X_HW_2_0_CHIP_ID_REV 0x2
+#define QCA988X_HW_2_0_FW_DIR ATH10K_FW_DIR "/QCA988X/hw2.0"
+#define QCA988X_HW_2_0_PATCH_LOAD_ADDR 0x1234
+
+/* QCA9887 1.0 definitions */
+#define QCA9887_HW_1_0_VERSION 0x4100016d
+#define QCA9887_HW_1_0_CHIP_ID_REV 0
+#define QCA9887_HW_1_0_FW_DIR ATH10K_FW_DIR "/QCA9887/hw1.0"
+#define QCA9887_HW_1_0_PATCH_LOAD_ADDR 0x1234
+
+/* QCA6174 target BMI version signatures */
+#define QCA6174_HW_1_0_VERSION 0x05000000
+#define QCA6174_HW_1_1_VERSION 0x05000001
+#define QCA6174_HW_1_3_VERSION 0x05000003
+#define QCA6174_HW_2_1_VERSION 0x05010000
+#define QCA6174_HW_3_0_VERSION 0x05020000
+#define QCA6174_HW_3_2_VERSION 0x05030000
+
+/* QCA9377 target BMI version signatures */
+#define QCA9377_HW_1_0_DEV_VERSION 0x05020000
+#define QCA9377_HW_1_1_DEV_VERSION 0x05020001
+
+enum qca6174_pci_rev {
+ QCA6174_PCI_REV_1_1 = 0x11,
+ QCA6174_PCI_REV_1_3 = 0x13,
+ QCA6174_PCI_REV_2_0 = 0x20,
+ QCA6174_PCI_REV_3_0 = 0x30,
+};
+
+enum qca6174_chip_id_rev {
+ QCA6174_HW_1_0_CHIP_ID_REV = 0,
+ QCA6174_HW_1_1_CHIP_ID_REV = 1,
+ QCA6174_HW_1_3_CHIP_ID_REV = 2,
+ QCA6174_HW_2_1_CHIP_ID_REV = 4,
+ QCA6174_HW_2_2_CHIP_ID_REV = 5,
+ QCA6174_HW_3_0_CHIP_ID_REV = 8,
+ QCA6174_HW_3_1_CHIP_ID_REV = 9,
+ QCA6174_HW_3_2_CHIP_ID_REV = 10,
+};
+
+enum qca9377_chip_id_rev {
+ QCA9377_HW_1_0_CHIP_ID_REV = 0x0,
+ QCA9377_HW_1_1_CHIP_ID_REV = 0x1,
+};
+
+#define QCA6174_HW_2_1_FW_DIR ATH10K_FW_DIR "/QCA6174/hw2.1"
+#define QCA6174_HW_2_1_PATCH_LOAD_ADDR 0x1234
+
+#define QCA6174_HW_3_0_FW_DIR ATH10K_FW_DIR "/QCA6174/hw3.0"
+#define QCA6174_HW_3_0_PATCH_LOAD_ADDR 0x1234
+
+/* QCA99X0 1.0 definitions (unsupported) */
+#define QCA99X0_HW_1_0_CHIP_ID_REV 0x0
+
+/* QCA99X0 2.0 definitions */
+#define QCA99X0_HW_2_0_DEV_VERSION 0x01000000
+#define QCA99X0_HW_2_0_CHIP_ID_REV 0x1
+#define QCA99X0_HW_2_0_FW_DIR ATH10K_FW_DIR "/QCA99X0/hw2.0"
+#define QCA99X0_HW_2_0_PATCH_LOAD_ADDR 0x1234
+
+/* QCA9984 1.0 defines */
+#define QCA9984_HW_1_0_DEV_VERSION 0x1000000
+#define QCA9984_HW_DEV_TYPE 0xa
+#define QCA9984_HW_1_0_CHIP_ID_REV 0x0
+#define QCA9984_HW_1_0_FW_DIR ATH10K_FW_DIR "/QCA9984/hw1.0"
+#define QCA9984_HW_1_0_PATCH_LOAD_ADDR 0x1234
+
+/* QCA9888 2.0 defines */
+#define QCA9888_HW_2_0_DEV_VERSION 0x1000000
+#define QCA9888_HW_DEV_TYPE 0xc
+#define QCA9888_HW_2_0_CHIP_ID_REV 0x0
+#define QCA9888_HW_2_0_FW_DIR ATH10K_FW_DIR "/QCA9888/hw2.0"
+#define QCA9888_HW_2_0_PATCH_LOAD_ADDR 0x1234
+
+/* QCA9377 1.0 definitions */
+#define QCA9377_HW_1_0_FW_DIR ATH10K_FW_DIR "/QCA9377/hw1.0"
+#define QCA9377_HW_1_0_PATCH_LOAD_ADDR 0x1234
+
+/* QCA4019 1.0 definitions */
+#define QCA4019_HW_1_0_DEV_VERSION 0x01000000
+#define QCA4019_HW_1_0_FW_DIR ATH10K_FW_DIR "/QCA4019/hw1.0"
+#define QCA4019_HW_1_0_PATCH_LOAD_ADDR 0x1234
+
+/* WCN3990 1.0 definitions */
+#define WCN3990_HW_1_0_DEV_VERSION ATH10K_HW_WCN3990
+#define WCN3990_HW_1_0_FW_DIR ATH10K_FW_DIR "/WCN3990/hw1.0"
+
+#define ATH10K_FW_FILE_BASE "firmware"
+#define ATH10K_FW_API_MAX 6
+#define ATH10K_FW_API_MIN 2
+
+#define ATH10K_FW_API2_FILE "firmware-2.bin"
+#define ATH10K_FW_API3_FILE "firmware-3.bin"
+
+/* added support for ATH10K_FW_IE_WMI_OP_VERSION */
+#define ATH10K_FW_API4_FILE "firmware-4.bin"
+
+/* HTT id conflict fix for management frames over HTT */
+#define ATH10K_FW_API5_FILE "firmware-5.bin"
+
+/* the firmware-6.bin blob */
+#define ATH10K_FW_API6_FILE "firmware-6.bin"
+
+#define ATH10K_FW_UTF_FILE "utf.bin"
+#define ATH10K_FW_UTF_API2_FILE "utf-2.bin"
+
+#define ATH10K_FW_UTF_FILE_BASE "utf"
+
+/* also includes the null byte */
+#define ATH10K_FIRMWARE_MAGIC "QCA-ATH10K"
+#define ATH10K_BOARD_MAGIC "QCA-ATH10K-BOARD"
+
+#define ATH10K_BOARD_DATA_FILE "board.bin"
+#define ATH10K_BOARD_API2_FILE "board-2.bin"
+#define ATH10K_EBOARD_DATA_FILE "eboard.bin"
+
+#define REG_DUMP_COUNT_QCA988X 60
+
+struct ath10k_fw_ie {
+ __le32 id;
+ __le32 len;
+ u8 data[];
+};
+
+enum ath10k_fw_ie_type {
+ ATH10K_FW_IE_FW_VERSION = 0,
+ ATH10K_FW_IE_TIMESTAMP = 1,
+ ATH10K_FW_IE_FEATURES = 2,
+ ATH10K_FW_IE_FW_IMAGE = 3,
+ ATH10K_FW_IE_OTP_IMAGE = 4,
+
+ /* WMI "operations" interface version, 32 bit value. Supported from
+ * FW API 4 and above.
+ */
+ ATH10K_FW_IE_WMI_OP_VERSION = 5,
+
+ /* HTT "operations" interface version, 32 bit value. Supported from
+ * FW API 5 and above.
+ */
+ ATH10K_FW_IE_HTT_OP_VERSION = 6,
+
+ /* Code swap image for firmware binary */
+ ATH10K_FW_IE_FW_CODE_SWAP_IMAGE = 7,
+};
+
+enum ath10k_fw_wmi_op_version {
+ ATH10K_FW_WMI_OP_VERSION_UNSET = 0,
+
+ ATH10K_FW_WMI_OP_VERSION_MAIN = 1,
+ ATH10K_FW_WMI_OP_VERSION_10_1 = 2,
+ ATH10K_FW_WMI_OP_VERSION_10_2 = 3,
+ ATH10K_FW_WMI_OP_VERSION_TLV = 4,
+ ATH10K_FW_WMI_OP_VERSION_10_2_4 = 5,
+ ATH10K_FW_WMI_OP_VERSION_10_4 = 6,
+
+ /* keep last */
+ ATH10K_FW_WMI_OP_VERSION_MAX,
+};
+
+enum ath10k_fw_htt_op_version {
+ ATH10K_FW_HTT_OP_VERSION_UNSET = 0,
+
+ ATH10K_FW_HTT_OP_VERSION_MAIN = 1,
+
+ /* also used in 10.2 and 10.2.4 branches */
+ ATH10K_FW_HTT_OP_VERSION_10_1 = 2,
+
+ ATH10K_FW_HTT_OP_VERSION_TLV = 3,
+
+ ATH10K_FW_HTT_OP_VERSION_10_4 = 4,
+
+ /* keep last */
+ ATH10K_FW_HTT_OP_VERSION_MAX,
+};
+
+enum ath10k_bd_ie_type {
+ /* contains sub IEs of enum ath10k_bd_ie_board_type */
+ ATH10K_BD_IE_BOARD = 0,
+ ATH10K_BD_IE_BOARD_EXT = 1,
+};
+
+enum ath10k_bd_ie_board_type {
+ ATH10K_BD_IE_BOARD_NAME = 0,
+ ATH10K_BD_IE_BOARD_DATA = 1,
+};
+
+enum ath10k_hw_rev {
+ ATH10K_HW_QCA988X,
+ ATH10K_HW_QCA6174,
+ ATH10K_HW_QCA99X0,
+ ATH10K_HW_QCA9888,
+ ATH10K_HW_QCA9984,
+ ATH10K_HW_QCA9377,
+ ATH10K_HW_QCA4019,
+ ATH10K_HW_QCA9887,
+ ATH10K_HW_WCN3990,
+};
+
+struct ath10k_hw_regs {
+ u32 rtc_soc_base_address;
+ u32 rtc_wmac_base_address;
+ u32 soc_core_base_address;
+ u32 wlan_mac_base_address;
+ u32 ce_wrapper_base_address;
+ u32 ce0_base_address;
+ u32 ce1_base_address;
+ u32 ce2_base_address;
+ u32 ce3_base_address;
+ u32 ce4_base_address;
+ u32 ce5_base_address;
+ u32 ce6_base_address;
+ u32 ce7_base_address;
+ u32 ce8_base_address;
+ u32 ce9_base_address;
+ u32 ce10_base_address;
+ u32 ce11_base_address;
+ u32 soc_reset_control_si0_rst_mask;
+ u32 soc_reset_control_ce_rst_mask;
+ u32 soc_chip_id_address;
+ u32 scratch_3_address;
+ u32 fw_indicator_address;
+ u32 pcie_local_base_address;
+ u32 ce_wrap_intr_sum_host_msi_lsb;
+ u32 ce_wrap_intr_sum_host_msi_mask;
+ u32 pcie_intr_fw_mask;
+ u32 pcie_intr_ce_mask_all;
+ u32 pcie_intr_clr_address;
+ u32 cpu_pll_init_address;
+ u32 cpu_speed_address;
+ u32 core_clk_div_address;
+};
+
+extern const struct ath10k_hw_regs qca988x_regs;
+extern const struct ath10k_hw_regs qca6174_regs;
+extern const struct ath10k_hw_regs qca99x0_regs;
+extern const struct ath10k_hw_regs qca4019_regs;
+extern const struct ath10k_hw_regs wcn3990_regs;
+
+struct ath10k_hw_ce_regs_addr_map {
+ u32 msb;
+ u32 lsb;
+ u32 mask;
+};
+
+struct ath10k_hw_ce_ctrl1 {
+ u32 addr;
+ u32 hw_mask;
+ u32 sw_mask;
+ u32 hw_wr_mask;
+ u32 sw_wr_mask;
+ u32 reset_mask;
+ u32 reset;
+ const struct ath10k_hw_ce_regs_addr_map *src_ring;
+ const struct ath10k_hw_ce_regs_addr_map *dst_ring;
+ const struct ath10k_hw_ce_regs_addr_map *dmax;
+};
+
+struct ath10k_hw_ce_cmd_halt {
+ u32 status_reset;
+ u32 msb;
+ u32 mask;
+ const struct ath10k_hw_ce_regs_addr_map *status;
+};
+
+struct ath10k_hw_ce_host_ie {
+ u32 copy_complete_reset;
+ const struct ath10k_hw_ce_regs_addr_map *copy_complete;
+};
+
+struct ath10k_hw_ce_host_wm_regs {
+ u32 dstr_lmask;
+ u32 dstr_hmask;
+ u32 srcr_lmask;
+ u32 srcr_hmask;
+ u32 cc_mask;
+ u32 wm_mask;
+ u32 addr;
+};
+
+struct ath10k_hw_ce_misc_regs {
+ u32 axi_err;
+ u32 dstr_add_err;
+ u32 srcr_len_err;
+ u32 dstr_mlen_vio;
+ u32 dstr_overflow;
+ u32 srcr_overflow;
+ u32 err_mask;
+ u32 addr;
+};
+
+struct ath10k_hw_ce_dst_src_wm_regs {
+ u32 addr;
+ u32 low_rst;
+ u32 high_rst;
+ const struct ath10k_hw_ce_regs_addr_map *wm_low;
+ const struct ath10k_hw_ce_regs_addr_map *wm_high;
+};
+
+struct ath10k_hw_ce_ctrl1_upd {
+ u32 shift;
+ u32 mask;
+ u32 enable;
+};
+
+struct ath10k_hw_ce_regs {
+ u32 sr_base_addr_lo;
+ u32 sr_base_addr_hi;
+ u32 sr_size_addr;
+ u32 dr_base_addr_lo;
+ u32 dr_base_addr_hi;
+ u32 dr_size_addr;
+ u32 ce_cmd_addr;
+ u32 misc_ie_addr;
+ u32 sr_wr_index_addr;
+ u32 dst_wr_index_addr;
+ u32 current_srri_addr;
+ u32 current_drri_addr;
+ u32 ddr_addr_for_rri_low;
+ u32 ddr_addr_for_rri_high;
+ u32 ce_rri_low;
+ u32 ce_rri_high;
+ u32 host_ie_addr;
+ const struct ath10k_hw_ce_host_wm_regs *wm_regs;
+ const struct ath10k_hw_ce_misc_regs *misc_regs;
+ const struct ath10k_hw_ce_ctrl1 *ctrl1_regs;
+ const struct ath10k_hw_ce_cmd_halt *cmd_halt;
+ const struct ath10k_hw_ce_host_ie *host_ie;
+ const struct ath10k_hw_ce_dst_src_wm_regs *wm_srcr;
+ const struct ath10k_hw_ce_dst_src_wm_regs *wm_dstr;
+ const struct ath10k_hw_ce_ctrl1_upd *upd;
+};
+
+struct ath10k_hw_values {
+ u32 rtc_state_val_on;
+ u8 ce_count;
+ u8 msi_assign_ce_max;
+ u8 num_target_ce_config_wlan;
+ u16 ce_desc_meta_data_mask;
+ u8 ce_desc_meta_data_lsb;
+ u32 rfkill_pin;
+ u32 rfkill_cfg;
+ bool rfkill_on_level;
+};
+
+extern const struct ath10k_hw_values qca988x_values;
+extern const struct ath10k_hw_values qca6174_values;
+extern const struct ath10k_hw_values qca99x0_values;
+extern const struct ath10k_hw_values qca9888_values;
+extern const struct ath10k_hw_values qca4019_values;
+extern const struct ath10k_hw_values wcn3990_values;
+extern const struct ath10k_hw_ce_regs wcn3990_ce_regs;
+extern const struct ath10k_hw_ce_regs qcax_ce_regs;
+
+void ath10k_hw_fill_survey_time(struct ath10k *ar, struct survey_info *survey,
+ u32 cc, u32 rcc, u32 cc_prev, u32 rcc_prev);
+
+int ath10k_hw_diag_fast_download(struct ath10k *ar,
+ u32 address,
+ const void *buffer,
+ u32 length);
+
+#define QCA_REV_988X(ar) ((ar)->hw_rev == ATH10K_HW_QCA988X)
+#define QCA_REV_9887(ar) ((ar)->hw_rev == ATH10K_HW_QCA9887)
+#define QCA_REV_6174(ar) ((ar)->hw_rev == ATH10K_HW_QCA6174)
+#define QCA_REV_99X0(ar) ((ar)->hw_rev == ATH10K_HW_QCA99X0)
+#define QCA_REV_9888(ar) ((ar)->hw_rev == ATH10K_HW_QCA9888)
+#define QCA_REV_9984(ar) ((ar)->hw_rev == ATH10K_HW_QCA9984)
+#define QCA_REV_9377(ar) ((ar)->hw_rev == ATH10K_HW_QCA9377)
+#define QCA_REV_40XX(ar) ((ar)->hw_rev == ATH10K_HW_QCA4019)
+#define QCA_REV_WCN3990(ar) ((ar)->hw_rev == ATH10K_HW_WCN3990)
+
+/* Known peculiarities:
+ * - raw appears in nwifi decap, raw and nwifi appear in ethernet decap
+ * - raw frames have an FCS, nwifi frames don't
+ * - ethernet frames have the 802.11 header decapped and parts (base hdr,
+ *   cipher param, llc/snap) are each aligned to 4 byte boundaries
+ */
+enum ath10k_hw_txrx_mode {
+ ATH10K_HW_TXRX_RAW = 0,
+
+ /* Native Wifi decap mode is used to align IP frames to 4-byte
+ * boundaries and avoid a very expensive re-alignment in mac80211.
+ */
+ ATH10K_HW_TXRX_NATIVE_WIFI = 1,
+ ATH10K_HW_TXRX_ETHERNET = 2,
+
+ /* Valid for HTT >= 3.0. Used for management frames in TX_FRM. */
+ ATH10K_HW_TXRX_MGMT = 3,
+};
+
+enum ath10k_mcast2ucast_mode {
+ ATH10K_MCAST2UCAST_DISABLED = 0,
+ ATH10K_MCAST2UCAST_ENABLED = 1,
+};
+
+enum ath10k_hw_rate_ofdm {
+ ATH10K_HW_RATE_OFDM_48M = 0,
+ ATH10K_HW_RATE_OFDM_24M,
+ ATH10K_HW_RATE_OFDM_12M,
+ ATH10K_HW_RATE_OFDM_6M,
+ ATH10K_HW_RATE_OFDM_54M,
+ ATH10K_HW_RATE_OFDM_36M,
+ ATH10K_HW_RATE_OFDM_18M,
+ ATH10K_HW_RATE_OFDM_9M,
+};
+
+enum ath10k_hw_rate_cck {
+ ATH10K_HW_RATE_CCK_LP_11M = 0,
+ ATH10K_HW_RATE_CCK_LP_5_5M,
+ ATH10K_HW_RATE_CCK_LP_2M,
+ ATH10K_HW_RATE_CCK_LP_1M,
+ ATH10K_HW_RATE_CCK_SP_11M,
+ ATH10K_HW_RATE_CCK_SP_5_5M,
+ ATH10K_HW_RATE_CCK_SP_2M,
+};
+
+enum ath10k_hw_rate_rev2_cck {
+ ATH10K_HW_RATE_REV2_CCK_LP_1M = 1,
+ ATH10K_HW_RATE_REV2_CCK_LP_2M,
+ ATH10K_HW_RATE_REV2_CCK_LP_5_5M,
+ ATH10K_HW_RATE_REV2_CCK_LP_11M,
+ ATH10K_HW_RATE_REV2_CCK_SP_2M,
+ ATH10K_HW_RATE_REV2_CCK_SP_5_5M,
+ ATH10K_HW_RATE_REV2_CCK_SP_11M,
+};
+
+enum ath10k_hw_cc_wraparound_type {
+ ATH10K_HW_CC_WRAP_DISABLED = 0,
+
+ /* This type is used when the HW chip has a quirky Cycle Counter
+ * wraparound which resets to 0x7fffffff instead of 0. All
+ * other CC related counters (e.g. Rx Clear Count) are divided
+ * by 2 so they never wrap around themselves.
+ */
+ ATH10K_HW_CC_WRAP_SHIFTED_ALL = 1,
+
+ /* Each hw counter wraps around independently. When a
+ * counter overflows, the respective counter is right shifted
+ * by 1, i.e. reset to 0x7fffffff, and the other counters keep
+ * running unaffected. With this type of wraparound it should
+ * be possible to report accurate Rx busy time, unlike with the
+ * first type.
+ */
+};
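+
+/* Worked example (illustrative): with either shifted wraparound type, when
+ * a counter is seen to have wrapped (cc < cc_prev), the survey code in
+ * ath10k_hw_fill_survey_time() computes the delta as
+ *
+ *	delta = cc - (cc_prev - 0x7fffffff)
+ *
+ * i.e. it re-adds the 0x7fffffff the hardware jumped back to, so the
+ * elapsed count stays (approximately) correct across the wrap.
+ */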
+
+enum ath10k_hw_refclk_speed {
+ ATH10K_HW_REFCLK_UNKNOWN = -1,
+ ATH10K_HW_REFCLK_48_MHZ = 0,
+ ATH10K_HW_REFCLK_19_2_MHZ = 1,
+ ATH10K_HW_REFCLK_24_MHZ = 2,
+ ATH10K_HW_REFCLK_26_MHZ = 3,
+ ATH10K_HW_REFCLK_37_4_MHZ = 4,
+ ATH10K_HW_REFCLK_38_4_MHZ = 5,
+ ATH10K_HW_REFCLK_40_MHZ = 6,
+ ATH10K_HW_REFCLK_52_MHZ = 7,
+
+ /* must be the last one */
+ ATH10K_HW_REFCLK_COUNT,
+};
+
+struct ath10k_hw_clk_params {
+ u32 refclk;
+ u32 div;
+ u32 rnfrac;
+ u32 settle_time;
+ u32 refdiv;
+ u32 outdiv;
+};
+
+struct htt_rx_desc_ops;
+
+struct ath10k_hw_params {
+ u32 id;
+ u16 dev_id;
+ enum ath10k_bus bus;
+ const char *name;
+ u32 patch_load_addr;
+ int uart_pin;
+ int led_pin;
+ u32 otp_exe_param;
+
+ /* Type of hw cycle counter wraparound logic, for more info
+ * refer enum ath10k_hw_cc_wraparound_type.
+ */
+ enum ath10k_hw_cc_wraparound_type cc_wraparound_type;
+
+ /* Some chips expect the fragment descriptor to be in contiguous
+ * memory for any TX operation. Set the continuous_frag_desc flag
+ * for hardware with such a requirement.
+ */
+ bool continuous_frag_desc;
+
+ /* The CCK hardware rate table mapping got revised for the newer
+ * chipsets like QCA99X0 and QCA4019: the CCK h/w rate values are
+ * in proper order with respect to rate/preamble.
+ */
+ bool cck_rate_map_rev2;
+
+ u32 channel_counters_freq_hz;
+
+ /* Mgmt tx descriptors threshold for limiting probe response
+ * frames.
+ */
+ u32 max_probe_resp_desc_thres;
+
+ u32 tx_chain_mask;
+ u32 rx_chain_mask;
+ u32 max_spatial_stream;
+ u32 cal_data_len;
+
+ struct ath10k_hw_params_fw {
+ const char *dir;
+ size_t board_size;
+ size_t ext_board_size;
+ size_t board_ext_size;
+ } fw;
+
+ /* qca99x0 family chips deliver broadcast/multicast management
+ * frames encrypted and expect software to do the decryption.
+ */
+ bool sw_decrypt_mcast_mgmt;
+
+ /* Rx descriptor abstraction */
+ const struct ath10k_htt_rx_desc_ops *rx_desc_ops;
+
+ const struct ath10k_hw_ops *hw_ops;
+
+ /* Number of bytes used for alignment in rx_hdr_status of rx desc. */
+ int decap_align_bytes;
+
+ /* hw specific clock control parameters */
+ const struct ath10k_hw_clk_params *hw_clk;
+ int target_cpu_freq;
+
+ /* Number of bytes to be discarded for each FFT sample */
+ int spectral_bin_discard;
+
+ /* The board may have a restricted NSS for 160 or 80+80 MHz vs what
+ * it can do for 80 MHz.
+ */
+ int vht160_mcs_rx_highest;
+ int vht160_mcs_tx_highest;
+
+ /* Number of ciphers supported (i.e First N) in cipher_suites array */
+ int n_cipher_suites;
+
+ u32 num_peers;
+ u32 ast_skid_limit;
+ u32 num_wds_entries;
+
+ /* Targets supporting physical addressing capability above 32 bits */
+ bool target_64bit;
+
+ /* Target rx ring fill level */
+ u32 rx_ring_fill_level;
+
+ /* target supporting shadow register for ce write */
+ bool shadow_reg_support;
+
+ /* target supporting retention restore on ddr */
+ bool rri_on_ddr;
+
+ /* Number of bytes used as the offset for each FFT sample */
+ int spectral_bin_offset;
+
+ /* targets which require a hw filter reset during boot up,
+ * to avoid them sending spurious acks.
+ */
+ bool hw_filter_reset_required;
+
+ /* target supporting fw download via diag ce */
+ bool fw_diag_ce_download;
+
+ /* target supporting fw download via large size BMI */
+ bool bmi_large_size_download;
+
+ /* need to set the uart pin even when uart printing is disabled,
+ * as a workaround for a firmware bug
+ */
+ bool uart_pin_workaround;
+
+ /* Workaround for the credit size calculation */
+ bool credit_size_workaround;
+
+ /* tx stats support over pktlog */
+ bool tx_stats_over_pktlog;
+
+ /* provides bitrates for sta_statistics using WMI_TLV_PEER_STATS_INFO_EVENTID */
+ bool supports_peer_stats_info;
+
+ bool dynamic_sar_support;
+
+ bool hw_restart_disconnect;
+
+ bool use_fw_tx_credits;
+
+ bool delay_unmap_buffer;
+
+ /* The hardware supports multicast frame registrations */
+ bool mcast_frame_registration;
+};
+
+struct htt_resp;
+struct htt_data_tx_completion_ext;
+struct htt_rx_ring_rx_desc_offsets;
+
+/* Defines needed for Rx descriptor abstraction */
+struct ath10k_hw_ops {
+ void (*set_coverage_class)(struct ath10k *ar, int radio_idx, s16 value);
+ int (*enable_pll_clk)(struct ath10k *ar);
+ int (*tx_data_rssi_pad_bytes)(struct htt_resp *htt);
+ int (*is_rssi_enable)(struct htt_resp *resp);
+};
+
+extern const struct ath10k_hw_ops qca988x_ops;
+extern const struct ath10k_hw_ops qca99x0_ops;
+extern const struct ath10k_hw_ops qca6174_ops;
+extern const struct ath10k_hw_ops qca6174_sdio_ops;
+extern const struct ath10k_hw_ops wcn3990_ops;
+
+extern const struct ath10k_hw_clk_params qca6174_clk[];
+
+static inline int
+ath10k_tx_data_rssi_get_pad_bytes(struct ath10k_hw_params *hw,
+ struct htt_resp *htt)
+{
+ if (hw->hw_ops->tx_data_rssi_pad_bytes)
+ return hw->hw_ops->tx_data_rssi_pad_bytes(htt);
+ return 0;
+}
+
+static inline int
+ath10k_is_rssi_enable(struct ath10k_hw_params *hw,
+ struct htt_resp *resp)
+{
+ if (hw->hw_ops->is_rssi_enable)
+ return hw->hw_ops->is_rssi_enable(resp);
+ return 0;
+}
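+
+/* A hypothetical call site, to show the dispatch pattern: per-chip
+ * behaviour is selected through hw_params.hw_ops rather than explicit
+ * chip checks, e.g.
+ *
+ *	if (ath10k_is_rssi_enable(&ar->hw_params, resp))
+ *		pad = ath10k_tx_data_rssi_get_pad_bytes(&ar->hw_params, resp);
+ */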
+
+/* Target specific defines for MAIN firmware */
+#define TARGET_NUM_VDEVS 8
+#define TARGET_NUM_PEER_AST 2
+#define TARGET_NUM_WDS_ENTRIES 32
+#define TARGET_DMA_BURST_SIZE 0
+#define TARGET_MAC_AGGR_DELIM 0
+#define TARGET_AST_SKID_LIMIT 16
+#define TARGET_NUM_STATIONS 16
+#define TARGET_NUM_PEERS ((TARGET_NUM_STATIONS) + \
+ (TARGET_NUM_VDEVS))
+#define TARGET_NUM_OFFLOAD_PEERS 0
+#define TARGET_NUM_OFFLOAD_REORDER_BUFS 0
+#define TARGET_NUM_PEER_KEYS 2
+#define TARGET_NUM_TIDS ((TARGET_NUM_PEERS) * 2)
+#define TARGET_TX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2))
+#define TARGET_RX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2))
+#define TARGET_RX_TIMEOUT_LO_PRI 100
+#define TARGET_RX_TIMEOUT_HI_PRI 40
+
+#define TARGET_SCAN_MAX_PENDING_REQS 4
+#define TARGET_BMISS_OFFLOAD_MAX_VDEV 3
+#define TARGET_ROAM_OFFLOAD_MAX_VDEV 3
+#define TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES 8
+#define TARGET_GTK_OFFLOAD_MAX_VDEV 3
+#define TARGET_NUM_MCAST_GROUPS 0
+#define TARGET_NUM_MCAST_TABLE_ELEMS 0
+#define TARGET_MCAST2UCAST_MODE ATH10K_MCAST2UCAST_DISABLED
+#define TARGET_TX_DBG_LOG_SIZE 1024
+#define TARGET_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK 0
+#define TARGET_VOW_CONFIG 0
+#define TARGET_NUM_MSDU_DESC (1024 + 400)
+#define TARGET_MAX_FRAG_ENTRIES 0
+
+/* Target specific defines for 10.X firmware */
+#define TARGET_10X_NUM_VDEVS 16
+#define TARGET_10X_NUM_PEER_AST 2
+#define TARGET_10X_NUM_WDS_ENTRIES 32
+#define TARGET_10X_DMA_BURST_SIZE 0
+#define TARGET_10X_MAC_AGGR_DELIM 0
+#define TARGET_10X_AST_SKID_LIMIT 128
+#define TARGET_10X_NUM_STATIONS 128
+#define TARGET_10X_TX_STATS_NUM_STATIONS 118
+#define TARGET_10X_NUM_PEERS ((TARGET_10X_NUM_STATIONS) + \
+ (TARGET_10X_NUM_VDEVS))
+#define TARGET_10X_TX_STATS_NUM_PEERS ((TARGET_10X_TX_STATS_NUM_STATIONS) + \
+ (TARGET_10X_NUM_VDEVS))
+#define TARGET_10X_NUM_OFFLOAD_PEERS 0
+#define TARGET_10X_NUM_OFFLOAD_REORDER_BUFS 0
+#define TARGET_10X_NUM_PEER_KEYS 2
+#define TARGET_10X_NUM_TIDS_MAX 256
+#define TARGET_10X_NUM_TIDS min((TARGET_10X_NUM_TIDS_MAX), \
+ (TARGET_10X_NUM_PEERS) * 2)
+#define TARGET_10X_TX_STATS_NUM_TIDS min((TARGET_10X_NUM_TIDS_MAX), \
+ (TARGET_10X_TX_STATS_NUM_PEERS) * 2)
+#define TARGET_10X_TX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2))
+#define TARGET_10X_RX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2))
+#define TARGET_10X_RX_TIMEOUT_LO_PRI 100
+#define TARGET_10X_RX_TIMEOUT_HI_PRI 40
+#define TARGET_10X_SCAN_MAX_PENDING_REQS 4
+#define TARGET_10X_BMISS_OFFLOAD_MAX_VDEV 2
+#define TARGET_10X_ROAM_OFFLOAD_MAX_VDEV 2
+#define TARGET_10X_ROAM_OFFLOAD_MAX_AP_PROFILES 8
+#define TARGET_10X_GTK_OFFLOAD_MAX_VDEV 3
+#define TARGET_10X_NUM_MCAST_GROUPS 0
+#define TARGET_10X_NUM_MCAST_TABLE_ELEMS 0
+#define TARGET_10X_MCAST2UCAST_MODE ATH10K_MCAST2UCAST_DISABLED
+#define TARGET_10X_TX_DBG_LOG_SIZE 1024
+#define TARGET_10X_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK 1
+#define TARGET_10X_VOW_CONFIG 0
+#define TARGET_10X_NUM_MSDU_DESC (1024 + 400)
+#define TARGET_10X_MAX_FRAG_ENTRIES 0
+
+/* 10.2 parameters */
+#define TARGET_10_2_DMA_BURST_SIZE 0
+
+/* Target specific defines for WMI-TLV firmware */
+#define TARGET_TLV_NUM_VDEVS 4
+#define TARGET_TLV_NUM_STATIONS 32
+#define TARGET_TLV_NUM_PEERS 33
+#define TARGET_TLV_NUM_TDLS_VDEVS 1
+#define TARGET_TLV_NUM_TIDS ((TARGET_TLV_NUM_PEERS) * 2)
+#define TARGET_TLV_NUM_MSDU_DESC (1024 + 32)
+#define TARGET_TLV_NUM_MSDU_DESC_HL 1024
+#define TARGET_TLV_NUM_WOW_PATTERNS 22
+#define TARGET_TLV_MGMT_NUM_MSDU_DESC (50)
+
+/* Target specific defines for WMI-HL-1.0 firmware */
+#define TARGET_HL_TLV_NUM_PEERS 33
+#define TARGET_HL_TLV_AST_SKID_LIMIT 16
+#define TARGET_HL_TLV_NUM_WDS_ENTRIES 2
+
+/* Target specific defines for QCA9377 high latency firmware */
+#define TARGET_QCA9377_HL_NUM_PEERS 15
+
+/* Diagnostic Window */
+#define CE_DIAG_PIPE 7
+
+#define NUM_TARGET_CE_CONFIG_WLAN ar->hw_values->num_target_ce_config_wlan
+
+/* Target specific defines for 10.4 firmware */
+#define TARGET_10_4_NUM_VDEVS 16
+#define TARGET_10_4_NUM_STATIONS 32
+#define TARGET_10_4_NUM_PEERS ((TARGET_10_4_NUM_STATIONS) + \
+ (TARGET_10_4_NUM_VDEVS))
+#define TARGET_10_4_ACTIVE_PEERS 0
+
+#define TARGET_10_4_NUM_QCACHE_PEERS_MAX 512
+#define TARGET_10_4_QCACHE_ACTIVE_PEERS 50
+#define TARGET_10_4_QCACHE_ACTIVE_PEERS_PFC 35
+#define TARGET_10_4_NUM_OFFLOAD_PEERS 0
+#define TARGET_10_4_NUM_OFFLOAD_REORDER_BUFFS 0
+#define TARGET_10_4_NUM_PEER_KEYS 2
+#define TARGET_10_4_TGT_NUM_TIDS ((TARGET_10_4_NUM_PEERS) * 2)
+#define TARGET_10_4_NUM_MSDU_DESC (1024 + 400)
+#define TARGET_10_4_NUM_MSDU_DESC_PFC 2500
+#define TARGET_10_4_AST_SKID_LIMIT 32
+
+/* 100 ms for video, best-effort, and background */
+#define TARGET_10_4_RX_TIMEOUT_LO_PRI 100
+
+/* 40 ms for voice */
+#define TARGET_10_4_RX_TIMEOUT_HI_PRI 40
+
+#define TARGET_10_4_RX_DECAP_MODE ATH10K_HW_TXRX_NATIVE_WIFI
+#define TARGET_10_4_SCAN_MAX_REQS 4
+#define TARGET_10_4_BMISS_OFFLOAD_MAX_VDEV 3
+#define TARGET_10_4_ROAM_OFFLOAD_MAX_VDEV 3
+#define TARGET_10_4_ROAM_OFFLOAD_MAX_PROFILES 8
+
+/* Note: mcast to ucast is disabled by default */
+#define TARGET_10_4_NUM_MCAST_GROUPS 0
+#define TARGET_10_4_NUM_MCAST_TABLE_ELEMS 0
+#define TARGET_10_4_MCAST2UCAST_MODE 0
+
+#define TARGET_10_4_TX_DBG_LOG_SIZE 1024
+#define TARGET_10_4_NUM_WDS_ENTRIES 32
+#define TARGET_10_4_DMA_BURST_SIZE 1
+#define TARGET_10_4_MAC_AGGR_DELIM 0
+#define TARGET_10_4_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK 1
+#define TARGET_10_4_VOW_CONFIG 0
+#define TARGET_10_4_GTK_OFFLOAD_MAX_VDEV 3
+#define TARGET_10_4_11AC_TX_MAX_FRAGS 2
+#define TARGET_10_4_MAX_PEER_EXT_STATS 16
+#define TARGET_10_4_SMART_ANT_CAP 0
+#define TARGET_10_4_BK_MIN_FREE 0
+#define TARGET_10_4_BE_MIN_FREE 0
+#define TARGET_10_4_VI_MIN_FREE 0
+#define TARGET_10_4_VO_MIN_FREE 0
+#define TARGET_10_4_RX_BATCH_MODE 1
+#define TARGET_10_4_THERMAL_THROTTLING_CONFIG 0
+#define TARGET_10_4_ATF_CONFIG 0
+#define TARGET_10_4_IPHDR_PAD_CONFIG 1
+#define TARGET_10_4_QWRAP_CONFIG 0
+
+/* TDLS config */
+#define TARGET_10_4_NUM_TDLS_VDEVS 1
+#define TARGET_10_4_NUM_TDLS_BUFFER_STA 1
+#define TARGET_10_4_NUM_TDLS_SLEEP_STA 1
+
+/* Maximum number of Copy Engines supported */
+#define CE_COUNT_MAX 12
+
+/* Number of Copy Engines supported */
+#define CE_COUNT ar->hw_values->ce_count
+
+/*
+ * Granted MSIs are assigned as follows:
+ * Firmware uses the first MSI.
+ * Remaining MSIs, if any, are used by Copy Engines.
+ * This mapping is known to both Target firmware and Host software.
+ * It may be changed as long as Host and Target are kept in sync.
+ */
+/* MSI for firmware (errors, etc.) */
+#define MSI_ASSIGN_FW 0
+
+/* MSIs for Copy Engines */
+#define MSI_ASSIGN_CE_INITIAL 1
+#define MSI_ASSIGN_CE_MAX ar->hw_values->msi_assign_ce_max
+
+/* as of IP3.7.1 */
+#define RTC_STATE_V_ON ar->hw_values->rtc_state_val_on
+
+#define RTC_STATE_V_LSB 0
+#define RTC_STATE_V_MASK 0x00000007
+#define RTC_STATE_ADDRESS 0x0000
+#define PCIE_SOC_WAKE_V_MASK 0x00000001
+#define PCIE_SOC_WAKE_ADDRESS 0x0004
+#define PCIE_SOC_WAKE_RESET 0x00000000
+#define SOC_GLOBAL_RESET_ADDRESS 0x0008
+
+#define RTC_SOC_BASE_ADDRESS ar->regs->rtc_soc_base_address
+#define RTC_WMAC_BASE_ADDRESS ar->regs->rtc_wmac_base_address
+#define MAC_COEX_BASE_ADDRESS 0x00006000
+#define BT_COEX_BASE_ADDRESS 0x00007000
+#define SOC_PCIE_BASE_ADDRESS 0x00008000
+#define SOC_CORE_BASE_ADDRESS ar->regs->soc_core_base_address
+#define WLAN_UART_BASE_ADDRESS 0x0000c000
+#define WLAN_SI_BASE_ADDRESS 0x00010000
+#define WLAN_GPIO_BASE_ADDRESS 0x00014000
+#define WLAN_ANALOG_INTF_BASE_ADDRESS 0x0001c000
+#define WLAN_MAC_BASE_ADDRESS ar->regs->wlan_mac_base_address
+#define EFUSE_BASE_ADDRESS 0x00030000
+#define FPGA_REG_BASE_ADDRESS 0x00039000
+#define WLAN_UART2_BASE_ADDRESS 0x00054c00
+#define CE_WRAPPER_BASE_ADDRESS ar->regs->ce_wrapper_base_address
+#define CE0_BASE_ADDRESS ar->regs->ce0_base_address
+#define CE1_BASE_ADDRESS ar->regs->ce1_base_address
+#define CE2_BASE_ADDRESS ar->regs->ce2_base_address
+#define CE3_BASE_ADDRESS ar->regs->ce3_base_address
+#define CE4_BASE_ADDRESS ar->regs->ce4_base_address
+#define CE5_BASE_ADDRESS ar->regs->ce5_base_address
+#define CE6_BASE_ADDRESS ar->regs->ce6_base_address
+#define CE7_BASE_ADDRESS ar->regs->ce7_base_address
+#define DBI_BASE_ADDRESS 0x00060000
+#define WLAN_ANALOG_INTF_PCIE_BASE_ADDRESS 0x0006c000
+#define PCIE_LOCAL_BASE_ADDRESS ar->regs->pcie_local_base_address
+
+#define SOC_RESET_CONTROL_ADDRESS 0x00000000
+#define SOC_RESET_CONTROL_OFFSET 0x00000000
+#define SOC_RESET_CONTROL_SI0_RST_MASK ar->regs->soc_reset_control_si0_rst_mask
+#define SOC_RESET_CONTROL_CE_RST_MASK ar->regs->soc_reset_control_ce_rst_mask
+#define SOC_RESET_CONTROL_CPU_WARM_RST_MASK 0x00000040
+#define SOC_CPU_CLOCK_OFFSET 0x00000020
+#define SOC_CPU_CLOCK_STANDARD_LSB 0
+#define SOC_CPU_CLOCK_STANDARD_MASK 0x00000003
+#define SOC_CLOCK_CONTROL_OFFSET 0x00000028
+#define SOC_CLOCK_CONTROL_SI0_CLK_MASK 0x00000001
+#define SOC_SYSTEM_SLEEP_OFFSET 0x000000c4
+#define SOC_LPO_CAL_OFFSET 0x000000e0
+#define SOC_LPO_CAL_ENABLE_LSB 20
+#define SOC_LPO_CAL_ENABLE_MASK 0x00100000
+#define SOC_LF_TIMER_CONTROL0_ADDRESS 0x00000050
+#define SOC_LF_TIMER_CONTROL0_ENABLE_MASK 0x00000004
+
+#define SOC_CHIP_ID_ADDRESS ar->regs->soc_chip_id_address
+#define SOC_CHIP_ID_REV_LSB 8
+#define SOC_CHIP_ID_REV_MASK 0x00000f00
+
+#define WLAN_RESET_CONTROL_COLD_RST_MASK 0x00000008
+#define WLAN_RESET_CONTROL_WARM_RST_MASK 0x00000004
+#define WLAN_SYSTEM_SLEEP_DISABLE_LSB 0
+#define WLAN_SYSTEM_SLEEP_DISABLE_MASK 0x00000001
+
+#define WLAN_GPIO_PIN0_ADDRESS 0x00000028
+#define WLAN_GPIO_PIN0_CONFIG_LSB 11
+#define WLAN_GPIO_PIN0_CONFIG_MASK 0x00007800
+#define WLAN_GPIO_PIN0_PAD_PULL_LSB 5
+#define WLAN_GPIO_PIN0_PAD_PULL_MASK 0x00000060
+#define WLAN_GPIO_PIN1_ADDRESS 0x0000002c
+#define WLAN_GPIO_PIN1_CONFIG_MASK 0x00007800
+#define WLAN_GPIO_PIN10_ADDRESS 0x00000050
+#define WLAN_GPIO_PIN11_ADDRESS 0x00000054
+#define WLAN_GPIO_PIN12_ADDRESS 0x00000058
+#define WLAN_GPIO_PIN13_ADDRESS 0x0000005c
+
+#define CLOCK_GPIO_OFFSET 0xffffffff
+#define CLOCK_GPIO_BT_CLK_OUT_EN_LSB 0
+#define CLOCK_GPIO_BT_CLK_OUT_EN_MASK 0
+
+#define SI_CONFIG_OFFSET 0x00000000
+#define SI_CONFIG_ERR_INT_LSB 19
+#define SI_CONFIG_ERR_INT_MASK 0x00080000
+#define SI_CONFIG_BIDIR_OD_DATA_LSB 18
+#define SI_CONFIG_BIDIR_OD_DATA_MASK 0x00040000
+#define SI_CONFIG_I2C_LSB 16
+#define SI_CONFIG_I2C_MASK 0x00010000
+#define SI_CONFIG_POS_SAMPLE_LSB 7
+#define SI_CONFIG_POS_SAMPLE_MASK 0x00000080
+#define SI_CONFIG_INACTIVE_DATA_LSB 5
+#define SI_CONFIG_INACTIVE_DATA_MASK 0x00000020
+#define SI_CONFIG_INACTIVE_CLK_LSB 4
+#define SI_CONFIG_INACTIVE_CLK_MASK 0x00000010
+#define SI_CONFIG_DIVIDER_LSB 0
+#define SI_CONFIG_DIVIDER_MASK 0x0000000f
+#define SI_CS_OFFSET 0x00000004
+#define SI_CS_DONE_ERR_LSB 10
+#define SI_CS_DONE_ERR_MASK 0x00000400
+#define SI_CS_DONE_INT_LSB 9
+#define SI_CS_DONE_INT_MASK 0x00000200
+#define SI_CS_START_LSB 8
+#define SI_CS_START_MASK 0x00000100
+#define SI_CS_RX_CNT_LSB 4
+#define SI_CS_RX_CNT_MASK 0x000000f0
+#define SI_CS_TX_CNT_LSB 0
+#define SI_CS_TX_CNT_MASK 0x0000000f
+
+#define SI_TX_DATA0_OFFSET 0x00000008
+#define SI_TX_DATA1_OFFSET 0x0000000c
+#define SI_RX_DATA0_OFFSET 0x00000010
+#define SI_RX_DATA1_OFFSET 0x00000014
+
+#define CORE_CTRL_CPU_INTR_MASK 0x00002000
+#define CORE_CTRL_PCIE_REG_31_MASK 0x00000800
+#define CORE_CTRL_ADDRESS 0x0000
+#define PCIE_INTR_ENABLE_ADDRESS 0x0008
+#define PCIE_INTR_CAUSE_ADDRESS 0x000c
+#define PCIE_INTR_CLR_ADDRESS ar->regs->pcie_intr_clr_address
+#define SCRATCH_3_ADDRESS ar->regs->scratch_3_address
+#define CPU_INTR_ADDRESS 0x0010
+#define FW_RAM_CONFIG_ADDRESS 0x0018
+
+#define CCNT_TO_MSEC(ar, x) ((x) / ar->hw_params.channel_counters_freq_hz)
+
+/* Firmware indications to the Host via SCRATCH_3 register. */
+#define FW_INDICATOR_ADDRESS ar->regs->fw_indicator_address
+#define FW_IND_EVENT_PENDING 1
+#define FW_IND_INITIALIZED 2
+#define FW_IND_HOST_READY 0x80000000
+
+/* HOST_REG interrupt from firmware */
+#define PCIE_INTR_FIRMWARE_MASK ar->regs->pcie_intr_fw_mask
+#define PCIE_INTR_CE_MASK_ALL ar->regs->pcie_intr_ce_mask_all
+
+#define DRAM_BASE_ADDRESS 0x00400000
+
+#define PCIE_BAR_REG_ADDRESS 0x40030
+
+#define MISSING 0
+
+#define SYSTEM_SLEEP_OFFSET SOC_SYSTEM_SLEEP_OFFSET
+#define WLAN_SYSTEM_SLEEP_OFFSET SOC_SYSTEM_SLEEP_OFFSET
+#define WLAN_RESET_CONTROL_OFFSET SOC_RESET_CONTROL_OFFSET
+#define CLOCK_CONTROL_OFFSET SOC_CLOCK_CONTROL_OFFSET
+#define CLOCK_CONTROL_SI0_CLK_MASK SOC_CLOCK_CONTROL_SI0_CLK_MASK
+#define RESET_CONTROL_MBOX_RST_MASK MISSING
+#define RESET_CONTROL_SI0_RST_MASK SOC_RESET_CONTROL_SI0_RST_MASK
+#define GPIO_BASE_ADDRESS WLAN_GPIO_BASE_ADDRESS
+#define GPIO_PIN0_OFFSET WLAN_GPIO_PIN0_ADDRESS
+#define GPIO_PIN1_OFFSET WLAN_GPIO_PIN1_ADDRESS
+#define GPIO_PIN0_CONFIG_LSB WLAN_GPIO_PIN0_CONFIG_LSB
+#define GPIO_PIN0_CONFIG_MASK WLAN_GPIO_PIN0_CONFIG_MASK
+#define GPIO_PIN0_PAD_PULL_LSB WLAN_GPIO_PIN0_PAD_PULL_LSB
+#define GPIO_PIN0_PAD_PULL_MASK WLAN_GPIO_PIN0_PAD_PULL_MASK
+#define GPIO_PIN1_CONFIG_MASK WLAN_GPIO_PIN1_CONFIG_MASK
+#define SI_BASE_ADDRESS WLAN_SI_BASE_ADDRESS
+#define SCRATCH_BASE_ADDRESS SOC_CORE_BASE_ADDRESS
+#define LOCAL_SCRATCH_OFFSET 0x18
+#define CPU_CLOCK_OFFSET SOC_CPU_CLOCK_OFFSET
+#define LPO_CAL_OFFSET SOC_LPO_CAL_OFFSET
+#define GPIO_PIN10_OFFSET WLAN_GPIO_PIN10_ADDRESS
+#define GPIO_PIN11_OFFSET WLAN_GPIO_PIN11_ADDRESS
+#define GPIO_PIN12_OFFSET WLAN_GPIO_PIN12_ADDRESS
+#define GPIO_PIN13_OFFSET WLAN_GPIO_PIN13_ADDRESS
+#define CPU_CLOCK_STANDARD_LSB SOC_CPU_CLOCK_STANDARD_LSB
+#define CPU_CLOCK_STANDARD_MASK SOC_CPU_CLOCK_STANDARD_MASK
+#define LPO_CAL_ENABLE_LSB SOC_LPO_CAL_ENABLE_LSB
+#define LPO_CAL_ENABLE_MASK SOC_LPO_CAL_ENABLE_MASK
+#define ANALOG_INTF_BASE_ADDRESS WLAN_ANALOG_INTF_BASE_ADDRESS
+#define MBOX_BASE_ADDRESS MISSING
+#define INT_STATUS_ENABLE_ERROR_LSB MISSING
+#define INT_STATUS_ENABLE_ERROR_MASK MISSING
+#define INT_STATUS_ENABLE_CPU_LSB MISSING
+#define INT_STATUS_ENABLE_CPU_MASK MISSING
+#define INT_STATUS_ENABLE_COUNTER_LSB MISSING
+#define INT_STATUS_ENABLE_COUNTER_MASK MISSING
+#define INT_STATUS_ENABLE_MBOX_DATA_LSB MISSING
+#define INT_STATUS_ENABLE_MBOX_DATA_MASK MISSING
+#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB MISSING
+#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK MISSING
+#define ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB MISSING
+#define ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK MISSING
+#define COUNTER_INT_STATUS_ENABLE_BIT_LSB MISSING
+#define COUNTER_INT_STATUS_ENABLE_BIT_MASK MISSING
+#define INT_STATUS_ENABLE_ADDRESS MISSING
+#define CPU_INT_STATUS_ENABLE_BIT_LSB MISSING
+#define CPU_INT_STATUS_ENABLE_BIT_MASK MISSING
+#define HOST_INT_STATUS_ADDRESS MISSING
+#define CPU_INT_STATUS_ADDRESS MISSING
+#define ERROR_INT_STATUS_ADDRESS MISSING
+#define ERROR_INT_STATUS_WAKEUP_MASK MISSING
+#define ERROR_INT_STATUS_WAKEUP_LSB MISSING
+#define ERROR_INT_STATUS_RX_UNDERFLOW_MASK MISSING
+#define ERROR_INT_STATUS_RX_UNDERFLOW_LSB MISSING
+#define ERROR_INT_STATUS_TX_OVERFLOW_MASK MISSING
+#define ERROR_INT_STATUS_TX_OVERFLOW_LSB MISSING
+#define COUNT_DEC_ADDRESS MISSING
+#define HOST_INT_STATUS_CPU_MASK MISSING
+#define HOST_INT_STATUS_CPU_LSB MISSING
+#define HOST_INT_STATUS_ERROR_MASK MISSING
+#define HOST_INT_STATUS_ERROR_LSB MISSING
+#define HOST_INT_STATUS_COUNTER_MASK MISSING
+#define HOST_INT_STATUS_COUNTER_LSB MISSING
+#define RX_LOOKAHEAD_VALID_ADDRESS MISSING
+#define WINDOW_DATA_ADDRESS MISSING
+#define WINDOW_READ_ADDR_ADDRESS MISSING
+#define WINDOW_WRITE_ADDR_ADDRESS MISSING
+
+#define QCA9887_1_0_I2C_SDA_GPIO_PIN 5
+#define QCA9887_1_0_I2C_SDA_PIN_CONFIG 3
+#define QCA9887_1_0_SI_CLK_GPIO_PIN 17
+#define QCA9887_1_0_SI_CLK_PIN_CONFIG 3
+#define QCA9887_1_0_GPIO_ENABLE_W1TS_LOW_ADDRESS 0x00000010
+
+#define QCA9887_EEPROM_SELECT_READ 0xa10000a0
+#define QCA9887_EEPROM_ADDR_HI_MASK 0x0000ff00
+#define QCA9887_EEPROM_ADDR_HI_LSB 8
+#define QCA9887_EEPROM_ADDR_LO_MASK 0x00ff0000
+#define QCA9887_EEPROM_ADDR_LO_LSB 16
+
+#define MBOX_RESET_CONTROL_ADDRESS 0x00000000
+#define MBOX_HOST_INT_STATUS_ADDRESS 0x00000800
+#define MBOX_HOST_INT_STATUS_ERROR_LSB 7
+#define MBOX_HOST_INT_STATUS_ERROR_MASK 0x00000080
+#define MBOX_HOST_INT_STATUS_CPU_LSB 6
+#define MBOX_HOST_INT_STATUS_CPU_MASK 0x00000040
+#define MBOX_HOST_INT_STATUS_COUNTER_LSB 4
+#define MBOX_HOST_INT_STATUS_COUNTER_MASK 0x00000010
+#define MBOX_CPU_INT_STATUS_ADDRESS 0x00000801
+#define MBOX_ERROR_INT_STATUS_ADDRESS 0x00000802
+#define MBOX_ERROR_INT_STATUS_WAKEUP_LSB 2
+#define MBOX_ERROR_INT_STATUS_WAKEUP_MASK 0x00000004
+#define MBOX_ERROR_INT_STATUS_RX_UNDERFLOW_LSB 1
+#define MBOX_ERROR_INT_STATUS_RX_UNDERFLOW_MASK 0x00000002
+#define MBOX_ERROR_INT_STATUS_TX_OVERFLOW_LSB 0
+#define MBOX_ERROR_INT_STATUS_TX_OVERFLOW_MASK 0x00000001
+#define MBOX_COUNTER_INT_STATUS_ADDRESS 0x00000803
+#define MBOX_COUNTER_INT_STATUS_COUNTER_LSB 0
+#define MBOX_COUNTER_INT_STATUS_COUNTER_MASK 0x000000ff
+#define MBOX_RX_LOOKAHEAD_VALID_ADDRESS 0x00000805
+#define MBOX_INT_STATUS_ENABLE_ADDRESS 0x00000828
+#define MBOX_INT_STATUS_ENABLE_ERROR_LSB 7
+#define MBOX_INT_STATUS_ENABLE_ERROR_MASK 0x00000080
+#define MBOX_INT_STATUS_ENABLE_CPU_LSB 6
+#define MBOX_INT_STATUS_ENABLE_CPU_MASK 0x00000040
+#define MBOX_INT_STATUS_ENABLE_INT_LSB 5
+#define MBOX_INT_STATUS_ENABLE_INT_MASK 0x00000020
+#define MBOX_INT_STATUS_ENABLE_COUNTER_LSB 4
+#define MBOX_INT_STATUS_ENABLE_COUNTER_MASK 0x00000010
+#define MBOX_INT_STATUS_ENABLE_MBOX_DATA_LSB 0
+#define MBOX_INT_STATUS_ENABLE_MBOX_DATA_MASK 0x0000000f
+#define MBOX_CPU_INT_STATUS_ENABLE_ADDRESS 0x00000819
+#define MBOX_CPU_INT_STATUS_ENABLE_BIT_LSB 0
+#define MBOX_CPU_INT_STATUS_ENABLE_BIT_MASK 0x000000ff
+#define MBOX_CPU_STATUS_ENABLE_ASSERT_MASK 0x00000001
+#define MBOX_ERROR_STATUS_ENABLE_ADDRESS 0x0000081a
+#define MBOX_ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB 1
+#define MBOX_ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK 0x00000002
+#define MBOX_ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB 0
+#define MBOX_ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK 0x00000001
+#define MBOX_COUNTER_INT_STATUS_ENABLE_ADDRESS 0x0000081b
+#define MBOX_COUNTER_INT_STATUS_ENABLE_BIT_LSB 0
+#define MBOX_COUNTER_INT_STATUS_ENABLE_BIT_MASK 0x000000ff
+#define MBOX_COUNT_ADDRESS 0x00000820
+#define MBOX_COUNT_DEC_ADDRESS 0x00000840
+#define MBOX_WINDOW_DATA_ADDRESS 0x00000874
+#define MBOX_WINDOW_WRITE_ADDR_ADDRESS 0x00000878
+#define MBOX_WINDOW_READ_ADDR_ADDRESS 0x0000087c
+#define MBOX_CPU_DBG_SEL_ADDRESS 0x00000883
+#define MBOX_CPU_DBG_ADDRESS 0x00000884
+#define MBOX_RTC_BASE_ADDRESS 0x00000000
+#define MBOX_GPIO_BASE_ADDRESS 0x00005000
+#define MBOX_MBOX_BASE_ADDRESS 0x00008000
+
+#define RTC_STATE_V_GET(x) (((x) & RTC_STATE_V_MASK) >> RTC_STATE_V_LSB)
+
+/* Register definitions for first generation ath10k cards. These cards include
+ * a mac which has a register allocation similar to ath9k, and at least some
+ * registers, including the ones relevant for modifying the coverage class,
+ * are identical to the ath9k definitions.
+ * These registers are usually managed by the ath10k firmware. However, by
+ * overriding them it is possible to support coverage class modifications.
+ */
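+/* For example (a sketch of the calculation done in
+ * ath10k_hw_qca988x_set_coverage_class()): each coverage class step adds
+ * 3 us of propagation delay, so the slot time field becomes
+ *
+ *	slottime += coverage_class * 3 * phyclk
+ *
+ * with phyclk the number of register ticks per microsecond, and the ack
+ * and cts timeout fields are extended the same way.
+ */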
+#define WAVE1_PCU_ACK_CTS_TIMEOUT 0x8014
+#define WAVE1_PCU_ACK_CTS_TIMEOUT_MAX 0x00003FFF
+#define WAVE1_PCU_ACK_CTS_TIMEOUT_ACK_MASK 0x00003FFF
+#define WAVE1_PCU_ACK_CTS_TIMEOUT_ACK_LSB 0
+#define WAVE1_PCU_ACK_CTS_TIMEOUT_CTS_MASK 0x3FFF0000
+#define WAVE1_PCU_ACK_CTS_TIMEOUT_CTS_LSB 16
+
+#define WAVE1_PCU_GBL_IFS_SLOT 0x1070
+#define WAVE1_PCU_GBL_IFS_SLOT_MASK 0x0000FFFF
+#define WAVE1_PCU_GBL_IFS_SLOT_MAX 0x0000FFFF
+#define WAVE1_PCU_GBL_IFS_SLOT_LSB 0
+#define WAVE1_PCU_GBL_IFS_SLOT_RESV0 0xFFFF0000
+
+#define WAVE1_PHYCLK 0x801C
+#define WAVE1_PHYCLK_USEC_MASK 0x0000007F
+#define WAVE1_PHYCLK_USEC_LSB 0
+
+/* qca6174 PLL offset/mask */
+#define SOC_CORE_CLK_CTRL_OFFSET 0x00000114
+#define SOC_CORE_CLK_CTRL_DIV_LSB 0
+#define SOC_CORE_CLK_CTRL_DIV_MASK 0x00000007
+
+#define EFUSE_OFFSET 0x0000032c
+#define EFUSE_XTAL_SEL_LSB 8
+#define EFUSE_XTAL_SEL_MASK 0x00000700
+
+#define BB_PLL_CONFIG_OFFSET 0x000002f4
+#define BB_PLL_CONFIG_FRAC_LSB 0
+#define BB_PLL_CONFIG_FRAC_MASK 0x0003ffff
+#define BB_PLL_CONFIG_OUTDIV_LSB 18
+#define BB_PLL_CONFIG_OUTDIV_MASK 0x001c0000
+
+#define WLAN_PLL_SETTLE_OFFSET 0x0018
+#define WLAN_PLL_SETTLE_TIME_LSB 0
+#define WLAN_PLL_SETTLE_TIME_MASK 0x000007ff
+
+#define WLAN_PLL_CONTROL_OFFSET 0x0014
+#define WLAN_PLL_CONTROL_DIV_LSB 0
+#define WLAN_PLL_CONTROL_DIV_MASK 0x000003ff
+#define WLAN_PLL_CONTROL_REFDIV_LSB 10
+#define WLAN_PLL_CONTROL_REFDIV_MASK 0x00003c00
+#define WLAN_PLL_CONTROL_BYPASS_LSB 16
+#define WLAN_PLL_CONTROL_BYPASS_MASK 0x00010000
+#define WLAN_PLL_CONTROL_NOPWD_LSB 18
+#define WLAN_PLL_CONTROL_NOPWD_MASK 0x00040000
+
+#define RTC_SYNC_STATUS_OFFSET 0x0244
+#define RTC_SYNC_STATUS_PLL_CHANGING_LSB 5
+#define RTC_SYNC_STATUS_PLL_CHANGING_MASK 0x00000020
+/* qca6174 PLL offset/mask end */
+
+/* CPU_ADDR_MSB is a register; its bits [3:0] specify which memory
+ * region is accessed. Each memory region is 1M in size.
+ * If the host wants to access address 0xX12345 on the target, then
+ * CPU_ADDR_MSB[3:0] must be set to 0xX.
+ * The following macros extract the 0xX value and define the size limit.
+ */
+#define CPU_ADDR_MSB_REGION_MASK GENMASK(23, 20)
+#define CPU_ADDR_MSB_REGION_VAL(X) FIELD_GET(CPU_ADDR_MSB_REGION_MASK, X)
+#define REGION_ACCESS_SIZE_LIMIT 0x100000
+#define REGION_ACCESS_SIZE_MASK (REGION_ACCESS_SIZE_LIMIT - 1)
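+
+/* Example (illustrative): for target address 0x00512345,
+ * CPU_ADDR_MSB_REGION_VAL() yields 0x5 (bits [23:20]) and
+ * (0x00512345 & REGION_ACCESS_SIZE_MASK) yields offset 0x12345
+ * within the 1M region.
+ */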
+
+#endif /* _HW_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/leds.c b/drivers/net/wireless/ath/ath10k/leds.c
new file mode 100644
index 000000000000..3a6c8111e7c6
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/leds.c
@@ -0,0 +1,89 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018 Sebastian Gottschall <s.gottschall@dd-wrt.com>
+ * Copyright (c) 2018 The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/leds.h>
+
+#include "core.h"
+#include "wmi.h"
+#include "wmi-ops.h"
+
+#include "leds.h"
+
+static int ath10k_leds_set_brightness_blocking(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct ath10k *ar = container_of(led_cdev, struct ath10k,
+ leds.cdev);
+ struct gpio_led *led = &ar->leds.wifi_led;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH10K_STATE_ON)
+ goto out;
+
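+ /* The wifi LED is registered active-low (see ath10k_leds_register()),
+ * so an "on" brightness maps to driving the GPIO low: e.g. with
+ * active_low = 1 and brightness != LED_OFF, gpio_state_pin = 0.
+ */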
+ ar->leds.gpio_state_pin = (brightness != LED_OFF) ^ led->active_low;
+ ath10k_wmi_gpio_output(ar, ar->hw_params.led_pin, ar->leds.gpio_state_pin);
+
+out:
+ mutex_unlock(&ar->conf_mutex);
+
+ return 0;
+}
+
+int ath10k_leds_start(struct ath10k *ar)
+{
+ if (ar->hw_params.led_pin == 0)
+ /* leds not supported */
+ return 0;
+
+ /* Under some circumstances the gpio pin gets reconfigured to its
+ * default state by the firmware, so we need to reconfigure it.
+ * This behaviour has only been seen on QCA9984 and QCA99XX devices
+ * so far.
+ */
+ ath10k_wmi_gpio_config(ar, ar->hw_params.led_pin, 0,
+ WMI_GPIO_PULL_NONE, WMI_GPIO_INTTYPE_DISABLE);
+ ath10k_wmi_gpio_output(ar, ar->hw_params.led_pin, 1);
+
+ return 0;
+}
+
+int ath10k_leds_register(struct ath10k *ar)
+{
+ int ret;
+
+ if (ar->hw_params.led_pin == 0)
+ /* leds not supported */
+ return 0;
+
+ snprintf(ar->leds.label, sizeof(ar->leds.label), "ath10k-%s",
+ wiphy_name(ar->hw->wiphy));
+ ar->leds.wifi_led.active_low = 1;
+ ar->leds.wifi_led.name = ar->leds.label;
+ ar->leds.wifi_led.default_state = LEDS_GPIO_DEFSTATE_KEEP;
+
+ ar->leds.cdev.name = ar->leds.label;
+ ar->leds.cdev.brightness_set_blocking = ath10k_leds_set_brightness_blocking;
+ ar->leds.cdev.default_trigger = ar->leds.wifi_led.default_trigger;
+
+ ret = led_classdev_register(wiphy_dev(ar->hw->wiphy), &ar->leds.cdev);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+void ath10k_leds_unregister(struct ath10k *ar)
+{
+ if (ar->hw_params.led_pin == 0)
+ /* leds not supported */
+ return;
+
+ led_classdev_unregister(&ar->leds.cdev);
+}
+
diff --git a/drivers/net/wireless/ath/ath10k/leds.h b/drivers/net/wireless/ath/ath10k/leds.h
new file mode 100644
index 000000000000..56325b0875e5
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/leds.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: ISC */
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018 Sebastian Gottschall <s.gottschall@dd-wrt.com>
+ * Copyright (c) 2018 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _LEDS_H_
+#define _LEDS_H_
+
+#include "core.h"
+
+#ifdef CONFIG_ATH10K_LEDS
+void ath10k_leds_unregister(struct ath10k *ar);
+int ath10k_leds_start(struct ath10k *ar);
+int ath10k_leds_register(struct ath10k *ar);
+#else
+static inline void ath10k_leds_unregister(struct ath10k *ar)
+{
+}
+
+static inline int ath10k_leds_start(struct ath10k *ar)
+{
+ return 0;
+}
+
+static inline int ath10k_leds_register(struct ath10k *ar)
+{
+ return 0;
+}
+
+#endif
+#endif /* _LEDS_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
new file mode 100644
index 000000000000..da6f7957a0ae
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -0,0 +1,10382 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#include "mac.h"
+
+#include <linux/export.h>
+#include <net/cfg80211.h>
+#include <net/mac80211.h>
+#include <linux/etherdevice.h>
+#include <linux/acpi.h>
+#include <linux/of.h>
+#include <linux/bitfield.h>
+#include <linux/random.h>
+
+#include "hif.h"
+#include "core.h"
+#include "debug.h"
+#include "wmi.h"
+#include "htt.h"
+#include "txrx.h"
+#include "testmode.h"
+#include "wmi-tlv.h"
+#include "wmi-ops.h"
+#include "wow.h"
+#include "leds.h"
+
+/*********/
+/* Rates */
+/*********/
+
+static struct ieee80211_rate ath10k_rates[] = {
+ { .bitrate = 10,
+ .hw_value = ATH10K_HW_RATE_CCK_LP_1M },
+ { .bitrate = 20,
+ .hw_value = ATH10K_HW_RATE_CCK_LP_2M,
+ .hw_value_short = ATH10K_HW_RATE_CCK_SP_2M,
+ .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+ { .bitrate = 55,
+ .hw_value = ATH10K_HW_RATE_CCK_LP_5_5M,
+ .hw_value_short = ATH10K_HW_RATE_CCK_SP_5_5M,
+ .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+ { .bitrate = 110,
+ .hw_value = ATH10K_HW_RATE_CCK_LP_11M,
+ .hw_value_short = ATH10K_HW_RATE_CCK_SP_11M,
+ .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+
+ { .bitrate = 60, .hw_value = ATH10K_HW_RATE_OFDM_6M },
+ { .bitrate = 90, .hw_value = ATH10K_HW_RATE_OFDM_9M },
+ { .bitrate = 120, .hw_value = ATH10K_HW_RATE_OFDM_12M },
+ { .bitrate = 180, .hw_value = ATH10K_HW_RATE_OFDM_18M },
+ { .bitrate = 240, .hw_value = ATH10K_HW_RATE_OFDM_24M },
+ { .bitrate = 360, .hw_value = ATH10K_HW_RATE_OFDM_36M },
+ { .bitrate = 480, .hw_value = ATH10K_HW_RATE_OFDM_48M },
+ { .bitrate = 540, .hw_value = ATH10K_HW_RATE_OFDM_54M },
+};
+
+static struct ieee80211_rate ath10k_rates_rev2[] = {
+ { .bitrate = 10,
+ .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_1M },
+ { .bitrate = 20,
+ .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_2M,
+ .hw_value_short = ATH10K_HW_RATE_REV2_CCK_SP_2M,
+ .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+ { .bitrate = 55,
+ .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_5_5M,
+ .hw_value_short = ATH10K_HW_RATE_REV2_CCK_SP_5_5M,
+ .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+ { .bitrate = 110,
+ .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_11M,
+ .hw_value_short = ATH10K_HW_RATE_REV2_CCK_SP_11M,
+ .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+
+ { .bitrate = 60, .hw_value = ATH10K_HW_RATE_OFDM_6M },
+ { .bitrate = 90, .hw_value = ATH10K_HW_RATE_OFDM_9M },
+ { .bitrate = 120, .hw_value = ATH10K_HW_RATE_OFDM_12M },
+ { .bitrate = 180, .hw_value = ATH10K_HW_RATE_OFDM_18M },
+ { .bitrate = 240, .hw_value = ATH10K_HW_RATE_OFDM_24M },
+ { .bitrate = 360, .hw_value = ATH10K_HW_RATE_OFDM_36M },
+ { .bitrate = 480, .hw_value = ATH10K_HW_RATE_OFDM_48M },
+ { .bitrate = 540, .hw_value = ATH10K_HW_RATE_OFDM_54M },
+};
+
+static const struct cfg80211_sar_freq_ranges ath10k_sar_freq_ranges[] = {
+ {.start_freq = 2402, .end_freq = 2494 },
+ {.start_freq = 5170, .end_freq = 5875 },
+};
+
+static const struct cfg80211_sar_capa ath10k_sar_capa = {
+ .type = NL80211_SAR_TYPE_POWER,
+ .num_freq_ranges = ARRAY_SIZE(ath10k_sar_freq_ranges),
+ .freq_ranges = &ath10k_sar_freq_ranges[0],
+};
+
+#define ATH10K_MAC_FIRST_OFDM_RATE_IDX 4
+
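+/* ath10k_rates[0..3] are the CCK entries; 5 GHz (11a) uses only the
+ * OFDM entries from index 4 onwards, while 2.4 GHz (11g) uses the
+ * whole table.
+ */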
+#define ath10k_a_rates (ath10k_rates + ATH10K_MAC_FIRST_OFDM_RATE_IDX)
+#define ath10k_a_rates_size (ARRAY_SIZE(ath10k_rates) - \
+ ATH10K_MAC_FIRST_OFDM_RATE_IDX)
+#define ath10k_g_rates (ath10k_rates + 0)
+#define ath10k_g_rates_size (ARRAY_SIZE(ath10k_rates))
+
+#define ath10k_g_rates_rev2 (ath10k_rates_rev2 + 0)
+#define ath10k_g_rates_rev2_size (ARRAY_SIZE(ath10k_rates_rev2))
+
+#define ath10k_wmi_legacy_rates ath10k_rates
+
+static bool ath10k_mac_bitrate_is_cck(int bitrate)
+{
+ switch (bitrate) {
+ case 10:
+ case 20:
+ case 55:
+ case 110:
+ return true;
+ }
+
+ return false;
+}
+
+static u8 ath10k_mac_bitrate_to_rate(int bitrate)
+{
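+ /* mac80211 bitrates are in 100 kbps units; convert to 500 kbps
+ * units and flag CCK rates with BIT(7). E.g. 5.5 Mbps:
+ * DIV_ROUND_UP(55, 5) = 11, CCK, so the rate code is 0x8b.
+ */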
+ return DIV_ROUND_UP(bitrate, 5) |
+ (ath10k_mac_bitrate_is_cck(bitrate) ? BIT(7) : 0);
+}
+
+u8 ath10k_mac_hw_rate_to_idx(const struct ieee80211_supported_band *sband,
+ u8 hw_rate, bool cck)
+{
+ const struct ieee80211_rate *rate;
+ int i;
+
+ for (i = 0; i < sband->n_bitrates; i++) {
+ rate = &sband->bitrates[i];
+
+ if (ath10k_mac_bitrate_is_cck(rate->bitrate) != cck)
+ continue;
+
+ if (rate->hw_value == hw_rate)
+ return i;
+ else if (rate->flags & IEEE80211_RATE_SHORT_PREAMBLE &&
+ rate->hw_value_short == hw_rate)
+ return i;
+ }
+
+ return 0;
+}
+
+u8 ath10k_mac_bitrate_to_idx(const struct ieee80211_supported_band *sband,
+ u32 bitrate)
+{
+ int i;
+
+ for (i = 0; i < sband->n_bitrates; i++)
+ if (sband->bitrates[i].bitrate == bitrate)
+ return i;
+
+ return 0;
+}
+
+static int ath10k_mac_get_rate_hw_value(int bitrate)
+{
+ int i;
+ u8 hw_value_prefix = 0;
+
+ if (ath10k_mac_bitrate_is_cck(bitrate))
+ hw_value_prefix = WMI_RATE_PREAMBLE_CCK << 6;
+
+ for (i = 0; i < ARRAY_SIZE(ath10k_rates); i++) {
+ if (ath10k_rates[i].bitrate == bitrate)
+ return hw_value_prefix | ath10k_rates[i].hw_value;
+ }
+
+ return -EINVAL;
+}
+
+static int ath10k_mac_get_max_vht_mcs_map(u16 mcs_map, int nss)
+{
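+ /* The VHT MCS map encodes 2 bits per spatial stream; translate
+ * the per-NSS support level into a bitmask of usable MCS indices,
+ * e.g. SUPPORT_0_7 -> BIT(8) - 1 == MCS 0..7 set.
+ */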
+ switch ((mcs_map >> (2 * nss)) & 0x3) {
+ case IEEE80211_VHT_MCS_SUPPORT_0_7: return BIT(8) - 1;
+ case IEEE80211_VHT_MCS_SUPPORT_0_8: return BIT(9) - 1;
+ case IEEE80211_VHT_MCS_SUPPORT_0_9: return BIT(10) - 1;
+ }
+ return 0;
+}
+
+static u32
+ath10k_mac_max_ht_nss(const u8 ht_mcs_mask[IEEE80211_HT_MCS_MASK_LEN])
+{
+ int nss;
+
+ for (nss = IEEE80211_HT_MCS_MASK_LEN - 1; nss >= 0; nss--)
+ if (ht_mcs_mask[nss])
+ return nss + 1;
+
+ return 1;
+}
+
+static u32
+ath10k_mac_max_vht_nss(const u16 vht_mcs_mask[NL80211_VHT_NSS_MAX])
+{
+ int nss;
+
+ for (nss = NL80211_VHT_NSS_MAX - 1; nss >= 0; nss--)
+ if (vht_mcs_mask[nss])
+ return nss + 1;
+
+ return 1;
+}
+
+int ath10k_mac_ext_resource_config(struct ath10k *ar, u32 val)
+{
+ enum wmi_host_platform_type platform_type;
+ int ret;
+
+ if (test_bit(WMI_SERVICE_TX_MODE_DYNAMIC, ar->wmi.svc_map))
+ platform_type = WMI_HOST_PLATFORM_LOW_PERF;
+ else
+ platform_type = WMI_HOST_PLATFORM_HIGH_PERF;
+
+ ret = ath10k_wmi_ext_resource_config(ar, platform_type, val);
+
+ if (ret && ret != -EOPNOTSUPP) {
+ ath10k_warn(ar, "failed to configure ext resource: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+/**********/
+/* Crypto */
+/**********/
+
+static int ath10k_send_key(struct ath10k_vif *arvif,
+ struct ieee80211_key_conf *key,
+ enum set_key_cmd cmd,
+ const u8 *macaddr, u32 flags)
+{
+ struct ath10k *ar = arvif->ar;
+ struct wmi_vdev_install_key_arg arg = {
+ .vdev_id = arvif->vdev_id,
+ .key_idx = key->keyidx,
+ .key_len = key->keylen,
+ .key_data = key->key,
+ .key_flags = flags,
+ .macaddr = macaddr,
+ };
+
+ lockdep_assert_held(&arvif->ar->conf_mutex);
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_CCMP:
+ arg.key_cipher = ar->wmi_key_cipher[WMI_CIPHER_AES_CCM];
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT;
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ arg.key_cipher = ar->wmi_key_cipher[WMI_CIPHER_TKIP];
+ arg.key_txmic_len = 8;
+ arg.key_rxmic_len = 8;
+ break;
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ arg.key_cipher = ar->wmi_key_cipher[WMI_CIPHER_WEP];
+ break;
+ case WLAN_CIPHER_SUITE_CCMP_256:
+ arg.key_cipher = ar->wmi_key_cipher[WMI_CIPHER_AES_CCM];
+ break;
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ arg.key_cipher = ar->wmi_key_cipher[WMI_CIPHER_AES_GCM];
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT;
+ break;
+ case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+ case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+ case WLAN_CIPHER_SUITE_BIP_CMAC_256:
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ WARN_ON(1);
+ return -EINVAL;
+ default:
+ ath10k_warn(ar, "cipher %d is not supported\n", key->cipher);
+ return -EOPNOTSUPP;
+ }
+
+ if (test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+
+ if (cmd == DISABLE_KEY) {
+ if (flags & WMI_KEY_GROUP) {
+ /* Not all hardware handles the group-key deletion
+ * operation correctly. Replace the key with a junk
+ * value to invalidate it.
+ */
+ get_random_bytes(key->key, key->keylen);
+ } else {
+ arg.key_cipher = ar->wmi_key_cipher[WMI_CIPHER_NONE];
+ arg.key_data = NULL;
+ }
+ }
+
+ return ath10k_wmi_vdev_install_key(arvif->ar, &arg);
+}
+
+static int ath10k_install_key(struct ath10k_vif *arvif,
+ struct ieee80211_key_conf *key,
+ enum set_key_cmd cmd,
+ const u8 *macaddr, u32 flags)
+{
+ struct ath10k *ar = arvif->ar;
+ int ret;
+ unsigned long time_left;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ reinit_completion(&ar->install_key_done);
+
+ if (arvif->nohwcrypt)
+ return 1;
+
+ ret = ath10k_send_key(arvif, key, cmd, macaddr, flags);
+ if (ret)
+ return ret;
+
+ time_left = wait_for_completion_timeout(&ar->install_key_done, 3 * HZ);
+ if (time_left == 0)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int ath10k_install_peer_wep_keys(struct ath10k_vif *arvif,
+ const u8 *addr)
+{
+ struct ath10k *ar = arvif->ar;
+ struct ath10k_peer *peer;
+ int ret;
+ int i;
+ u32 flags;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (WARN_ON(arvif->vif->type != NL80211_IFTYPE_AP &&
+ arvif->vif->type != NL80211_IFTYPE_ADHOC &&
+ arvif->vif->type != NL80211_IFTYPE_MESH_POINT))
+ return -EINVAL;
+
+ spin_lock_bh(&ar->data_lock);
+ peer = ath10k_peer_find(ar, arvif->vdev_id, addr);
+ spin_unlock_bh(&ar->data_lock);
+
+ if (!peer)
+ return -ENOENT;
+
+ for (i = 0; i < ARRAY_SIZE(arvif->wep_keys); i++) {
+ if (arvif->wep_keys[i] == NULL)
+ continue;
+
+ switch (arvif->vif->type) {
+ case NL80211_IFTYPE_AP:
+ flags = WMI_KEY_PAIRWISE;
+
+ if (arvif->def_wep_key_idx == i)
+ flags |= WMI_KEY_TX_USAGE;
+
+ ret = ath10k_install_key(arvif, arvif->wep_keys[i],
+ SET_KEY, addr, flags);
+ if (ret < 0)
+ return ret;
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ ret = ath10k_install_key(arvif, arvif->wep_keys[i],
+ SET_KEY, addr,
+ WMI_KEY_PAIRWISE);
+ if (ret < 0)
+ return ret;
+
+ ret = ath10k_install_key(arvif, arvif->wep_keys[i],
+ SET_KEY, addr, WMI_KEY_GROUP);
+ if (ret < 0)
+ return ret;
+ break;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ spin_lock_bh(&ar->data_lock);
+ peer->keys[i] = arvif->wep_keys[i];
+ spin_unlock_bh(&ar->data_lock);
+ }
+
+ /* In some cases (notably with static WEP IBSS with multiple keys)
+ * multicast Tx becomes broken. Both pairwise and groupwise keys are
+ * installed already. Using WMI_KEY_TX_USAGE in different combinations
+ * didn't seem to help. Using the def_keyid vdev parameter seems to be
+ * effective so use that.
+ *
+ * FIXME: Revisit. Perhaps this can be done in a less hacky way.
+ */
+ if (arvif->vif->type != NL80211_IFTYPE_ADHOC)
+ return 0;
+
+ if (arvif->def_wep_key_idx == -1)
+ return 0;
+
+ ret = ath10k_wmi_vdev_set_param(arvif->ar,
+ arvif->vdev_id,
+ arvif->ar->wmi.vdev_param->def_keyid,
+ arvif->def_wep_key_idx);
+ if (ret) {
+ ath10k_warn(ar, "failed to re-set def wpa key idx on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath10k_clear_peer_keys(struct ath10k_vif *arvif,
+ const u8 *addr)
+{
+ struct ath10k *ar = arvif->ar;
+ struct ath10k_peer *peer;
+ int first_errno = 0;
+ int ret;
+ int i;
+ u32 flags = 0;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ spin_lock_bh(&ar->data_lock);
+ peer = ath10k_peer_find(ar, arvif->vdev_id, addr);
+ spin_unlock_bh(&ar->data_lock);
+
+ if (!peer)
+ return -ENOENT;
+
+ for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
+ if (peer->keys[i] == NULL)
+ continue;
+
+ /* key flags are not required to delete the key */
+ ret = ath10k_install_key(arvif, peer->keys[i],
+ DISABLE_KEY, addr, flags);
+ if (ret < 0 && first_errno == 0)
+ first_errno = ret;
+
+ if (ret < 0)
+ ath10k_warn(ar, "failed to remove peer wep key %d: %d\n",
+ i, ret);
+
+ spin_lock_bh(&ar->data_lock);
+ peer->keys[i] = NULL;
+ spin_unlock_bh(&ar->data_lock);
+ }
+
+ return first_errno;
+}
+
+bool ath10k_mac_is_peer_wep_key_set(struct ath10k *ar, const u8 *addr,
+ u8 keyidx)
+{
+ struct ath10k_peer *peer;
+ int i;
+
+ lockdep_assert_held(&ar->data_lock);
+
+ /* We don't know which vdev this peer belongs to,
+ * since WMI doesn't give us that information.
+ *
+ * FIXME: multi-bss needs to be handled.
+ */
+ peer = ath10k_peer_find(ar, 0, addr);
+ if (!peer)
+ return false;
+
+ for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
+ if (peer->keys[i] && peer->keys[i]->keyidx == keyidx)
+ return true;
+ }
+
+ return false;
+}
+
+static int ath10k_clear_vdev_key(struct ath10k_vif *arvif,
+ struct ieee80211_key_conf *key)
+{
+ struct ath10k *ar = arvif->ar;
+ struct ath10k_peer *peer;
+ u8 addr[ETH_ALEN];
+ int first_errno = 0;
+ int ret;
+ int i;
+ u32 flags = 0;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ for (;;) {
+ /* since ath10k_install_key() sleeps we can't hold data_lock
+ * all the time, so we try to remove the keys incrementally
+ */
+ spin_lock_bh(&ar->data_lock);
+ i = 0;
+ list_for_each_entry(peer, &ar->peers, list) {
+ for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
+ if (peer->keys[i] == key) {
+ ether_addr_copy(addr, peer->addr);
+ peer->keys[i] = NULL;
+ break;
+ }
+ }
+
+ if (i < ARRAY_SIZE(peer->keys))
+ break;
+ }
+ spin_unlock_bh(&ar->data_lock);
+
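+ /* If the scan completed without finding the key in any
+ * peer, every instance of it has been removed and we are
+ * done.
+ */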
+ if (i == ARRAY_SIZE(peer->keys))
+ break;
+ /* key flags are not required to delete the key */
+ ret = ath10k_install_key(arvif, key, DISABLE_KEY, addr, flags);
+ if (ret < 0 && first_errno == 0)
+ first_errno = ret;
+
+ if (ret)
+ ath10k_warn(ar, "failed to remove key for %pM: %d\n",
+ addr, ret);
+ }
+
+ return first_errno;
+}
+
+static int ath10k_mac_vif_update_wep_key(struct ath10k_vif *arvif,
+ struct ieee80211_key_conf *key)
+{
+ struct ath10k *ar = arvif->ar;
+ struct ath10k_peer *peer;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ list_for_each_entry(peer, &ar->peers, list) {
+ if (ether_addr_equal(peer->addr, arvif->vif->addr))
+ continue;
+
+ if (ether_addr_equal(peer->addr, arvif->bssid))
+ continue;
+
+ if (peer->keys[key->keyidx] == key)
+ continue;
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vif vdev %i update key %i needs update\n",
+ arvif->vdev_id, key->keyidx);
+
+ ret = ath10k_install_peer_wep_keys(arvif, peer->addr);
+ if (ret) {
+ ath10k_warn(ar, "failed to update wep keys on vdev %i for peer %pM: %d\n",
+ arvif->vdev_id, peer->addr, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/*********************/
+/* General utilities */
+/*********************/
+
+static inline enum wmi_phy_mode
+chan_to_phymode(const struct cfg80211_chan_def *chandef)
+{
+ enum wmi_phy_mode phymode = MODE_UNKNOWN;
+
+ switch (chandef->chan->band) {
+ case NL80211_BAND_2GHZ:
+ switch (chandef->width) {
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ if (chandef->chan->flags & IEEE80211_CHAN_NO_OFDM)
+ phymode = MODE_11B;
+ else
+ phymode = MODE_11G;
+ break;
+ case NL80211_CHAN_WIDTH_20:
+ phymode = MODE_11NG_HT20;
+ break;
+ case NL80211_CHAN_WIDTH_40:
+ phymode = MODE_11NG_HT40;
+ break;
+ default:
+ phymode = MODE_UNKNOWN;
+ break;
+ }
+ break;
+ case NL80211_BAND_5GHZ:
+ switch (chandef->width) {
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ phymode = MODE_11A;
+ break;
+ case NL80211_CHAN_WIDTH_20:
+ phymode = MODE_11NA_HT20;
+ break;
+ case NL80211_CHAN_WIDTH_40:
+ phymode = MODE_11NA_HT40;
+ break;
+ case NL80211_CHAN_WIDTH_80:
+ phymode = MODE_11AC_VHT80;
+ break;
+ case NL80211_CHAN_WIDTH_160:
+ phymode = MODE_11AC_VHT160;
+ break;
+ case NL80211_CHAN_WIDTH_80P80:
+ phymode = MODE_11AC_VHT80_80;
+ break;
+ default:
+ phymode = MODE_UNKNOWN;
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ WARN_ON(phymode == MODE_UNKNOWN);
+ return phymode;
+}
+
+static u8 ath10k_parse_mpdudensity(u8 mpdudensity)
+{
+/*
+ * 802.11n D2.0 defined values for "Minimum MPDU Start Spacing":
+ * 0 for no restriction
+ * 1 for 1/4 us
+ * 2 for 1/2 us
+ * 3 for 1 us
+ * 4 for 2 us
+ * 5 for 4 us
+ * 6 for 8 us
+ * 7 for 16 us
+ */
+ switch (mpdudensity) {
+ case 0:
+ return 0;
+ case 1:
+ case 2:
+ case 3:
+ /* Our lower layer calculations limit our precision to
+ * 1 microsecond
+ */
+ return 1;
+ case 4:
+ return 2;
+ case 5:
+ return 4;
+ case 6:
+ return 8;
+ case 7:
+ return 16;
+ default:
+ return 0;
+ }
+}
+
+int ath10k_mac_vif_chan(struct ieee80211_vif *vif,
+ struct cfg80211_chan_def *def)
+{
+ struct ieee80211_chanctx_conf *conf;
+
+ rcu_read_lock();
+ conf = rcu_dereference(vif->bss_conf.chanctx_conf);
+ if (!conf) {
+ rcu_read_unlock();
+ return -ENOENT;
+ }
+
+ *def = conf->def;
+ rcu_read_unlock();
+
+ return 0;
+}
+
+static void ath10k_mac_num_chanctxs_iter(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *conf,
+ void *data)
+{
+ int *num = data;
+
+ (*num)++;
+}
+
+static int ath10k_mac_num_chanctxs(struct ath10k *ar)
+{
+ int num = 0;
+
+ ieee80211_iter_chan_contexts_atomic(ar->hw,
+ ath10k_mac_num_chanctxs_iter,
+ &num);
+
+ return num;
+}
+
+static void
+ath10k_mac_get_any_chandef_iter(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *conf,
+ void *data)
+{
+ struct cfg80211_chan_def **def = data;
+
+ *def = &conf->def;
+}
+
+static void ath10k_wait_for_peer_delete_done(struct ath10k *ar, u32 vdev_id,
+ const u8 *addr)
+{
+ unsigned long time_left;
+ int ret;
+
+ if (test_bit(WMI_SERVICE_SYNC_DELETE_CMDS, ar->wmi.svc_map)) {
+ ret = ath10k_wait_for_peer_deleted(ar, vdev_id, addr);
+ if (ret) {
+ ath10k_warn(ar, "failed to wait for peer deletion\n");
+ return;
+ }
+
+ time_left = wait_for_completion_timeout(&ar->peer_delete_done,
+ 5 * HZ);
+ if (!time_left)
+ ath10k_warn(ar, "Timeout in receiving peer delete response\n");
+ }
+}
+
+static int ath10k_peer_create(struct ath10k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ u32 vdev_id,
+ const u8 *addr,
+ enum wmi_peer_type peer_type)
+{
+ struct ath10k_peer *peer;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ /* Each vdev consumes a peer entry as well. */
+ if (ar->num_peers + list_count_nodes(&ar->arvifs) >= ar->max_num_peers)
+ return -ENOBUFS;
+
+ ret = ath10k_wmi_peer_create(ar, vdev_id, addr, peer_type);
+ if (ret) {
+ ath10k_warn(ar, "failed to create wmi peer %pM on vdev %i: %i\n",
+ addr, vdev_id, ret);
+ return ret;
+ }
+
+ ret = ath10k_wait_for_peer_created(ar, vdev_id, addr);
+ if (ret) {
+ ath10k_warn(ar, "failed to wait for created wmi peer %pM on vdev %i: %i\n",
+ addr, vdev_id, ret);
+ return ret;
+ }
+
+ spin_lock_bh(&ar->data_lock);
+
+ peer = ath10k_peer_find(ar, vdev_id, addr);
+ if (!peer) {
+ spin_unlock_bh(&ar->data_lock);
+ ath10k_warn(ar, "failed to find peer %pM on vdev %i after creation\n",
+ addr, vdev_id);
+ ath10k_wait_for_peer_delete_done(ar, vdev_id, addr);
+ return -ENOENT;
+ }
+
+ peer->vif = vif;
+ peer->sta = sta;
+
+ spin_unlock_bh(&ar->data_lock);
+
+ ar->num_peers++;
+
+ return 0;
+}
+
+static int ath10k_mac_set_kickout(struct ath10k_vif *arvif)
+{
+ struct ath10k *ar = arvif->ar;
+ u32 param;
+ int ret;
+
+ param = ar->wmi.pdev_param->sta_kickout_th;
+ ret = ath10k_wmi_pdev_set_param(ar, param,
+ ATH10K_KICKOUT_THRESHOLD);
+ if (ret) {
+ ath10k_warn(ar, "failed to set kickout threshold on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ param = ar->wmi.vdev_param->ap_keepalive_min_idle_inactive_time_secs;
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
+ ATH10K_KEEPALIVE_MIN_IDLE);
+ if (ret) {
+ ath10k_warn(ar, "failed to set keepalive minimum idle time on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ param = ar->wmi.vdev_param->ap_keepalive_max_idle_inactive_time_secs;
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
+ ATH10K_KEEPALIVE_MAX_IDLE);
+ if (ret) {
+ ath10k_warn(ar, "failed to set keepalive maximum idle time on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ param = ar->wmi.vdev_param->ap_keepalive_max_unresponsive_time_secs;
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
+ ATH10K_KEEPALIVE_MAX_UNRESPONSIVE);
+ if (ret) {
+ ath10k_warn(ar, "failed to set keepalive maximum unresponsive time on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath10k_mac_set_rts(struct ath10k_vif *arvif, u32 value)
+{
+ struct ath10k *ar = arvif->ar;
+ u32 vdev_param;
+
+ vdev_param = ar->wmi.vdev_param->rts_threshold;
+ return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, value);
+}
+
+static int ath10k_peer_delete(struct ath10k *ar, u32 vdev_id, const u8 *addr)
+{
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ret = ath10k_wmi_peer_delete(ar, vdev_id, addr);
+ if (ret)
+ return ret;
+
+ ret = ath10k_wait_for_peer_deleted(ar, vdev_id, addr);
+ if (ret)
+ return ret;
+
+ if (test_bit(WMI_SERVICE_SYNC_DELETE_CMDS, ar->wmi.svc_map)) {
+ unsigned long time_left;
+
+ time_left = wait_for_completion_timeout
+ (&ar->peer_delete_done, 5 * HZ);
+
+ if (!time_left) {
+ ath10k_warn(ar, "Timeout in receiving peer delete response\n");
+ return -ETIMEDOUT;
+ }
+ }
+
+ ar->num_peers--;
+
+ return 0;
+}
+
+static void ath10k_peer_map_cleanup(struct ath10k *ar, struct ath10k_peer *peer)
+{
+ int peer_id, i;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ for_each_set_bit(peer_id, peer->peer_ids,
+ ATH10K_MAX_NUM_PEER_IDS) {
+ ar->peer_map[peer_id] = NULL;
+ }
+
+ /* Double check that peer is properly un-referenced from
+ * the peer_map
+ */
+ for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) {
+ if (ar->peer_map[i] == peer) {
+ ath10k_warn(ar, "removing stale peer_map entry for %pM (ptr %p idx %d)\n",
+ peer->addr, peer, i);
+ ar->peer_map[i] = NULL;
+ }
+ }
+
+ list_del(&peer->list);
+ kfree(peer);
+ ar->num_peers--;
+}
+
+static void ath10k_peer_cleanup(struct ath10k *ar, u32 vdev_id)
+{
+ struct ath10k_peer *peer, *tmp;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ spin_lock_bh(&ar->data_lock);
+ list_for_each_entry_safe(peer, tmp, &ar->peers, list) {
+ if (peer->vdev_id != vdev_id)
+ continue;
+
+ ath10k_warn(ar, "removing stale peer %pM from vdev_id %d\n",
+ peer->addr, vdev_id);
+
+ ath10k_peer_map_cleanup(ar, peer);
+ }
+ spin_unlock_bh(&ar->data_lock);
+}
+
+static void ath10k_peer_cleanup_all(struct ath10k *ar)
+{
+ struct ath10k_peer *peer, *tmp;
+ int i;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ spin_lock_bh(&ar->data_lock);
+ list_for_each_entry_safe(peer, tmp, &ar->peers, list) {
+ list_del(&peer->list);
+ kfree(peer);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++)
+ ar->peer_map[i] = NULL;
+
+ spin_unlock_bh(&ar->data_lock);
+
+ ar->num_peers = 0;
+ ar->num_stations = 0;
+}
+
+static int ath10k_mac_tdls_peer_update(struct ath10k *ar, u32 vdev_id,
+ struct ieee80211_sta *sta,
+ enum wmi_tdls_peer_state state)
+{
+ int ret;
+ struct wmi_tdls_peer_update_cmd_arg arg = {};
+ struct wmi_tdls_peer_capab_arg cap = {};
+ struct wmi_channel_arg chan_arg = {};
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ arg.vdev_id = vdev_id;
+ arg.peer_state = state;
+ ether_addr_copy(arg.addr, sta->addr);
+
+ cap.peer_max_sp = sta->max_sp;
+ cap.peer_uapsd_queues = sta->uapsd_queues;
+
+ if (state == WMI_TDLS_PEER_STATE_CONNECTED &&
+ !sta->tdls_initiator)
+ cap.is_peer_responder = 1;
+
+ ret = ath10k_wmi_tdls_peer_update(ar, &arg, &cap, &chan_arg);
+ if (ret) {
+ ath10k_warn(ar, "failed to update tdls peer %pM on vdev %i: %i\n",
+ arg.addr, vdev_id, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+/************************/
+/* Interface management */
+/************************/
+
+void ath10k_mac_vif_beacon_free(struct ath10k_vif *arvif)
+{
+ struct ath10k *ar = arvif->ar;
+
+ lockdep_assert_held(&ar->data_lock);
+
+ if (!arvif->beacon)
+ return;
+
+ if (!arvif->beacon_buf)
+ dma_unmap_single(ar->dev, ATH10K_SKB_CB(arvif->beacon)->paddr,
+ arvif->beacon->len, DMA_TO_DEVICE);
+
+ if (WARN_ON(arvif->beacon_state != ATH10K_BEACON_SCHEDULED &&
+ arvif->beacon_state != ATH10K_BEACON_SENT))
+ return;
+
+ dev_kfree_skb_any(arvif->beacon);
+
+ arvif->beacon = NULL;
+ arvif->beacon_state = ATH10K_BEACON_SCHEDULED;
+}
+
+static void ath10k_mac_vif_beacon_cleanup(struct ath10k_vif *arvif)
+{
+ struct ath10k *ar = arvif->ar;
+
+ lockdep_assert_held(&ar->data_lock);
+
+ ath10k_mac_vif_beacon_free(arvif);
+
+ if (arvif->beacon_buf) {
+ if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
+ kfree(arvif->beacon_buf);
+ else
+ dma_free_coherent(ar->dev, IEEE80211_MAX_FRAME_LEN,
+ arvif->beacon_buf,
+ arvif->beacon_paddr);
+ arvif->beacon_buf = NULL;
+ }
+}
+
+static inline int ath10k_vdev_setup_sync(struct ath10k *ar)
+{
+ unsigned long time_left;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags))
+ return -ESHUTDOWN;
+
+ time_left = wait_for_completion_timeout(&ar->vdev_setup_done,
+ ATH10K_VDEV_SETUP_TIMEOUT_HZ);
+ if (time_left == 0)
+ return -ETIMEDOUT;
+
+ return ar->last_wmi_vdev_start_status;
+}
+
+static inline int ath10k_vdev_delete_sync(struct ath10k *ar)
+{
+ unsigned long time_left;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (!test_bit(WMI_SERVICE_SYNC_DELETE_CMDS, ar->wmi.svc_map))
+ return 0;
+
+ if (test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags))
+ return -ESHUTDOWN;
+
+ time_left = wait_for_completion_timeout(&ar->vdev_delete_done,
+ ATH10K_VDEV_DELETE_TIMEOUT_HZ);
+ if (time_left == 0)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int ath10k_monitor_vdev_start(struct ath10k *ar, int vdev_id)
+{
+ struct cfg80211_chan_def *chandef = NULL;
+ struct ieee80211_channel *channel = NULL;
+ struct wmi_vdev_start_request_arg arg = {};
+ int ret = 0;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ieee80211_iter_chan_contexts_atomic(ar->hw,
+ ath10k_mac_get_any_chandef_iter,
+ &chandef);
+ if (WARN_ON_ONCE(!chandef))
+ return -ENOENT;
+
+ channel = chandef->chan;
+
+ arg.vdev_id = vdev_id;
+ arg.channel.freq = channel->center_freq;
+ arg.channel.band_center_freq1 = chandef->center_freq1;
+ arg.channel.band_center_freq2 = chandef->center_freq2;
+
+ /* TODO: set this up dynamically; what about the case where we
+ * don't have any vifs?
+ */
+ arg.channel.mode = chan_to_phymode(chandef);
+ arg.channel.chan_radar =
+ !!(channel->flags & IEEE80211_CHAN_RADAR);
+
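+ /* The doubling below presumably converts dBm to the 0.5 dBm units
+ * the firmware expects for channel power limits (an assumption
+ * based on the scaling, not confirmed here).
+ */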
+ arg.channel.min_power = 0;
+ arg.channel.max_power = channel->max_power * 2;
+ arg.channel.max_reg_power = channel->max_reg_power * 2;
+ arg.channel.max_antenna_gain = channel->max_antenna_gain;
+
+ reinit_completion(&ar->vdev_setup_done);
+ reinit_completion(&ar->vdev_delete_done);
+
+ ret = ath10k_wmi_vdev_start(ar, &arg);
+ if (ret) {
+ ath10k_warn(ar, "failed to request monitor vdev %i start: %d\n",
+ vdev_id, ret);
+ return ret;
+ }
+
+ ret = ath10k_vdev_setup_sync(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to synchronize setup for monitor vdev %i start: %d\n",
+ vdev_id, ret);
+ return ret;
+ }
+
+ ret = ath10k_wmi_vdev_up(ar, vdev_id, 0, ar->mac_addr);
+ if (ret) {
+ ath10k_warn(ar, "failed to put up monitor vdev %i: %d\n",
+ vdev_id, ret);
+ goto vdev_stop;
+ }
+
+ ar->monitor_vdev_id = vdev_id;
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %i started\n",
+ ar->monitor_vdev_id);
+ return 0;
+
+vdev_stop:
+ ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
+ if (ret)
+ ath10k_warn(ar, "failed to stop monitor vdev %i after start failure: %d\n",
+ ar->monitor_vdev_id, ret);
+
+ return ret;
+}
+
+static int ath10k_monitor_vdev_stop(struct ath10k *ar)
+{
+ int ret = 0;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ret = ath10k_wmi_vdev_down(ar, ar->monitor_vdev_id);
+ if (ret)
+ ath10k_warn(ar, "failed to put down monitor vdev %i: %d\n",
+ ar->monitor_vdev_id, ret);
+
+ reinit_completion(&ar->vdev_setup_done);
+ reinit_completion(&ar->vdev_delete_done);
+
+ ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
+ if (ret)
+ ath10k_warn(ar, "failed to request monitor vdev %i stop: %d\n",
+ ar->monitor_vdev_id, ret);
+
+ ret = ath10k_vdev_setup_sync(ar);
+ if (ret)
+ ath10k_warn(ar, "failed to synchronize monitor vdev %i stop: %d\n",
+ ar->monitor_vdev_id, ret);
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %i stopped\n",
+ ar->monitor_vdev_id);
+ return ret;
+}
+
+static int ath10k_monitor_vdev_create(struct ath10k *ar)
+{
+ int bit, ret = 0;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (ar->free_vdev_map == 0) {
+ ath10k_warn(ar, "failed to find free vdev id for monitor vdev\n");
+ return -ENOMEM;
+ }
+
+ bit = __ffs64(ar->free_vdev_map);
+
+ ar->monitor_vdev_id = bit;
+
+ ret = ath10k_wmi_vdev_create(ar, ar->monitor_vdev_id,
+ WMI_VDEV_TYPE_MONITOR,
+ 0, ar->mac_addr);
+ if (ret) {
+ ath10k_warn(ar, "failed to request monitor vdev %i creation: %d\n",
+ ar->monitor_vdev_id, ret);
+ return ret;
+ }
+
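+ /* free_vdev_map is a 64-bit bitmap of available vdev ids; clear
+ * the bit we just claimed for the monitor vdev.
+ */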
+ ar->free_vdev_map &= ~(1LL << ar->monitor_vdev_id);
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %d created\n",
+ ar->monitor_vdev_id);
+
+ return 0;
+}
+
+static int ath10k_monitor_vdev_delete(struct ath10k *ar)
+{
+ int ret = 0;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ret = ath10k_wmi_vdev_delete(ar, ar->monitor_vdev_id);
+ if (ret) {
+ ath10k_warn(ar, "failed to request wmi monitor vdev %i removal: %d\n",
+ ar->monitor_vdev_id, ret);
+ return ret;
+ }
+
+ ar->free_vdev_map |= 1LL << ar->monitor_vdev_id;
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %d deleted\n",
+ ar->monitor_vdev_id);
+ return ret;
+}
+
+static int ath10k_monitor_start(struct ath10k *ar)
+{
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ret = ath10k_monitor_vdev_create(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to create monitor vdev: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath10k_monitor_vdev_start(ar, ar->monitor_vdev_id);
+ if (ret) {
+ ath10k_warn(ar, "failed to start monitor vdev: %d\n", ret);
+ ath10k_monitor_vdev_delete(ar);
+ return ret;
+ }
+
+ ar->monitor_started = true;
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor started\n");
+
+ return 0;
+}
+
+static int ath10k_monitor_stop(struct ath10k *ar)
+{
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ret = ath10k_monitor_vdev_stop(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to stop monitor vdev: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath10k_monitor_vdev_delete(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to delete monitor vdev: %d\n", ret);
+ return ret;
+ }
+
+ ar->monitor_started = false;
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor stopped\n");
+
+ return 0;
+}
+
+static bool ath10k_mac_monitor_vdev_is_needed(struct ath10k *ar)
+{
+ int num_ctx;
+
+ /* At least one chanctx is required to derive a channel to start
+ * the monitor vdev on.
+ */
+ num_ctx = ath10k_mac_num_chanctxs(ar);
+ if (num_ctx == 0)
+ return false;
+
+ /* If there's already an existing special monitor interface then don't
+ * bother creating another monitor vdev.
+ */
+ if (ar->monitor_arvif)
+ return false;
+
+ return ar->monitor ||
+ (!test_bit(ATH10K_FW_FEATURE_ALLOWS_MESH_BCAST,
+ ar->running_fw->fw_file.fw_features) &&
+ (ar->filter_flags & (FIF_OTHER_BSS | FIF_MCAST_ACTION))) ||
+ test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
+}
+
+static bool ath10k_mac_monitor_vdev_is_allowed(struct ath10k *ar)
+{
+ int num_ctx;
+
+ num_ctx = ath10k_mac_num_chanctxs(ar);
+
+ /* FIXME: Current interface combinations and cfg80211/mac80211 code
+ * shouldn't allow this but make sure to prevent handling the following
+ * case anyway since multi-channel DFS hasn't been tested at all.
+ */
+ if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags) && num_ctx > 1)
+ return false;
+
+ return true;
+}
+
+static int ath10k_monitor_recalc(struct ath10k *ar)
+{
+ bool needed;
+ bool allowed;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ needed = ath10k_mac_monitor_vdev_is_needed(ar);
+ allowed = ath10k_mac_monitor_vdev_is_allowed(ar);
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac monitor recalc started? %d needed? %d allowed? %d\n",
+ ar->monitor_started, needed, allowed);
+
+ if (WARN_ON(needed && !allowed)) {
+ if (ar->monitor_started) {
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor stopping disallowed monitor\n");
+
+ ret = ath10k_monitor_stop(ar);
+ if (ret)
+ ath10k_warn(ar, "failed to stop disallowed monitor: %d\n",
+ ret);
+ /* not serious */
+ }
+
+ return -EPERM;
+ }
+
+ if (needed == ar->monitor_started)
+ return 0;
+
+ if (needed)
+ return ath10k_monitor_start(ar);
+ else
+ return ath10k_monitor_stop(ar);
+}
+
+static bool ath10k_mac_can_set_cts_prot(struct ath10k_vif *arvif)
+{
+ struct ath10k *ar = arvif->ar;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (!arvif->is_started) {
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "defer cts setup, vdev is not ready yet\n");
+ return false;
+ }
+
+ return true;
+}
+
+static int ath10k_mac_set_cts_prot(struct ath10k_vif *arvif)
+{
+ struct ath10k *ar = arvif->ar;
+ u32 vdev_param;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ vdev_param = ar->wmi.vdev_param->protection_mode;
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d cts_protection %d\n",
+ arvif->vdev_id, arvif->use_cts_prot);
+
+ return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
+ arvif->use_cts_prot ? 1 : 0);
+}
+
+static int ath10k_recalc_rtscts_prot(struct ath10k_vif *arvif)
+{
+ struct ath10k *ar = arvif->ar;
+ u32 vdev_param, rts_cts = 0;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ vdev_param = ar->wmi.vdev_param->enable_rtscts;
+
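+ /* SM() shifts a value into the named bitfield. Always enable
+ * RTS/CTS and select a profile below depending on whether any
+ * legacy (non-HT) stations are currently associated.
+ */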
+ rts_cts |= SM(WMI_RTSCTS_ENABLED, WMI_RTSCTS_SET);
+
+ if (arvif->num_legacy_stations > 0)
+ rts_cts |= SM(WMI_RTSCTS_ACROSS_SW_RETRIES,
+ WMI_RTSCTS_PROFILE);
+ else
+ rts_cts |= SM(WMI_RTSCTS_FOR_SECOND_RATESERIES,
+ WMI_RTSCTS_PROFILE);
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d recalc rts/cts prot %d\n",
+ arvif->vdev_id, rts_cts);
+
+ return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
+ rts_cts);
+}
+
+static int ath10k_start_cac(struct ath10k *ar)
+{
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ set_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
+
+ ret = ath10k_monitor_recalc(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to start monitor (cac): %d\n", ret);
+ clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
+ return ret;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac cac start monitor vdev %d\n",
+ ar->monitor_vdev_id);
+
+ return 0;
+}
+
+static int ath10k_stop_cac(struct ath10k *ar)
+{
+ lockdep_assert_held(&ar->conf_mutex);
+
+ /* CAC is not running - do nothing */
+ if (!test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags))
+ return 0;
+
+ clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
+ ath10k_monitor_stop(ar);
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac cac finished\n");
+
+ return 0;
+}
+
+static void ath10k_mac_has_radar_iter(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *conf,
+ void *data)
+{
+ bool *ret = data;
+
+ if (!*ret && conf->radar_enabled)
+ *ret = true;
+}
+
+static bool ath10k_mac_has_radar_enabled(struct ath10k *ar)
+{
+ bool has_radar = false;
+
+ ieee80211_iter_chan_contexts_atomic(ar->hw,
+ ath10k_mac_has_radar_iter,
+ &has_radar);
+
+ return has_radar;
+}
+
+static void ath10k_recalc_radar_detection(struct ath10k *ar)
+{
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ath10k_stop_cac(ar);
+
+ if (!ath10k_mac_has_radar_enabled(ar))
+ return;
+
+ if (ar->num_started_vdevs > 0)
+ return;
+
+ ret = ath10k_start_cac(ar);
+ if (ret) {
+ /*
+ * It is not possible to start CAC on the current channel, so
+ * starting radiation is not allowed; make this channel
+ * DFS_UNAVAILABLE by indicating that radar was detected.
+ */
+ ath10k_warn(ar, "failed to start CAC: %d\n", ret);
+ ieee80211_radar_detected(ar->hw, NULL);
+ }
+}
+
+static int ath10k_vdev_stop(struct ath10k_vif *arvif)
+{
+ struct ath10k *ar = arvif->ar;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ reinit_completion(&ar->vdev_setup_done);
+ reinit_completion(&ar->vdev_delete_done);
+
+ ret = ath10k_wmi_vdev_stop(ar, arvif->vdev_id);
+ if (ret) {
+ ath10k_warn(ar, "failed to stop WMI vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ ret = ath10k_vdev_setup_sync(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to synchronize setup for vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ WARN_ON(ar->num_started_vdevs == 0);
+
+ if (ar->num_started_vdevs != 0) {
+ ar->num_started_vdevs--;
+ ath10k_recalc_radar_detection(ar);
+ }
+
+ return ret;
+}
+
+static int ath10k_vdev_start_restart(struct ath10k_vif *arvif,
+ const struct cfg80211_chan_def *chandef,
+ bool restart)
+{
+ struct ath10k *ar = arvif->ar;
+ struct wmi_vdev_start_request_arg arg = {};
+ int ret = 0;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ reinit_completion(&ar->vdev_setup_done);
+ reinit_completion(&ar->vdev_delete_done);
+
+ arg.vdev_id = arvif->vdev_id;
+ arg.dtim_period = arvif->dtim_period;
+ arg.bcn_intval = arvif->beacon_interval;
+
+ arg.channel.freq = chandef->chan->center_freq;
+ arg.channel.band_center_freq1 = chandef->center_freq1;
+ arg.channel.band_center_freq2 = chandef->center_freq2;
+ arg.channel.mode = chan_to_phymode(chandef);
+
+ arg.channel.min_power = 0;
+ arg.channel.max_power = chandef->chan->max_power * 2;
+ arg.channel.max_reg_power = chandef->chan->max_reg_power * 2;
+ arg.channel.max_antenna_gain = chandef->chan->max_antenna_gain;
+
+ if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
+ arg.ssid = arvif->u.ap.ssid;
+ arg.ssid_len = arvif->u.ap.ssid_len;
+ arg.hidden_ssid = arvif->u.ap.hidden_ssid;
+
+ /* For now allow DFS for AP mode */
+ arg.channel.chan_radar =
+ !!(chandef->chan->flags & IEEE80211_CHAN_RADAR);
+ } else if (arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
+ arg.ssid = arvif->vif->cfg.ssid;
+ arg.ssid_len = arvif->vif->cfg.ssid_len;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac vdev %d start center_freq %d phymode %s\n",
+ arg.vdev_id, arg.channel.freq,
+ ath10k_wmi_phymode_str(arg.channel.mode));
+
+ if (restart)
+ ret = ath10k_wmi_vdev_restart(ar, &arg);
+ else
+ ret = ath10k_wmi_vdev_start(ar, &arg);
+
+ if (ret) {
+ ath10k_warn(ar, "failed to start WMI vdev %i: %d\n",
+ arg.vdev_id, ret);
+ return ret;
+ }
+
+ ret = ath10k_vdev_setup_sync(ar);
+ if (ret) {
+ ath10k_warn(ar,
+ "failed to synchronize setup for vdev %i restart %d: %d\n",
+ arg.vdev_id, restart, ret);
+ return ret;
+ }
+
+ ar->num_started_vdevs++;
+ ath10k_recalc_radar_detection(ar);
+
+ return ret;
+}
+
+static int ath10k_vdev_start(struct ath10k_vif *arvif,
+ const struct cfg80211_chan_def *def)
+{
+ return ath10k_vdev_start_restart(arvif, def, false);
+}
+
+static int ath10k_vdev_restart(struct ath10k_vif *arvif,
+ const struct cfg80211_chan_def *def)
+{
+ return ath10k_vdev_start_restart(arvif, def, true);
+}
+
+static int ath10k_mac_setup_bcn_p2p_ie(struct ath10k_vif *arvif,
+ struct sk_buff *bcn)
+{
+ struct ath10k *ar = arvif->ar;
+ struct ieee80211_mgmt *mgmt;
+ const u8 *p2p_ie;
+ int ret;
+
+ if (arvif->vif->type != NL80211_IFTYPE_AP || !arvif->vif->p2p)
+ return 0;
+
+ mgmt = (void *)bcn->data;
+ p2p_ie = cfg80211_find_vendor_ie(WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P,
+ mgmt->u.beacon.variable,
+ bcn->len - (mgmt->u.beacon.variable -
+ bcn->data));
+ if (!p2p_ie)
+ return -ENOENT;
+
+ ret = ath10k_wmi_p2p_go_bcn_ie(ar, arvif->vdev_id, p2p_ie);
+ if (ret) {
+ ath10k_warn(ar, "failed to submit p2p go bcn ie for vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath10k_mac_remove_vendor_ie(struct sk_buff *skb, unsigned int oui,
+ u8 oui_type, size_t ie_offset)
+{
+ size_t len;
+ const u8 *next;
+ const u8 *end;
+ u8 *ie;
+
+ if (WARN_ON(skb->len < ie_offset))
+ return -EINVAL;
+
+ ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
+ skb->data + ie_offset,
+ skb->len - ie_offset);
+ if (!ie)
+ return -ENOENT;
+
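+ /* Elements are TLV-encoded: ie[0] is the element ID, ie[1] the
+ * payload length, so the full element spans ie[1] + 2 bytes.
+ */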
+ len = ie[1] + 2;
+ end = skb->data + skb->len;
+ next = ie + len;
+
+ if (WARN_ON(next > end))
+ return -EINVAL;
+
+ memmove(ie, next, end - next);
+ skb_trim(skb, skb->len - len);
+
+ return 0;
+}
+
+static int ath10k_mac_setup_bcn_tmpl(struct ath10k_vif *arvif)
+{
+ struct ath10k *ar = arvif->ar;
+ struct ieee80211_hw *hw = ar->hw;
+ struct ieee80211_vif *vif = arvif->vif;
+ struct ieee80211_mutable_offsets offs = {};
+ struct sk_buff *bcn;
+ int ret;
+
+ if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map))
+ return 0;
+
+ if (arvif->vdev_type != WMI_VDEV_TYPE_AP &&
+ arvif->vdev_type != WMI_VDEV_TYPE_IBSS)
+ return 0;
+
+ bcn = ieee80211_beacon_get_template(hw, vif, &offs, 0);
+ if (!bcn) {
+ ath10k_warn(ar, "failed to get beacon template from mac80211\n");
+ return -EPERM;
+ }
+
+ ret = ath10k_mac_setup_bcn_p2p_ie(arvif, bcn);
+ if (ret) {
+ ath10k_warn(ar, "failed to setup p2p go bcn ie: %d\n", ret);
+ kfree_skb(bcn);
+ return ret;
+ }
+
+ /* P2P IE is inserted by firmware automatically (as configured above)
+ * so remove it from the base beacon template to avoid duplicate P2P
+ * IEs in beacon frames.
+ */
+ ath10k_mac_remove_vendor_ie(bcn, WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P,
+ offsetof(struct ieee80211_mgmt,
+ u.beacon.variable));
+
+ ret = ath10k_wmi_bcn_tmpl(ar, arvif->vdev_id, offs.tim_offset, bcn, 0,
+ 0, NULL, 0);
+ kfree_skb(bcn);
+
+ if (ret) {
+ ath10k_warn(ar, "failed to submit beacon template command: %d\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath10k_mac_setup_prb_tmpl(struct ath10k_vif *arvif)
+{
+ struct ath10k *ar = arvif->ar;
+ struct ieee80211_hw *hw = ar->hw;
+ struct ieee80211_vif *vif = arvif->vif;
+ struct sk_buff *prb;
+ int ret;
+
+ if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map))
+ return 0;
+
+ if (arvif->vdev_type != WMI_VDEV_TYPE_AP)
+ return 0;
+
+ /* For mesh, probe response and beacon share the same template */
+ if (ieee80211_vif_is_mesh(vif))
+ return 0;
+
+ prb = ieee80211_proberesp_get(hw, vif);
+ if (!prb) {
+ ath10k_warn(ar, "failed to get probe resp template from mac80211\n");
+ return -EPERM;
+ }
+
+ ret = ath10k_wmi_prb_tmpl(ar, arvif->vdev_id, prb);
+ kfree_skb(prb);
+
+ if (ret) {
+ ath10k_warn(ar, "failed to submit probe resp template command: %d\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath10k_mac_vif_fix_hidden_ssid(struct ath10k_vif *arvif)
+{
+ struct ath10k *ar = arvif->ar;
+ struct cfg80211_chan_def def;
+ int ret;
+
+ /* When the vdev is originally started during assign_vif_chanctx()
+ * some information is missing, notably the SSID. Firmware revisions
+ * with beacon offloading require the SSID to be provided during vdev
+ * (re)start to handle hidden SSID properly.
+ *
+ * Vdev restart must be done after the vdev has been both started and
+ * upped. Otherwise some firmware revisions (at least 10.2) fail to
+ * deliver the vdev restart response event, causing timeouts during
+ * vdev syncing in ath10k.
+ *
+ * Note: The vdev down/up and template reinstallation could be skipped
+ * since only wmi-tlv firmware is known to have beacon offload and
+ * wmi-tlv doesn't seem to misbehave like 10.2 does wrt vdev restart
+ * response delivery. It's probably more robust to keep it as is.
+ */
+ if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map))
+ return 0;
+
+ if (WARN_ON(!arvif->is_started))
+ return -EINVAL;
+
+ if (WARN_ON(!arvif->is_up))
+ return -EINVAL;
+
+ if (WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def)))
+ return -EINVAL;
+
+ ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
+ if (ret) {
+ ath10k_warn(ar, "failed to bring down ap vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ /* Vdev down resets the beacon & presp templates. Reinstall them,
+ * otherwise the firmware will crash upon vdev up.
+ */
+
+ ret = ath10k_mac_setup_bcn_tmpl(arvif);
+ if (ret) {
+ ath10k_warn(ar, "failed to update beacon template: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath10k_mac_setup_prb_tmpl(arvif);
+ if (ret) {
+ ath10k_warn(ar, "failed to update presp template: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath10k_vdev_restart(arvif, &def);
+ if (ret) {
+ ath10k_warn(ar, "failed to restart ap vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
+ arvif->bssid);
+ if (ret) {
+ ath10k_warn(ar, "failed to bring up ap vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void ath10k_control_beaconing(struct ath10k_vif *arvif,
+ struct ieee80211_bss_conf *info)
+{
+ struct ath10k *ar = arvif->ar;
+ int ret = 0;
+
+ lockdep_assert_held(&arvif->ar->conf_mutex);
+
+ if (!info->enable_beacon) {
+ ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
+ if (ret)
+ ath10k_warn(ar, "failed to down vdev_id %i: %d\n",
+ arvif->vdev_id, ret);
+
+ arvif->is_up = false;
+
+ spin_lock_bh(&arvif->ar->data_lock);
+ ath10k_mac_vif_beacon_free(arvif);
+ spin_unlock_bh(&arvif->ar->data_lock);
+
+ return;
+ }
+
+ arvif->tx_seq_no = 0x1000;
+
+ arvif->aid = 0;
+ ether_addr_copy(arvif->bssid, info->bssid);
+
+ ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
+ arvif->bssid);
+ if (ret) {
+ ath10k_warn(ar, "failed to bring up vdev %d: %i\n",
+ arvif->vdev_id, ret);
+ return;
+ }
+
+ arvif->is_up = true;
+
+ ret = ath10k_mac_vif_fix_hidden_ssid(arvif);
+ if (ret) {
+ ath10k_warn(ar, "failed to fix hidden ssid for vdev %i, expect trouble: %d\n",
+ arvif->vdev_id, ret);
+ return;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d up\n", arvif->vdev_id);
+}
+
+static void ath10k_control_ibss(struct ath10k_vif *arvif,
+ struct ieee80211_vif *vif)
+{
+ struct ath10k *ar = arvif->ar;
+ u32 vdev_param;
+ int ret = 0;
+
+ lockdep_assert_held(&arvif->ar->conf_mutex);
+
+ if (!vif->cfg.ibss_joined) {
+ if (is_zero_ether_addr(arvif->bssid))
+ return;
+
+ eth_zero_addr(arvif->bssid);
+
+ return;
+ }
+
+ vdev_param = arvif->ar->wmi.vdev_param->atim_window;
+ ret = ath10k_wmi_vdev_set_param(arvif->ar, arvif->vdev_id, vdev_param,
+ ATH10K_DEFAULT_ATIM);
+ if (ret)
+ ath10k_warn(ar, "failed to set IBSS ATIM for vdev %d: %d\n",
+ arvif->vdev_id, ret);
+}
+
+static int ath10k_mac_vif_recalc_ps_wake_threshold(struct ath10k_vif *arvif)
+{
+ struct ath10k *ar = arvif->ar;
+ u32 param;
+ u32 value;
+ int ret;
+
+ lockdep_assert_held(&arvif->ar->conf_mutex);
+
+ if (arvif->u.sta.uapsd)
+ value = WMI_STA_PS_TX_WAKE_THRESHOLD_NEVER;
+ else
+ value = WMI_STA_PS_TX_WAKE_THRESHOLD_ALWAYS;
+
+ param = WMI_STA_PS_PARAM_TX_WAKE_THRESHOLD;
+ ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param, value);
+ if (ret) {
+ ath10k_warn(ar, "failed to submit ps wake threshold %u on vdev %i: %d\n",
+ value, arvif->vdev_id, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath10k_mac_vif_recalc_ps_poll_count(struct ath10k_vif *arvif)
+{
+ struct ath10k *ar = arvif->ar;
+ u32 param;
+ u32 value;
+ int ret;
+
+ lockdep_assert_held(&arvif->ar->conf_mutex);
+
+ if (arvif->u.sta.uapsd)
+ value = WMI_STA_PS_PSPOLL_COUNT_UAPSD;
+ else
+ value = WMI_STA_PS_PSPOLL_COUNT_NO_MAX;
+
+ param = WMI_STA_PS_PARAM_PSPOLL_COUNT;
+ ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
+ param, value);
+ if (ret) {
+ ath10k_warn(ar, "failed to submit ps poll count %u on vdev %i: %d\n",
+ value, arvif->vdev_id, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath10k_mac_num_vifs_started(struct ath10k *ar)
+{
+ struct ath10k_vif *arvif;
+ int num = 0;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ list_for_each_entry(arvif, &ar->arvifs, list)
+ if (arvif->is_started)
+ num++;
+
+ return num;
+}
+
+static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif)
+{
+ struct ath10k *ar = arvif->ar;
+ struct ieee80211_vif *vif = arvif->vif;
+ struct ieee80211_conf *conf = &ar->hw->conf;
+ enum wmi_sta_powersave_param param;
+ enum wmi_sta_ps_mode psmode;
+ int ret;
+ int ps_timeout;
+ bool enable_ps;
+
+ lockdep_assert_held(&arvif->ar->conf_mutex);
+
+ if (arvif->vif->type != NL80211_IFTYPE_STATION)
+ return 0;
+
+ enable_ps = arvif->ps;
+
+ if (enable_ps && ath10k_mac_num_vifs_started(ar) > 1 &&
+ !test_bit(ATH10K_FW_FEATURE_MULTI_VIF_PS_SUPPORT,
+ ar->running_fw->fw_file.fw_features)) {
+ ath10k_warn(ar, "refusing to enable ps on vdev %i: not supported by fw\n",
+ arvif->vdev_id);
+ enable_ps = false;
+ }
+
+ if (!arvif->is_started) {
+ /* mac80211 can update vif powersave state while disconnected.
+ * Firmware doesn't behave nicely and consumes more power than
+ * necessary if PS is disabled on a non-started vdev. Hence
+ * force-enable PS for non-running vdevs.
+ */
+ psmode = WMI_STA_PS_MODE_ENABLED;
+ } else if (enable_ps) {
+ psmode = WMI_STA_PS_MODE_ENABLED;
+ param = WMI_STA_PS_PARAM_INACTIVITY_TIME;
+
+ ps_timeout = conf->dynamic_ps_timeout;
+ if (ps_timeout == 0) {
+ /* Firmware doesn't like 0 */
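+ /* Fall back to one beacon interval, converted
+ * from TU to milliseconds.
+ */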
+ ps_timeout = ieee80211_tu_to_usec(
+ vif->bss_conf.beacon_int) / 1000;
+ }
+
+ ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param,
+ ps_timeout);
+ if (ret) {
+ ath10k_warn(ar, "failed to set inactivity time for vdev %d: %i\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ } else {
+ psmode = WMI_STA_PS_MODE_DISABLED;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d psmode %s\n",
+ arvif->vdev_id, psmode ? "enable" : "disable");
+
+ ret = ath10k_wmi_set_psmode(ar, arvif->vdev_id, psmode);
+ if (ret) {
+ ath10k_warn(ar, "failed to set PS Mode %d for vdev %d: %d\n",
+ psmode, arvif->vdev_id, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath10k_mac_vif_disable_keepalive(struct ath10k_vif *arvif)
+{
+ struct ath10k *ar = arvif->ar;
+ struct wmi_sta_keepalive_arg arg = {};
+ int ret;
+
+ lockdep_assert_held(&arvif->ar->conf_mutex);
+
+ if (arvif->vdev_type != WMI_VDEV_TYPE_STA)
+ return 0;
+
+ if (!test_bit(WMI_SERVICE_STA_KEEP_ALIVE, ar->wmi.svc_map))
+ return 0;
+
+ /* Some firmware revisions have a bug and ignore the `enabled` field.
+ * Instead use the interval to disable the keepalive.
+ */
+ arg.vdev_id = arvif->vdev_id;
+ arg.enabled = 1;
+ arg.method = WMI_STA_KEEPALIVE_METHOD_NULL_FRAME;
+ arg.interval = WMI_STA_KEEPALIVE_INTERVAL_DISABLE;
+
+ ret = ath10k_wmi_sta_keepalive(ar, &arg);
+ if (ret) {
+ ath10k_warn(ar, "failed to submit keepalive on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void ath10k_mac_vif_ap_csa_count_down(struct ath10k_vif *arvif)
+{
+ struct ath10k *ar = arvif->ar;
+ struct ieee80211_vif *vif = arvif->vif;
+ int ret;
+
+ lockdep_assert_held(&arvif->ar->conf_mutex);
+
+ if (WARN_ON(!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map)))
+ return;
+
+ if (arvif->vdev_type != WMI_VDEV_TYPE_AP)
+ return;
+
+ if (!vif->bss_conf.csa_active)
+ return;
+
+ if (!arvif->is_up)
+ return;
+
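+ /* While the countdown is running, decrement it and push refreshed
+ * beacon/probe-response templates so the offloaded CSA count stays
+ * current; once it completes, finalize the channel switch.
+ */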
+ if (!ieee80211_beacon_cntdwn_is_complete(vif, 0)) {
+ ieee80211_beacon_update_cntdwn(vif, 0);
+
+ ret = ath10k_mac_setup_bcn_tmpl(arvif);
+ if (ret)
+ ath10k_warn(ar, "failed to update bcn tmpl during csa: %d\n",
+ ret);
+
+ ret = ath10k_mac_setup_prb_tmpl(arvif);
+ if (ret)
+ ath10k_warn(ar, "failed to update prb tmpl during csa: %d\n",
+ ret);
+ } else {
+ ieee80211_csa_finish(vif, 0);
+ }
+}
+
+static void ath10k_mac_vif_ap_csa_work(struct work_struct *work)
+{
+ struct ath10k_vif *arvif = container_of(work, struct ath10k_vif,
+ ap_csa_work);
+ struct ath10k *ar = arvif->ar;
+
+ mutex_lock(&ar->conf_mutex);
+ ath10k_mac_vif_ap_csa_count_down(arvif);
+ mutex_unlock(&ar->conf_mutex);
+}
+
+static void ath10k_mac_handle_beacon_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct sk_buff *skb = data;
+ struct ieee80211_mgmt *mgmt = (void *)skb->data;
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
+
+ if (vif->type != NL80211_IFTYPE_STATION)
+ return;
+
+ if (!ether_addr_equal(mgmt->bssid, vif->bss_conf.bssid))
+ return;
+
+ cancel_delayed_work(&arvif->connection_loss_work);
+}
+
+void ath10k_mac_handle_beacon(struct ath10k *ar, struct sk_buff *skb)
+{
+ ieee80211_iterate_active_interfaces_atomic(ar->hw,
+ ATH10K_ITER_NORMAL_FLAGS,
+ ath10k_mac_handle_beacon_iter,
+ skb);
+}
+
+static void ath10k_mac_handle_beacon_miss_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ u32 *vdev_id = data;
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
+ struct ath10k *ar = arvif->ar;
+ struct ieee80211_hw *hw = ar->hw;
+
+ if (arvif->vdev_id != *vdev_id)
+ return;
+
+ if (!arvif->is_up)
+ return;
+
+ ieee80211_beacon_loss(vif);
+
+ /* Firmware doesn't report beacon loss events repeatedly. If AP probe
+ * (done by mac80211) succeeds but beacons do not resume then it
+ * doesn't make sense to continue operation. Queue connection loss work
+ * which can be cancelled when beacon is received.
+ */
+ ieee80211_queue_delayed_work(hw, &arvif->connection_loss_work,
+ ATH10K_CONNECTION_LOSS_HZ);
+}
+
+void ath10k_mac_handle_beacon_miss(struct ath10k *ar, u32 vdev_id)
+{
+ ieee80211_iterate_active_interfaces_atomic(ar->hw,
+ ATH10K_ITER_NORMAL_FLAGS,
+ ath10k_mac_handle_beacon_miss_iter,
+ &vdev_id);
+}
+
+static void ath10k_mac_vif_sta_connection_loss_work(struct work_struct *work)
+{
+ struct ath10k_vif *arvif = container_of(work, struct ath10k_vif,
+ connection_loss_work.work);
+ struct ieee80211_vif *vif = arvif->vif;
+
+ if (!arvif->is_up)
+ return;
+
+ ieee80211_connection_loss(vif);
+}
+
+/**********************/
+/* Station management */
+/**********************/
+
+static u32 ath10k_peer_assoc_h_listen_intval(struct ath10k *ar,
+ struct ieee80211_vif *vif)
+{
+ /* Some firmware revisions have unstable STA powersave when listen
+ * interval is set too high (e.g. 5). The symptom is that the firmware
+ * doesn't generate NullFunc frames properly even if buffered frames
+ * have been indicated in the Beacon TIM. The firmware would seldom
+ * wake up to pull buffered frames; often pinging the device from the
+ * AP would simply fail.
+ *
+ * As a workaround set it to 1.
+ */
+ if (vif->type == NL80211_IFTYPE_STATION)
+ return 1;
+
+ return ar->hw->conf.listen_interval;
+}
+
+static void ath10k_peer_assoc_h_basic(struct ath10k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct wmi_peer_assoc_complete_arg *arg)
+{
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
+ u32 aid;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (vif->type == NL80211_IFTYPE_STATION)
+ aid = vif->cfg.aid;
+ else
+ aid = sta->aid;
+
+ ether_addr_copy(arg->addr, sta->addr);
+ arg->vdev_id = arvif->vdev_id;
+ arg->peer_aid = aid;
+ arg->peer_flags |= arvif->ar->wmi.peer_flags->auth;
+ arg->peer_listen_intval = ath10k_peer_assoc_h_listen_intval(ar, vif);
+ arg->peer_num_spatial_streams = 1;
+ arg->peer_caps = vif->bss_conf.assoc_capability;
+}
+
+static void ath10k_peer_assoc_h_crypto(struct ath10k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct wmi_peer_assoc_complete_arg *arg)
+{
+ struct ieee80211_bss_conf *info = &vif->bss_conf;
+ struct cfg80211_chan_def def;
+ struct cfg80211_bss *bss;
+ const u8 *rsnie = NULL;
+ const u8 *wpaie = NULL;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
+ return;
+
+ bss = cfg80211_get_bss(ar->hw->wiphy, def.chan, info->bssid,
+ vif->cfg.ssid_len ? vif->cfg.ssid : NULL,
+ vif->cfg.ssid_len,
+ IEEE80211_BSS_TYPE_ANY, IEEE80211_PRIVACY_ANY);
+ if (bss) {
+ const struct cfg80211_bss_ies *ies;
+
+ rcu_read_lock();
+ rsnie = ieee80211_bss_get_ie(bss, WLAN_EID_RSN);
+
+ ies = rcu_dereference(bss->ies);
+
+ wpaie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
+ WLAN_OUI_TYPE_MICROSOFT_WPA,
+ ies->data,
+ ies->len);
+ rcu_read_unlock();
+ cfg80211_put_bss(ar->hw->wiphy, bss);
+ }
+
+ /* FIXME: base on RSN IE/WPA IE is a correct idea? */
+ if (rsnie || wpaie) {
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "%s: rsn ie found\n", __func__);
+ arg->peer_flags |= ar->wmi.peer_flags->need_ptk_4_way;
+ }
+
+ if (wpaie) {
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "%s: wpa ie found\n", __func__);
+ arg->peer_flags |= ar->wmi.peer_flags->need_gtk_2_way;
+ }
+
+ if (sta->mfp &&
+ test_bit(ATH10K_FW_FEATURE_MFP_SUPPORT,
+ ar->running_fw->fw_file.fw_features)) {
+ arg->peer_flags |= ar->wmi.peer_flags->pmf;
+ }
+}
+
+static void ath10k_peer_assoc_h_rates(struct ath10k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct wmi_peer_assoc_complete_arg *arg)
+{
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
+ struct wmi_rate_set_arg *rateset = &arg->peer_legacy_rates;
+ struct cfg80211_chan_def def;
+ const struct ieee80211_supported_band *sband;
+ const struct ieee80211_rate *rates;
+ enum nl80211_band band;
+ u32 ratemask;
+ u8 rate;
+ int i;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
+ return;
+
+ band = def.chan->band;
+ sband = ar->hw->wiphy->bands[band];
+ ratemask = sta->deflink.supp_rates[band];
+ ratemask &= arvif->bitrate_mask.control[band].legacy;
+ rates = sband->bitrates;
+
+ rateset->num_rates = 0;
+
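+ /* Walk the 32-bit legacy rate mask one bit at a time; each set bit
+ * selects the corresponding entry of the band's bitrate table,
+ * converted by ath10k_mac_bitrate_to_rate() into the encoding the
+ * firmware expects.
+ */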
+ for (i = 0; i < 32; i++, ratemask >>= 1, rates++) {
+ if (!(ratemask & 1))
+ continue;
+
+ rate = ath10k_mac_bitrate_to_rate(rates->bitrate);
+ rateset->rates[rateset->num_rates] = rate;
+ rateset->num_rates++;
+ }
+}
+
+static bool
+ath10k_peer_assoc_h_ht_masked(const u8 ht_mcs_mask[IEEE80211_HT_MCS_MASK_LEN])
+{
+ int nss;
+
+ for (nss = 0; nss < IEEE80211_HT_MCS_MASK_LEN; nss++)
+ if (ht_mcs_mask[nss])
+ return false;
+
+ return true;
+}
+
+static bool
+ath10k_peer_assoc_h_vht_masked(const u16 vht_mcs_mask[NL80211_VHT_NSS_MAX])
+{
+ int nss;
+
+ for (nss = 0; nss < NL80211_VHT_NSS_MAX; nss++)
+ if (vht_mcs_mask[nss])
+ return false;
+
+ return true;
+}
+
+static void ath10k_peer_assoc_h_ht(struct ath10k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct wmi_peer_assoc_complete_arg *arg)
+{
+ const struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap;
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
+ struct cfg80211_chan_def def;
+ enum nl80211_band band;
+ const u8 *ht_mcs_mask;
+ const u16 *vht_mcs_mask;
+ int i, n;
+ u8 max_nss;
+ u32 stbc;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
+ return;
+
+ if (!ht_cap->ht_supported)
+ return;
+
+ band = def.chan->band;
+ ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
+ vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
+
+ if (ath10k_peer_assoc_h_ht_masked(ht_mcs_mask) &&
+ ath10k_peer_assoc_h_vht_masked(vht_mcs_mask))
+ return;
+
+ arg->peer_flags |= ar->wmi.peer_flags->ht;
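+ /* Maximum A-MPDU length is 2^(IEEE80211_HT_MAX_AMPDU_FACTOR +
+ * ampdu_factor) - 1 octets, i.e. 2^(13 + factor) - 1.
+ */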
+ arg->peer_max_mpdu = (1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
+ ht_cap->ampdu_factor)) - 1;
+
+ arg->peer_mpdu_density =
+ ath10k_parse_mpdudensity(ht_cap->ampdu_density);
+
+ arg->peer_ht_caps = ht_cap->cap;
+ arg->peer_rate_caps |= WMI_RC_HT_FLAG;
+
+ if (ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING)
+ arg->peer_flags |= ar->wmi.peer_flags->ldbc;
+
+ if (sta->deflink.bandwidth >= IEEE80211_STA_RX_BW_40) {
+ arg->peer_flags |= ar->wmi.peer_flags->bw40;
+ arg->peer_rate_caps |= WMI_RC_CW40_FLAG;
+ }
+
+ if (arvif->bitrate_mask.control[band].gi != NL80211_TXRATE_FORCE_LGI) {
+ if (ht_cap->cap & IEEE80211_HT_CAP_SGI_20)
+ arg->peer_rate_caps |= WMI_RC_SGI_FLAG;
+
+ if (ht_cap->cap & IEEE80211_HT_CAP_SGI_40)
+ arg->peer_rate_caps |= WMI_RC_SGI_FLAG;
+ }
+
+ if (ht_cap->cap & IEEE80211_HT_CAP_TX_STBC) {
+ arg->peer_rate_caps |= WMI_RC_TX_STBC_FLAG;
+ arg->peer_flags |= ar->wmi.peer_flags->stbc;
+ }
+
+ if (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC) {
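+ /* Extract the 2-bit RX STBC stream count from the HT caps field
+ * and reposition it at the WMI rate caps RX STBC offset.
+ */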
+ stbc = ht_cap->cap & IEEE80211_HT_CAP_RX_STBC;
+ stbc = stbc >> IEEE80211_HT_CAP_RX_STBC_SHIFT;
+ stbc = stbc << WMI_RC_RX_STBC_FLAG_S;
+ arg->peer_rate_caps |= stbc;
+ arg->peer_flags |= ar->wmi.peer_flags->stbc;
+ }
+
+ if (ht_cap->mcs.rx_mask[1] && ht_cap->mcs.rx_mask[2])
+ arg->peer_rate_caps |= WMI_RC_TS_FLAG;
+ else if (ht_cap->mcs.rx_mask[1])
+ arg->peer_rate_caps |= WMI_RC_DS_FLAG;
+
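+ /* HT MCS index i maps to bit (i % 8) of rx_mask byte (i / 8);
+ * MCS 0-7 use one spatial stream, MCS 8-15 two, and so on, hence
+ * max_nss = (i / 8) + 1 for the highest enabled index.
+ */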
+ for (i = 0, n = 0, max_nss = 0; i < IEEE80211_HT_MCS_MASK_LEN * 8; i++)
+ if ((ht_cap->mcs.rx_mask[i / 8] & BIT(i % 8)) &&
+ (ht_mcs_mask[i / 8] & BIT(i % 8))) {
+ max_nss = (i / 8) + 1;
+ arg->peer_ht_rates.rates[n++] = i;
+ }
+
+ /*
+ * This is a workaround for HT-enabled STAs which break the spec
+ * and have no HT capabilities RX mask (no HT RX MCS map).
+ *
+ * As per spec, in section 20.3.5 Modulation and coding scheme (MCS),
+ * MCS 0 through 7 are mandatory in 20MHz with 800 ns GI at all STAs.
+ *
+ * Firmware asserts if such a situation occurs.
+ */
+ if (n == 0) {
+ arg->peer_ht_rates.num_rates = 8;
+ for (i = 0; i < arg->peer_ht_rates.num_rates; i++)
+ arg->peer_ht_rates.rates[i] = i;
+ } else {
+ arg->peer_ht_rates.num_rates = n;
+ arg->peer_num_spatial_streams = min(sta->deflink.rx_nss,
+ max_nss);
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac ht peer %pM mcs cnt %d nss %d\n",
+ arg->addr,
+ arg->peer_ht_rates.num_rates,
+ arg->peer_num_spatial_streams);
+}
+
+static int ath10k_peer_assoc_qos_ap(struct ath10k *ar,
+ struct ath10k_vif *arvif,
+ struct ieee80211_sta *sta)
+{
+ u32 uapsd = 0;
+ u32 max_sp = 0;
+ int ret = 0;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (sta->wme && sta->uapsd_queues) {
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac uapsd_queues 0x%x max_sp %d\n",
+ sta->uapsd_queues, sta->max_sp);
+
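+ /* Map mac80211's per-AC U-APSD flags onto WMI's AC numbering
+ * (VO=AC3, VI=AC2, BK=AC1, BE=AC0), enabling both delivery and
+ * trigger for each flagged AC.
+ */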
+ if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
+ uapsd |= WMI_AP_PS_UAPSD_AC3_DELIVERY_EN |
+ WMI_AP_PS_UAPSD_AC3_TRIGGER_EN;
+ if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
+ uapsd |= WMI_AP_PS_UAPSD_AC2_DELIVERY_EN |
+ WMI_AP_PS_UAPSD_AC2_TRIGGER_EN;
+ if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
+ uapsd |= WMI_AP_PS_UAPSD_AC1_DELIVERY_EN |
+ WMI_AP_PS_UAPSD_AC1_TRIGGER_EN;
+ if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
+ uapsd |= WMI_AP_PS_UAPSD_AC0_DELIVERY_EN |
+ WMI_AP_PS_UAPSD_AC0_TRIGGER_EN;
+
+ if (sta->max_sp < MAX_WMI_AP_PS_PEER_PARAM_MAX_SP)
+ max_sp = sta->max_sp;
+
+ ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id,
+ sta->addr,
+ WMI_AP_PS_PEER_PARAM_UAPSD,
+ uapsd);
+ if (ret) {
+ ath10k_warn(ar, "failed to set ap ps peer param uapsd for vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id,
+ sta->addr,
+ WMI_AP_PS_PEER_PARAM_MAX_SP,
+ max_sp);
+ if (ret) {
+ ath10k_warn(ar, "failed to set ap ps peer param max sp for vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ /* TODO: derive this from the STA listen interval and the
+ * beacon interval. sta->listen_interval is not available yet
+ * (a mac80211 patch is required), so use 10 seconds for now.
+ */
+ ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id, sta->addr,
+ WMI_AP_PS_PEER_PARAM_AGEOUT_TIME,
+ 10);
+ if (ret) {
+ ath10k_warn(ar, "failed to set ap ps peer param ageout time for vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static u16
+ath10k_peer_assoc_h_vht_limit(u16 tx_mcs_set,
+ const u16 vht_mcs_limit[NL80211_VHT_NSS_MAX])
+{
+ int idx_limit;
+ int nss;
+ u16 mcs_map;
+ u16 mcs;
+
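+ /* The VHT TX MCS map packs one 2-bit support value per NSS in
+ * bits [2*nss+1:2*nss]. The highest set bit of the masked MCS map
+ * picks the limit for that NSS, e.g. a user mask of 0x00ff yields
+ * idx_limit 7 and hence IEEE80211_VHT_MCS_SUPPORT_0_7.
+ */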
+ for (nss = 0; nss < NL80211_VHT_NSS_MAX; nss++) {
+ mcs_map = ath10k_mac_get_max_vht_mcs_map(tx_mcs_set, nss) &
+ vht_mcs_limit[nss];
+
+ if (mcs_map)
+ idx_limit = fls(mcs_map) - 1;
+ else
+ idx_limit = -1;
+
+ switch (idx_limit) {
+ case 0:
+ case 1:
+ case 2:
+ case 3:
+ case 4:
+ case 5:
+ case 6:
+ default:
+ /* see ath10k_mac_can_set_bitrate_mask() */
+ WARN_ON(1);
+ fallthrough;
+ case -1:
+ mcs = IEEE80211_VHT_MCS_NOT_SUPPORTED;
+ break;
+ case 7:
+ mcs = IEEE80211_VHT_MCS_SUPPORT_0_7;
+ break;
+ case 8:
+ mcs = IEEE80211_VHT_MCS_SUPPORT_0_8;
+ break;
+ case 9:
+ mcs = IEEE80211_VHT_MCS_SUPPORT_0_9;
+ break;
+ }
+
+ tx_mcs_set &= ~(0x3 << (nss * 2));
+ tx_mcs_set |= mcs << (nss * 2);
+ }
+
+ return tx_mcs_set;
+}
+
+static u32 get_160mhz_nss_from_maxrate(int rate)
+{
+ u32 nss;
+
+ switch (rate) {
+ case 780:
+ nss = 1;
+ break;
+ case 1560:
+ nss = 2;
+ break;
+ case 2106:
+ nss = 3; /* 3 NSS does not support MCS 9 at 160 MHz per spec */
+ break;
+ case 3120:
+ nss = 4;
+ break;
+ default:
+ nss = 1;
+ }
+
+ return nss;
+}
+
+static void ath10k_peer_assoc_h_vht(struct ath10k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct wmi_peer_assoc_complete_arg *arg)
+{
+ const struct ieee80211_sta_vht_cap *vht_cap = &sta->deflink.vht_cap;
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
+ struct ath10k_hw_params *hw = &ar->hw_params;
+ struct cfg80211_chan_def def;
+ enum nl80211_band band;
+ const u16 *vht_mcs_mask;
+ u8 ampdu_factor;
+ u8 max_nss, vht_mcs;
+ int i;
+
+ if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
+ return;
+
+ if (!vht_cap->vht_supported)
+ return;
+
+ band = def.chan->band;
+ vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
+
+ if (ath10k_peer_assoc_h_vht_masked(vht_mcs_mask))
+ return;
+
+ arg->peer_flags |= ar->wmi.peer_flags->vht;
+
+ if (def.chan->band == NL80211_BAND_2GHZ)
+ arg->peer_flags |= ar->wmi.peer_flags->vht_2g;
+
+ arg->peer_vht_caps = vht_cap->cap;
+
+ ampdu_factor = (vht_cap->cap &
+ IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK) >>
+ IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
+
+ /* Workaround: Some Netgear/Linksys 11ac APs set Rx A-MPDU factor to
+ * zero in VHT IE. Using it would result in degraded throughput.
+ * arg->peer_max_mpdu at this point contains HT max_mpdu so keep
+ * it if VHT max_mpdu is smaller.
+ */
+ arg->peer_max_mpdu = max(arg->peer_max_mpdu,
+ (1U << (IEEE80211_HT_MAX_AMPDU_FACTOR +
+ ampdu_factor)) - 1);
+
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80)
+ arg->peer_flags |= ar->wmi.peer_flags->bw80;
+
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160)
+ arg->peer_flags |= ar->wmi.peer_flags->bw160;
+
+ /* Calculate peer NSS capability from VHT capabilities if STA
+ * supports VHT.
+ */
+ for (i = 0, max_nss = 0, vht_mcs = 0; i < NL80211_VHT_NSS_MAX; i++) {
+ vht_mcs = __le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map) >>
+ (2 * i) & 3;
+
+ if ((vht_mcs != IEEE80211_VHT_MCS_NOT_SUPPORTED) &&
+ vht_mcs_mask[i])
+ max_nss = i + 1;
+ }
+ arg->peer_num_spatial_streams = min(sta->deflink.rx_nss, max_nss);
+ arg->peer_vht_rates.rx_max_rate =
+ __le16_to_cpu(vht_cap->vht_mcs.rx_highest);
+ arg->peer_vht_rates.rx_mcs_set =
+ __le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map);
+ arg->peer_vht_rates.tx_max_rate =
+ __le16_to_cpu(vht_cap->vht_mcs.tx_highest);
+ arg->peer_vht_rates.tx_mcs_set = ath10k_peer_assoc_h_vht_limit(
+ __le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map), vht_mcs_mask);
+
+ /* Configure the bandwidth-to-NSS mapping in firmware for the
+ * chip's tx chain setting at 160 MHz bandwidth.
+ */
+ if (arg->peer_phymode == MODE_11AC_VHT160 ||
+ arg->peer_phymode == MODE_11AC_VHT80_80) {
+ u32 rx_nss;
+ u32 max_rate;
+
+ max_rate = arg->peer_vht_rates.rx_max_rate;
+ rx_nss = get_160mhz_nss_from_maxrate(max_rate);
+
+ if (rx_nss == 0)
+ rx_nss = arg->peer_num_spatial_streams;
+ else
+ rx_nss = min(arg->peer_num_spatial_streams, rx_nss);
+
+ max_rate = hw->vht160_mcs_tx_highest;
+ rx_nss = min(rx_nss, get_160mhz_nss_from_maxrate(max_rate));
+
+ arg->peer_bw_rxnss_override =
+ FIELD_PREP(WMI_PEER_NSS_MAP_ENABLE, 1) |
+ FIELD_PREP(WMI_PEER_NSS_160MHZ_MASK, (rx_nss - 1));
+
+ if (arg->peer_phymode == MODE_11AC_VHT80_80) {
+ arg->peer_bw_rxnss_override |=
+ FIELD_PREP(WMI_PEER_NSS_80_80MHZ_MASK, (rx_nss - 1));
+ }
+ }
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac vht peer %pM max_mpdu %d flags 0x%x peer_rx_nss_override 0x%x\n",
+ sta->addr, arg->peer_max_mpdu,
+ arg->peer_flags, arg->peer_bw_rxnss_override);
+}
+
+static void ath10k_peer_assoc_h_qos(struct ath10k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct wmi_peer_assoc_complete_arg *arg)
+{
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
+
+ switch (arvif->vdev_type) {
+ case WMI_VDEV_TYPE_AP:
+ if (sta->wme)
+ arg->peer_flags |= arvif->ar->wmi.peer_flags->qos;
+
+ if (sta->wme && sta->uapsd_queues) {
+ arg->peer_flags |= arvif->ar->wmi.peer_flags->apsd;
+ arg->peer_rate_caps |= WMI_RC_UAPSD_FLAG;
+ }
+ break;
+ case WMI_VDEV_TYPE_STA:
+ if (sta->wme)
+ arg->peer_flags |= arvif->ar->wmi.peer_flags->qos;
+ break;
+ case WMI_VDEV_TYPE_IBSS:
+ if (sta->wme)
+ arg->peer_flags |= arvif->ar->wmi.peer_flags->qos;
+ break;
+ default:
+ break;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac peer %pM qos %d\n",
+ sta->addr, !!(arg->peer_flags &
+ arvif->ar->wmi.peer_flags->qos));
+}
+
+static bool ath10k_mac_sta_has_ofdm_only(struct ieee80211_sta *sta)
+{
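+ /* Shift out the 2 GHz CCK rates; any remaining set bits mean the
+ * STA supports at least one OFDM rate.
+ */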
+ return sta->deflink.supp_rates[NL80211_BAND_2GHZ] >>
+ ATH10K_MAC_FIRST_OFDM_RATE_IDX;
+}
+
+static enum wmi_phy_mode ath10k_mac_get_phymode_vht(struct ath10k *ar,
+ struct ieee80211_sta *sta)
+{
+ struct ieee80211_sta_vht_cap *vht_cap = &sta->deflink.vht_cap;
+
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160) {
+ switch (vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) {
+ case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ:
+ return MODE_11AC_VHT160;
+ case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ:
+ return MODE_11AC_VHT80_80;
+ default:
+ /* unclear whether this is a valid case; default to VHT160 */
+ return MODE_11AC_VHT160;
+ }
+ }
+
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80)
+ return MODE_11AC_VHT80;
+
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
+ return MODE_11AC_VHT40;
+
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_20)
+ return MODE_11AC_VHT20;
+
+ return MODE_UNKNOWN;
+}
+
+static void ath10k_peer_assoc_h_phymode(struct ath10k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct wmi_peer_assoc_complete_arg *arg)
+{
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
+ struct cfg80211_chan_def def;
+ enum nl80211_band band;
+ const u8 *ht_mcs_mask;
+ const u16 *vht_mcs_mask;
+ enum wmi_phy_mode phymode = MODE_UNKNOWN;
+
+ if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
+ return;
+
+ band = def.chan->band;
+ ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
+ vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
+
+ switch (band) {
+ case NL80211_BAND_2GHZ:
+ if (sta->deflink.vht_cap.vht_supported &&
+ !ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) {
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
+ phymode = MODE_11AC_VHT40;
+ else
+ phymode = MODE_11AC_VHT20;
+ } else if (sta->deflink.ht_cap.ht_supported &&
+ !ath10k_peer_assoc_h_ht_masked(ht_mcs_mask)) {
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
+ phymode = MODE_11NG_HT40;
+ else
+ phymode = MODE_11NG_HT20;
+ } else if (ath10k_mac_sta_has_ofdm_only(sta)) {
+ phymode = MODE_11G;
+ } else {
+ phymode = MODE_11B;
+ }
+
+ break;
+ case NL80211_BAND_5GHZ:
+ /*
+ * Check VHT first.
+ */
+ if (sta->deflink.vht_cap.vht_supported &&
+ !ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) {
+ phymode = ath10k_mac_get_phymode_vht(ar, sta);
+ } else if (sta->deflink.ht_cap.ht_supported &&
+ !ath10k_peer_assoc_h_ht_masked(ht_mcs_mask)) {
+ if (sta->deflink.bandwidth >= IEEE80211_STA_RX_BW_40)
+ phymode = MODE_11NA_HT40;
+ else
+ phymode = MODE_11NA_HT20;
+ } else {
+ phymode = MODE_11A;
+ }
+
+ break;
+ default:
+ break;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac peer %pM phymode %s\n",
+ sta->addr, ath10k_wmi_phymode_str(phymode));
+
+ arg->peer_phymode = phymode;
+ WARN_ON(phymode == MODE_UNKNOWN);
+}
+
+static int ath10k_peer_assoc_prepare(struct ath10k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct wmi_peer_assoc_complete_arg *arg)
+{
+ lockdep_assert_held(&ar->conf_mutex);
+
+ memset(arg, 0, sizeof(*arg));
+
+ ath10k_peer_assoc_h_basic(ar, vif, sta, arg);
+ ath10k_peer_assoc_h_crypto(ar, vif, sta, arg);
+ ath10k_peer_assoc_h_rates(ar, vif, sta, arg);
+ ath10k_peer_assoc_h_ht(ar, vif, sta, arg);
+ ath10k_peer_assoc_h_phymode(ar, vif, sta, arg);
+ ath10k_peer_assoc_h_vht(ar, vif, sta, arg);
+ ath10k_peer_assoc_h_qos(ar, vif, sta, arg);
+
+ return 0;
+}
+
+static const u32 ath10k_smps_map[] = {
+ [WLAN_HT_CAP_SM_PS_STATIC] = WMI_PEER_SMPS_STATIC,
+ [WLAN_HT_CAP_SM_PS_DYNAMIC] = WMI_PEER_SMPS_DYNAMIC,
+ [WLAN_HT_CAP_SM_PS_INVALID] = WMI_PEER_SMPS_PS_NONE,
+ [WLAN_HT_CAP_SM_PS_DISABLED] = WMI_PEER_SMPS_PS_NONE,
+};
+
+static int ath10k_setup_peer_smps(struct ath10k *ar, struct ath10k_vif *arvif,
+ const u8 *addr,
+ const struct ieee80211_sta_ht_cap *ht_cap)
+{
+ int smps;
+
+ if (!ht_cap->ht_supported)
+ return 0;
+
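+ /* The SM power save field occupies bits 2-3 of the HT capability
+ * info; extract it and map it onto the firmware SMPS states via
+ * ath10k_smps_map.
+ */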
+ smps = ht_cap->cap & IEEE80211_HT_CAP_SM_PS;
+ smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT;
+
+ if (smps >= ARRAY_SIZE(ath10k_smps_map))
+ return -EINVAL;
+
+ return ath10k_wmi_peer_set_param(ar, arvif->vdev_id, addr,
+ ar->wmi.peer_param->smps_state,
+ ath10k_smps_map[smps]);
+}
+
+static int ath10k_mac_vif_recalc_txbf(struct ath10k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta_vht_cap vht_cap)
+{
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
+ int ret;
+ u32 param;
+ u32 value;
+
+ if (ath10k_wmi_get_txbf_conf_scheme(ar) != WMI_TXBF_CONF_AFTER_ASSOC)
+ return 0;
+
+ if (!(ar->vht_cap_info &
+ (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
+ IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE |
+ IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
+ IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)))
+ return 0;
+
+ param = ar->wmi.vdev_param->txbf;
+ value = 0;
+
+ if (WARN_ON(param == WMI_VDEV_PARAM_UNSUPPORTED))
+ return 0;
+
+ /* The cross-mapping below is intentional: if the remote STA
+ * advertises beamformer support, enable beamformee operation locally,
+ * and vice versa.
+ */
+
+ if (ar->vht_cap_info &
+ (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
+ IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)) {
+ if (vht_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)
+ value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE;
+
+ if (vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)
+ value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFEE;
+ }
+
+ if (ar->vht_cap_info &
+ (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
+ IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)) {
+ if (vht_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE)
+ value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER;
+
+ if (vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)
+ value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFER;
+ }
+
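+ /* Per the VHT spec, MU beamforming support implies the
+ * corresponding SU capability, so set the SU bits whenever the MU
+ * bits are set.
+ */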
+ if (value & WMI_VDEV_PARAM_TXBF_MU_TX_BFEE)
+ value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE;
+
+ if (value & WMI_VDEV_PARAM_TXBF_MU_TX_BFER)
+ value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER;
+
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param, value);
+ if (ret) {
+ ath10k_warn(ar, "failed to submit vdev param txbf 0x%x: %d\n",
+ value, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static bool ath10k_mac_is_connected(struct ath10k *ar)
+{
+ struct ath10k_vif *arvif;
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ if (arvif->is_up && arvif->vdev_type == WMI_VDEV_TYPE_STA)
+ return true;
+ }
+
+ return false;
+}
+
+static int ath10k_mac_txpower_setup(struct ath10k *ar, int txpower)
+{
+ int ret;
+ u32 param;
+ int tx_power_2g, tx_power_5g;
+ bool connected;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ /* ath10k internally uses unit of 0.5 dBm so multiply by 2 */
+ tx_power_2g = txpower * 2;
+ tx_power_5g = txpower * 2;
+
+ connected = ath10k_mac_is_connected(ar);
+
+ if (connected && ar->tx_power_2g_limit)
+ if (tx_power_2g > ar->tx_power_2g_limit)
+ tx_power_2g = ar->tx_power_2g_limit;
+
+ if (connected && ar->tx_power_5g_limit)
+ if (tx_power_5g > ar->tx_power_5g_limit)
+ tx_power_5g = ar->tx_power_5g_limit;
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac txpower 2g: %d, 5g: %d\n",
+ tx_power_2g, tx_power_5g);
+
+ param = ar->wmi.pdev_param->txpower_limit2g;
+ ret = ath10k_wmi_pdev_set_param(ar, param, tx_power_2g);
+ if (ret) {
+ ath10k_warn(ar, "failed to set 2g txpower %d: %d\n",
+ tx_power_2g, ret);
+ return ret;
+ }
+
+ param = ar->wmi.pdev_param->txpower_limit5g;
+ ret = ath10k_wmi_pdev_set_param(ar, param, tx_power_5g);
+ if (ret) {
+ ath10k_warn(ar, "failed to set 5g txpower %d: %d\n",
+ tx_power_5g, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath10k_mac_txpower_recalc(struct ath10k *ar)
+{
+ struct ath10k_vif *arvif;
+ int ret, txpower = -1;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ /* txpower not initialized yet? */
+ if (arvif->txpower == INT_MIN)
+ continue;
+
+ if (txpower == -1)
+ txpower = arvif->txpower;
+ else
+ txpower = min(txpower, arvif->txpower);
+ }
+
+ if (txpower == -1)
+ return 0;
+
+ ret = ath10k_mac_txpower_setup(ar, txpower);
+ if (ret) {
+ ath10k_warn(ar, "failed to setup tx power %d: %d\n",
+ txpower, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath10k_mac_set_sar_power(struct ath10k *ar)
+{
+ if (!ar->hw_params.dynamic_sar_support)
+ return -EOPNOTSUPP;
+
+ if (!ath10k_mac_is_connected(ar))
+ return 0;
+
+ /* if connected, then arvif->txpower must be valid */
+ return ath10k_mac_txpower_recalc(ar);
+}
+
+static int ath10k_mac_set_sar_specs(struct ieee80211_hw *hw,
+ const struct cfg80211_sar_specs *sar)
+{
+ const struct cfg80211_sar_sub_specs *sub_specs;
+ struct ath10k *ar = hw->priv;
+ u32 i;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (!ar->hw_params.dynamic_sar_support) {
+ ret = -EOPNOTSUPP;
+ goto err;
+ }
+
+ if (!sar || sar->type != NL80211_SAR_TYPE_POWER ||
+ sar->num_sub_specs == 0) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ sub_specs = sar->sub_specs;
+
+ /* 0 dBm is not a practical value for ath10k, so a limit of 0 is
+ * treated as "no SAR limitation".
+ */
+ ar->tx_power_2g_limit = 0;
+ ar->tx_power_5g_limit = 0;
+
+ /* Note: cfg80211 reports power in 0.25 dBm units while ath10k uses
+ * 0.5 dBm units, hence the division by 2 below.
+ */
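+ /* freq_range_index is assumed to follow the SAR frequency ranges
+ * advertised to cfg80211: index 0 for the 2 GHz range, index 1 for
+ * the 5 GHz range.
+ */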
+ for (i = 0; i < sar->num_sub_specs; i++) {
+ if (sub_specs->freq_range_index == 0)
+ ar->tx_power_2g_limit = sub_specs->power / 2;
+ else if (sub_specs->freq_range_index == 1)
+ ar->tx_power_5g_limit = sub_specs->power / 2;
+
+ sub_specs++;
+ }
+
+ ret = ath10k_mac_set_sar_power(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to set sar power: %d", ret);
+ goto err;
+ }
+
+err:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+/* can be called only in mac80211 callbacks due to `key_count` usage */
+static void ath10k_bss_assoc(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
+ struct ieee80211_sta_ht_cap ht_cap;
+ struct ieee80211_sta_vht_cap vht_cap;
+ struct wmi_peer_assoc_complete_arg peer_arg;
+ struct ieee80211_sta *ap_sta;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i assoc bssid %pM aid %d\n",
+ arvif->vdev_id, arvif->bssid, arvif->aid);
+
+ rcu_read_lock();
+
+ ap_sta = ieee80211_find_sta(vif, bss_conf->bssid);
+ if (!ap_sta) {
+ ath10k_warn(ar, "failed to find station entry for bss %pM vdev %i\n",
+ bss_conf->bssid, arvif->vdev_id);
+ rcu_read_unlock();
+ return;
+ }
+
+ /* ap_sta must be accessed only within rcu section which must be left
+ * before calling ath10k_setup_peer_smps() which might sleep.
+ */
+ ht_cap = ap_sta->deflink.ht_cap;
+ vht_cap = ap_sta->deflink.vht_cap;
+
+ ret = ath10k_peer_assoc_prepare(ar, vif, ap_sta, &peer_arg);
+ if (ret) {
+ ath10k_warn(ar, "failed to prepare peer assoc for %pM vdev %i: %d\n",
+ bss_conf->bssid, arvif->vdev_id, ret);
+ rcu_read_unlock();
+ return;
+ }
+
+ rcu_read_unlock();
+
+ ret = ath10k_wmi_peer_assoc(ar, &peer_arg);
+ if (ret) {
+ ath10k_warn(ar, "failed to run peer assoc for %pM vdev %i: %d\n",
+ bss_conf->bssid, arvif->vdev_id, ret);
+ return;
+ }
+
+ ret = ath10k_setup_peer_smps(ar, arvif, bss_conf->bssid, &ht_cap);
+ if (ret) {
+ ath10k_warn(ar, "failed to setup peer SMPS for vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return;
+ }
+
+ ret = ath10k_mac_vif_recalc_txbf(ar, vif, vht_cap);
+ if (ret) {
+ ath10k_warn(ar, "failed to recalc txbf for vdev %i on bss %pM: %d\n",
+ arvif->vdev_id, bss_conf->bssid, ret);
+ return;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac vdev %d up (associated) bssid %pM aid %d\n",
+ arvif->vdev_id, bss_conf->bssid, vif->cfg.aid);
+
+ WARN_ON(arvif->is_up);
+
+ arvif->aid = vif->cfg.aid;
+ ether_addr_copy(arvif->bssid, bss_conf->bssid);
+
+ ret = ath10k_wmi_pdev_set_param(ar,
+ ar->wmi.pdev_param->peer_stats_info_enable, 1);
+ if (ret)
+ ath10k_warn(ar, "failed to enable peer stats info: %d\n", ret);
+
+ ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, arvif->aid, arvif->bssid);
+ if (ret) {
+ ath10k_warn(ar, "failed to set vdev %d up: %d\n",
+ arvif->vdev_id, ret);
+ return;
+ }
+
+ arvif->is_up = true;
+
+ ath10k_mac_set_sar_power(ar);
+
+ /* Workaround: Some firmware revisions (tested with qca6174
+ * WLAN.RM.2.0-00073) have buggy powersave state machine and must be
+ * poked with peer param command.
+ */
+ ret = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, arvif->bssid,
+ ar->wmi.peer_param->dummy_var, 1);
+ if (ret) {
+ ath10k_warn(ar, "failed to poke peer %pM param for ps workaround on vdev %i: %d\n",
+ arvif->bssid, arvif->vdev_id, ret);
+ return;
+ }
+}
+
+static void ath10k_bss_disassoc(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
+ struct ieee80211_sta_vht_cap vht_cap = {};
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i disassoc bssid %pM\n",
+ arvif->vdev_id, arvif->bssid);
+
+ ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
+ if (ret)
+ ath10k_warn(ar, "failed to down vdev %i: %d\n",
+ arvif->vdev_id, ret);
+
+ arvif->def_wep_key_idx = -1;
+
+ ret = ath10k_mac_vif_recalc_txbf(ar, vif, vht_cap);
+ if (ret) {
+ ath10k_warn(ar, "failed to recalc txbf for vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return;
+ }
+
+ arvif->is_up = false;
+
+ ath10k_mac_txpower_recalc(ar);
+
+ cancel_delayed_work_sync(&arvif->connection_loss_work);
+}
+
+static int ath10k_new_peer_tid_config(struct ath10k *ar,
+ struct ieee80211_sta *sta,
+ struct ath10k_vif *arvif)
+{
+ struct wmi_per_peer_per_tid_cfg_arg arg = {};
+ struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+ bool config_apply;
+ int ret, i;
+
+ for (i = 0; i < ATH10K_TID_MAX; i++) {
+ config_apply = false;
+ if (arvif->retry_long[i] || arvif->ampdu[i] ||
+ arvif->rate_ctrl[i] || arvif->rtscts[i]) {
+ config_apply = true;
+ arg.tid = i;
+ arg.vdev_id = arvif->vdev_id;
+ arg.retry_count = arvif->retry_long[i];
+ arg.aggr_control = arvif->ampdu[i];
+ arg.rate_ctrl = arvif->rate_ctrl[i];
+ arg.rcode_flags = arvif->rate_code[i];
+
+ if (arvif->rtscts[i])
+ arg.ext_tid_cfg_bitmap =
+ WMI_EXT_TID_RTS_CTS_CONFIG;
+ else
+ arg.ext_tid_cfg_bitmap = 0;
+
+ arg.rtscts_ctrl = arvif->rtscts[i];
+ }
+
+ if (arvif->noack[i]) {
+ arg.ack_policy = arvif->noack[i];
+ arg.rate_ctrl = WMI_TID_CONFIG_RATE_CONTROL_DEFAULT_LOWEST_RATE;
+ arg.aggr_control = WMI_TID_CONFIG_AGGR_CONTROL_DISABLE;
+ config_apply = true;
+ }
+
+ /* Assign the default value (-1) to a newly connected station so
+ * that any station-specific TID configuration that has not been set
+ * can be identified.
+ */
+ arsta->retry_long[i] = -1;
+ arsta->noack[i] = -1;
+ arsta->ampdu[i] = -1;
+
+ if (!config_apply)
+ continue;
+
+ ether_addr_copy(arg.peer_macaddr.addr, sta->addr);
+
+ ret = ath10k_wmi_set_per_peer_per_tid_cfg(ar, &arg);
+ if (ret) {
+ ath10k_warn(ar, "failed to set per tid retry/aggr config for sta %pM: %d\n",
+ sta->addr, ret);
+ return ret;
+ }
+
+ memset(&arg, 0, sizeof(arg));
+ }
+
+ return 0;
+}
+
+static int ath10k_station_assoc(struct ath10k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ bool reassoc)
+{
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
+ struct wmi_peer_assoc_complete_arg peer_arg;
+ int ret = 0;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ret = ath10k_peer_assoc_prepare(ar, vif, sta, &peer_arg);
+ if (ret) {
+ ath10k_warn(ar, "failed to prepare WMI peer assoc for %pM vdev %i: %i\n",
+ sta->addr, arvif->vdev_id, ret);
+ return ret;
+ }
+
+ ret = ath10k_wmi_peer_assoc(ar, &peer_arg);
+ if (ret) {
+ ath10k_warn(ar, "failed to run peer assoc for STA %pM vdev %i: %d\n",
+ sta->addr, arvif->vdev_id, ret);
+ return ret;
+ }
+
+ /* Re-assoc is run only to update supported rates for given station. It
+ * doesn't make much sense to reconfigure the peer completely.
+ */
+ if (!reassoc) {
+ ret = ath10k_setup_peer_smps(ar, arvif, sta->addr,
+ &sta->deflink.ht_cap);
+ if (ret) {
+ ath10k_warn(ar, "failed to setup peer SMPS for vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ ret = ath10k_peer_assoc_qos_ap(ar, arvif, sta);
+ if (ret) {
+ ath10k_warn(ar, "failed to set qos params for STA %pM for vdev %i: %d\n",
+ sta->addr, arvif->vdev_id, ret);
+ return ret;
+ }
+
+ if (!sta->wme) {
+ arvif->num_legacy_stations++;
+ ret = ath10k_recalc_rtscts_prot(arvif);
+ if (ret) {
+ ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ /* Plumb cached keys only for static WEP */
+ if ((arvif->def_wep_key_idx != -1) && (!sta->tdls)) {
+ ret = ath10k_install_peer_wep_keys(arvif, sta->addr);
+ if (ret) {
+ ath10k_warn(ar, "failed to install peer wep keys for vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+ }
+
+ if (!test_bit(WMI_SERVICE_PEER_TID_CONFIGS_SUPPORT, ar->wmi.svc_map))
+ return ret;
+
+ return ath10k_new_peer_tid_config(ar, sta, arvif);
+}
+
+static int ath10k_station_disassoc(struct ath10k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
+ int ret = 0;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (!sta->wme) {
+ arvif->num_legacy_stations--;
+ ret = ath10k_recalc_rtscts_prot(arvif);
+ if (ret) {
+ ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ ret = ath10k_clear_peer_keys(arvif, sta->addr);
+ if (ret) {
+ ath10k_warn(ar, "failed to clear all peer wep keys for vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+/**************/
+/* Regulatory */
+/**************/
+
+static int ath10k_update_channel_list(struct ath10k *ar)
+{
+ struct ieee80211_hw *hw = ar->hw;
+ struct ieee80211_supported_band **bands;
+ enum nl80211_band band;
+ struct ieee80211_channel *channel;
+ struct wmi_scan_chan_list_arg arg = {};
+ struct wmi_channel_arg *ch;
+ bool passive;
+ int len;
+ int ret;
+ int i;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ bands = hw->wiphy->bands;
+ for (band = 0; band < NUM_NL80211_BANDS; band++) {
+ if (!bands[band])
+ continue;
+
+ for (i = 0; i < bands[band]->n_channels; i++) {
+ if (bands[band]->channels[i].flags &
+ IEEE80211_CHAN_DISABLED)
+ continue;
+
+ arg.n_channels++;
+ }
+ }
+
+ len = sizeof(struct wmi_channel_arg) * arg.n_channels;
+ arg.channels = kzalloc(len, GFP_KERNEL);
+ if (!arg.channels)
+ return -ENOMEM;
+
+ ch = arg.channels;
+ for (band = 0; band < NUM_NL80211_BANDS; band++) {
+ if (!bands[band])
+ continue;
+
+ for (i = 0; i < bands[band]->n_channels; i++) {
+ channel = &bands[band]->channels[i];
+
+ if (channel->flags & IEEE80211_CHAN_DISABLED)
+ continue;
+
+ ch->allow_ht = true;
+
+ /* FIXME: when should we really allow VHT? */
+ ch->allow_vht = true;
+
+ ch->allow_ibss =
+ !(channel->flags & IEEE80211_CHAN_NO_IR);
+
+ ch->ht40plus =
+ !(channel->flags & IEEE80211_CHAN_NO_HT40PLUS);
+
+ ch->chan_radar =
+ !!(channel->flags & IEEE80211_CHAN_RADAR);
+
+ passive = channel->flags & IEEE80211_CHAN_NO_IR;
+ ch->passive = passive;
+
+ /* the firmware is ignoring the "radar" flag of the
+ * channel and is scanning actively using Probe Requests
+ * on "Radar detection"/DFS channels which are not
+ * marked as "available"
+ */
+ ch->passive |= ch->chan_radar;
+
+ ch->freq = channel->center_freq;
+ ch->band_center_freq1 = channel->center_freq;
+ ch->min_power = 0;
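+ /* firmware expects channel power limits in 0.5 dBm units */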
+ ch->max_power = channel->max_power * 2;
+ ch->max_reg_power = channel->max_reg_power * 2;
+ ch->max_antenna_gain = channel->max_antenna_gain;
+ ch->reg_class_id = 0; /* FIXME */
+
+ /* FIXME: why use only legacy modes, why not any
+ * HT/VHT modes? Would that even make any
+ * difference?
+ */
+ if (channel->band == NL80211_BAND_2GHZ)
+ ch->mode = MODE_11G;
+ else
+ ch->mode = MODE_11A;
+
+ if (WARN_ON_ONCE(ch->mode == MODE_UNKNOWN))
+ continue;
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "mac channel [%zd/%d] freq %d maxpower %d regpower %d antenna %d mode %d\n",
+ ch - arg.channels, arg.n_channels,
+ ch->freq, ch->max_power, ch->max_reg_power,
+ ch->max_antenna_gain, ch->mode);
+
+ ch++;
+ }
+ }
+
+ ret = ath10k_wmi_scan_chan_list(ar, &arg);
+ kfree(arg.channels);
+
+ return ret;
+}
+
+static enum wmi_dfs_region
+ath10k_mac_get_dfs_region(enum nl80211_dfs_regions dfs_region)
+{
+ switch (dfs_region) {
+ case NL80211_DFS_UNSET:
+ return WMI_UNINIT_DFS_DOMAIN;
+ case NL80211_DFS_FCC:
+ return WMI_FCC_DFS_DOMAIN;
+ case NL80211_DFS_ETSI:
+ return WMI_ETSI_DFS_DOMAIN;
+ case NL80211_DFS_JP:
+ return WMI_MKK4_DFS_DOMAIN;
+ }
+ return WMI_UNINIT_DFS_DOMAIN;
+}
+
+static void ath10k_regd_update(struct ath10k *ar)
+{
+ struct reg_dmn_pair_mapping *regpair;
+ int ret;
+ enum wmi_dfs_region wmi_dfs_reg;
+ enum nl80211_dfs_regions nl_dfs_reg;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ret = ath10k_update_channel_list(ar);
+ if (ret)
+ ath10k_warn(ar, "failed to update channel list: %d\n", ret);
+
+ regpair = ar->ath_common.regulatory.regpair;
+
+ if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) {
+ nl_dfs_reg = ar->dfs_detector->region;
+ wmi_dfs_reg = ath10k_mac_get_dfs_region(nl_dfs_reg);
+ } else {
+ wmi_dfs_reg = WMI_UNINIT_DFS_DOMAIN;
+ }
+
+ /* Target allows setting up per-band regdomain but ath_common provides
+ * a combined one only
+ */
+ ret = ath10k_wmi_pdev_set_regdomain(ar,
+ regpair->reg_domain,
+ regpair->reg_domain, /* 2ghz */
+ regpair->reg_domain, /* 5ghz */
+ regpair->reg_2ghz_ctl,
+ regpair->reg_5ghz_ctl,
+ wmi_dfs_reg);
+ if (ret)
+ ath10k_warn(ar, "failed to set pdev regdomain: %d\n", ret);
+}
+
+static void ath10k_mac_update_channel_list(struct ath10k *ar,
+ struct ieee80211_supported_band *band)
+{
+ int i;
+
+ if (ar->low_5ghz_chan && ar->high_5ghz_chan) {
+ for (i = 0; i < band->n_channels; i++) {
+ if (band->channels[i].center_freq < ar->low_5ghz_chan ||
+ band->channels[i].center_freq > ar->high_5ghz_chan)
+ band->channels[i].flags |=
+ IEEE80211_CHAN_DISABLED;
+ }
+ }
+}
+
+static void ath10k_reg_notifier(struct wiphy *wiphy,
+ struct regulatory_request *request)
+{
+ struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
+ struct ath10k *ar = hw->priv;
+ bool result;
+
+ ath_reg_notifier_apply(wiphy, request, &ar->ath_common.regulatory);
+
+ if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) {
+ ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs region 0x%x\n",
+ request->dfs_region);
+ result = ar->dfs_detector->set_dfs_domain(ar->dfs_detector,
+ request->dfs_region);
+ if (!result)
+ ath10k_warn(ar, "DFS region 0x%X not supported, will trigger radar for every pulse\n",
+ request->dfs_region);
+ }
+
+ mutex_lock(&ar->conf_mutex);
+ if (ar->state == ATH10K_STATE_ON)
+ ath10k_regd_update(ar);
+ mutex_unlock(&ar->conf_mutex);
+
+ if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY)
+ ath10k_mac_update_channel_list(ar,
+ ar->hw->wiphy->bands[NL80211_BAND_5GHZ]);
+}
+
+static void ath10k_stop_radar_confirmation(struct ath10k *ar)
+{
+ spin_lock_bh(&ar->data_lock);
+ ar->radar_conf_state = ATH10K_RADAR_CONFIRMATION_STOPPED;
+ spin_unlock_bh(&ar->data_lock);
+
+ cancel_work_sync(&ar->radar_confirmation_work);
+}
+
+/***************/
+/* TX handlers */
+/***************/
+
+enum ath10k_mac_tx_path {
+ ATH10K_MAC_TX_HTT,
+ ATH10K_MAC_TX_HTT_MGMT,
+ ATH10K_MAC_TX_WMI_MGMT,
+ ATH10K_MAC_TX_UNKNOWN,
+};
+
+void ath10k_mac_tx_lock(struct ath10k *ar, int reason)
+{
+ lockdep_assert_held(&ar->htt.tx_lock);
+
+ WARN_ON(reason >= ATH10K_TX_PAUSE_MAX);
+ ar->tx_paused |= BIT(reason);
+ ieee80211_stop_queues(ar->hw);
+}
+
+static void ath10k_mac_tx_unlock_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct ath10k *ar = data;
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
+
+ if (arvif->tx_paused)
+ return;
+
+ ieee80211_wake_queue(ar->hw, arvif->vdev_id);
+}
+
+void ath10k_mac_tx_unlock(struct ath10k *ar, int reason)
+{
+ lockdep_assert_held(&ar->htt.tx_lock);
+
+ WARN_ON(reason >= ATH10K_TX_PAUSE_MAX);
+ ar->tx_paused &= ~BIT(reason);
+
+ if (ar->tx_paused)
+ return;
+
+ ieee80211_iterate_active_interfaces_atomic(ar->hw,
+ ATH10K_ITER_RESUME_FLAGS,
+ ath10k_mac_tx_unlock_iter,
+ ar);
+
+ ieee80211_wake_queue(ar->hw, ar->hw->offchannel_tx_hw_queue);
+}
+
+void ath10k_mac_vif_tx_lock(struct ath10k_vif *arvif, int reason)
+{
+ struct ath10k *ar = arvif->ar;
+
+ lockdep_assert_held(&ar->htt.tx_lock);
+
+ WARN_ON(reason >= BITS_PER_LONG);
+ arvif->tx_paused |= BIT(reason);
+ ieee80211_stop_queue(ar->hw, arvif->vdev_id);
+}
+
+void ath10k_mac_vif_tx_unlock(struct ath10k_vif *arvif, int reason)
+{
+ struct ath10k *ar = arvif->ar;
+
+ lockdep_assert_held(&ar->htt.tx_lock);
+
+ WARN_ON(reason >= BITS_PER_LONG);
+ arvif->tx_paused &= ~BIT(reason);
+
+ if (ar->tx_paused)
+ return;
+
+ if (arvif->tx_paused)
+ return;
+
+ ieee80211_wake_queue(ar->hw, arvif->vdev_id);
+}
+
+static void ath10k_mac_vif_handle_tx_pause(struct ath10k_vif *arvif,
+ enum wmi_tlv_tx_pause_id pause_id,
+ enum wmi_tlv_tx_pause_action action)
+{
+ struct ath10k *ar = arvif->ar;
+
+ lockdep_assert_held(&ar->htt.tx_lock);
+
+ switch (action) {
+ case WMI_TLV_TX_PAUSE_ACTION_STOP:
+ ath10k_mac_vif_tx_lock(arvif, pause_id);
+ break;
+ case WMI_TLV_TX_PAUSE_ACTION_WAKE:
+ ath10k_mac_vif_tx_unlock(arvif, pause_id);
+ break;
+ default:
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "received unknown tx pause action %d on vdev %i, ignoring\n",
+ action, arvif->vdev_id);
+ break;
+ }
+}
+
+struct ath10k_mac_tx_pause {
+ u32 vdev_id;
+ enum wmi_tlv_tx_pause_id pause_id;
+ enum wmi_tlv_tx_pause_action action;
+};
+
+static void ath10k_mac_handle_tx_pause_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
+ struct ath10k_mac_tx_pause *arg = data;
+
+ if (arvif->vdev_id != arg->vdev_id)
+ return;
+
+ ath10k_mac_vif_handle_tx_pause(arvif, arg->pause_id, arg->action);
+}
+
+void ath10k_mac_handle_tx_pause_vdev(struct ath10k *ar, u32 vdev_id,
+ enum wmi_tlv_tx_pause_id pause_id,
+ enum wmi_tlv_tx_pause_action action)
+{
+ struct ath10k_mac_tx_pause arg = {
+ .vdev_id = vdev_id,
+ .pause_id = pause_id,
+ .action = action,
+ };
+
+ spin_lock_bh(&ar->htt.tx_lock);
+ ieee80211_iterate_active_interfaces_atomic(ar->hw,
+ ATH10K_ITER_RESUME_FLAGS,
+ ath10k_mac_handle_tx_pause_iter,
+ &arg);
+ spin_unlock_bh(&ar->htt.tx_lock);
+}
+
+static enum ath10k_hw_txrx_mode
+ath10k_mac_tx_h_get_txmode(struct ath10k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct sk_buff *skb)
+{
+ const struct ieee80211_hdr *hdr = (void *)skb->data;
+ const struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
+ __le16 fc = hdr->frame_control;
+
+ if (IEEE80211_SKB_CB(skb)->flags & IEEE80211_TX_CTL_HW_80211_ENCAP)
+ return ATH10K_HW_TXRX_ETHERNET;
+
+ if (!vif || vif->type == NL80211_IFTYPE_MONITOR)
+ return ATH10K_HW_TXRX_RAW;
+
+ if (ieee80211_is_mgmt(fc))
+ return ATH10K_HW_TXRX_MGMT;
+
+ /* Workaround:
+ *
+ * NullFunc frames are mostly used to ping whether a client or AP is
+ * still reachable and responsive. This implies tx status reports must
+ * be accurate - otherwise either mac80211 or userspace (e.g. hostapd)
+ * may wrongly conclude that the other end disappeared and tear down
+ * the BSS connection, or may never disconnect from the BSS/client at
+ * all (which is what happens in practice).
+ *
+ * Firmware with HTT older than 3.0 delivers incorrect tx status for
+ * NullFunc frames to driver. However there's a HTT Mgmt Tx command
+ * which seems to deliver correct tx reports for NullFunc frames. The
+ * downside of using it is it ignores client powersave state so it can
+ * end up disconnecting sleeping clients in AP mode. It should fix STA
+ * mode though, because APs don't sleep.
+ */
+ if (ar->htt.target_version_major < 3 &&
+ (ieee80211_is_nullfunc(fc) || ieee80211_is_qos_nullfunc(fc)) &&
+ !test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX,
+ ar->running_fw->fw_file.fw_features))
+ return ATH10K_HW_TXRX_MGMT;
+
+ /* Workaround:
+ *
+ * Some wmi-tlv firmwares for qca6174 have broken Tx key selection for
+ * NativeWifi txmode - it selects AP key instead of peer key. It seems
+ * to work with Ethernet txmode so use it.
+ *
+ * FIXME: Check if raw mode works with TDLS.
+ */
+ if (ieee80211_is_data_present(fc) && sta && sta->tdls)
+ return ATH10K_HW_TXRX_ETHERNET;
+
+ if (test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags) ||
+ skb_cb->flags & ATH10K_SKB_F_RAW_TX)
+ return ATH10K_HW_TXRX_RAW;
+
+ return ATH10K_HW_TXRX_NATIVE_WIFI;
+}
+
+static bool ath10k_tx_h_use_hwcrypto(struct ieee80211_vif *vif,
+ struct sk_buff *skb)
+{
+ const struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ const struct ieee80211_hdr *hdr = (void *)skb->data;
+ const u32 mask = IEEE80211_TX_INTFL_DONT_ENCRYPT |
+ IEEE80211_TX_CTL_INJECTED;
+
+ if (!ieee80211_has_protected(hdr->frame_control))
+ return false;
+
+ if ((info->flags & mask) == mask)
+ return false;
+
+ if (vif)
+ return !((struct ath10k_vif *)vif->drv_priv)->nohwcrypt;
+
+ return true;
+}
+
+/* HTT Tx uses Native Wifi tx mode which expects 802.11 frames without QoS
+ * Control in the header.
+ */
+static void ath10k_tx_h_nwifi(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+ struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
+ u8 *qos_ctl;
+
+ if (!ieee80211_is_data_qos(hdr->frame_control))
+ return;
+
+ qos_ctl = ieee80211_get_qos_ctl(hdr);
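+ /* Strip the 2-byte QoS Control field: slide the preceding header
+ * bytes forward by IEEE80211_QOS_CTL_LEN, then trim the now
+ * duplicated bytes off the front.
+ */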
+ memmove(skb->data + IEEE80211_QOS_CTL_LEN,
+ skb->data, (void *)qos_ctl - (void *)skb->data);
+ skb_pull(skb, IEEE80211_QOS_CTL_LEN);
+
+ /* Some firmware revisions don't handle sending QoS NullFunc well.
+ * These frames are mainly used for CQM purposes so it doesn't really
+ * matter whether a QoS NullFunc or a plain NullFunc is sent.
+ */
+ hdr = (void *)skb->data;
+ if (ieee80211_is_qos_nullfunc(hdr->frame_control))
+ cb->flags &= ~ATH10K_SKB_F_QOS;
+
+ hdr->frame_control &= ~__cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
+}
+
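+/* Convert an 802.11 data frame carrying an RFC 1042 SNAP header into an
+ * Ethernet II frame: save DA/SA and the SNAP ethertype, drop the 802.11
+ * and SNAP headers, then push a standard Ethernet header.
+ */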
+static void ath10k_tx_h_8023(struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr;
+ struct rfc1042_hdr *rfc1042;
+ struct ethhdr *eth;
+ size_t hdrlen;
+ u8 da[ETH_ALEN];
+ u8 sa[ETH_ALEN];
+ __be16 type;
+
+ hdr = (void *)skb->data;
+ hdrlen = ieee80211_hdrlen(hdr->frame_control);
+ rfc1042 = (void *)skb->data + hdrlen;
+
+ ether_addr_copy(da, ieee80211_get_DA(hdr));
+ ether_addr_copy(sa, ieee80211_get_SA(hdr));
+ type = rfc1042->snap_type;
+
+ skb_pull(skb, hdrlen + sizeof(*rfc1042));
+ skb_push(skb, sizeof(*eth));
+
+ eth = (void *)skb->data;
+ ether_addr_copy(eth->h_dest, da);
+ ether_addr_copy(eth->h_source, sa);
+ eth->h_proto = type;
+}
+
+static void ath10k_tx_h_add_p2p_noa_ie(struct ath10k *ar,
+ struct ieee80211_vif *vif,
+ struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
+
+ /* This only applies to P2P GO operation */
+ if (vif->type != NL80211_IFTYPE_AP || !vif->p2p)
+ return;
+
+ if (unlikely(ieee80211_is_probe_resp(hdr->frame_control))) {
+ spin_lock_bh(&ar->data_lock);
+ if (arvif->u.ap.noa_data)
+ if (!pskb_expand_head(skb, 0, arvif->u.ap.noa_len,
+ GFP_ATOMIC))
+ skb_put_data(skb, arvif->u.ap.noa_data,
+ arvif->u.ap.noa_len);
+ spin_unlock_bh(&ar->data_lock);
+ }
+}
+
+static void ath10k_mac_tx_h_fill_cb(struct ath10k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_txq *txq,
+ struct ieee80211_sta *sta,
+ struct sk_buff *skb, u16 airtime)
+{
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+ struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
+ const struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ bool is_data = ieee80211_is_data(hdr->frame_control) ||
+ ieee80211_is_data_qos(hdr->frame_control);
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
+ struct ath10k_sta *arsta;
+ u8 tid, *qos_ctl;
+ bool noack = false;
+
+ cb->flags = 0;
+
+ if (info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) {
+ cb->flags |= ATH10K_SKB_F_QOS; /* Assume data frames are QoS */
+ goto finish_cb_fill;
+ }
+
+ if (!ath10k_tx_h_use_hwcrypto(vif, skb))
+ cb->flags |= ATH10K_SKB_F_NO_HWCRYPT;
+
+ if (ieee80211_is_mgmt(hdr->frame_control))
+ cb->flags |= ATH10K_SKB_F_MGMT;
+
+ if (ieee80211_is_data_qos(hdr->frame_control)) {
+ cb->flags |= ATH10K_SKB_F_QOS;
+ qos_ctl = ieee80211_get_qos_ctl(hdr);
+ tid = (*qos_ctl) & IEEE80211_QOS_CTL_TID_MASK;
+
+ if (arvif->noack[tid] == WMI_PEER_TID_CONFIG_NOACK)
+ noack = true;
+
+ if (sta) {
+ arsta = (struct ath10k_sta *)sta->drv_priv;
+
+ if (arsta->noack[tid] == WMI_PEER_TID_CONFIG_NOACK)
+ noack = true;
+
+ if (arsta->noack[tid] == WMI_PEER_TID_CONFIG_ACK)
+ noack = false;
+ }
+
+ if (noack)
+ cb->flags |= ATH10K_SKB_F_NOACK_TID;
+ }
+
+ /* Data frames encrypted in software will be posted to firmware
+ * with tx encap mode set to RAW. Ex: Multicast traffic generated
+ * for a specific VLAN group will always be encrypted in software.
+ */
+ if (is_data && ieee80211_has_protected(hdr->frame_control) &&
+ !info->control.hw_key) {
+ cb->flags |= ATH10K_SKB_F_NO_HWCRYPT;
+ cb->flags |= ATH10K_SKB_F_RAW_TX;
+ }
+
+finish_cb_fill:
+ cb->vif = vif;
+ cb->txq = txq;
+ cb->airtime_est = airtime;
+ if (sta) {
+ arsta = (struct ath10k_sta *)sta->drv_priv;
+ spin_lock_bh(&ar->data_lock);
+ cb->ucast_cipher = arsta->ucast_cipher;
+ spin_unlock_bh(&ar->data_lock);
+ }
+}
+
+bool ath10k_mac_tx_frm_has_freq(struct ath10k *ar)
+{
+ /* FIXME: Not really sure since when the behaviour changed. At some
+ * point new firmware stopped requiring creation of peer entries for
+ * offchannel tx (and actually creating them causes issues with wmi-htc
+ * tx credit replenishment and reliability). Assuming it's at least 3.4
+ * because that's when the `freq` was introduced to TX_FRM HTT command.
+ */
+ return (ar->htt.target_version_major >= 3 &&
+ ar->htt.target_version_minor >= 4 &&
+ ar->running_fw->fw_file.htt_op_version == ATH10K_FW_HTT_OP_VERSION_TLV);
+}
+
+static int ath10k_mac_tx_wmi_mgmt(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct sk_buff_head *q = &ar->wmi_mgmt_tx_queue;
+
+ if (skb_queue_len_lockless(q) >= ATH10K_MAX_NUM_MGMT_PENDING) {
+ ath10k_warn(ar, "wmi mgmt tx queue is full\n");
+ return -ENOSPC;
+ }
+
+ skb_queue_tail(q, skb);
+ ieee80211_queue_work(ar->hw, &ar->wmi_mgmt_tx_work);
+
+ return 0;
+}
+
+static enum ath10k_mac_tx_path
+ath10k_mac_tx_h_get_txpath(struct ath10k *ar,
+ struct sk_buff *skb,
+ enum ath10k_hw_txrx_mode txmode)
+{
+ switch (txmode) {
+ case ATH10K_HW_TXRX_RAW:
+ case ATH10K_HW_TXRX_NATIVE_WIFI:
+ case ATH10K_HW_TXRX_ETHERNET:
+ return ATH10K_MAC_TX_HTT;
+ case ATH10K_HW_TXRX_MGMT:
+ if (test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX,
+ ar->running_fw->fw_file.fw_features) ||
+ test_bit(WMI_SERVICE_MGMT_TX_WMI,
+ ar->wmi.svc_map))
+ return ATH10K_MAC_TX_WMI_MGMT;
+ else if (ar->htt.target_version_major >= 3)
+ return ATH10K_MAC_TX_HTT;
+ else
+ return ATH10K_MAC_TX_HTT_MGMT;
+ }
+
+ return ATH10K_MAC_TX_UNKNOWN;
+}
+
+static int ath10k_mac_tx_submit(struct ath10k *ar,
+ enum ath10k_hw_txrx_mode txmode,
+ enum ath10k_mac_tx_path txpath,
+ struct sk_buff *skb)
+{
+ struct ath10k_htt *htt = &ar->htt;
+ int ret = -EINVAL;
+
+ switch (txpath) {
+ case ATH10K_MAC_TX_HTT:
+ ret = ath10k_htt_tx(htt, txmode, skb);
+ break;
+ case ATH10K_MAC_TX_HTT_MGMT:
+ ret = ath10k_htt_mgmt_tx(htt, skb);
+ break;
+ case ATH10K_MAC_TX_WMI_MGMT:
+ ret = ath10k_mac_tx_wmi_mgmt(ar, skb);
+ break;
+ case ATH10K_MAC_TX_UNKNOWN:
+ WARN_ON_ONCE(1);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (ret) {
+ ath10k_warn(ar, "failed to transmit packet, dropping: %d\n",
+ ret);
+ ieee80211_free_txskb(ar->hw, skb);
+ }
+
+ return ret;
+}
+
+/* This function consumes the sk_buff regardless of return value as far as
+ * caller is concerned so no freeing is necessary afterwards.
+ */
+static int ath10k_mac_tx(struct ath10k *ar,
+ struct ieee80211_vif *vif,
+ enum ath10k_hw_txrx_mode txmode,
+ enum ath10k_mac_tx_path txpath,
+ struct sk_buff *skb, bool noque_offchan)
+{
+ struct ieee80211_hw *hw = ar->hw;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ const struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
+ int ret;
+
+ /* CCK rates should be disabled due to P2P */
+ if (info->flags & IEEE80211_TX_CTL_NO_CCK_RATE)
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "IEEE80211_TX_CTL_NO_CCK_RATE\n");
+
+ switch (txmode) {
+ case ATH10K_HW_TXRX_MGMT:
+ case ATH10K_HW_TXRX_NATIVE_WIFI:
+ ath10k_tx_h_nwifi(hw, skb);
+ ath10k_tx_h_add_p2p_noa_ie(ar, vif, skb);
+ ath10k_tx_h_seq_no(vif, skb);
+ break;
+ case ATH10K_HW_TXRX_ETHERNET:
+ /* Convert 802.11->802.3 header only if the frame was earlier
+ * encapsulated to 802.11 by mac80211. Otherwise pass it as is.
+ */
+ if (!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP))
+ ath10k_tx_h_8023(skb);
+ break;
+ case ATH10K_HW_TXRX_RAW:
+ if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags) &&
+ !(skb_cb->flags & ATH10K_SKB_F_RAW_TX)) {
+ WARN_ON_ONCE(1);
+ ieee80211_free_txskb(hw, skb);
+ return -EOPNOTSUPP;
+ }
+ }
+
+ if (!noque_offchan && info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
+ if (!ath10k_mac_tx_frm_has_freq(ar)) {
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac queued offchannel skb %p len %d\n",
+ skb, skb->len);
+
+ skb_queue_tail(&ar->offchan_tx_queue, skb);
+ ieee80211_queue_work(hw, &ar->offchan_tx_work);
+ return 0;
+ }
+ }
+
+ ret = ath10k_mac_tx_submit(ar, txmode, txpath, skb);
+ if (ret) {
+ ath10k_warn(ar, "failed to submit frame: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+void ath10k_offchan_tx_purge(struct ath10k *ar)
+{
+ struct sk_buff *skb;
+
+ for (;;) {
+ skb = skb_dequeue(&ar->offchan_tx_queue);
+ if (!skb)
+ break;
+
+ ieee80211_free_txskb(ar->hw, skb);
+ }
+}
+
+void ath10k_offchan_tx_work(struct work_struct *work)
+{
+ struct ath10k *ar = container_of(work, struct ath10k, offchan_tx_work);
+ struct ath10k_peer *peer;
+ struct ath10k_vif *arvif;
+ enum ath10k_hw_txrx_mode txmode;
+ enum ath10k_mac_tx_path txpath;
+ struct ieee80211_hdr *hdr;
+ struct ieee80211_vif *vif;
+ struct ieee80211_sta *sta;
+ struct sk_buff *skb;
+ const u8 *peer_addr;
+ int vdev_id;
+ int ret;
+ unsigned long time_left;
+ bool tmp_peer_created = false;
+
+ /* FW requirement: We must create a peer before FW will send out
+ * an offchannel frame. Otherwise the frame will be stuck and
+ * never transmitted. We delete the peer upon tx completion.
+ * It is unlikely that a peer for offchannel tx will already be
+ * present. However it may happen in some rare cases, so account for
+ * that. Otherwise we might remove a legitimate peer and break things.
+ */
+
+ for (;;) {
+ skb = skb_dequeue(&ar->offchan_tx_queue);
+ if (!skb)
+ break;
+
+ mutex_lock(&ar->conf_mutex);
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac offchannel skb %p len %d\n",
+ skb, skb->len);
+
+ hdr = (struct ieee80211_hdr *)skb->data;
+ peer_addr = ieee80211_get_DA(hdr);
+
+ spin_lock_bh(&ar->data_lock);
+ vdev_id = ar->scan.vdev_id;
+ peer = ath10k_peer_find(ar, vdev_id, peer_addr);
+ spin_unlock_bh(&ar->data_lock);
+
+ if (peer) {
+ ath10k_warn(ar, "peer %pM on vdev %d already present\n",
+ peer_addr, vdev_id);
+ } else {
+ ret = ath10k_peer_create(ar, NULL, NULL, vdev_id,
+ peer_addr,
+ WMI_PEER_TYPE_DEFAULT);
+ if (ret)
+ ath10k_warn(ar, "failed to create peer %pM on vdev %d: %d\n",
+ peer_addr, vdev_id, ret);
+ tmp_peer_created = (ret == 0);
+ }
+
+ spin_lock_bh(&ar->data_lock);
+ reinit_completion(&ar->offchan_tx_completed);
+ ar->offchan_tx_skb = skb;
+ spin_unlock_bh(&ar->data_lock);
+
+ /* It's safe to access vif and sta - conf_mutex guarantees that
+ * sta_state() and remove_interface() are serialized with respect
+ * to this offchannel worker.
+ */
+ arvif = ath10k_get_arvif(ar, vdev_id);
+ if (arvif) {
+ vif = arvif->vif;
+ sta = ieee80211_find_sta(vif, peer_addr);
+ } else {
+ vif = NULL;
+ sta = NULL;
+ }
+
+ txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb);
+ txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode);
+
+ ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb, true);
+ if (ret) {
+ ath10k_warn(ar, "failed to transmit offchannel frame: %d\n",
+ ret);
+ /* not serious */
+ }
+
+ time_left =
+ wait_for_completion_timeout(&ar->offchan_tx_completed, 3 * HZ);
+ if (time_left == 0)
+ ath10k_warn(ar, "timed out waiting for offchannel skb %p, len: %d\n",
+ skb, skb->len);
+
+ if (!peer && tmp_peer_created) {
+ ret = ath10k_peer_delete(ar, vdev_id, peer_addr);
+ if (ret)
+ ath10k_warn(ar, "failed to delete peer %pM on vdev %d: %d\n",
+ peer_addr, vdev_id, ret);
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+ }
+}
+
+void ath10k_mgmt_over_wmi_tx_purge(struct ath10k *ar)
+{
+ struct sk_buff *skb;
+
+ for (;;) {
+ skb = skb_dequeue(&ar->wmi_mgmt_tx_queue);
+ if (!skb)
+ break;
+
+ ieee80211_free_txskb(ar->hw, skb);
+ }
+}
+
+void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work)
+{
+ struct ath10k *ar = container_of(work, struct ath10k, wmi_mgmt_tx_work);
+ struct sk_buff *skb;
+ dma_addr_t paddr;
+ int ret;
+
+ for (;;) {
+ skb = skb_dequeue(&ar->wmi_mgmt_tx_queue);
+ if (!skb)
+ break;
+
+ if (test_bit(ATH10K_FW_FEATURE_MGMT_TX_BY_REF,
+ ar->running_fw->fw_file.fw_features)) {
+ paddr = dma_map_single(ar->dev, skb->data,
+ skb->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(ar->dev, paddr)) {
+ ieee80211_free_txskb(ar->hw, skb);
+ continue;
+ }
+ ret = ath10k_wmi_mgmt_tx_send(ar, skb, paddr);
+ if (ret) {
+ ath10k_warn(ar, "failed to transmit management frame by ref via WMI: %d\n",
+ ret);
+ /* remove this msdu from idr tracking */
+ ath10k_wmi_cleanup_mgmt_tx_send(ar, skb);
+
+ dma_unmap_single(ar->dev, paddr, skb->len,
+ DMA_TO_DEVICE);
+ ieee80211_free_txskb(ar->hw, skb);
+ }
+ } else {
+ ret = ath10k_wmi_mgmt_tx(ar, skb);
+ if (ret) {
+ ath10k_warn(ar, "failed to transmit management frame via WMI: %d\n",
+ ret);
+ ieee80211_free_txskb(ar->hw, skb);
+ }
+ }
+ }
+}
+
+static void ath10k_mac_txq_init(struct ieee80211_txq *txq)
+{
+ struct ath10k_txq *artxq;
+
+ if (!txq)
+ return;
+
+ artxq = (void *)txq->drv_priv;
+ INIT_LIST_HEAD(&artxq->list);
+}
+
+static void ath10k_mac_txq_unref(struct ath10k *ar, struct ieee80211_txq *txq)
+{
+ struct ath10k_skb_cb *cb;
+ struct sk_buff *msdu;
+ int msdu_id;
+
+ if (!txq)
+ return;
+
+ spin_lock_bh(&ar->htt.tx_lock);
+ idr_for_each_entry(&ar->htt.pending_tx, msdu, msdu_id) {
+ cb = ATH10K_SKB_CB(msdu);
+ if (cb->txq == txq)
+ cb->txq = NULL;
+ }
+ spin_unlock_bh(&ar->htt.tx_lock);
+}
+
+struct ieee80211_txq *ath10k_mac_txq_lookup(struct ath10k *ar,
+ u16 peer_id,
+ u8 tid)
+{
+ struct ath10k_peer *peer;
+
+ lockdep_assert_held(&ar->data_lock);
+
+ peer = ar->peer_map[peer_id];
+ if (!peer)
+ return NULL;
+
+ if (peer->removed)
+ return NULL;
+
+ if (peer->sta)
+ return peer->sta->txq[tid];
+ else if (peer->vif)
+ return peer->vif->txq;
+ else
+ return NULL;
+}
+
+static bool ath10k_mac_tx_can_push(struct ieee80211_hw *hw,
+ struct ieee80211_txq *txq)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_txq *artxq = (void *)txq->drv_priv;
+
+ /* No need to get locks */
+ if (ar->htt.tx_q_state.mode == HTT_TX_MODE_SWITCH_PUSH)
+ return true;
+
+ if (ar->htt.num_pending_tx < ar->htt.tx_q_state.num_push_allowed)
+ return true;
+
+ if (artxq->num_fw_queued < artxq->num_push_allowed)
+ return true;
+
+ return false;
+}
+
+/* Return estimated airtime in microsecond, which is calculated using last
+ * reported TX rate. This is just a rough estimation because host driver has no
+ * knowledge of the actual transmit rate, retries or aggregation. If actual
+ * airtime can be reported by firmware, then delta between estimated and actual
+ * airtime can be adjusted from deficit.
+ */
+#define IEEE80211_ATF_OVERHEAD 100 /* IFS + some slot time */
+#define IEEE80211_ATF_OVERHEAD_IFS 16 /* IFS only */
+static u16 ath10k_mac_update_airtime(struct ath10k *ar,
+ struct ieee80211_txq *txq,
+ struct sk_buff *skb)
+{
+ struct ath10k_sta *arsta;
+ u32 pktlen;
+ u16 airtime = 0;
+
+ if (!txq || !txq->sta)
+ return airtime;
+
+ if (test_bit(WMI_SERVICE_REPORT_AIRTIME, ar->wmi.svc_map))
+ return airtime;
+
+ spin_lock_bh(&ar->data_lock);
+ arsta = (struct ath10k_sta *)txq->sta->drv_priv;
+
+ pktlen = skb->len + 38; /* assume a 30-byte MAC header plus 8-byte SNAP in the common case */
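+ /* Worked example: a 1500-byte MSDU (pktlen = 1538) with a last
+ * reported bitrate of 3000 (300 Mbps in 100 kbps units) estimates
+ * (1538 * 8 * 10) / 3000 ~= 41 us before the IFS overhead.
+ */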
+ if (arsta->last_tx_bitrate) {
+ /* airtime in us, last_tx_bitrate in 100kbps */
+ airtime = (pktlen * 8 * (1000 / 100))
+ / arsta->last_tx_bitrate;
+ /* overhead for media access time and IFS */
+ airtime += IEEE80211_ATF_OVERHEAD_IFS;
+ } else {
+ /* This is mostly for throttling excessive BC/MC frames, and the
+ * airtime/rate doesn't need to be exact. Airtime of BC/MC frames
+ * in 2G gets some discount, which helps prevent very low rate
+ * frames from being blocked for too long.
+ */
+ airtime = (pktlen * 8 * (1000 / 100)) / 60; /* 6M */
+ airtime += IEEE80211_ATF_OVERHEAD;
+ }
+ spin_unlock_bh(&ar->data_lock);
+
+ return airtime;
+}
+
+int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw,
+ struct ieee80211_txq *txq)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_htt *htt = &ar->htt;
+ struct ath10k_txq *artxq = (void *)txq->drv_priv;
+ struct ieee80211_vif *vif = txq->vif;
+ struct ieee80211_sta *sta = txq->sta;
+ enum ath10k_hw_txrx_mode txmode;
+ enum ath10k_mac_tx_path txpath;
+ struct sk_buff *skb;
+ struct ieee80211_hdr *hdr;
+ size_t skb_len;
+ bool is_mgmt, is_presp;
+ int ret;
+ u16 airtime;
+
+ spin_lock_bh(&ar->htt.tx_lock);
+ ret = ath10k_htt_tx_inc_pending(htt);
+ spin_unlock_bh(&ar->htt.tx_lock);
+
+ if (ret)
+ return ret;
+
+ skb = ieee80211_tx_dequeue_ni(hw, txq);
+ if (!skb) {
+ spin_lock_bh(&ar->htt.tx_lock);
+ ath10k_htt_tx_dec_pending(htt);
+ spin_unlock_bh(&ar->htt.tx_lock);
+
+ return -ENOENT;
+ }
+
+ airtime = ath10k_mac_update_airtime(ar, txq, skb);
+ ath10k_mac_tx_h_fill_cb(ar, vif, txq, sta, skb, airtime);
+
+ skb_len = skb->len;
+ txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb);
+ txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode);
+ is_mgmt = (txpath == ATH10K_MAC_TX_HTT_MGMT);
+
+ if (is_mgmt) {
+ hdr = (struct ieee80211_hdr *)skb->data;
+ is_presp = ieee80211_is_probe_resp(hdr->frame_control);
+
+ spin_lock_bh(&ar->htt.tx_lock);
+ ret = ath10k_htt_tx_mgmt_inc_pending(htt, is_mgmt, is_presp);
+
+ if (ret) {
+ ath10k_htt_tx_dec_pending(htt);
+ spin_unlock_bh(&ar->htt.tx_lock);
+ return ret;
+ }
+ spin_unlock_bh(&ar->htt.tx_lock);
+ }
+
+ ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb, false);
+ if (unlikely(ret)) {
+ ath10k_warn(ar, "failed to push frame: %d\n", ret);
+
+ spin_lock_bh(&ar->htt.tx_lock);
+ ath10k_htt_tx_dec_pending(htt);
+ if (is_mgmt)
+ ath10k_htt_tx_mgmt_dec_pending(htt);
+ spin_unlock_bh(&ar->htt.tx_lock);
+
+ return ret;
+ }
+
+ spin_lock_bh(&ar->htt.tx_lock);
+ artxq->num_fw_queued++;
+ spin_unlock_bh(&ar->htt.tx_lock);
+
+ return skb_len;
+}
+
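+/* Drain txqs for one AC via the mac80211 airtime scheduler: the
+ * ieee80211_txq_schedule_start()/ieee80211_txq_schedule_end() pair brackets
+ * the round, ieee80211_next_txq() hands out the next queue to serve and
+ * ieee80211_return_txq() gives it back once we can no longer push to it.
+ */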
+static int ath10k_mac_schedule_txq(struct ieee80211_hw *hw, u32 ac)
+{
+ struct ieee80211_txq *txq;
+ int ret = 0;
+
+ ieee80211_txq_schedule_start(hw, ac);
+ while ((txq = ieee80211_next_txq(hw, ac))) {
+ while (ath10k_mac_tx_can_push(hw, txq)) {
+ ret = ath10k_mac_tx_push_txq(hw, txq);
+ if (ret < 0)
+ break;
+ }
+ ieee80211_return_txq(hw, txq, false);
+ ath10k_htt_tx_txq_update(hw, txq);
+ if (ret == -EBUSY)
+ break;
+ }
+ ieee80211_txq_schedule_end(hw, ac);
+
+ return ret;
+}
+
+void ath10k_mac_tx_push_pending(struct ath10k *ar)
+{
+ struct ieee80211_hw *hw = ar->hw;
+ u32 ac;
+
+ if (ar->htt.tx_q_state.mode != HTT_TX_MODE_SWITCH_PUSH)
+ return;
+
+ if (ar->htt.num_pending_tx >= (ar->htt.max_num_pending_tx / 2))
+ return;
+
+ rcu_read_lock();
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
+ if (ath10k_mac_schedule_txq(hw, ac) == -EBUSY)
+ break;
+ }
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL(ath10k_mac_tx_push_pending);
+
+/************/
+/* Scanning */
+/************/
+
+void __ath10k_scan_finish(struct ath10k *ar)
+{
+ lockdep_assert_held(&ar->data_lock);
+
+ switch (ar->scan.state) {
+ case ATH10K_SCAN_IDLE:
+ break;
+ case ATH10K_SCAN_RUNNING:
+ case ATH10K_SCAN_ABORTING:
+ if (ar->scan.is_roc && ar->scan.roc_notify)
+ ieee80211_remain_on_channel_expired(ar->hw);
+ fallthrough;
+ case ATH10K_SCAN_STARTING:
+ if (!ar->scan.is_roc) {
+ struct cfg80211_scan_info info = {
+ .aborted = ((ar->scan.state ==
+ ATH10K_SCAN_ABORTING) ||
+ (ar->scan.state ==
+ ATH10K_SCAN_STARTING)),
+ };
+
+ ieee80211_scan_completed(ar->hw, &info);
+ }
+
+ ar->scan.state = ATH10K_SCAN_IDLE;
+ ar->scan_channel = NULL;
+ ar->scan.roc_freq = 0;
+ ath10k_offchan_tx_purge(ar);
+ cancel_delayed_work(&ar->scan.timeout);
+ complete(&ar->scan.completed);
+ break;
+ }
+}
+
+void ath10k_scan_finish(struct ath10k *ar)
+{
+ spin_lock_bh(&ar->data_lock);
+ __ath10k_scan_finish(ar);
+ spin_unlock_bh(&ar->data_lock);
+}
+
+static int ath10k_scan_stop(struct ath10k *ar)
+{
+ struct wmi_stop_scan_arg arg = {
+ .req_id = 1, /* FIXME */
+ .req_type = WMI_SCAN_STOP_ONE,
+ .u.scan_id = ATH10K_SCAN_ID,
+ };
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ret = ath10k_wmi_stop_scan(ar, &arg);
+ if (ret) {
+ ath10k_warn(ar, "failed to stop wmi scan: %d\n", ret);
+ goto out;
+ }
+
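+ /* wait_for_completion_timeout() returns 0 on timeout and the
+ * remaining jiffies (> 0) otherwise, hence the mapping to
+ * -ETIMEDOUT or 0 below.
+ */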
+ ret = wait_for_completion_timeout(&ar->scan.completed, 3 * HZ);
+ if (ret == 0) {
+ ath10k_warn(ar, "failed to receive scan abortion completion: timed out\n");
+ ret = -ETIMEDOUT;
+ } else if (ret > 0) {
+ ret = 0;
+ }
+
+out:
+ /* Scan state should be updated upon scan completion, but in case
+ * firmware fails to deliver the event (for whatever reason) it is
+ * desired to clean up the scan state anyway. Firmware may have just
+ * dropped the scan completion event delivery due to the transport pipe
+ * being overflowed with data and/or it can recover on its own before
+ * the next scan request is submitted.
+ */
+ spin_lock_bh(&ar->data_lock);
+ if (ar->scan.state != ATH10K_SCAN_IDLE)
+ __ath10k_scan_finish(ar);
+ spin_unlock_bh(&ar->data_lock);
+
+ return ret;
+}
+
+static void ath10k_scan_abort(struct ath10k *ar)
+{
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ spin_lock_bh(&ar->data_lock);
+
+ switch (ar->scan.state) {
+ case ATH10K_SCAN_IDLE:
+ /* This can happen if the timeout worker kicked in and requested
+ * an abort while the scan completion was being processed.
+ */
+ break;
+ case ATH10K_SCAN_STARTING:
+ case ATH10K_SCAN_ABORTING:
+ ath10k_warn(ar, "refusing scan abortion due to invalid scan state: %s (%d)\n",
+ ath10k_scan_state_str(ar->scan.state),
+ ar->scan.state);
+ break;
+ case ATH10K_SCAN_RUNNING:
+ ar->scan.state = ATH10K_SCAN_ABORTING;
+ spin_unlock_bh(&ar->data_lock);
+
+ ret = ath10k_scan_stop(ar);
+ if (ret)
+ ath10k_warn(ar, "failed to abort scan: %d\n", ret);
+
+ spin_lock_bh(&ar->data_lock);
+ break;
+ }
+
+ spin_unlock_bh(&ar->data_lock);
+}
+
+void ath10k_scan_timeout_work(struct work_struct *work)
+{
+ struct ath10k *ar = container_of(work, struct ath10k,
+ scan.timeout.work);
+
+ mutex_lock(&ar->conf_mutex);
+ ath10k_scan_abort(ar);
+ mutex_unlock(&ar->conf_mutex);
+}
+
+static int ath10k_start_scan(struct ath10k *ar,
+ const struct wmi_start_scan_arg *arg)
+{
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ret = ath10k_wmi_start_scan(ar, arg);
+ if (ret)
+ return ret;
+
+ ret = wait_for_completion_timeout(&ar->scan.started, 1 * HZ);
+ if (ret == 0) {
+ ret = ath10k_scan_stop(ar);
+ if (ret)
+ ath10k_warn(ar, "failed to stop scan: %d\n", ret);
+
+ return -ETIMEDOUT;
+ }
+
+ /* If we failed to start the scan, return error code at
+ * this point. This is probably due to some issue in the
+ * firmware, but no need to wedge the driver due to that...
+ */
+ spin_lock_bh(&ar->data_lock);
+ if (ar->scan.state == ATH10K_SCAN_IDLE) {
+ spin_unlock_bh(&ar->data_lock);
+ return -EINVAL;
+ }
+ spin_unlock_bh(&ar->data_lock);
+
+ return 0;
+}
+
+/**********************/
+/* mac80211 callbacks */
+/**********************/
+
+static void ath10k_mac_op_tx(struct ieee80211_hw *hw,
+ struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_htt *htt = &ar->htt;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_vif *vif = info->control.vif;
+ struct ieee80211_sta *sta = control->sta;
+ struct ieee80211_txq *txq = NULL;
+ enum ath10k_hw_txrx_mode txmode;
+ enum ath10k_mac_tx_path txpath;
+ bool is_htt;
+ bool is_mgmt;
+ int ret;
+ u16 airtime;
+
+ airtime = ath10k_mac_update_airtime(ar, txq, skb);
+ ath10k_mac_tx_h_fill_cb(ar, vif, txq, sta, skb, airtime);
+
+ txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb);
+ txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode);
+ is_htt = (txpath == ATH10K_MAC_TX_HTT ||
+ txpath == ATH10K_MAC_TX_HTT_MGMT);
+ is_mgmt = (txpath == ATH10K_MAC_TX_HTT_MGMT);
+
+ if (is_htt) {
+ bool is_presp = false;
+
+ spin_lock_bh(&ar->htt.tx_lock);
+ if (!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP)) {
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+
+ is_presp = ieee80211_is_probe_resp(hdr->frame_control);
+ }
+
+ ret = ath10k_htt_tx_inc_pending(htt);
+ if (ret) {
+ ath10k_warn(ar, "failed to increase tx pending count: %d, dropping\n",
+ ret);
+ spin_unlock_bh(&ar->htt.tx_lock);
+ ieee80211_free_txskb(ar->hw, skb);
+ return;
+ }
+
+ ret = ath10k_htt_tx_mgmt_inc_pending(htt, is_mgmt, is_presp);
+ if (ret) {
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "failed to increase tx mgmt pending count: %d, dropping\n",
+ ret);
+ ath10k_htt_tx_dec_pending(htt);
+ spin_unlock_bh(&ar->htt.tx_lock);
+ ieee80211_free_txskb(ar->hw, skb);
+ return;
+ }
+ spin_unlock_bh(&ar->htt.tx_lock);
+ }
+
+ ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb, false);
+ if (ret) {
+ ath10k_warn(ar, "failed to transmit frame: %d\n", ret);
+ if (is_htt) {
+ spin_lock_bh(&ar->htt.tx_lock);
+ ath10k_htt_tx_dec_pending(htt);
+ if (is_mgmt)
+ ath10k_htt_tx_mgmt_dec_pending(htt);
+ spin_unlock_bh(&ar->htt.tx_lock);
+ }
+ return;
+ }
+}
+
+static void ath10k_mac_op_wake_tx_queue(struct ieee80211_hw *hw,
+ struct ieee80211_txq *txq)
+{
+ struct ath10k *ar = hw->priv;
+ int ret;
+ u8 ac = txq->ac;
+
+ ath10k_htt_tx_txq_update(hw, txq);
+ if (ar->htt.tx_q_state.mode != HTT_TX_MODE_SWITCH_PUSH)
+ return;
+
+ spin_lock_bh(&ar->queue_lock[ac]);
+
+ ieee80211_txq_schedule_start(hw, ac);
+ txq = ieee80211_next_txq(hw, ac);
+ if (!txq)
+ goto out;
+
+ while (ath10k_mac_tx_can_push(hw, txq)) {
+ ret = ath10k_mac_tx_push_txq(hw, txq);
+ if (ret < 0)
+ break;
+ }
+ ieee80211_return_txq(hw, txq, false);
+ ath10k_htt_tx_txq_update(hw, txq);
+out:
+ ieee80211_txq_schedule_end(hw, ac);
+ spin_unlock_bh(&ar->queue_lock[ac]);
+}
+
+/* Must not be called with conf_mutex held as workers can use that also. */
+void ath10k_drain_tx(struct ath10k *ar)
+{
+ lockdep_assert_not_held(&ar->conf_mutex);
+
+ /* make sure rcu-protected mac80211 tx path itself is drained */
+ synchronize_net();
+
+ ath10k_offchan_tx_purge(ar);
+ ath10k_mgmt_over_wmi_tx_purge(ar);
+
+ cancel_work_sync(&ar->offchan_tx_work);
+ cancel_work_sync(&ar->wmi_mgmt_tx_work);
+}
+
+void ath10k_halt(struct ath10k *ar)
+{
+ struct ath10k_vif *arvif;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
+ ar->filter_flags = 0;
+ ar->monitor = false;
+ ar->monitor_arvif = NULL;
+
+ if (ar->monitor_started)
+ ath10k_monitor_stop(ar);
+
+ ar->monitor_started = false;
+ ar->tx_paused = 0;
+
+ ath10k_scan_finish(ar);
+ ath10k_peer_cleanup_all(ar);
+ ath10k_stop_radar_confirmation(ar);
+ ath10k_core_stop(ar);
+ ath10k_hif_power_down(ar);
+
+ spin_lock_bh(&ar->data_lock);
+ list_for_each_entry(arvif, &ar->arvifs, list)
+ ath10k_mac_vif_beacon_cleanup(arvif);
+ spin_unlock_bh(&ar->data_lock);
+}
+
+static int ath10k_get_antenna(struct ieee80211_hw *hw, int radio_idx,
+ u32 *tx_ant, u32 *rx_ant)
+{
+ struct ath10k *ar = hw->priv;
+
+ mutex_lock(&ar->conf_mutex);
+
+ *tx_ant = ar->cfg_tx_chainmask;
+ *rx_ant = ar->cfg_rx_chainmask;
+
+ mutex_unlock(&ar->conf_mutex);
+
+ return 0;
+}
+
+static bool ath10k_check_chain_mask(struct ath10k *ar, u32 cm, const char *dbg)
+{
+ /* It is not clear that allowing gaps in the chainmask
+ * is helpful. Probably it will not do what the user
+ * is hoping for, so warn in that case.
+ */
+ if (cm == 15 || cm == 7 || cm == 3 || cm == 1 || cm == 0)
+ return true;
+
+ ath10k_warn(ar, "mac %s antenna chainmask is invalid: 0x%x. Suggested values: 15, 7, 3, 1 or 0.\n",
+ dbg, cm);
+ return false;
+}
+
+static int ath10k_mac_get_vht_cap_bf_sts(struct ath10k *ar)
+{
+ int nsts = ar->vht_cap_info;
+
+ nsts &= IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK;
+ nsts >>= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT;
+
+ /* If firmware does not deliver to the host the number of space-time
+ * streams supported, assume it supports up to 4 BF STS and return
+ * the value for VHT CAP (nsts - 1).
+ */
+ if (nsts == 0)
+ return 3;
+
+ return nsts;
+}
+
+static int ath10k_mac_get_vht_cap_bf_sound_dim(struct ath10k *ar)
+{
+ int sound_dim = ar->vht_cap_info;
+
+ sound_dim &= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK;
+ sound_dim >>= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT;
+
+ /* If the sounding dimension is not advertised by the firmware,
+ * let's use a default value of 1
+ */
+ if (sound_dim == 0)
+ return 1;
+
+ return sound_dim;
+}
+
+static struct ieee80211_sta_vht_cap ath10k_create_vht_cap(struct ath10k *ar)
+{
+ struct ieee80211_sta_vht_cap vht_cap = {};
+ struct ath10k_hw_params *hw = &ar->hw_params;
+ u16 mcs_map;
+ u32 val;
+ int i;
+
+ vht_cap.vht_supported = 1;
+ vht_cap.cap = ar->vht_cap_info;
+
+ if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
+ IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)) {
+ val = ath10k_mac_get_vht_cap_bf_sts(ar);
+ val <<= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT;
+ val &= IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK;
+
+ vht_cap.cap |= val;
+ }
+
+ if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
+ IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)) {
+ val = ath10k_mac_get_vht_cap_bf_sound_dim(ar);
+ val <<= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT;
+ val &= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK;
+
+ vht_cap.cap |= val;
+ }
+
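+ /* Build the VHT MCS map: two bits per NSS (NSS1 in bits 1:0 up to
+ * NSS8 in bits 15:14). For example, with two usable chains this
+ * yields 0xfffa: MCS 0-9 for NSS 1-2, "not supported" for the rest.
+ */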
+ mcs_map = 0;
+ for (i = 0; i < 8; i++) {
+ if ((i < ar->num_rf_chains) && (ar->cfg_tx_chainmask & BIT(i)))
+ mcs_map |= IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2);
+ else
+ mcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2);
+ }
+
+ if (ar->cfg_tx_chainmask <= 1)
+ vht_cap.cap &= ~IEEE80211_VHT_CAP_TXSTBC;
+
+ vht_cap.vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
+ vht_cap.vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
+
+ /* If we support 160 MHz or 80+80, then the NIC may be able to do
+ * a restricted NSS for 160 or 80+80 vs what it can do for 80 MHz.
+ * Give user-space a clue if that is the case.
+ */
+ if ((vht_cap.cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) &&
+ (hw->vht160_mcs_rx_highest != 0 ||
+ hw->vht160_mcs_tx_highest != 0)) {
+ vht_cap.vht_mcs.rx_highest = cpu_to_le16(hw->vht160_mcs_rx_highest);
+ vht_cap.vht_mcs.tx_highest = cpu_to_le16(hw->vht160_mcs_tx_highest);
+ }
+
+ return vht_cap;
+}
+
+static struct ieee80211_sta_ht_cap ath10k_get_ht_cap(struct ath10k *ar)
+{
+ int i;
+ struct ieee80211_sta_ht_cap ht_cap = {};
+
+ if (!(ar->ht_cap_info & WMI_HT_CAP_ENABLED))
+ return ht_cap;
+
+ ht_cap.ht_supported = 1;
+ ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
+ ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;
+ ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+ ht_cap.cap |= IEEE80211_HT_CAP_DSSSCCK40;
+ ht_cap.cap |=
+ WLAN_HT_CAP_SM_PS_DISABLED << IEEE80211_HT_CAP_SM_PS_SHIFT;
+
+ if (ar->ht_cap_info & WMI_HT_CAP_HT20_SGI)
+ ht_cap.cap |= IEEE80211_HT_CAP_SGI_20;
+
+ if (ar->ht_cap_info & WMI_HT_CAP_HT40_SGI)
+ ht_cap.cap |= IEEE80211_HT_CAP_SGI_40;
+
+ if (ar->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS) {
+ u32 smps;
+
+ smps = WLAN_HT_CAP_SM_PS_DYNAMIC;
+ smps <<= IEEE80211_HT_CAP_SM_PS_SHIFT;
+
+ ht_cap.cap |= smps;
+ }
+
+ if (ar->ht_cap_info & WMI_HT_CAP_TX_STBC && (ar->cfg_tx_chainmask > 1))
+ ht_cap.cap |= IEEE80211_HT_CAP_TX_STBC;
+
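+ /* Relocate the RX STBC field: extract the firmware-reported value
+ * from the WMI capability word and shift it into the bit position
+ * expected by the IEEE HT capability definition.
+ */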
+ if (ar->ht_cap_info & WMI_HT_CAP_RX_STBC) {
+ u32 stbc;
+
+ stbc = ar->ht_cap_info;
+ stbc &= WMI_HT_CAP_RX_STBC;
+ stbc >>= WMI_HT_CAP_RX_STBC_MASK_SHIFT;
+ stbc <<= IEEE80211_HT_CAP_RX_STBC_SHIFT;
+ stbc &= IEEE80211_HT_CAP_RX_STBC;
+
+ ht_cap.cap |= stbc;
+ }
+
+ if (ar->ht_cap_info & WMI_HT_CAP_LDPC || (ar->ht_cap_info &
+ WMI_HT_CAP_RX_LDPC && (ar->ht_cap_info & WMI_HT_CAP_TX_LDPC)))
+ ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
+
+ if (ar->ht_cap_info & WMI_HT_CAP_L_SIG_TXOP_PROT)
+ ht_cap.cap |= IEEE80211_HT_CAP_LSIG_TXOP_PROT;
+
+ /* max AMSDU is implicitly taken from vht_cap_info */
+ if (ar->vht_cap_info & WMI_VHT_CAP_MAX_MPDU_LEN_MASK)
+ ht_cap.cap |= IEEE80211_HT_CAP_MAX_AMSDU;
+
+ for (i = 0; i < ar->num_rf_chains; i++) {
+ if (ar->cfg_rx_chainmask & BIT(i))
+ ht_cap.mcs.rx_mask[i] = 0xFF;
+ }
+
+ ht_cap.mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
+
+ return ht_cap;
+}
+
+static void ath10k_mac_setup_ht_vht_cap(struct ath10k *ar)
+{
+ struct ieee80211_supported_band *band;
+ struct ieee80211_sta_vht_cap vht_cap;
+ struct ieee80211_sta_ht_cap ht_cap;
+
+ ht_cap = ath10k_get_ht_cap(ar);
+ vht_cap = ath10k_create_vht_cap(ar);
+
+ if (ar->phy_capability & WHAL_WLAN_11G_CAPABILITY) {
+ band = &ar->mac.sbands[NL80211_BAND_2GHZ];
+ band->ht_cap = ht_cap;
+ }
+ if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) {
+ band = &ar->mac.sbands[NL80211_BAND_5GHZ];
+ band->ht_cap = ht_cap;
+ band->vht_cap = vht_cap;
+ }
+}
+
+static int __ath10k_set_antenna(struct ath10k *ar, u32 tx_ant, u32 rx_ant)
+{
+ int ret;
+ bool is_valid_tx_chain_mask, is_valid_rx_chain_mask;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ is_valid_tx_chain_mask = ath10k_check_chain_mask(ar, tx_ant, "tx");
+ is_valid_rx_chain_mask = ath10k_check_chain_mask(ar, rx_ant, "rx");
+
+ if (!is_valid_tx_chain_mask || !is_valid_rx_chain_mask)
+ return -EINVAL;
+
+ ar->cfg_tx_chainmask = tx_ant;
+ ar->cfg_rx_chainmask = rx_ant;
+
+ if ((ar->state != ATH10K_STATE_ON) &&
+ (ar->state != ATH10K_STATE_RESTARTED))
+ return 0;
+
+ ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->tx_chain_mask,
+ tx_ant);
+ if (ret) {
+ ath10k_warn(ar, "failed to set tx-chainmask: %d, req 0x%x\n",
+ ret, tx_ant);
+ return ret;
+ }
+
+ ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->rx_chain_mask,
+ rx_ant);
+ if (ret) {
+ ath10k_warn(ar, "failed to set rx-chainmask: %d, req 0x%x\n",
+ ret, rx_ant);
+ return ret;
+ }
+
+ /* Reload HT/VHT capability */
+ ath10k_mac_setup_ht_vht_cap(ar);
+
+ return 0;
+}
+
+static int ath10k_set_antenna(struct ieee80211_hw *hw, int radio_idx,
+ u32 tx_ant, u32 rx_ant)
+{
+ struct ath10k *ar = hw->priv;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+ ret = __ath10k_set_antenna(ar, tx_ant, rx_ant);
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static int __ath10k_fetch_bb_timing_dt(struct ath10k *ar,
+ struct wmi_bb_timing_cfg_arg *bb_timing)
+{
+ struct device_node *node;
+ const char *fem_name;
+ int ret;
+
+ node = ar->dev->of_node;
+ if (!node)
+ return -ENOENT;
+
+ ret = of_property_read_string_index(node, "ext-fem-name", 0, &fem_name);
+ if (ret)
+ return -ENOENT;
+
+ /*
+ * If an external front-end module (FEM) is used in the hardware, the default
+ * baseband timing parameters cannot be used since they were fine-tuned for the
+ * reference hardware, so choose different values suitable for that external
+ * FEM.
+ */
+ if (!strcmp("microsemi-lx5586", fem_name)) {
+ bb_timing->bb_tx_timing = 0x00;
+ bb_timing->bb_xpa_timing = 0x0101;
+ } else {
+ return -ENOENT;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot bb_tx_timing 0x%x bb_xpa_timing 0x%x\n",
+ bb_timing->bb_tx_timing, bb_timing->bb_xpa_timing);
+ return 0;
+}
+
+static int ath10k_mac_rfkill_config(struct ath10k *ar)
+{
+ u32 param;
+ int ret;
+
+ if (ar->hw_values->rfkill_pin == 0) {
+ ath10k_warn(ar, "ath10k does not support hardware rfkill with this device\n");
+ return -EOPNOTSUPP;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac rfkill_pin %d rfkill_cfg %d rfkill_on_level %d",
+ ar->hw_values->rfkill_pin, ar->hw_values->rfkill_cfg,
+ ar->hw_values->rfkill_on_level);
+
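+ /* FIELD_PREP() from linux/bitfield.h shifts each value into the
+ * bit range named by its mask, packing the three rfkill settings
+ * into a single pdev parameter word.
+ */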
+ param = FIELD_PREP(WMI_TLV_RFKILL_CFG_RADIO_LEVEL,
+ ar->hw_values->rfkill_on_level) |
+ FIELD_PREP(WMI_TLV_RFKILL_CFG_GPIO_PIN_NUM,
+ ar->hw_values->rfkill_pin) |
+ FIELD_PREP(WMI_TLV_RFKILL_CFG_PIN_AS_GPIO,
+ ar->hw_values->rfkill_cfg);
+
+ ret = ath10k_wmi_pdev_set_param(ar,
+ ar->wmi.pdev_param->rfkill_config,
+ param);
+ if (ret) {
+ ath10k_warn(ar,
+ "failed to set rfkill config 0x%x: %d\n",
+ param, ret);
+ return ret;
+ }
+ return 0;
+}
+
+int ath10k_mac_rfkill_enable_radio(struct ath10k *ar, bool enable)
+{
+ enum wmi_tlv_rfkill_enable_radio param;
+ int ret;
+
+ if (enable)
+ param = WMI_TLV_RFKILL_ENABLE_RADIO_ON;
+ else
+ param = WMI_TLV_RFKILL_ENABLE_RADIO_OFF;
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac rfkill enable %d", param);
+
+ ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->rfkill_enable,
+ param);
+ if (ret) {
+ ath10k_warn(ar, "failed to set rfkill enable param %d: %d\n",
+ param, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath10k_start(struct ieee80211_hw *hw)
+{
+ struct ath10k *ar = hw->priv;
+ u32 param;
+ int ret = 0;
+ struct wmi_bb_timing_cfg_arg bb_timing = {};
+
+ /*
+ * This makes sense only when restarting hw. It is harmless to call it
+ * unconditionally. This is necessary to make sure no HTT/WMI tx
+ * commands will be submitted while restarting.
+ */
+ ath10k_drain_tx(ar);
+
+ mutex_lock(&ar->conf_mutex);
+
+ switch (ar->state) {
+ case ATH10K_STATE_OFF:
+ ar->state = ATH10K_STATE_ON;
+ break;
+ case ATH10K_STATE_RESTARTING:
+ ar->state = ATH10K_STATE_RESTARTED;
+ break;
+ case ATH10K_STATE_ON:
+ case ATH10K_STATE_RESTARTED:
+ case ATH10K_STATE_WEDGED:
+ WARN_ON(1);
+ ret = -EINVAL;
+ goto err;
+ case ATH10K_STATE_UTF:
+ ret = -EBUSY;
+ goto err;
+ }
+
+ spin_lock_bh(&ar->data_lock);
+
+ if (ar->hw_rfkill_on) {
+ ar->hw_rfkill_on = false;
+ spin_unlock_bh(&ar->data_lock);
+ goto err;
+ }
+
+ spin_unlock_bh(&ar->data_lock);
+
+ ret = ath10k_hif_power_up(ar, ATH10K_FIRMWARE_MODE_NORMAL);
+ if (ret) {
+ ath10k_err(ar, "Could not init hif: %d\n", ret);
+ goto err_off;
+ }
+
+ ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_NORMAL,
+ &ar->normal_mode_fw);
+ if (ret) {
+ ath10k_err(ar, "Could not init core: %d\n", ret);
+ goto err_power_down;
+ }
+
+ if (ar->sys_cap_info & WMI_TLV_SYS_CAP_INFO_RFKILL) {
+ ret = ath10k_mac_rfkill_config(ar);
+ if (ret && ret != -EOPNOTSUPP) {
+ ath10k_warn(ar, "failed to configure rfkill: %d", ret);
+ goto err_core_stop;
+ }
+ }
+
+ param = ar->wmi.pdev_param->pmf_qos;
+ ret = ath10k_wmi_pdev_set_param(ar, param, 1);
+ if (ret) {
+ ath10k_warn(ar, "failed to enable PMF QOS: %d\n", ret);
+ goto err_core_stop;
+ }
+
+ param = ar->wmi.pdev_param->dynamic_bw;
+ ret = ath10k_wmi_pdev_set_param(ar, param, 1);
+ if (ret) {
+ ath10k_warn(ar, "failed to enable dynamic BW: %d\n", ret);
+ goto err_core_stop;
+ }
+
+ if (test_bit(WMI_SERVICE_SPOOF_MAC_SUPPORT, ar->wmi.svc_map)) {
+ ret = ath10k_wmi_scan_prob_req_oui(ar, ar->mac_addr);
+ if (ret) {
+ ath10k_err(ar, "failed to set prob req oui: %i\n", ret);
+ goto err_core_stop;
+ }
+ }
+
+ if (test_bit(WMI_SERVICE_ADAPTIVE_OCS, ar->wmi.svc_map)) {
+ ret = ath10k_wmi_adaptive_qcs(ar, true);
+ if (ret) {
+ ath10k_warn(ar, "failed to enable adaptive qcs: %d\n",
+ ret);
+ goto err_core_stop;
+ }
+ }
+
+ if (test_bit(WMI_SERVICE_BURST, ar->wmi.svc_map)) {
+ param = ar->wmi.pdev_param->burst_enable;
+ ret = ath10k_wmi_pdev_set_param(ar, param, 0);
+ if (ret) {
+ ath10k_warn(ar, "failed to disable burst: %d\n", ret);
+ goto err_core_stop;
+ }
+ }
+
+ param = ar->wmi.pdev_param->idle_ps_config;
+ ret = ath10k_wmi_pdev_set_param(ar, param, 1);
+ if (ret && ret != -EOPNOTSUPP) {
+ ath10k_warn(ar, "failed to enable idle_ps_config: %d\n", ret);
+ goto err_core_stop;
+ }
+
+ __ath10k_set_antenna(ar, ar->cfg_tx_chainmask, ar->cfg_rx_chainmask);
+
+ /*
+ * By default the FW sets the ARP frames' AC to voice (6). In that case
+ * the ARP exchange does not work properly for a UAPSD-enabled AP. ARP
+ * requests which arrive with access category 0 are processed by the
+ * network stack and sent back with access category 0, but the FW
+ * changes the access category to 6. Setting the ARP frames' access
+ * category to best effort (0) solves this problem.
+ */
+
+ param = ar->wmi.pdev_param->arp_ac_override;
+ ret = ath10k_wmi_pdev_set_param(ar, param, 0);
+ if (ret) {
+ ath10k_warn(ar, "failed to set arp ac override parameter: %d\n",
+ ret);
+ goto err_core_stop;
+ }
+
+ if (test_bit(ATH10K_FW_FEATURE_SUPPORTS_ADAPTIVE_CCA,
+ ar->running_fw->fw_file.fw_features)) {
+ ret = ath10k_wmi_pdev_enable_adaptive_cca(ar, 1,
+ WMI_CCA_DETECT_LEVEL_AUTO,
+ WMI_CCA_DETECT_MARGIN_AUTO);
+ if (ret) {
+ ath10k_warn(ar, "failed to enable adaptive cca: %d\n",
+ ret);
+ goto err_core_stop;
+ }
+ }
+
+ param = ar->wmi.pdev_param->ani_enable;
+ ret = ath10k_wmi_pdev_set_param(ar, param, 1);
+ if (ret) {
+ ath10k_warn(ar, "failed to enable ani by default: %d\n",
+ ret);
+ goto err_core_stop;
+ }
+
+ ar->ani_enabled = true;
+
+ if (ath10k_peer_stats_enabled(ar)) {
+ param = ar->wmi.pdev_param->peer_stats_update_period;
+ ret = ath10k_wmi_pdev_set_param(ar, param,
+ PEER_DEFAULT_STATS_UPDATE_PERIOD);
+ if (ret) {
+ ath10k_warn(ar,
+ "failed to set peer stats period : %d\n",
+ ret);
+ goto err_core_stop;
+ }
+ }
+
+ param = ar->wmi.pdev_param->enable_btcoex;
+ if (test_bit(WMI_SERVICE_COEX_GPIO, ar->wmi.svc_map) &&
+ test_bit(ATH10K_FW_FEATURE_BTCOEX_PARAM,
+ ar->running_fw->fw_file.fw_features) &&
+ ar->coex_support) {
+ ret = ath10k_wmi_pdev_set_param(ar, param, 0);
+ if (ret) {
+ ath10k_warn(ar,
+ "failed to set btcoex param: %d\n", ret);
+ goto err_core_stop;
+ }
+ clear_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags);
+ }
+
+ if (test_bit(WMI_SERVICE_BB_TIMING_CONFIG_SUPPORT, ar->wmi.svc_map)) {
+ ret = __ath10k_fetch_bb_timing_dt(ar, &bb_timing);
+ if (!ret) {
+ ret = ath10k_wmi_pdev_bb_timing(ar, &bb_timing);
+ if (ret) {
+ ath10k_warn(ar,
+ "failed to set bb timings: %d\n",
+ ret);
+ goto err_core_stop;
+ }
+ }
+ }
+
+ ar->num_started_vdevs = 0;
+ ath10k_regd_update(ar);
+
+ ath10k_spectral_start(ar);
+ ath10k_thermal_set_throttling(ar);
+
+ ar->radar_conf_state = ATH10K_RADAR_CONFIRMATION_IDLE;
+
+ mutex_unlock(&ar->conf_mutex);
+ return 0;
+
+err_core_stop:
+ ath10k_core_stop(ar);
+
+err_power_down:
+ ath10k_hif_power_down(ar);
+
+err_off:
+ ar->state = ATH10K_STATE_OFF;
+
+err:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static void ath10k_stop(struct ieee80211_hw *hw, bool suspend)
+{
+ struct ath10k *ar = hw->priv;
+ u32 opt;
+
+ ath10k_drain_tx(ar);
+
+ mutex_lock(&ar->conf_mutex);
+ if (ar->state != ATH10K_STATE_OFF) {
+ if (!ar->hw_rfkill_on) {
+ /* If the current driver state is RESTARTING but not yet
+ * fully RESTARTED because of an incoming suspend event,
+ * then ath10k_halt() has already been called via
+ * ath10k_core_restart() and should not be called here.
+ */
+ if (ar->state != ATH10K_STATE_RESTARTING) {
+ ath10k_halt(ar);
+ } else {
+ /* Suspending here, because when in RESTARTING
+ * state, ath10k_core_stop() skips
+ * ath10k_wait_for_suspend().
+ */
+ opt = WMI_PDEV_SUSPEND_AND_DISABLE_INTR;
+ ath10k_wait_for_suspend(ar, opt);
+ }
+ }
+ ar->state = ATH10K_STATE_OFF;
+ }
+ mutex_unlock(&ar->conf_mutex);
+
+ cancel_work_sync(&ar->set_coverage_class_work);
+ cancel_delayed_work_sync(&ar->scan.timeout);
+ cancel_work_sync(&ar->restart_work);
+ cancel_work_sync(&ar->recovery_check_work);
+}
+
+static int ath10k_config_ps(struct ath10k *ar)
+{
+ struct ath10k_vif *arvif;
+ int ret = 0;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ ret = ath10k_mac_vif_setup_ps(arvif);
+ if (ret) {
+ ath10k_warn(ar, "failed to setup powersave: %d\n", ret);
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static int ath10k_config(struct ieee80211_hw *hw, int radio_idx, u32 changed)
+{
+ struct ath10k *ar = hw->priv;
+ struct ieee80211_conf *conf = &hw->conf;
+ int ret = 0;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (changed & IEEE80211_CONF_CHANGE_PS)
+ ath10k_config_ps(ar);
+
+ if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
+ ar->monitor = conf->flags & IEEE80211_CONF_MONITOR;
+ ret = ath10k_monitor_recalc(ar);
+ if (ret)
+ ath10k_warn(ar, "failed to recalc monitor: %d\n", ret);
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
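+/* Map a chainmask to a spatial stream count by checking for contiguous low
+ * bits: e.g. 0xf -> 4, 0x7 -> 3, 0x3 -> 2. Masks with gaps (e.g. 0x5) fall
+ * through to 1.
+ */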
+static u32 get_nss_from_chainmask(u16 chain_mask)
+{
+ if ((chain_mask & 0xf) == 0xf)
+ return 4;
+ else if ((chain_mask & 0x7) == 0x7)
+ return 3;
+ else if ((chain_mask & 0x3) == 0x3)
+ return 2;
+ return 1;
+}
+
+static int ath10k_mac_set_txbf_conf(struct ath10k_vif *arvif)
+{
+ u32 value = 0;
+ struct ath10k *ar = arvif->ar;
+ int nsts;
+ int sound_dim;
+
+ if (ath10k_wmi_get_txbf_conf_scheme(ar) != WMI_TXBF_CONF_BEFORE_ASSOC)
+ return 0;
+
+ nsts = ath10k_mac_get_vht_cap_bf_sts(ar);
+ if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
+ IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE))
+ value |= SM(nsts, WMI_TXBF_STS_CAP_OFFSET);
+
+ sound_dim = ath10k_mac_get_vht_cap_bf_sound_dim(ar);
+ if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
+ IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE))
+ value |= SM(sound_dim, WMI_BF_SOUND_DIM_OFFSET);
+
+ if (!value)
+ return 0;
+
+ if (ar->vht_cap_info & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)
+ value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER;
+
+ if (ar->vht_cap_info & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)
+ value |= (WMI_VDEV_PARAM_TXBF_MU_TX_BFER |
+ WMI_VDEV_PARAM_TXBF_SU_TX_BFER);
+
+ if (ar->vht_cap_info & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE)
+ value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE;
+
+ if (ar->vht_cap_info & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)
+ value |= (WMI_VDEV_PARAM_TXBF_MU_TX_BFEE |
+ WMI_VDEV_PARAM_TXBF_SU_TX_BFEE);
+
+ return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
+ ar->wmi.vdev_param->txbf, value);
+}
+
+static void ath10k_update_vif_offload(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
+ struct ath10k *ar = hw->priv;
+ u32 vdev_param;
+ int ret;
+
+ if (ath10k_frame_mode != ATH10K_HW_TXRX_ETHERNET ||
+ ar->wmi.vdev_param->tx_encap_type == WMI_VDEV_PARAM_UNSUPPORTED ||
+ (vif->type != NL80211_IFTYPE_STATION &&
+ vif->type != NL80211_IFTYPE_AP))
+ vif->offload_flags &= ~IEEE80211_OFFLOAD_ENCAP_ENABLED;
+
+ vdev_param = ar->wmi.vdev_param->tx_encap_type;
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
+ ATH10K_HW_TXRX_NATIVE_WIFI);
+ /* 10.X firmware does not support this VDEV parameter. Do not warn */
+ if (ret && ret != -EOPNOTSUPP) {
+ ath10k_warn(ar, "failed to set vdev %i TX encapsulation: %d\n",
+ arvif->vdev_id, ret);
+ }
+}
+
+/*
+ * TODO:
+ * Figure out how to handle WMI_VDEV_SUBTYPE_P2P_DEVICE,
+ * because we will send mgmt frames without CCK. This requirement
+ * for P2P_FIND/GO_NEG should be handled by checking the CCK flag
+ * in the TX packet.
+ */
+static int ath10k_add_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
+ struct ath10k_peer *peer;
+ enum wmi_sta_powersave_param param;
+ int ret = 0;
+ u32 value;
+ int bit;
+ int i;
+ u32 vdev_param;
+
+ vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD;
+
+ mutex_lock(&ar->conf_mutex);
+
+ memset(arvif, 0, sizeof(*arvif));
+ ath10k_mac_txq_init(vif->txq);
+
+ arvif->ar = ar;
+ arvif->vif = vif;
+
+ INIT_LIST_HEAD(&arvif->list);
+ INIT_WORK(&arvif->ap_csa_work, ath10k_mac_vif_ap_csa_work);
+ INIT_DELAYED_WORK(&arvif->connection_loss_work,
+ ath10k_mac_vif_sta_connection_loss_work);
+
+ for (i = 0; i < ARRAY_SIZE(arvif->bitrate_mask.control); i++) {
+ arvif->bitrate_mask.control[i].legacy = 0xffffffff;
+ memset(arvif->bitrate_mask.control[i].ht_mcs, 0xff,
+ sizeof(arvif->bitrate_mask.control[i].ht_mcs));
+ memset(arvif->bitrate_mask.control[i].vht_mcs, 0xff,
+ sizeof(arvif->bitrate_mask.control[i].vht_mcs));
+ }
+
+ if (ar->num_peers >= ar->max_num_peers) {
+ ath10k_warn(ar, "refusing vdev creation due to insufficient peer entry resources in firmware\n");
+ ret = -ENOBUFS;
+ goto err;
+ }
+
+ if (ar->free_vdev_map == 0) {
+ ath10k_warn(ar, "Free vdev map is empty, no more interfaces allowed.\n");
+ ret = -EBUSY;
+ goto err;
+ }
+ bit = __ffs64(ar->free_vdev_map);
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac create vdev %i map %llx\n",
+ bit, ar->free_vdev_map);
+
+ arvif->vdev_id = bit;
+ arvif->vdev_subtype =
+ ath10k_wmi_get_vdev_subtype(ar, WMI_VDEV_SUBTYPE_NONE);
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_P2P_DEVICE:
+ arvif->vdev_type = WMI_VDEV_TYPE_STA;
+ arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype
+ (ar, WMI_VDEV_SUBTYPE_P2P_DEVICE);
+ break;
+ case NL80211_IFTYPE_UNSPECIFIED:
+ case NL80211_IFTYPE_STATION:
+ arvif->vdev_type = WMI_VDEV_TYPE_STA;
+ if (vif->p2p)
+ arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype
+ (ar, WMI_VDEV_SUBTYPE_P2P_CLIENT);
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ arvif->vdev_type = WMI_VDEV_TYPE_IBSS;
+ break;
+ case NL80211_IFTYPE_MESH_POINT:
+ if (test_bit(WMI_SERVICE_MESH_11S, ar->wmi.svc_map)) {
+ arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype
+ (ar, WMI_VDEV_SUBTYPE_MESH_11S);
+ } else if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
+ ret = -EINVAL;
+ ath10k_warn(ar, "must load driver with rawmode=1 to add mesh interfaces\n");
+ goto err;
+ }
+ arvif->vdev_type = WMI_VDEV_TYPE_AP;
+ break;
+ case NL80211_IFTYPE_AP:
+ arvif->vdev_type = WMI_VDEV_TYPE_AP;
+
+ if (vif->p2p)
+ arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype
+ (ar, WMI_VDEV_SUBTYPE_P2P_GO);
+ break;
+ case NL80211_IFTYPE_MONITOR:
+ arvif->vdev_type = WMI_VDEV_TYPE_MONITOR;
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+
+ /* Using vdev_id as the queue number makes it very easy to do per-vif
+ * tx queue locking. This shouldn't wrap due to interface combinations
+ * but do a modulo for correctness' sake and to prevent using offchannel
+ * tx queues for regular vif tx.
+ */
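+ /* E.g. with IEEE80211_MAX_QUEUES == 16 the modulo maps vdev ids
+ * 0..14 onto hw queues 0..14, leaving the last queue untouched by
+ * regular vif tx.
+ */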
+ vif->cab_queue = arvif->vdev_id % (IEEE80211_MAX_QUEUES - 1);
+ for (i = 0; i < ARRAY_SIZE(vif->hw_queue); i++)
+ vif->hw_queue[i] = arvif->vdev_id % (IEEE80211_MAX_QUEUES - 1);
+
+ /* Some firmware revisions don't wait for beacon tx completion before
+ * sending another SWBA event. This could lead to hardware using old
+ * (freed) beacon data in some cases, e.g. tx credit starvation
+ * combined with missed TBTT. This is very rare.
+ *
+ * On non-IOMMU-enabled hosts this could be a possible security issue
+ * because hw could beacon some random data on the air. On
+ * IOMMU-enabled hosts DMAR faults would occur in most cases and the
+ * target device would crash.
+ *
+ * Since there are no beacon tx completions (implicit or explicit)
+ * propagated to the host, the only workaround for this is to allocate a
+ * DMA-coherent buffer for the lifetime of a vif and use it for all
+ * beacon tx commands. Worst case for this approach is some beacons may
+ * become corrupted, e.g. have garbled IEs or an out-of-date TIM bitmap.
+ */
+ if (vif->type == NL80211_IFTYPE_ADHOC ||
+ vif->type == NL80211_IFTYPE_MESH_POINT ||
+ vif->type == NL80211_IFTYPE_AP) {
+ if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL) {
+ arvif->beacon_buf = kmalloc(IEEE80211_MAX_FRAME_LEN,
+ GFP_KERNEL);
+
+ /* Using a kernel pointer in place of a dma_addr_t
+ * token can lead to undefined behavior if that
+ * makes it into cache management functions. Use a
+ * known-invalid address token instead, which
+ * avoids the warning and makes it easier to catch
+ * bugs if it does end up getting used.
+ */
+ arvif->beacon_paddr = DMA_MAPPING_ERROR;
+ } else {
+ arvif->beacon_buf =
+ dma_alloc_coherent(ar->dev,
+ IEEE80211_MAX_FRAME_LEN,
+ &arvif->beacon_paddr,
+ GFP_ATOMIC);
+ }
+ if (!arvif->beacon_buf) {
+ ret = -ENOMEM;
+ ath10k_warn(ar, "failed to allocate beacon buffer: %d\n",
+ ret);
+ goto err;
+ }
+ }
+ if (test_bit(ATH10K_FLAG_HW_CRYPTO_DISABLED, &ar->dev_flags))
+ arvif->nohwcrypt = true;
+
+ if (arvif->nohwcrypt &&
+ !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
+ ret = -EINVAL;
+ ath10k_warn(ar, "cryptmode module param needed for sw crypto\n");
+ goto err;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev create %d (add interface) type %d subtype %d bcnmode %s\n",
+ arvif->vdev_id, arvif->vdev_type, arvif->vdev_subtype,
+ arvif->beacon_buf ? "single-buf" : "per-skb");
+
+ ret = ath10k_wmi_vdev_create(ar, arvif->vdev_id, arvif->vdev_type,
+ arvif->vdev_subtype, vif->addr);
+ if (ret) {
+ ath10k_warn(ar, "failed to create WMI vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ goto err;
+ }
+
+ if (test_bit(WMI_SERVICE_VDEV_DISABLE_4_ADDR_SRC_LRN_SUPPORT,
+ ar->wmi.svc_map)) {
+ vdev_param = ar->wmi.vdev_param->disable_4addr_src_lrn;
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
+ WMI_VDEV_DISABLE_4_ADDR_SRC_LRN);
+ if (ret && ret != -EOPNOTSUPP) {
+ ath10k_warn(ar, "failed to disable 4addr src lrn vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ }
+ }
+
+ ar->free_vdev_map &= ~(1LL << arvif->vdev_id);
+ spin_lock_bh(&ar->data_lock);
+ list_add(&arvif->list, &ar->arvifs);
+ spin_unlock_bh(&ar->data_lock);
+
+ /* It makes no sense to have firmware do keepalives. mac80211 already
+ * takes care of this with idle connection polling.
+ */
+ ret = ath10k_mac_vif_disable_keepalive(arvif);
+ if (ret) {
+ ath10k_warn(ar, "failed to disable keepalive on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ goto err_vdev_delete;
+ }
+
+ arvif->def_wep_key_idx = -1;
+
+ ath10k_update_vif_offload(hw, vif);
+
+ /* Configuring the number of spatial streams for a monitor interface
+ * causes a target assert in qca9888 and qca6174.
+ */
+ if (ar->cfg_tx_chainmask && (vif->type != NL80211_IFTYPE_MONITOR)) {
+ u16 nss = get_nss_from_chainmask(ar->cfg_tx_chainmask);
+
+ vdev_param = ar->wmi.vdev_param->nss;
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
+ nss);
+ if (ret) {
+ ath10k_warn(ar, "failed to set vdev %i chainmask 0x%x, nss %i: %d\n",
+ arvif->vdev_id, ar->cfg_tx_chainmask, nss,
+ ret);
+ goto err_vdev_delete;
+ }
+ }
+
+ if (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
+ arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
+ ret = ath10k_peer_create(ar, vif, NULL, arvif->vdev_id,
+ vif->addr, WMI_PEER_TYPE_DEFAULT);
+ if (ret) {
+ ath10k_warn(ar, "failed to create vdev %i peer for AP/IBSS: %d\n",
+ arvif->vdev_id, ret);
+ goto err_vdev_delete;
+ }
+
+ spin_lock_bh(&ar->data_lock);
+
+ peer = ath10k_peer_find(ar, arvif->vdev_id, vif->addr);
+ if (!peer) {
+ ath10k_warn(ar, "failed to lookup peer %pM on vdev %i\n",
+ vif->addr, arvif->vdev_id);
+ spin_unlock_bh(&ar->data_lock);
+ ret = -ENOENT;
+ goto err_peer_delete;
+ }
+
+ arvif->peer_id = find_first_bit(peer->peer_ids,
+ ATH10K_MAX_NUM_PEER_IDS);
+
+ spin_unlock_bh(&ar->data_lock);
+ } else {
+ arvif->peer_id = HTT_INVALID_PEERID;
+ }
+
+ if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
+ ret = ath10k_mac_set_kickout(arvif);
+ if (ret) {
+ ath10k_warn(ar, "failed to set vdev %i kickout parameters: %d\n",
+ arvif->vdev_id, ret);
+ goto err_peer_delete;
+ }
+ }
+
+ if (arvif->vdev_type == WMI_VDEV_TYPE_STA) {
+ param = WMI_STA_PS_PARAM_RX_WAKE_POLICY;
+ value = WMI_STA_PS_RX_WAKE_POLICY_WAKE;
+ ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
+ param, value);
+ if (ret) {
+ ath10k_warn(ar, "failed to set vdev %i RX wake policy: %d\n",
+ arvif->vdev_id, ret);
+ goto err_peer_delete;
+ }
+
+ ret = ath10k_mac_vif_recalc_ps_wake_threshold(arvif);
+ if (ret) {
+ ath10k_warn(ar, "failed to recalc ps wake threshold on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ goto err_peer_delete;
+ }
+
+ ret = ath10k_mac_vif_recalc_ps_poll_count(arvif);
+ if (ret) {
+ ath10k_warn(ar, "failed to recalc ps poll count on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ goto err_peer_delete;
+ }
+ }
+
+ ret = ath10k_mac_set_txbf_conf(arvif);
+ if (ret) {
+ ath10k_warn(ar, "failed to set txbf for vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ goto err_peer_delete;
+ }
+
+ ret = ath10k_mac_set_rts(arvif, ar->hw->wiphy->rts_threshold);
+ if (ret) {
+ ath10k_warn(ar, "failed to set rts threshold for vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ goto err_peer_delete;
+ }
+
+ arvif->txpower = vif->bss_conf.txpower;
+ ret = ath10k_mac_txpower_recalc(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to recalc tx power: %d\n", ret);
+ goto err_peer_delete;
+ }
+
+ if (test_bit(WMI_SERVICE_RTT_RESPONDER_ROLE, ar->wmi.svc_map)) {
+ vdev_param = ar->wmi.vdev_param->rtt_responder_role;
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
+ arvif->ftm_responder);
+
+ /* It is harmless to not set FTM role. Do not warn */
+ if (ret && ret != -EOPNOTSUPP)
+ ath10k_warn(ar, "failed to set vdev %i FTM Responder: %d\n",
+ arvif->vdev_id, ret);
+ }
+
+ if (vif->type == NL80211_IFTYPE_MONITOR) {
+ ar->monitor_arvif = arvif;
+ ret = ath10k_monitor_recalc(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to recalc monitor: %d\n", ret);
+ goto err_peer_delete;
+ }
+ }
+
+ spin_lock_bh(&ar->htt.tx_lock);
+ if (!ar->tx_paused)
+ ieee80211_wake_queue(ar->hw, arvif->vdev_id);
+ spin_unlock_bh(&ar->htt.tx_lock);
+
+ mutex_unlock(&ar->conf_mutex);
+ return 0;
+
+err_peer_delete:
+ if (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
+ arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
+ ath10k_wmi_peer_delete(ar, arvif->vdev_id, vif->addr);
+ ath10k_wait_for_peer_delete_done(ar, arvif->vdev_id,
+ vif->addr);
+ }
+
+err_vdev_delete:
+ ath10k_wmi_vdev_delete(ar, arvif->vdev_id);
+ ar->free_vdev_map |= 1LL << arvif->vdev_id;
+ spin_lock_bh(&ar->data_lock);
+ list_del(&arvif->list);
+ spin_unlock_bh(&ar->data_lock);
+
+err:
+ if (arvif->beacon_buf) {
+ if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
+ kfree(arvif->beacon_buf);
+ else
+ dma_free_coherent(ar->dev, IEEE80211_MAX_FRAME_LEN,
+ arvif->beacon_buf,
+ arvif->beacon_paddr);
+ arvif->beacon_buf = NULL;
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static void ath10k_mac_vif_tx_unlock_all(struct ath10k_vif *arvif)
+{
+ int i;
+
+ for (i = 0; i < BITS_PER_LONG; i++)
+ ath10k_mac_vif_tx_unlock(arvif, i);
+}
+
+static void ath10k_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
+ struct ath10k_peer *peer;
+ int ret;
+ int i;
+
+ cancel_work_sync(&arvif->ap_csa_work);
+ cancel_delayed_work_sync(&arvif->connection_loss_work);
+
+ mutex_lock(&ar->conf_mutex);
+
+ ret = ath10k_spectral_vif_stop(arvif);
+ if (ret)
+ ath10k_warn(ar, "failed to stop spectral for vdev %i: %d\n",
+ arvif->vdev_id, ret);
+
+ ar->free_vdev_map |= 1LL << arvif->vdev_id;
+ spin_lock_bh(&ar->data_lock);
+ list_del(&arvif->list);
+ spin_unlock_bh(&ar->data_lock);
+
+ if (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
+ arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
+ ret = ath10k_wmi_peer_delete(arvif->ar, arvif->vdev_id,
+ vif->addr);
+ if (ret)
+ ath10k_warn(ar, "failed to submit AP/IBSS self-peer removal on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+
+ ath10k_wait_for_peer_delete_done(ar, arvif->vdev_id,
+ vif->addr);
+ kfree(arvif->u.ap.noa_data);
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i delete (remove interface)\n",
+ arvif->vdev_id);
+
+ ret = ath10k_wmi_vdev_delete(ar, arvif->vdev_id);
+ if (ret)
+ ath10k_warn(ar, "failed to delete WMI vdev %i: %d\n",
+ arvif->vdev_id, ret);
+
+ ret = ath10k_vdev_delete_sync(ar);
+ if (ret) {
+ ath10k_warn(ar, "Error in receiving vdev delete response: %d\n", ret);
+ goto out;
+ }
+
+ /* Some firmware revisions don't notify the host about self-peer
+ * removal until after the associated vdev is deleted.
+ */
+ if (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
+ arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
+ ret = ath10k_wait_for_peer_deleted(ar, arvif->vdev_id,
+ vif->addr);
+ if (ret)
+ ath10k_warn(ar, "failed to remove AP self-peer on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+
+ spin_lock_bh(&ar->data_lock);
+ ar->num_peers--;
+ spin_unlock_bh(&ar->data_lock);
+ }
+
+ spin_lock_bh(&ar->data_lock);
+ for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) {
+ peer = ar->peer_map[i];
+ if (!peer)
+ continue;
+
+ if (peer->vif == vif) {
+ ath10k_warn(ar, "found vif peer %pM entry on vdev %i after it was supposedly removed\n",
+ vif->addr, arvif->vdev_id);
+ peer->vif = NULL;
+ }
+ }
+
+ /* Clean this up late; less opportunity for firmware to access
+ * the DMA memory we have deleted.
+ */
+ ath10k_mac_vif_beacon_cleanup(arvif);
+ spin_unlock_bh(&ar->data_lock);
+
+ ath10k_peer_cleanup(ar, arvif->vdev_id);
+ ath10k_mac_txq_unref(ar, vif->txq);
+
+ if (vif->type == NL80211_IFTYPE_MONITOR) {
+ ar->monitor_arvif = NULL;
+ ret = ath10k_monitor_recalc(ar);
+ if (ret)
+ ath10k_warn(ar, "failed to recalc monitor: %d\n", ret);
+ }
+
+ ret = ath10k_mac_txpower_recalc(ar);
+ if (ret)
+ ath10k_warn(ar, "failed to recalc tx power: %d\n", ret);
+
+ spin_lock_bh(&ar->htt.tx_lock);
+ ath10k_mac_vif_tx_unlock_all(arvif);
+ spin_unlock_bh(&ar->htt.tx_lock);
+
+ ath10k_mac_txq_unref(ar, vif->txq);
+
+out:
+ mutex_unlock(&ar->conf_mutex);
+}
+
+/*
+ * FIXME: Has to be verified.
+ */
+#define SUPPORTED_FILTERS \
+ (FIF_ALLMULTI | \
+ FIF_CONTROL | \
+ FIF_PSPOLL | \
+ FIF_OTHER_BSS | \
+ FIF_BCN_PRBRESP_PROMISC | \
+ FIF_PROBE_REQ | \
+ FIF_FCSFAIL)
+
+static void ath10k_configure_filter(struct ieee80211_hw *hw,
+ unsigned int changed_flags,
+ unsigned int *total_flags,
+ u64 multicast)
+{
+ struct ath10k *ar = hw->priv;
+ int ret;
+ unsigned int supported = SUPPORTED_FILTERS;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->hw_params.mcast_frame_registration)
+ supported |= FIF_MCAST_ACTION;
+
+ *total_flags &= supported;
+
+ ar->filter_flags = *total_flags;
+
+ ret = ath10k_monitor_recalc(ar);
+ if (ret)
+ ath10k_warn(ar, "failed to recalc monitor: %d\n", ret);
+
+ mutex_unlock(&ar->conf_mutex);
+}
+
+static void ath10k_recalculate_mgmt_rate(struct ath10k *ar,
+ struct ieee80211_vif *vif,
+ struct cfg80211_chan_def *def)
+{
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
+ const struct ieee80211_supported_band *sband;
+ u8 basic_rate_idx;
+ int hw_rate_code;
+ u32 vdev_param;
+ u16 bitrate;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ sband = ar->hw->wiphy->bands[def->chan->band];
+ basic_rate_idx = ffs(vif->bss_conf.basic_rates) - 1;
+ bitrate = sband->bitrates[basic_rate_idx].bitrate;
+
+ hw_rate_code = ath10k_mac_get_rate_hw_value(bitrate);
+ if (hw_rate_code < 0) {
+ ath10k_warn(ar, "bitrate not supported %d\n", bitrate);
+ return;
+ }
+
+ vdev_param = ar->wmi.vdev_param->mgmt_rate;
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
+ hw_rate_code);
+ if (ret)
+ ath10k_warn(ar, "failed to set mgmt tx rate %d\n", ret);
+}
+
+static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *info,
+ u64 changed)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
+ struct cfg80211_chan_def def;
+ u32 vdev_param, pdev_param, slottime, preamble;
+ u16 bitrate, hw_value;
+ u8 rate, rateidx;
+ int ret = 0, mcast_rate;
+ enum nl80211_band band;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (changed & BSS_CHANGED_IBSS)
+ ath10k_control_ibss(arvif, vif);
+
+ if (changed & BSS_CHANGED_BEACON_INT) {
+ arvif->beacon_interval = info->beacon_int;
+ vdev_param = ar->wmi.vdev_param->beacon_interval;
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
+ arvif->beacon_interval);
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac vdev %d beacon_interval %d\n",
+ arvif->vdev_id, arvif->beacon_interval);
+
+ if (ret)
+ ath10k_warn(ar, "failed to set beacon interval for vdev %d: %i\n",
+ arvif->vdev_id, ret);
+ }
+
+ if (changed & BSS_CHANGED_BEACON) {
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "vdev %d set beacon tx mode to staggered\n",
+ arvif->vdev_id);
+
+ pdev_param = ar->wmi.pdev_param->beacon_tx_mode;
+ ret = ath10k_wmi_pdev_set_param(ar, pdev_param,
+ WMI_BEACON_STAGGERED_MODE);
+ if (ret)
+ ath10k_warn(ar, "failed to set beacon mode for vdev %d: %i\n",
+ arvif->vdev_id, ret);
+
+ ret = ath10k_mac_setup_bcn_tmpl(arvif);
+ if (ret)
+ ath10k_warn(ar, "failed to update beacon template: %d\n",
+ ret);
+
+ if (ieee80211_vif_is_mesh(vif)) {
+ /* mesh doesn't use SSID but firmware needs it */
+ arvif->u.ap.ssid_len = 4;
+ memcpy(arvif->u.ap.ssid, "mesh", arvif->u.ap.ssid_len);
+ }
+ }
+
+ if (changed & BSS_CHANGED_AP_PROBE_RESP) {
+ ret = ath10k_mac_setup_prb_tmpl(arvif);
+ if (ret)
+ ath10k_warn(ar, "failed to setup probe resp template on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ }
+
+ if (changed & (BSS_CHANGED_BEACON_INFO | BSS_CHANGED_BEACON)) {
+ arvif->dtim_period = info->dtim_period;
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac vdev %d dtim_period %d\n",
+ arvif->vdev_id, arvif->dtim_period);
+
+ vdev_param = ar->wmi.vdev_param->dtim_period;
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
+ arvif->dtim_period);
+ if (ret)
+ ath10k_warn(ar, "failed to set dtim period for vdev %d: %i\n",
+ arvif->vdev_id, ret);
+ }
+
+ if (changed & BSS_CHANGED_SSID &&
+ vif->type == NL80211_IFTYPE_AP) {
+ arvif->u.ap.ssid_len = vif->cfg.ssid_len;
+ if (vif->cfg.ssid_len)
+ memcpy(arvif->u.ap.ssid, vif->cfg.ssid,
+ vif->cfg.ssid_len);
+ arvif->u.ap.hidden_ssid = info->hidden_ssid;
+ }
+
+ if (changed & BSS_CHANGED_BSSID && !is_zero_ether_addr(info->bssid))
+ ether_addr_copy(arvif->bssid, info->bssid);
+
+ if (changed & BSS_CHANGED_FTM_RESPONDER &&
+ arvif->ftm_responder != info->ftm_responder &&
+ test_bit(WMI_SERVICE_RTT_RESPONDER_ROLE, ar->wmi.svc_map)) {
+ arvif->ftm_responder = info->ftm_responder;
+
+ vdev_param = ar->wmi.vdev_param->rtt_responder_role;
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
+ arvif->ftm_responder);
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac vdev %d ftm_responder %d:ret %d\n",
+ arvif->vdev_id, arvif->ftm_responder, ret);
+ }
+
+ if (changed & BSS_CHANGED_BEACON_ENABLED)
+ ath10k_control_beaconing(arvif, info);
+
+ if (changed & BSS_CHANGED_ERP_CTS_PROT) {
+ arvif->use_cts_prot = info->use_cts_prot;
+
+ ret = ath10k_recalc_rtscts_prot(arvif);
+ if (ret)
+ ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n",
+ arvif->vdev_id, ret);
+
+ if (ath10k_mac_can_set_cts_prot(arvif)) {
+ ret = ath10k_mac_set_cts_prot(arvif);
+ if (ret)
+ ath10k_warn(ar, "failed to set cts protection for vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ }
+ }
+
+ if (changed & BSS_CHANGED_ERP_SLOT) {
+ if (info->use_short_slot)
+ slottime = WMI_VDEV_SLOT_TIME_SHORT; /* 9us */
+ else
+ slottime = WMI_VDEV_SLOT_TIME_LONG; /* 20us */
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d slot_time %d\n",
+ arvif->vdev_id, slottime);
+
+ vdev_param = ar->wmi.vdev_param->slot_time;
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
+ slottime);
+ if (ret)
+ ath10k_warn(ar, "failed to set erp slot for vdev %d: %i\n",
+ arvif->vdev_id, ret);
+ }
+
+ if (changed & BSS_CHANGED_ERP_PREAMBLE) {
+ if (info->use_short_preamble)
+ preamble = WMI_VDEV_PREAMBLE_SHORT;
+ else
+ preamble = WMI_VDEV_PREAMBLE_LONG;
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac vdev %d preamble %dn",
+ arvif->vdev_id, preamble);
+
+ vdev_param = ar->wmi.vdev_param->preamble;
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
+ preamble);
+ if (ret)
+ ath10k_warn(ar, "failed to set preamble for vdev %d: %i\n",
+ arvif->vdev_id, ret);
+ }
+
+ if (changed & BSS_CHANGED_ASSOC) {
+ if (vif->cfg.assoc) {
+ /* Workaround: Make sure monitor vdev is not running
+ * when associating to prevent some firmware revisions
+ * (e.g. 10.1 and 10.2) from crashing.
+ */
+ if (ar->monitor_started)
+ ath10k_monitor_stop(ar);
+ ath10k_bss_assoc(hw, vif, info);
+ ath10k_monitor_recalc(ar);
+ } else {
+ ath10k_bss_disassoc(hw, vif);
+ }
+ }
+
+ if (changed & BSS_CHANGED_TXPOWER) {
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev_id %i txpower %d\n",
+ arvif->vdev_id, info->txpower);
+
+ arvif->txpower = info->txpower;
+ ret = ath10k_mac_txpower_recalc(ar);
+ if (ret)
+ ath10k_warn(ar, "failed to recalc tx power: %d\n", ret);
+ }
+
+ if (changed & BSS_CHANGED_PS) {
+ arvif->ps = vif->cfg.ps;
+
+ ret = ath10k_config_ps(ar);
+ if (ret)
+ ath10k_warn(ar, "failed to setup ps on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ }
+
+ if (changed & BSS_CHANGED_MCAST_RATE &&
+ !ath10k_mac_vif_chan(arvif->vif, &def)) {
+ band = def.chan->band;
+ mcast_rate = vif->bss_conf.mcast_rate[band];
+ if (mcast_rate > 0)
+ rateidx = mcast_rate - 1;
+ else
+ rateidx = ffs(vif->bss_conf.basic_rates) - 1;
+
+ if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY)
+ rateidx += ATH10K_MAC_FIRST_OFDM_RATE_IDX;
+
+ bitrate = ath10k_wmi_legacy_rates[rateidx].bitrate;
+ hw_value = ath10k_wmi_legacy_rates[rateidx].hw_value;
+ if (ath10k_mac_bitrate_is_cck(bitrate))
+ preamble = WMI_RATE_PREAMBLE_CCK;
+ else
+ preamble = WMI_RATE_PREAMBLE_OFDM;
+
+ rate = ATH10K_HW_RATECODE(hw_value, 0, preamble);
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac vdev %d mcast_rate %x\n",
+ arvif->vdev_id, rate);
+
+ vdev_param = ar->wmi.vdev_param->mcast_data_rate;
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
+ vdev_param, rate);
+ if (ret)
+ ath10k_warn(ar,
+ "failed to set mcast rate on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+
+ vdev_param = ar->wmi.vdev_param->bcast_data_rate;
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
+ vdev_param, rate);
+ if (ret)
+ ath10k_warn(ar,
+ "failed to set bcast rate on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ }
+
+ if (changed & BSS_CHANGED_BASIC_RATES &&
+ !ath10k_mac_vif_chan(arvif->vif, &def))
+ ath10k_recalculate_mgmt_rate(ar, vif, &def);
+
+ mutex_unlock(&ar->conf_mutex);
+}
+
+static void ath10k_mac_op_set_coverage_class(struct ieee80211_hw *hw, int radio_idx,
+ s16 value)
+{
+ struct ath10k *ar = hw->priv;
+
+ /* This function should never be called if setting the coverage class
+ * is not supported on this hardware.
+ */
+ if (!ar->hw_params.hw_ops->set_coverage_class) {
+ WARN_ON_ONCE(1);
+ return;
+ }
+ ar->hw_params.hw_ops->set_coverage_class(ar, -1, value);
+}
+
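+/* Count the TDLS stations on a given vif by walking all known stations with
+ * ieee80211_iterate_stations_atomic(). The iterator runs in atomic (RCU)
+ * context, so it only tallies matches into the caller-provided iter data.
+ */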
+struct ath10k_mac_tdls_iter_data {
+ u32 num_tdls_stations;
+ struct ieee80211_vif *curr_vif;
+};
+
+static void ath10k_mac_tdls_vif_stations_count_iter(void *data,
+ struct ieee80211_sta *sta)
+{
+ struct ath10k_mac_tdls_iter_data *iter_data = data;
+ struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+ struct ieee80211_vif *sta_vif = arsta->arvif->vif;
+
+ if (sta->tdls && sta_vif == iter_data->curr_vif)
+ iter_data->num_tdls_stations++;
+}
+
+static int ath10k_mac_tdls_vif_stations_count(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath10k_mac_tdls_iter_data data = {};
+
+ data.curr_vif = vif;
+
+ ieee80211_iterate_stations_atomic(hw,
+ ath10k_mac_tdls_vif_stations_count_iter,
+ &data);
+ return data.num_tdls_stations;
+}
+
+static int ath10k_hw_scan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_scan_request *hw_req)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
+ struct cfg80211_scan_request *req = &hw_req->req;
+ struct wmi_start_scan_arg *arg = NULL;
+ int ret = 0;
+ int i;
+ u32 scan_timeout;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ath10k_mac_tdls_vif_stations_count(hw, vif) > 0) {
+ ret = -EBUSY;
+ goto exit;
+ }
+
+ spin_lock_bh(&ar->data_lock);
+ switch (ar->scan.state) {
+ case ATH10K_SCAN_IDLE:
+ reinit_completion(&ar->scan.started);
+ reinit_completion(&ar->scan.completed);
+ ar->scan.state = ATH10K_SCAN_STARTING;
+ ar->scan.is_roc = false;
+ ar->scan.vdev_id = arvif->vdev_id;
+ ret = 0;
+ break;
+ case ATH10K_SCAN_STARTING:
+ case ATH10K_SCAN_RUNNING:
+ case ATH10K_SCAN_ABORTING:
+ ret = -EBUSY;
+ break;
+ }
+ spin_unlock_bh(&ar->data_lock);
+
+ if (ret)
+ goto exit;
+
+ arg = kzalloc(sizeof(*arg), GFP_KERNEL);
+ if (!arg) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ ath10k_wmi_start_scan_init(ar, arg);
+ arg->vdev_id = arvif->vdev_id;
+ arg->scan_id = ATH10K_SCAN_ID;
+
+ if (req->ie_len) {
+ arg->ie_len = req->ie_len;
+ memcpy(arg->ie, req->ie, arg->ie_len);
+ }
+
+ if (req->n_ssids) {
+ arg->n_ssids = req->n_ssids;
+ for (i = 0; i < arg->n_ssids; i++) {
+ arg->ssids[i].len = req->ssids[i].ssid_len;
+ arg->ssids[i].ssid = req->ssids[i].ssid;
+ }
+ } else {
+ arg->scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE;
+ }
+
+ if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
+ arg->scan_ctrl_flags |= WMI_SCAN_ADD_SPOOFED_MAC_IN_PROBE_REQ;
+ ether_addr_copy(arg->mac_addr.addr, req->mac_addr);
+ ether_addr_copy(arg->mac_mask.addr, req->mac_addr_mask);
+ }
+
+ if (req->n_channels) {
+ arg->n_channels = req->n_channels;
+ for (i = 0; i < arg->n_channels; i++)
+ arg->channels[i] = req->channels[i]->center_freq;
+ }
+
+ /* if duration is set, default dwell times will be overwritten */
+ if (req->duration) {
+ arg->dwell_time_active = req->duration;
+ arg->dwell_time_passive = req->duration;
+ arg->burst_duration_ms = req->duration;
+
+ scan_timeout = min_t(u32, arg->max_rest_time *
+ (arg->n_channels - 1) + (req->duration +
+ ATH10K_SCAN_CHANNEL_SWITCH_WMI_EVT_OVERHEAD) *
+ arg->n_channels, arg->max_scan_time);
+ } else {
+ scan_timeout = arg->max_scan_time;
+ }
+
+ /* Add a 200ms margin to account for event/command processing */
+ scan_timeout += 200;
+
+ ret = ath10k_start_scan(ar, arg);
+ if (ret) {
+ ath10k_warn(ar, "failed to start hw scan: %d\n", ret);
+ spin_lock_bh(&ar->data_lock);
+ ar->scan.state = ATH10K_SCAN_IDLE;
+ spin_unlock_bh(&ar->data_lock);
+ }
+
+ ieee80211_queue_delayed_work(ar->hw, &ar->scan.timeout,
+ msecs_to_jiffies(scan_timeout));
+
+exit:
+ kfree(arg);
+
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static void ath10k_cancel_hw_scan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath10k *ar = hw->priv;
+
+ mutex_lock(&ar->conf_mutex);
+ ath10k_scan_abort(ar);
+ mutex_unlock(&ar->conf_mutex);
+
+ cancel_delayed_work_sync(&ar->scan.timeout);
+}
+
+static void ath10k_set_key_h_def_keyidx(struct ath10k *ar,
+ struct ath10k_vif *arvif,
+ enum set_key_cmd cmd,
+ struct ieee80211_key_conf *key)
+{
+ u32 vdev_param = arvif->ar->wmi.vdev_param->def_keyid;
+ int ret;
+
+ /* The 10.1 firmware branch requires the default key index to be set to
+ * the group key index after installing it. Otherwise the FW/HW transmits
+ * corrupted frames with multi-vif APs. This is not required for the main
+ * firmware branch (e.g. 636).
+ *
+ * This is also needed for 636 fw for IBSS-RSN to work more reliably.
+ *
+ * FIXME: It remains unknown if this is required for multi-vif STA
+ * interfaces on 10.1.
+ */
+
+ if (arvif->vdev_type != WMI_VDEV_TYPE_AP &&
+ arvif->vdev_type != WMI_VDEV_TYPE_IBSS)
+ return;
+
+ if (key->cipher == WLAN_CIPHER_SUITE_WEP40)
+ return;
+
+ if (key->cipher == WLAN_CIPHER_SUITE_WEP104)
+ return;
+
+ if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
+ return;
+
+ if (cmd != SET_KEY)
+ return;
+
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
+ key->keyidx);
+ if (ret)
+ ath10k_warn(ar, "failed to set vdev %i group key as default key: %d\n",
+ arvif->vdev_id, ret);
+}
+
+static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
+ struct ath10k_sta *arsta;
+ struct ath10k_peer *peer;
+ const u8 *peer_addr;
+ bool is_wep = key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
+ key->cipher == WLAN_CIPHER_SUITE_WEP104;
+ int ret = 0;
+ int ret2;
+ u32 flags = 0;
+ u32 flags2;
+
+ /* this one needs to be done in software */
+ if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
+ key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
+ key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256 ||
+ key->cipher == WLAN_CIPHER_SUITE_BIP_CMAC_256)
+ return 1;
+
+ if (arvif->nohwcrypt)
+ return 1;
+
+ if (key->keyidx > WMI_MAX_KEY_INDEX)
+ return -ENOSPC;
+
+ mutex_lock(&ar->conf_mutex);
+
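+ /* Pick the peer the key belongs to: the station itself for per-STA
+ * keys, the BSSID peer for keys on a client vdev, or our own address
+ * otherwise (e.g. AP group keys).
+ */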
+ if (sta) {
+ arsta = (struct ath10k_sta *)sta->drv_priv;
+ peer_addr = sta->addr;
+ spin_lock_bh(&ar->data_lock);
+ arsta->ucast_cipher = key->cipher;
+ spin_unlock_bh(&ar->data_lock);
+ } else if (arvif->vdev_type == WMI_VDEV_TYPE_STA) {
+ peer_addr = vif->bss_conf.bssid;
+ } else {
+ peer_addr = vif->addr;
+ }
+
+ key->hw_key_idx = key->keyidx;
+
+ if (is_wep) {
+ if (cmd == SET_KEY)
+ arvif->wep_keys[key->keyidx] = key;
+ else
+ arvif->wep_keys[key->keyidx] = NULL;
+ }
+
+ /* The peer should not disappear midway (unless the FW goes awry) since
+ * we already hold conf_mutex. We just make sure it's there now.
+ */
+ spin_lock_bh(&ar->data_lock);
+ peer = ath10k_peer_find(ar, arvif->vdev_id, peer_addr);
+ spin_unlock_bh(&ar->data_lock);
+
+ if (!peer) {
+ if (cmd == SET_KEY) {
+ ath10k_warn(ar, "failed to install key for non-existent peer %pM\n",
+ peer_addr);
+ ret = -EOPNOTSUPP;
+ goto exit;
+ } else {
+ /* if the peer doesn't exist there is no key to disable anymore */
+ goto exit;
+ }
+ }
+
+ if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
+ flags |= WMI_KEY_PAIRWISE;
+ else
+ flags |= WMI_KEY_GROUP;
+
+ if (is_wep) {
+ if (cmd == DISABLE_KEY)
+ ath10k_clear_vdev_key(arvif, key);
+
+ /* When WEP keys are uploaded it's possible that there are
+ * stations associated already (e.g. when merging) without any
+ * keys. Static WEP needs an explicit per-peer key upload.
+ */
+ if (vif->type == NL80211_IFTYPE_ADHOC &&
+ cmd == SET_KEY)
+ ath10k_mac_vif_update_wep_key(arvif, key);
+
+ /* 802.1x never sets the def_wep_key_idx so each set_key()
+ * call changes the default tx key.
+ *
+ * Static WEP sets def_wep_key_idx via .set_default_unicast_key
+ * after the first set_key().
+ */
+ if (cmd == SET_KEY && arvif->def_wep_key_idx == -1)
+ flags |= WMI_KEY_TX_USAGE;
+ }
+
+ ret = ath10k_install_key(arvif, key, cmd, peer_addr, flags);
+ if (ret) {
+ WARN_ON(ret > 0);
+ ath10k_warn(ar, "failed to install key for vdev %i peer %pM: %d\n",
+ arvif->vdev_id, peer_addr, ret);
+ goto exit;
+ }
+
+ /* mac80211 sets static WEP keys as groupwise while firmware requires
+ * them to be installed twice as both pairwise and groupwise.
+ */
+ if (is_wep && !sta && vif->type == NL80211_IFTYPE_STATION) {
+ flags2 = flags;
+ flags2 &= ~WMI_KEY_GROUP;
+ flags2 |= WMI_KEY_PAIRWISE;
+
+ ret = ath10k_install_key(arvif, key, cmd, peer_addr, flags2);
+ if (ret) {
+ WARN_ON(ret > 0);
+ ath10k_warn(ar, "failed to install (ucast) key for vdev %i peer %pM: %d\n",
+ arvif->vdev_id, peer_addr, ret);
+ ret2 = ath10k_install_key(arvif, key, DISABLE_KEY,
+ peer_addr, flags);
+ if (ret2) {
+ WARN_ON(ret2 > 0);
+ ath10k_warn(ar, "failed to disable (mcast) key for vdev %i peer %pM: %d\n",
+ arvif->vdev_id, peer_addr, ret2);
+ }
+ goto exit;
+ }
+ }
+
+ ath10k_set_key_h_def_keyidx(ar, arvif, cmd, key);
+
+ spin_lock_bh(&ar->data_lock);
+ peer = ath10k_peer_find(ar, arvif->vdev_id, peer_addr);
+ if (peer && cmd == SET_KEY)
+ peer->keys[key->keyidx] = key;
+ else if (peer && cmd == DISABLE_KEY)
+ peer->keys[key->keyidx] = NULL;
+ else if (peer == NULL)
+ /* impossible unless FW goes crazy */
+ ath10k_warn(ar, "Peer %pM disappeared!\n", peer_addr);
+ spin_unlock_bh(&ar->data_lock);
+
+ if (sta && sta->tdls)
+ ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
+ ar->wmi.peer_param->authorize, 1);
+ else if (sta && cmd == SET_KEY && (key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+ ath10k_wmi_peer_set_param(ar, arvif->vdev_id, peer_addr,
+ ar->wmi.peer_param->authorize, 1);
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static void ath10k_set_default_unicast_key(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ int keyidx)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
+ int ret;
+
+ mutex_lock(&arvif->ar->conf_mutex);
+
+ if (arvif->ar->state != ATH10K_STATE_ON)
+ goto unlock;
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d set keyidx %d\n",
+ arvif->vdev_id, keyidx);
+
+ ret = ath10k_wmi_vdev_set_param(arvif->ar,
+ arvif->vdev_id,
+ arvif->ar->wmi.vdev_param->def_keyid,
+ keyidx);
+
+ if (ret) {
+ ath10k_warn(ar, "failed to update wep key index for vdev %d: %d\n",
+ arvif->vdev_id,
+ ret);
+ goto unlock;
+ }
+
+ arvif->def_wep_key_idx = keyidx;
+
+unlock:
+ mutex_unlock(&arvif->ar->conf_mutex);
+}
+
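+/* Deferred worker for station rate-control updates. WMI commands may
+ * sleep, so changes recorded in atomic context by ath10k_sta_rc_update()
+ * are applied here under conf_mutex.
+ */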
+static void ath10k_sta_rc_update_wk(struct work_struct *wk)
+{
+ struct ath10k *ar;
+ struct ath10k_vif *arvif;
+ struct ath10k_sta *arsta;
+ struct ieee80211_sta *sta;
+ struct cfg80211_chan_def def;
+ enum nl80211_band band;
+ const u8 *ht_mcs_mask;
+ const u16 *vht_mcs_mask;
+ u32 changed, bw, nss, smps;
+ int err;
+
+ arsta = container_of(wk, struct ath10k_sta, update_wk);
+ sta = container_of((void *)arsta, struct ieee80211_sta, drv_priv);
+ arvif = arsta->arvif;
+ ar = arvif->ar;
+
+ if (WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def)))
+ return;
+
+ band = def.chan->band;
+ ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
+ vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
+
+ spin_lock_bh(&ar->data_lock);
+
+ changed = arsta->changed;
+ arsta->changed = 0;
+
+ bw = arsta->bw;
+ nss = arsta->nss;
+ smps = arsta->smps;
+
+ spin_unlock_bh(&ar->data_lock);
+
+ mutex_lock(&ar->conf_mutex);
+
+ nss = max_t(u32, 1, nss);
+ nss = min(nss, max(ath10k_mac_max_ht_nss(ht_mcs_mask),
+ ath10k_mac_max_vht_nss(vht_mcs_mask)));
+
+ if (changed & IEEE80211_RC_BW_CHANGED) {
+ enum wmi_phy_mode mode;
+
+ mode = chan_to_phymode(&def);
+ ath10k_dbg(ar, ATH10K_DBG_STA, "mac update sta %pM peer bw %d phymode %d\n",
+ sta->addr, bw, mode);
+
+ err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
+ ar->wmi.peer_param->phymode, mode);
+ if (err) {
+ ath10k_warn(ar, "failed to update STA %pM peer phymode %d: %d\n",
+ sta->addr, mode, err);
+ goto exit;
+ }
+
+ err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
+ ar->wmi.peer_param->chan_width, bw);
+ if (err)
+ ath10k_warn(ar, "failed to update STA %pM peer bw %d: %d\n",
+ sta->addr, bw, err);
+ }
+
+ if (changed & IEEE80211_RC_NSS_CHANGED) {
+ ath10k_dbg(ar, ATH10K_DBG_STA, "mac update sta %pM nss %d\n",
+ sta->addr, nss);
+
+ err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
+ ar->wmi.peer_param->nss, nss);
+ if (err)
+ ath10k_warn(ar, "failed to update STA %pM nss %d: %d\n",
+ sta->addr, nss, err);
+ }
+
+ if (changed & IEEE80211_RC_SMPS_CHANGED) {
+ ath10k_dbg(ar, ATH10K_DBG_STA, "mac update sta %pM smps %d\n",
+ sta->addr, smps);
+
+ err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
+ ar->wmi.peer_param->smps_state, smps);
+ if (err)
+ ath10k_warn(ar, "failed to update STA %pM smps %d: %d\n",
+ sta->addr, smps, err);
+ }
+
+ if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) {
+ ath10k_dbg(ar, ATH10K_DBG_STA, "mac update sta %pM supp rates\n",
+ sta->addr);
+
+ err = ath10k_station_assoc(ar, arvif->vif, sta, true);
+ if (err)
+ ath10k_warn(ar, "failed to reassociate station %pM: %d\n",
+ sta->addr, err);
+ }
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+}
+
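+/* Stations reached through a non-TDLS STA vdev (i.e. the AP we associate
+ * with) don't consume a firmware station slot, so they are excluded from
+ * the count.
+ */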
+static int ath10k_mac_inc_num_stations(struct ath10k_vif *arvif,
+ struct ieee80211_sta *sta)
+{
+ struct ath10k *ar = arvif->ar;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (arvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls)
+ return 0;
+
+ if (ar->num_stations >= ar->max_num_stations)
+ return -ENOBUFS;
+
+ ar->num_stations++;
+
+ return 0;
+}
+
+static void ath10k_mac_dec_num_stations(struct ath10k_vif *arvif,
+ struct ieee80211_sta *sta)
+{
+ struct ath10k *ar = arvif->ar;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (arvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls)
+ return;
+
+ ar->num_stations--;
+}
+
+static int ath10k_sta_set_txpwr(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
+ int ret = 0;
+ s16 txpwr;
+
+ if (sta->deflink.txpwr.type == NL80211_TX_POWER_AUTOMATIC) {
+ txpwr = 0;
+ } else {
+ txpwr = sta->deflink.txpwr.power;
+ if (!txpwr)
+ return -EINVAL;
+ }
+
+ if (txpwr > ATH10K_TX_POWER_MAX_VAL || txpwr < ATH10K_TX_POWER_MIN_VAL)
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+
+ ret = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
+ ar->wmi.peer_param->use_fixed_power, txpwr);
+ if (ret)
+ ath10k_warn(ar, "failed to set tx power for station %pM: %d\n",
+ sta->addr, ret);
+
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+struct ath10k_mac_iter_tid_conf_data {
+ struct ieee80211_vif *curr_vif;
+ struct ath10k *ar;
+ bool reset_config;
+};
+
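+/* Check whether the given bitrate mask selects exactly one rate across
+ * the legacy, HT and VHT tables; also report how many of the selected
+ * rates are VHT so callers can special-case peer fixed rates.
+ */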
+static bool
+ath10k_mac_bitrate_mask_has_single_rate(struct ath10k *ar,
+ enum nl80211_band band,
+ const struct cfg80211_bitrate_mask *mask,
+ int *vht_num_rates)
+{
+ int num_rates = 0;
+ int i, tmp;
+
+ num_rates += hweight32(mask->control[band].legacy);
+
+ for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++)
+ num_rates += hweight8(mask->control[band].ht_mcs[i]);
+
+ *vht_num_rates = 0;
+ for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) {
+ tmp = hweight16(mask->control[band].vht_mcs[i]);
+ num_rates += tmp;
+ *vht_num_rates += tmp;
+ }
+
+ return num_rates == 1;
+}
+
+static int
+ath10k_mac_bitrate_mask_get_single_rate(struct ath10k *ar,
+ enum nl80211_band band,
+ const struct cfg80211_bitrate_mask *mask,
+ u8 *rate, u8 *nss, bool vht_only)
+{
+ int rate_idx;
+ int i;
+ u16 bitrate;
+ u8 preamble;
+ u8 hw_rate;
+
+ if (vht_only)
+ goto next;
+
+ if (hweight32(mask->control[band].legacy) == 1) {
+ rate_idx = ffs(mask->control[band].legacy) - 1;
+
+ if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY)
+ rate_idx += ATH10K_MAC_FIRST_OFDM_RATE_IDX;
+
+ hw_rate = ath10k_wmi_legacy_rates[rate_idx].hw_value;
+ bitrate = ath10k_wmi_legacy_rates[rate_idx].bitrate;
+
+ if (ath10k_mac_bitrate_is_cck(bitrate))
+ preamble = WMI_RATE_PREAMBLE_CCK;
+ else
+ preamble = WMI_RATE_PREAMBLE_OFDM;
+
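+ /* Rate code layout: [7:6] preamble, [5:4] nss - 1,
+ * [3:0] hardware rate index.
+ */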
+ *nss = 1;
+ *rate = preamble << 6 |
+ (*nss - 1) << 4 |
+ hw_rate << 0;
+
+ return 0;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++) {
+ if (hweight8(mask->control[band].ht_mcs[i]) == 1) {
+ *nss = i + 1;
+ *rate = WMI_RATE_PREAMBLE_HT << 6 |
+ (*nss - 1) << 4 |
+ (ffs(mask->control[band].ht_mcs[i]) - 1);
+
+ return 0;
+ }
+ }
+
+next:
+ for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) {
+ if (hweight16(mask->control[band].vht_mcs[i]) == 1) {
+ *nss = i + 1;
+ *rate = WMI_RATE_PREAMBLE_VHT << 6 |
+ (*nss - 1) << 4 |
+ (ffs(mask->control[band].vht_mcs[i]) - 1);
+
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int ath10k_mac_validate_rate_mask(struct ath10k *ar,
+ struct ieee80211_sta *sta,
+ u32 rate_ctrl_flag, u8 nss)
+{
+ struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap;
+ struct ieee80211_sta_vht_cap *vht_cap = &sta->deflink.vht_cap;
+
+ if (nss > sta->deflink.rx_nss) {
+ ath10k_warn(ar, "Invalid nss field, configured %u limit %u\n",
+ nss, sta->deflink.rx_nss);
+ return -EINVAL;
+ }
+
+ if (ATH10K_HW_PREAMBLE(rate_ctrl_flag) == WMI_RATE_PREAMBLE_VHT) {
+ if (!vht_cap->vht_supported) {
+ ath10k_warn(ar, "Invalid VHT rate for sta %pM\n",
+ sta->addr);
+ return -EINVAL;
+ }
+ } else if (ATH10K_HW_PREAMBLE(rate_ctrl_flag) == WMI_RATE_PREAMBLE_HT) {
+ if (!ht_cap->ht_supported || vht_cap->vht_supported) {
+ ath10k_warn(ar, "Invalid HT rate for sta %pM\n",
+ sta->addr);
+ return -EINVAL;
+ }
+ } else {
+ if (ht_cap->ht_supported || vht_cap->vht_supported)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
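+/* Translate a per-TID tx bitrate setting from cfg80211 into a WMI rate
+ * control mode and rate code. Only single-rate masks can be programmed
+ * as fixed or upper-cap rates.
+ */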
+static int
+ath10k_mac_tid_bitrate_config(struct ath10k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ u32 *rate_ctrl_flag, u8 *rate_ctrl,
+ enum nl80211_tx_rate_setting txrate_type,
+ const struct cfg80211_bitrate_mask *mask)
+{
+ struct cfg80211_chan_def def;
+ enum nl80211_band band;
+ u8 nss, rate;
+ int vht_num_rates, ret;
+
+ if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
+ return -EINVAL;
+
+ if (txrate_type == NL80211_TX_RATE_AUTOMATIC) {
+ *rate_ctrl = WMI_TID_CONFIG_RATE_CONTROL_AUTO;
+ *rate_ctrl_flag = 0;
+ return 0;
+ }
+
+ band = def.chan->band;
+
+ if (!ath10k_mac_bitrate_mask_has_single_rate(ar, band, mask,
+ &vht_num_rates)) {
+ return -EINVAL;
+ }
+
+ ret = ath10k_mac_bitrate_mask_get_single_rate(ar, band, mask,
+ &rate, &nss, false);
+ if (ret) {
+ ath10k_warn(ar, "failed to get single rate: %d\n",
+ ret);
+ return ret;
+ }
+
+ *rate_ctrl_flag = rate;
+
+ if (sta && ath10k_mac_validate_rate_mask(ar, sta, *rate_ctrl_flag, nss))
+ return -EINVAL;
+
+ if (txrate_type == NL80211_TX_RATE_FIXED)
+ *rate_ctrl = WMI_TID_CONFIG_RATE_CONTROL_FIXED_RATE;
+ else if (txrate_type == NL80211_TX_RATE_LIMITED &&
+ (test_bit(WMI_SERVICE_EXT_PEER_TID_CONFIGS_SUPPORT,
+ ar->wmi.svc_map)))
+ *rate_ctrl = WMI_PEER_TID_CONFIG_RATE_UPPER_CAP;
+ else
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+static int ath10k_mac_set_tid_config(struct ath10k *ar, struct ieee80211_sta *sta,
+ struct ieee80211_vif *vif, u32 changed,
+ struct wmi_per_peer_per_tid_cfg_arg *arg)
+{
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
+ struct ath10k_sta *arsta;
+ int ret;
+
+ if (sta) {
+ if (!sta->wme)
+ return -EOPNOTSUPP;
+
+ arsta = (struct ath10k_sta *)sta->drv_priv;
+
+ if (changed & BIT(NL80211_TID_CONFIG_ATTR_NOACK)) {
+ if ((arsta->retry_long[arg->tid] > 0 ||
+ arsta->rate_code[arg->tid] > 0 ||
+ arsta->ampdu[arg->tid] ==
+ WMI_TID_CONFIG_AGGR_CONTROL_ENABLE) &&
+ arg->ack_policy == WMI_PEER_TID_CONFIG_NOACK) {
+ changed &= ~BIT(NL80211_TID_CONFIG_ATTR_NOACK);
+ arg->ack_policy = 0;
+ arg->aggr_control = 0;
+ arg->rate_ctrl = 0;
+ arg->rcode_flags = 0;
+ }
+ }
+
+ if (changed & BIT(NL80211_TID_CONFIG_ATTR_AMPDU_CTRL)) {
+ if (arsta->noack[arg->tid] == WMI_PEER_TID_CONFIG_NOACK ||
+ arvif->noack[arg->tid] == WMI_PEER_TID_CONFIG_NOACK) {
+ arg->aggr_control = 0;
+ changed &= ~BIT(NL80211_TID_CONFIG_ATTR_RETRY_LONG);
+ }
+ }
+
+ if (changed & (BIT(NL80211_TID_CONFIG_ATTR_TX_RATE) |
+ BIT(NL80211_TID_CONFIG_ATTR_TX_RATE_TYPE))) {
+ if (arsta->noack[arg->tid] == WMI_PEER_TID_CONFIG_NOACK ||
+ arvif->noack[arg->tid] == WMI_PEER_TID_CONFIG_NOACK) {
+ arg->rate_ctrl = 0;
+ arg->rcode_flags = 0;
+ }
+ }
+
+ ether_addr_copy(arg->peer_macaddr.addr, sta->addr);
+
+ ret = ath10k_wmi_set_per_peer_per_tid_cfg(ar, arg);
+ if (ret)
+ return ret;
+
+ /* Store the configured parameters in success case */
+ if (changed & BIT(NL80211_TID_CONFIG_ATTR_NOACK)) {
+ arsta->noack[arg->tid] = arg->ack_policy;
+ arg->ack_policy = 0;
+ arg->aggr_control = 0;
+ arg->rate_ctrl = 0;
+ arg->rcode_flags = 0;
+ }
+
+ if (changed & BIT(NL80211_TID_CONFIG_ATTR_RETRY_LONG)) {
+ arsta->retry_long[arg->tid] = arg->retry_count;
+ arg->retry_count = 0;
+ }
+
+ if (changed & BIT(NL80211_TID_CONFIG_ATTR_AMPDU_CTRL)) {
+ arsta->ampdu[arg->tid] = arg->aggr_control;
+ arg->aggr_control = 0;
+ }
+
+ if (changed & (BIT(NL80211_TID_CONFIG_ATTR_TX_RATE) |
+ BIT(NL80211_TID_CONFIG_ATTR_TX_RATE_TYPE))) {
+ arsta->rate_ctrl[arg->tid] = arg->rate_ctrl;
+ arg->rate_ctrl = 0;
+ arg->rcode_flags = 0;
+ }
+
+ if (changed & BIT(NL80211_TID_CONFIG_ATTR_RTSCTS_CTRL)) {
+ arsta->rtscts[arg->tid] = arg->rtscts_ctrl;
+ arg->ext_tid_cfg_bitmap = 0;
+ }
+ } else {
+ if (changed & BIT(NL80211_TID_CONFIG_ATTR_NOACK)) {
+ if ((arvif->retry_long[arg->tid] ||
+ arvif->rate_code[arg->tid] ||
+ arvif->ampdu[arg->tid] ==
+ WMI_TID_CONFIG_AGGR_CONTROL_ENABLE) &&
+ arg->ack_policy == WMI_PEER_TID_CONFIG_NOACK) {
+ changed &= ~BIT(NL80211_TID_CONFIG_ATTR_NOACK);
+ } else {
+ arvif->noack[arg->tid] = arg->ack_policy;
+ arvif->ampdu[arg->tid] = arg->aggr_control;
+ arvif->rate_ctrl[arg->tid] = arg->rate_ctrl;
+ }
+ }
+
+ if (changed & BIT(NL80211_TID_CONFIG_ATTR_RETRY_LONG)) {
+ if (arvif->noack[arg->tid] == WMI_PEER_TID_CONFIG_NOACK)
+ changed &= ~BIT(NL80211_TID_CONFIG_ATTR_RETRY_LONG);
+ else
+ arvif->retry_long[arg->tid] = arg->retry_count;
+ }
+
+ if (changed & BIT(NL80211_TID_CONFIG_ATTR_AMPDU_CTRL)) {
+ if (arvif->noack[arg->tid] == WMI_PEER_TID_CONFIG_NOACK)
+ changed &= ~BIT(NL80211_TID_CONFIG_ATTR_AMPDU_CTRL);
+ else
+ arvif->ampdu[arg->tid] = arg->aggr_control;
+ }
+
+ if (changed & (BIT(NL80211_TID_CONFIG_ATTR_TX_RATE) |
+ BIT(NL80211_TID_CONFIG_ATTR_TX_RATE_TYPE))) {
+ if (arvif->noack[arg->tid] == WMI_PEER_TID_CONFIG_NOACK) {
+ changed &= ~(BIT(NL80211_TID_CONFIG_ATTR_TX_RATE) |
+ BIT(NL80211_TID_CONFIG_ATTR_TX_RATE_TYPE));
+ } else {
+ arvif->rate_ctrl[arg->tid] = arg->rate_ctrl;
+ arvif->rate_code[arg->tid] = arg->rcode_flags;
+ }
+ }
+
+ if (changed & BIT(NL80211_TID_CONFIG_ATTR_RTSCTS_CTRL)) {
+ arvif->rtscts[arg->tid] = arg->rtscts_ctrl;
+ arg->ext_tid_cfg_bitmap = 0;
+ }
+
+ if (changed)
+ arvif->tid_conf_changed[arg->tid] |= changed;
+ }
+
+ return 0;
+}
+
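+/* Walk the TID bitmap of a cfg80211 TID config request and build/apply a
+ * WMI per-peer per-TID argument for each selected TID.
+ */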
+static int
+ath10k_mac_parse_tid_config(struct ath10k *ar,
+ struct ieee80211_sta *sta,
+ struct ieee80211_vif *vif,
+ struct cfg80211_tid_cfg *tid_conf,
+ struct wmi_per_peer_per_tid_cfg_arg *arg)
+{
+ u32 changed = tid_conf->mask;
+ int ret = 0, i = 0;
+
+ if (!changed)
+ return -EINVAL;
+
+ while (i < ATH10K_TID_MAX) {
+ if (!(tid_conf->tids & BIT(i))) {
+ i++;
+ continue;
+ }
+
+ arg->tid = i;
+
+ if (changed & BIT(NL80211_TID_CONFIG_ATTR_NOACK)) {
+ if (tid_conf->noack == NL80211_TID_CONFIG_ENABLE) {
+ arg->ack_policy = WMI_PEER_TID_CONFIG_NOACK;
+ arg->rate_ctrl =
+ WMI_TID_CONFIG_RATE_CONTROL_DEFAULT_LOWEST_RATE;
+ arg->aggr_control =
+ WMI_TID_CONFIG_AGGR_CONTROL_DISABLE;
+ } else {
+ arg->ack_policy =
+ WMI_PEER_TID_CONFIG_ACK;
+ arg->rate_ctrl =
+ WMI_TID_CONFIG_RATE_CONTROL_AUTO;
+ arg->aggr_control =
+ WMI_TID_CONFIG_AGGR_CONTROL_ENABLE;
+ }
+ }
+
+ if (changed & BIT(NL80211_TID_CONFIG_ATTR_RETRY_LONG))
+ arg->retry_count = tid_conf->retry_long;
+
+ if (changed & BIT(NL80211_TID_CONFIG_ATTR_AMPDU_CTRL)) {
+ if (tid_conf->noack == NL80211_TID_CONFIG_ENABLE)
+ arg->aggr_control = WMI_TID_CONFIG_AGGR_CONTROL_ENABLE;
+ else
+ arg->aggr_control = WMI_TID_CONFIG_AGGR_CONTROL_DISABLE;
+ }
+
+ if (changed & (BIT(NL80211_TID_CONFIG_ATTR_TX_RATE) |
+ BIT(NL80211_TID_CONFIG_ATTR_TX_RATE_TYPE))) {
+ ret = ath10k_mac_tid_bitrate_config(ar, vif, sta,
+ &arg->rcode_flags,
+ &arg->rate_ctrl,
+ tid_conf->txrate_type,
+ &tid_conf->txrate_mask);
+ if (ret) {
+ ath10k_warn(ar, "failed to configure bitrate mask %d\n",
+ ret);
+ arg->rcode_flags = 0;
+ arg->rate_ctrl = 0;
+ }
+ }
+
+ if (changed & BIT(NL80211_TID_CONFIG_ATTR_RTSCTS_CTRL)) {
+ if (tid_conf->rtscts)
+ arg->rtscts_ctrl = tid_conf->rtscts;
+
+ arg->ext_tid_cfg_bitmap = WMI_EXT_TID_RTS_CTS_CONFIG;
+ }
+
+ ret = ath10k_mac_set_tid_config(ar, sta, vif, changed, arg);
+ if (ret)
+ return ret;
+ i++;
+ }
+
+ return ret;
+}
+
+static int ath10k_mac_reset_tid_config(struct ath10k *ar,
+ struct ieee80211_sta *sta,
+ struct ath10k_vif *arvif,
+ u8 tids)
+{
+ struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+ struct wmi_per_peer_per_tid_cfg_arg arg;
+ int ret = 0, i = 0;
+
+ arg.vdev_id = arvif->vdev_id;
+ while (i < ATH10K_TID_MAX) {
+ if (!(tids & BIT(i))) {
+ i++;
+ continue;
+ }
+
+ arg.tid = i;
+ arg.ack_policy = WMI_PEER_TID_CONFIG_ACK;
+ arg.retry_count = ATH10K_MAX_RETRY_COUNT;
+ arg.rate_ctrl = WMI_TID_CONFIG_RATE_CONTROL_AUTO;
+ arg.aggr_control = WMI_TID_CONFIG_AGGR_CONTROL_ENABLE;
+ arg.rtscts_ctrl = WMI_TID_CONFIG_RTSCTS_CONTROL_ENABLE;
+ arg.ext_tid_cfg_bitmap = WMI_EXT_TID_RTS_CTS_CONFIG;
+
+ ether_addr_copy(arg.peer_macaddr.addr, sta->addr);
+
+ ret = ath10k_wmi_set_per_peer_per_tid_cfg(ar, &arg);
+ if (ret)
+ return ret;
+
+ if (!arvif->tids_rst) {
+ arsta->retry_long[i] = -1;
+ arsta->noack[i] = -1;
+ arsta->ampdu[i] = -1;
+ arsta->rate_code[i] = -1;
+ arsta->rate_ctrl[i] = 0;
+ arsta->rtscts[i] = -1;
+ } else {
+ arvif->retry_long[i] = 0;
+ arvif->noack[i] = 0;
+ arvif->ampdu[i] = 0;
+ arvif->rate_code[i] = 0;
+ arvif->rate_ctrl[i] = 0;
+ arvif->rtscts[i] = 0;
+ }
+
+ i++;
+ }
+
+ return ret;
+}
+
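+/* Worker that pushes vif-level TID configuration down to a station,
+ * skipping any TID for which the station already has its own override.
+ */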
+static void ath10k_sta_tid_cfg_wk(struct work_struct *wk)
+{
+ struct wmi_per_peer_per_tid_cfg_arg arg = {};
+ struct ieee80211_sta *sta;
+ struct ath10k_sta *arsta;
+ struct ath10k_vif *arvif;
+ struct ath10k *ar;
+ bool config_apply;
+ int ret, i;
+ u32 changed;
+ u8 nss;
+
+ arsta = container_of(wk, struct ath10k_sta, tid_config_wk);
+ sta = container_of((void *)arsta, struct ieee80211_sta, drv_priv);
+ arvif = arsta->arvif;
+ ar = arvif->ar;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (arvif->tids_rst) {
+ ret = ath10k_mac_reset_tid_config(ar, sta, arvif,
+ arvif->tids_rst);
+ goto exit;
+ }
+
+ ether_addr_copy(arg.peer_macaddr.addr, sta->addr);
+
+ for (i = 0; i < ATH10K_TID_MAX; i++) {
+ config_apply = false;
+ changed = arvif->tid_conf_changed[i];
+
+ if (changed & BIT(NL80211_TID_CONFIG_ATTR_NOACK)) {
+ if (arsta->noack[i] != -1) {
+ arg.ack_policy = 0;
+ } else {
+ config_apply = true;
+ arg.ack_policy = arvif->noack[i];
+ arg.aggr_control = arvif->ampdu[i];
+ arg.rate_ctrl = arvif->rate_ctrl[i];
+ }
+ }
+
+ if (changed & BIT(NL80211_TID_CONFIG_ATTR_RETRY_LONG)) {
+ if (arsta->retry_long[i] != -1 ||
+ arsta->noack[i] == WMI_PEER_TID_CONFIG_NOACK ||
+ arvif->noack[i] == WMI_PEER_TID_CONFIG_NOACK) {
+ arg.retry_count = 0;
+ } else {
+ arg.retry_count = arvif->retry_long[i];
+ config_apply = true;
+ }
+ }
+
+ if (changed & BIT(NL80211_TID_CONFIG_ATTR_AMPDU_CTRL)) {
+ if (arsta->ampdu[i] != -1 ||
+ arsta->noack[i] == WMI_PEER_TID_CONFIG_NOACK ||
+ arvif->noack[i] == WMI_PEER_TID_CONFIG_NOACK) {
+ arg.aggr_control = 0;
+ } else {
+ arg.aggr_control = arvif->ampdu[i];
+ config_apply = true;
+ }
+ }
+
+ if (changed & (BIT(NL80211_TID_CONFIG_ATTR_TX_RATE) |
+ BIT(NL80211_TID_CONFIG_ATTR_TX_RATE_TYPE))) {
+ nss = ATH10K_HW_NSS(arvif->rate_code[i]);
+ ret = ath10k_mac_validate_rate_mask(ar, sta,
+ arvif->rate_code[i],
+ nss);
+ if (ret &&
+ arvif->rate_ctrl[i] > WMI_TID_CONFIG_RATE_CONTROL_AUTO) {
+ arg.rate_ctrl = 0;
+ arg.rcode_flags = 0;
+ }
+
+ if (arsta->rate_ctrl[i] >
+ WMI_TID_CONFIG_RATE_CONTROL_AUTO ||
+ arsta->noack[i] == WMI_PEER_TID_CONFIG_NOACK ||
+ arvif->noack[i] == WMI_PEER_TID_CONFIG_NOACK) {
+ arg.rate_ctrl = 0;
+ arg.rcode_flags = 0;
+ } else {
+ arg.rate_ctrl = arvif->rate_ctrl[i];
+ arg.rcode_flags = arvif->rate_code[i];
+ config_apply = true;
+ }
+ }
+
+ if (changed & BIT(NL80211_TID_CONFIG_ATTR_RTSCTS_CTRL)) {
+ if (arsta->rtscts[i]) {
+ arg.rtscts_ctrl = 0;
+ arg.ext_tid_cfg_bitmap = 0;
+ } else {
+ arg.rtscts_ctrl = arvif->rtscts[i] - 1;
+ arg.ext_tid_cfg_bitmap =
+ WMI_EXT_TID_RTS_CTS_CONFIG;
+ config_apply = true;
+ }
+ }
+
+ arg.tid = i;
+
+ if (config_apply) {
+ ret = ath10k_wmi_set_per_peer_per_tid_cfg(ar, &arg);
+ if (ret)
+ ath10k_warn(ar, "failed to set per tid config for sta %pM: %d\n",
+ sta->addr, ret);
+ }
+
+ arg.ack_policy = 0;
+ arg.retry_count = 0;
+ arg.aggr_control = 0;
+ arg.rate_ctrl = 0;
+ arg.rcode_flags = 0;
+ }
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+}
+
+static void ath10k_mac_vif_stations_tid_conf(void *data,
+ struct ieee80211_sta *sta)
+{
+ struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+ struct ath10k_mac_iter_tid_conf_data *iter_data = data;
+ struct ieee80211_vif *sta_vif = arsta->arvif->vif;
+
+ if (sta_vif != iter_data->curr_vif || !sta->wme)
+ return;
+
+ ieee80211_queue_work(iter_data->ar->hw, &arsta->tid_config_wk);
+}
+
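+/* mac80211 station state machine hook. Firmware peers are created and
+ * deleted on the NOTEXIST<->NONE transitions; (dis)association and TDLS
+ * peer state updates happen on the AUTH/ASSOC/AUTHORIZED transitions.
+ */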
+static int ath10k_sta_state(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ enum ieee80211_sta_state old_state,
+ enum ieee80211_sta_state new_state)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
+ struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+ struct ath10k_peer *peer;
+ int ret = 0;
+ int i;
+
+ if (old_state == IEEE80211_STA_NOTEXIST &&
+ new_state == IEEE80211_STA_NONE) {
+ memset(arsta, 0, sizeof(*arsta));
+ arsta->arvif = arvif;
+ arsta->peer_ps_state = WMI_PEER_PS_STATE_DISABLED;
+ INIT_WORK(&arsta->update_wk, ath10k_sta_rc_update_wk);
+ INIT_WORK(&arsta->tid_config_wk, ath10k_sta_tid_cfg_wk);
+
+ for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
+ ath10k_mac_txq_init(sta->txq[i]);
+ }
+
+ /* cancel must be done outside the mutex to avoid deadlock */
+ if ((old_state == IEEE80211_STA_NONE &&
+ new_state == IEEE80211_STA_NOTEXIST)) {
+ cancel_work_sync(&arsta->update_wk);
+ cancel_work_sync(&arsta->tid_config_wk);
+ }
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (old_state == IEEE80211_STA_NOTEXIST &&
+ new_state == IEEE80211_STA_NONE) {
+ /*
+ * New station addition.
+ */
+ enum wmi_peer_type peer_type = WMI_PEER_TYPE_DEFAULT;
+ u32 num_tdls_stations;
+
+ ath10k_dbg(ar, ATH10K_DBG_STA,
+ "mac vdev %d peer create %pM (new sta) sta %d / %d peer %d / %d\n",
+ arvif->vdev_id, sta->addr,
+ ar->num_stations + 1, ar->max_num_stations,
+ ar->num_peers + 1, ar->max_num_peers);
+
+ num_tdls_stations = ath10k_mac_tdls_vif_stations_count(hw, vif);
+
+ if (sta->tdls) {
+ if (num_tdls_stations >= ar->max_num_tdls_vdevs) {
+ ath10k_warn(ar, "vdev %i exceeded maximum number of tdls vdevs %i\n",
+ arvif->vdev_id,
+ ar->max_num_tdls_vdevs);
+ ret = -ELNRNG;
+ goto exit;
+ }
+ peer_type = WMI_PEER_TYPE_TDLS;
+ }
+
+ ret = ath10k_mac_inc_num_stations(arvif, sta);
+ if (ret) {
+ ath10k_warn(ar, "refusing to associate station: too many connected already (%d)\n",
+ ar->max_num_stations);
+ goto exit;
+ }
+
+ if (ath10k_debug_is_extd_tx_stats_enabled(ar)) {
+ arsta->tx_stats = kzalloc(sizeof(*arsta->tx_stats),
+ GFP_KERNEL);
+ if (!arsta->tx_stats) {
+ ath10k_mac_dec_num_stations(arvif, sta);
+ ret = -ENOMEM;
+ goto exit;
+ }
+ }
+
+ ret = ath10k_peer_create(ar, vif, sta, arvif->vdev_id,
+ sta->addr, peer_type);
+ if (ret) {
+ ath10k_warn(ar, "failed to add peer %pM for vdev %d when adding a new sta: %i\n",
+ sta->addr, arvif->vdev_id, ret);
+ ath10k_mac_dec_num_stations(arvif, sta);
+ kfree(arsta->tx_stats);
+ goto exit;
+ }
+
+ spin_lock_bh(&ar->data_lock);
+
+ peer = ath10k_peer_find(ar, arvif->vdev_id, sta->addr);
+ if (!peer) {
+ ath10k_warn(ar, "failed to lookup peer %pM on vdev %i\n",
+ sta->addr, arvif->vdev_id);
+ spin_unlock_bh(&ar->data_lock);
+ ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
+ ath10k_mac_dec_num_stations(arvif, sta);
+ kfree(arsta->tx_stats);
+ ret = -ENOENT;
+ goto exit;
+ }
+
+ arsta->peer_id = find_first_bit(peer->peer_ids,
+ ATH10K_MAX_NUM_PEER_IDS);
+
+ spin_unlock_bh(&ar->data_lock);
+
+ if (!sta->tdls)
+ goto exit;
+
+ ret = ath10k_wmi_update_fw_tdls_state(ar, arvif->vdev_id,
+ WMI_TDLS_ENABLE_ACTIVE);
+ if (ret) {
+ ath10k_warn(ar, "failed to update fw tdls state on vdev %i: %i\n",
+ arvif->vdev_id, ret);
+ ath10k_peer_delete(ar, arvif->vdev_id,
+ sta->addr);
+ ath10k_mac_dec_num_stations(arvif, sta);
+ kfree(arsta->tx_stats);
+ goto exit;
+ }
+
+ ret = ath10k_mac_tdls_peer_update(ar, arvif->vdev_id, sta,
+ WMI_TDLS_PEER_STATE_PEERING);
+ if (ret) {
+ ath10k_warn(ar,
+ "failed to update tdls peer %pM for vdev %d when adding a new sta: %i\n",
+ sta->addr, arvif->vdev_id, ret);
+ ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
+ ath10k_mac_dec_num_stations(arvif, sta);
+ kfree(arsta->tx_stats);
+
+ if (num_tdls_stations != 0)
+ goto exit;
+ ath10k_wmi_update_fw_tdls_state(ar, arvif->vdev_id,
+ WMI_TDLS_DISABLE);
+ }
+ } else if ((old_state == IEEE80211_STA_NONE &&
+ new_state == IEEE80211_STA_NOTEXIST)) {
+ /*
+ * Existing station deletion.
+ */
+ ath10k_dbg(ar, ATH10K_DBG_STA,
+ "mac vdev %d peer delete %pM sta %p (sta gone)\n",
+ arvif->vdev_id, sta->addr, sta);
+
+ if (sta->tdls) {
+ ret = ath10k_mac_tdls_peer_update(ar, arvif->vdev_id,
+ sta,
+ WMI_TDLS_PEER_STATE_TEARDOWN);
+ if (ret)
+ ath10k_warn(ar, "failed to update tdls peer state for %pM state %d: %i\n",
+ sta->addr,
+ WMI_TDLS_PEER_STATE_TEARDOWN, ret);
+ }
+
+ ret = ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
+ if (ret)
+ ath10k_warn(ar, "failed to delete peer %pM for vdev %d: %i\n",
+ sta->addr, arvif->vdev_id, ret);
+
+ ath10k_mac_dec_num_stations(arvif, sta);
+
+ spin_lock_bh(&ar->data_lock);
+ for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) {
+ peer = ar->peer_map[i];
+ if (!peer)
+ continue;
+
+ if (peer->sta == sta) {
+ ath10k_warn(ar, "found sta peer %pM (ptr %p id %d) entry on vdev %i after it was supposedly removed\n",
+ sta->addr, peer, i, arvif->vdev_id);
+ peer->sta = NULL;
+
+ /* Clean up the peer object as well since we
+ * must have failed to do this above.
+ */
+ ath10k_peer_map_cleanup(ar, peer);
+ }
+ }
+ spin_unlock_bh(&ar->data_lock);
+
+ if (ath10k_debug_is_extd_tx_stats_enabled(ar)) {
+ kfree(arsta->tx_stats);
+ arsta->tx_stats = NULL;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
+ ath10k_mac_txq_unref(ar, sta->txq[i]);
+
+ if (!sta->tdls)
+ goto exit;
+
+ if (ath10k_mac_tdls_vif_stations_count(hw, vif))
+ goto exit;
+
+ /* This was the last tdls peer in current vif */
+ ret = ath10k_wmi_update_fw_tdls_state(ar, arvif->vdev_id,
+ WMI_TDLS_DISABLE);
+ if (ret) {
+ ath10k_warn(ar, "failed to update fw tdls state on vdev %i: %i\n",
+ arvif->vdev_id, ret);
+ }
+ } else if (old_state == IEEE80211_STA_AUTH &&
+ new_state == IEEE80211_STA_ASSOC &&
+ (vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_MESH_POINT ||
+ vif->type == NL80211_IFTYPE_ADHOC)) {
+ /*
+ * New association.
+ */
+ ath10k_dbg(ar, ATH10K_DBG_STA, "mac sta %pM associated\n",
+ sta->addr);
+
+ ret = ath10k_station_assoc(ar, vif, sta, false);
+ if (ret)
+ ath10k_warn(ar, "failed to associate station %pM for vdev %i: %i\n",
+ sta->addr, arvif->vdev_id, ret);
+ } else if (old_state == IEEE80211_STA_ASSOC &&
+ new_state == IEEE80211_STA_AUTHORIZED &&
+ sta->tdls) {
+ /*
+ * Tdls station authorized.
+ */
+ ath10k_dbg(ar, ATH10K_DBG_STA, "mac tdls sta %pM authorized\n",
+ sta->addr);
+
+ ret = ath10k_station_assoc(ar, vif, sta, false);
+ if (ret) {
+ ath10k_warn(ar, "failed to associate tdls station %pM for vdev %i: %i\n",
+ sta->addr, arvif->vdev_id, ret);
+ goto exit;
+ }
+
+ ret = ath10k_mac_tdls_peer_update(ar, arvif->vdev_id, sta,
+ WMI_TDLS_PEER_STATE_CONNECTED);
+ if (ret)
+ ath10k_warn(ar, "failed to update tdls peer %pM for vdev %i: %i\n",
+ sta->addr, arvif->vdev_id, ret);
+ } else if (old_state == IEEE80211_STA_ASSOC &&
+ new_state == IEEE80211_STA_AUTH &&
+ (vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_MESH_POINT ||
+ vif->type == NL80211_IFTYPE_ADHOC)) {
+ /*
+ * Disassociation.
+ */
+ ath10k_dbg(ar, ATH10K_DBG_STA, "mac sta %pM disassociated\n",
+ sta->addr);
+
+ ret = ath10k_station_disassoc(ar, vif, sta);
+ if (ret)
+ ath10k_warn(ar, "failed to disassociate station: %pM vdev %i: %i\n",
+ sta->addr, arvif->vdev_id, ret);
+ }
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
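+/* Map an access category to its WMI U-APSD delivery/trigger flags and
+ * reprogram the power-save parameters of a station vdev accordingly.
+ */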
+static int ath10k_conf_tx_uapsd(struct ath10k *ar, struct ieee80211_vif *vif,
+ u16 ac, bool enable)
+{
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
+ struct wmi_sta_uapsd_auto_trig_arg arg = {};
+ u32 prio = 0, acc = 0;
+ u32 value = 0;
+ int ret = 0;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (arvif->vdev_type != WMI_VDEV_TYPE_STA)
+ return 0;
+
+ switch (ac) {
+ case IEEE80211_AC_VO:
+ value = WMI_STA_PS_UAPSD_AC3_DELIVERY_EN |
+ WMI_STA_PS_UAPSD_AC3_TRIGGER_EN;
+ prio = 7;
+ acc = 3;
+ break;
+ case IEEE80211_AC_VI:
+ value = WMI_STA_PS_UAPSD_AC2_DELIVERY_EN |
+ WMI_STA_PS_UAPSD_AC2_TRIGGER_EN;
+ prio = 5;
+ acc = 2;
+ break;
+ case IEEE80211_AC_BE:
+ value = WMI_STA_PS_UAPSD_AC1_DELIVERY_EN |
+ WMI_STA_PS_UAPSD_AC1_TRIGGER_EN;
+ prio = 2;
+ acc = 1;
+ break;
+ case IEEE80211_AC_BK:
+ value = WMI_STA_PS_UAPSD_AC0_DELIVERY_EN |
+ WMI_STA_PS_UAPSD_AC0_TRIGGER_EN;
+ prio = 0;
+ acc = 0;
+ break;
+ }
+
+ if (enable)
+ arvif->u.sta.uapsd |= value;
+ else
+ arvif->u.sta.uapsd &= ~value;
+
+ ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
+ WMI_STA_PS_PARAM_UAPSD,
+ arvif->u.sta.uapsd);
+ if (ret) {
+ ath10k_warn(ar, "failed to set uapsd params: %d\n", ret);
+ goto exit;
+ }
+
+ if (arvif->u.sta.uapsd)
+ value = WMI_STA_PS_RX_WAKE_POLICY_POLL_UAPSD;
+ else
+ value = WMI_STA_PS_RX_WAKE_POLICY_WAKE;
+
+ ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
+ WMI_STA_PS_PARAM_RX_WAKE_POLICY,
+ value);
+ if (ret)
+ ath10k_warn(ar, "failed to set rx wake param: %d\n", ret);
+
+ ret = ath10k_mac_vif_recalc_ps_wake_threshold(arvif);
+ if (ret) {
+ ath10k_warn(ar, "failed to recalc ps wake threshold on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ ret = ath10k_mac_vif_recalc_ps_poll_count(arvif);
+ if (ret) {
+ ath10k_warn(ar, "failed to recalc ps poll count on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ if (test_bit(WMI_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG, ar->wmi.svc_map) ||
+ test_bit(WMI_SERVICE_STA_UAPSD_VAR_AUTO_TRIG, ar->wmi.svc_map)) {
+ /* Only userspace can make an educated decision when to send
+ * a trigger frame. The following effectively disables u-UAPSD
+ * autotrigger in firmware (which is enabled by default
+ * provided the autotrigger service is available).
+ */
+
+ arg.wmm_ac = acc;
+ arg.user_priority = prio;
+ arg.service_interval = 0;
+ arg.suspend_interval = WMI_STA_UAPSD_MAX_INTERVAL_MSEC;
+ arg.delay_interval = WMI_STA_UAPSD_MAX_INTERVAL_MSEC;
+
+ ret = ath10k_wmi_vdev_sta_uapsd(ar, arvif->vdev_id,
+ arvif->bssid, &arg, 1);
+ if (ret) {
+ ath10k_warn(ar, "failed to set uapsd auto trigger %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+exit:
+ return ret;
+}
+
+static int ath10k_conf_tx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ unsigned int link_id, u16 ac,
+ const struct ieee80211_tx_queue_params *params)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
+ struct wmi_wmm_params_arg *p = NULL;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ switch (ac) {
+ case IEEE80211_AC_VO:
+ p = &arvif->wmm_params.ac_vo;
+ break;
+ case IEEE80211_AC_VI:
+ p = &arvif->wmm_params.ac_vi;
+ break;
+ case IEEE80211_AC_BE:
+ p = &arvif->wmm_params.ac_be;
+ break;
+ case IEEE80211_AC_BK:
+ p = &arvif->wmm_params.ac_bk;
+ break;
+ }
+
+ if (WARN_ON(!p)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ p->cwmin = params->cw_min;
+ p->cwmax = params->cw_max;
+ p->aifs = params->aifs;
+
+ /*
+ * The channel time duration programmed in the HW is in absolute
+ * microseconds, while mac80211 gives the txop in units of
+ * 32 microseconds.
+ */
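+ /* e.g. txop = 94 from mac80211 becomes 94 * 32 = 3008 us in HW */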
+ p->txop = params->txop * 32;
+
+ if (ar->wmi.ops->gen_vdev_wmm_conf) {
+ ret = ath10k_wmi_vdev_wmm_conf(ar, arvif->vdev_id,
+ &arvif->wmm_params);
+ if (ret) {
+ ath10k_warn(ar, "failed to set vdev wmm params on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ goto exit;
+ }
+ } else {
+ /* This won't work well with multi-interface cases but it's
+ * better than nothing.
+ */
+ ret = ath10k_wmi_pdev_set_wmm_params(ar, &arvif->wmm_params);
+ if (ret) {
+ ath10k_warn(ar, "failed to set wmm params: %d\n", ret);
+ goto exit;
+ }
+ }
+
+ ret = ath10k_conf_tx_uapsd(ar, vif, ac, params->uapsd);
+ if (ret)
+ ath10k_warn(ar, "failed to set sta uapsd: %d\n", ret);
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static int ath10k_remain_on_channel(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_channel *chan,
+ int duration,
+ enum ieee80211_roc_type type)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
+ struct wmi_start_scan_arg *arg = NULL;
+ int ret = 0;
+ u32 scan_time_msec;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ath10k_mac_tdls_vif_stations_count(hw, vif) > 0) {
+ ret = -EBUSY;
+ goto exit;
+ }
+
+ spin_lock_bh(&ar->data_lock);
+ switch (ar->scan.state) {
+ case ATH10K_SCAN_IDLE:
+ reinit_completion(&ar->scan.started);
+ reinit_completion(&ar->scan.completed);
+ reinit_completion(&ar->scan.on_channel);
+ ar->scan.state = ATH10K_SCAN_STARTING;
+ ar->scan.is_roc = true;
+ ar->scan.vdev_id = arvif->vdev_id;
+ ar->scan.roc_freq = chan->center_freq;
+ ar->scan.roc_notify = true;
+ ret = 0;
+ break;
+ case ATH10K_SCAN_STARTING:
+ case ATH10K_SCAN_RUNNING:
+ case ATH10K_SCAN_ABORTING:
+ ret = -EBUSY;
+ break;
+ }
+ spin_unlock_bh(&ar->data_lock);
+
+ if (ret)
+ goto exit;
+
+ scan_time_msec = ar->hw->wiphy->max_remain_on_channel_duration * 2;
+
+ arg = kzalloc(sizeof(*arg), GFP_KERNEL);
+ if (!arg) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ ath10k_wmi_start_scan_init(ar, arg);
+ arg->vdev_id = arvif->vdev_id;
+ arg->scan_id = ATH10K_SCAN_ID;
+ arg->n_channels = 1;
+ arg->channels[0] = chan->center_freq;
+ arg->dwell_time_active = scan_time_msec;
+ arg->dwell_time_passive = scan_time_msec;
+ arg->max_scan_time = scan_time_msec;
+ arg->scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE;
+ arg->scan_ctrl_flags |= WMI_SCAN_FILTER_PROBE_REQ;
+ arg->burst_duration_ms = duration;
+
+ ret = ath10k_start_scan(ar, arg);
+ if (ret) {
+ ath10k_warn(ar, "failed to start roc scan: %d\n", ret);
+ spin_lock_bh(&ar->data_lock);
+ ar->scan.state = ATH10K_SCAN_IDLE;
+ spin_unlock_bh(&ar->data_lock);
+ goto exit;
+ }
+
+ ret = wait_for_completion_timeout(&ar->scan.on_channel, 3 * HZ);
+ if (ret == 0) {
+ ath10k_warn(ar, "failed to switch to channel for roc scan\n");
+
+ ret = ath10k_scan_stop(ar);
+ if (ret)
+ ath10k_warn(ar, "failed to stop scan: %d\n", ret);
+
+ ret = -ETIMEDOUT;
+ goto exit;
+ }
+
+ ieee80211_queue_delayed_work(ar->hw, &ar->scan.timeout,
+ msecs_to_jiffies(duration));
+
+ ret = 0;
+exit:
+ kfree(arg);
+
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static int ath10k_cancel_remain_on_channel(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath10k *ar = hw->priv;
+
+ mutex_lock(&ar->conf_mutex);
+
+ spin_lock_bh(&ar->data_lock);
+ ar->scan.roc_notify = false;
+ spin_unlock_bh(&ar->data_lock);
+
+ ath10k_scan_abort(ar);
+
+ mutex_unlock(&ar->conf_mutex);
+
+ cancel_delayed_work_sync(&ar->scan.timeout);
+
+ return 0;
+}
+
+/*
+ * Both RTS and Fragmentation threshold are interface-specific
+ * in ath10k, but device-specific in mac80211.
+ */
+
+static int ath10k_set_rts_threshold(struct ieee80211_hw *hw, int radio_idx,
+ u32 value)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_vif *arvif;
+ int ret = 0;
+
+ mutex_lock(&ar->conf_mutex);
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d rts threshold %d\n",
+ arvif->vdev_id, value);
+
+ ret = ath10k_mac_set_rts(arvif, value);
+ if (ret) {
+ ath10k_warn(ar, "failed to set rts threshold for vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ break;
+ }
+ }
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static int ath10k_mac_op_set_frag_threshold(struct ieee80211_hw *hw,
+ int radio_idx, u32 value)
+{
+ /* Even though there's a WMI enum for fragmentation threshold, no known
+ * firmware actually implements it. Moreover it is not possible to
+ * delegate frame fragmentation to mac80211 because firmware clears the
+ * "more fragments" bit in frame control, making it impossible for remote
+ * devices to reassemble frames.
+ *
+ * Hence implement a dummy callback just to say fragmentation isn't
+ * supported. This effectively prevents mac80211 from doing frame
+ * fragmentation in software.
+ */
+ return -EOPNOTSUPP;
+}
+
+void ath10k_mac_wait_tx_complete(struct ath10k *ar)
+{
+ bool skip;
+ long time_left;
+
+ /* mac80211 doesn't care whether we really transmit queued frames or
+ * not; we'll collect those frames either way if we stop/delete vdevs.
+ */
+
+ if (ar->state == ATH10K_STATE_WEDGED)
+ return;
+
+ time_left = wait_event_timeout(ar->htt.empty_tx_wq, ({
+ bool empty;
+
+ spin_lock_bh(&ar->htt.tx_lock);
+ empty = (ar->htt.num_pending_tx == 0);
+ spin_unlock_bh(&ar->htt.tx_lock);
+
+ skip = (ar->state == ATH10K_STATE_WEDGED) ||
+ test_bit(ATH10K_FLAG_CRASH_FLUSH,
+ &ar->dev_flags);
+
+ (empty || skip);
+ }), ATH10K_FLUSH_TIMEOUT_HZ);
+
+ if (time_left == 0 || skip)
+ ath10k_warn(ar, "failed to flush transmit queue (skip %i ar-state %i): %ld\n",
+ skip, ar->state, time_left);
+}
+
+static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u32 queues, bool drop)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_vif *arvif;
+ u32 bitmap;
+
+ if (drop) {
+ if (vif && vif->type == NL80211_IFTYPE_STATION) {
+ bitmap = ~(1 << WMI_MGMT_TID);
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ if (arvif->vdev_type == WMI_VDEV_TYPE_STA)
+ ath10k_wmi_peer_flush(ar, arvif->vdev_id,
+ arvif->bssid, bitmap);
+ }
+ ath10k_htt_flush_tx(&ar->htt);
+ }
+ return;
+ }
+
+ mutex_lock(&ar->conf_mutex);
+ ath10k_mac_wait_tx_complete(ar);
+ mutex_unlock(&ar->conf_mutex);
+}
+
+/* TODO: Implement this function properly
+ * For now it is needed to reply to Probe Requests in IBSS mode.
+ * Probably we need this information from FW.
+ */
+static int ath10k_tx_last_beacon(struct ieee80211_hw *hw)
+{
+ return 1;
+}
+
+static void ath10k_reconfig_complete(struct ieee80211_hw *hw,
+ enum ieee80211_reconfig_type reconfig_type)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_vif *arvif;
+
+ if (reconfig_type != IEEE80211_RECONFIG_TYPE_RESTART)
+ return;
+
+ mutex_lock(&ar->conf_mutex);
+
+ /* If device failed to restart it will be in a different state, e.g.
+ * ATH10K_STATE_WEDGED
+ */
+ if (ar->state == ATH10K_STATE_RESTARTED) {
+ ath10k_info(ar, "device successfully recovered\n");
+ ar->state = ATH10K_STATE_ON;
+ ieee80211_wake_queues(ar->hw);
+
+ /* Clear recovery state. */
+ complete(&ar->driver_recovery);
+ atomic_set(&ar->fail_cont_count, 0);
+ atomic_set(&ar->pending_recovery, 0);
+
+ if (ar->hw_params.hw_restart_disconnect) {
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ if (arvif->is_up && arvif->vdev_type == WMI_VDEV_TYPE_STA)
+ ieee80211_hw_restart_disconnect(arvif->vif);
+ }
+ }
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+}
+
+static void
+ath10k_mac_update_bss_chan_survey(struct ath10k *ar,
+ struct ieee80211_channel *channel)
+{
+ int ret;
+ enum wmi_bss_survey_req_type type = WMI_BSS_SURVEY_REQ_TYPE_READ;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (!test_bit(WMI_SERVICE_BSS_CHANNEL_INFO_64, ar->wmi.svc_map) ||
+ (ar->rx_channel != channel))
+ return;
+
+ if (ar->scan.state != ATH10K_SCAN_IDLE) {
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "ignoring bss chan info request while scanning..\n");
+ return;
+ }
+
+ reinit_completion(&ar->bss_survey_done);
+
+ ret = ath10k_wmi_pdev_bss_chan_info_request(ar, type);
+ if (ret) {
+ ath10k_warn(ar, "failed to send pdev bss chan info request\n");
+ return;
+ }
+
+ ret = wait_for_completion_timeout(&ar->bss_survey_done, 3 * HZ);
+ if (!ret) {
+ ath10k_warn(ar, "bss channel survey timed out\n");
+ return;
+ }
+}
+
+static int ath10k_get_survey(struct ieee80211_hw *hw, int idx,
+ struct survey_info *survey)
+{
+ struct ath10k *ar = hw->priv;
+ struct ieee80211_supported_band *sband;
+ struct survey_info *ar_survey = &ar->survey[idx];
+ int ret = 0;
+
+ mutex_lock(&ar->conf_mutex);
+
+ sband = hw->wiphy->bands[NL80211_BAND_2GHZ];
+ if (sband && idx >= sband->n_channels) {
+ idx -= sband->n_channels;
+ sband = NULL;
+ }
+
+ if (!sband)
+ sband = hw->wiphy->bands[NL80211_BAND_5GHZ];
+
+ if (!sband || idx >= sband->n_channels) {
+ ret = -ENOENT;
+ goto exit;
+ }
+
+ ath10k_mac_update_bss_chan_survey(ar, &sband->channels[idx]);
+
+ spin_lock_bh(&ar->data_lock);
+ memcpy(survey, ar_survey, sizeof(*survey));
+ spin_unlock_bh(&ar->data_lock);
+
+ survey->channel = &sband->channels[idx];
+
+ if (ar->rx_channel == survey->channel)
+ survey->filled |= SURVEY_INFO_IN_USE;
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
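+/* Check whether the bitrate mask enables all rates for exactly the first
+ * N spatial streams (and nothing else); if so the firmware can be given a
+ * fixed nss instead of a fixed rate.
+ */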
+static bool
+ath10k_mac_bitrate_mask_get_single_nss(struct ath10k *ar,
+ enum nl80211_band band,
+ const struct cfg80211_bitrate_mask *mask,
+ int *nss)
+{
+ struct ieee80211_supported_band *sband = &ar->mac.sbands[band];
+ u16 vht_mcs_map = le16_to_cpu(sband->vht_cap.vht_mcs.tx_mcs_map);
+ u8 ht_nss_mask = 0;
+ u8 vht_nss_mask = 0;
+ int i;
+
+ if (mask->control[band].legacy)
+ return false;
+
+ for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++) {
+ if (mask->control[band].ht_mcs[i] == 0)
+ continue;
+ else if (mask->control[band].ht_mcs[i] ==
+ sband->ht_cap.mcs.rx_mask[i])
+ ht_nss_mask |= BIT(i);
+ else
+ return false;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) {
+ if (mask->control[band].vht_mcs[i] == 0)
+ continue;
+ else if (mask->control[band].vht_mcs[i] ==
+ ath10k_mac_get_max_vht_mcs_map(vht_mcs_map, i))
+ vht_nss_mask |= BIT(i);
+ else
+ return false;
+ }
+
+ if (ht_nss_mask != vht_nss_mask)
+ return false;
+
+ if (ht_nss_mask == 0)
+ return false;
+
+ if (BIT(fls(ht_nss_mask)) - 1 != ht_nss_mask)
+ return false;
+
+ *nss = fls(ht_nss_mask);
+
+ return true;
+}
+
+static int ath10k_mac_set_fixed_rate_params(struct ath10k_vif *arvif,
+ u8 rate, u8 nss, u8 sgi, u8 ldpc)
+{
+ struct ath10k *ar = arvif->ar;
+ u32 vdev_param;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac set fixed rate params vdev %i rate 0x%02x nss %u sgi %u\n",
+ arvif->vdev_id, rate, nss, sgi);
+
+ vdev_param = ar->wmi.vdev_param->fixed_rate;
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, rate);
+ if (ret) {
+ ath10k_warn(ar, "failed to set fixed rate param 0x%02x: %d\n",
+ rate, ret);
+ return ret;
+ }
+
+ vdev_param = ar->wmi.vdev_param->nss;
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, nss);
+ if (ret) {
+ ath10k_warn(ar, "failed to set nss param %d: %d\n", nss, ret);
+ return ret;
+ }
+
+ vdev_param = ar->wmi.vdev_param->sgi;
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, sgi);
+ if (ret) {
+ ath10k_warn(ar, "failed to set sgi param %d: %d\n", sgi, ret);
+ return ret;
+ }
+
+ vdev_param = ar->wmi.vdev_param->ldpc;
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, ldpc);
+ if (ret) {
+ ath10k_warn(ar, "failed to set ldpc param %d: %d\n", ldpc, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static bool
+ath10k_mac_can_set_bitrate_mask(struct ath10k *ar,
+ enum nl80211_band band,
+ const struct cfg80211_bitrate_mask *mask,
+ bool allow_pfr)
+{
+ int i;
+ u16 vht_mcs;
+
+ /* Due to firmware limitation in WMI_PEER_ASSOC_CMDID it is impossible
+ * to express all VHT MCS rate masks. Effectively only the following
+ * ranges can be used: none, 0-7, 0-8 and 0-9.
+ */
+ for (i = 0; i < NL80211_VHT_NSS_MAX; i++) {
+ vht_mcs = mask->control[band].vht_mcs[i];
+
+ switch (vht_mcs) {
+ case 0:
+ case BIT(8) - 1:
+ case BIT(9) - 1:
+ case BIT(10) - 1:
+ break;
+ default:
+ if (!allow_pfr)
+ ath10k_warn(ar, "refusing bitrate mask with missing 0-7 VHT MCS rates\n");
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static bool ath10k_mac_set_vht_bitrate_mask_fixup(struct ath10k *ar,
+ struct ath10k_vif *arvif,
+ struct ieee80211_sta *sta)
+{
+ int err;
+ u8 rate = arvif->vht_pfr;
+
+ /* skip non vht and multiple rate peers */
+ if (!sta->deflink.vht_cap.vht_supported || arvif->vht_num_rates != 1)
+ return false;
+
+ err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
+ WMI_PEER_PARAM_FIXED_RATE, rate);
+ if (err)
+ ath10k_warn(ar, "failed to enable STA %pM peer fixed rate: %d\n",
+ sta->addr, err);
+
+ return true;
+}
+
+static void ath10k_mac_set_bitrate_mask_iter(void *data,
+ struct ieee80211_sta *sta)
+{
+ struct ath10k_vif *arvif = data;
+ struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+ struct ath10k *ar = arvif->ar;
+
+ if (arsta->arvif != arvif)
+ return;
+
+ if (ath10k_mac_set_vht_bitrate_mask_fixup(ar, arvif, sta))
+ return;
+
+ spin_lock_bh(&ar->data_lock);
+ arsta->changed |= IEEE80211_RC_SUPP_RATES_CHANGED;
+ spin_unlock_bh(&ar->data_lock);
+
+ ieee80211_queue_work(ar->hw, &arsta->update_wk);
+}
+
+static void ath10k_mac_clr_bitrate_mask_iter(void *data,
+ struct ieee80211_sta *sta)
+{
+ struct ath10k_vif *arvif = data;
+ struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+ struct ath10k *ar = arvif->ar;
+ int err;
+
+ /* clear vht peers only */
+ if (arsta->arvif != arvif || !sta->deflink.vht_cap.vht_supported)
+ return;
+
+ err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
+ WMI_PEER_PARAM_FIXED_RATE,
+ WMI_FIXED_RATE_NONE);
+ if (err)
+ ath10k_warn(ar, "failed to clear STA %pM peer fixed rate: %d\n",
+ sta->addr, err);
+}
+
+static int ath10k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ const struct cfg80211_bitrate_mask *mask)
+{
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
+ struct cfg80211_chan_def def;
+ struct ath10k *ar = arvif->ar;
+ enum nl80211_band band;
+ const u8 *ht_mcs_mask;
+ const u16 *vht_mcs_mask;
+ u8 rate;
+ u8 nss;
+ u8 sgi;
+ u8 ldpc;
+ int single_nss;
+ int ret;
+ int vht_num_rates, allow_pfr;
+ u8 vht_pfr;
+ bool update_bitrate_mask = true;
+
+ if (ath10k_mac_vif_chan(vif, &def))
+ return -EPERM;
+
+ band = def.chan->band;
+ ht_mcs_mask = mask->control[band].ht_mcs;
+ vht_mcs_mask = mask->control[band].vht_mcs;
+ ldpc = !!(ar->ht_cap_info & WMI_HT_CAP_LDPC);
+
+ sgi = mask->control[band].gi;
+ if (sgi == NL80211_TXRATE_FORCE_LGI)
+ return -EINVAL;
+
+ allow_pfr = test_bit(ATH10K_FW_FEATURE_PEER_FIXED_RATE,
+ ar->normal_mode_fw.fw_file.fw_features);
+ if (allow_pfr) {
+ mutex_lock(&ar->conf_mutex);
+ ieee80211_iterate_stations_atomic(ar->hw,
+ ath10k_mac_clr_bitrate_mask_iter,
+ arvif);
+ mutex_unlock(&ar->conf_mutex);
+ }
+
+ if (ath10k_mac_bitrate_mask_has_single_rate(ar, band, mask,
+ &vht_num_rates)) {
+ ret = ath10k_mac_bitrate_mask_get_single_rate(ar, band, mask,
+ &rate, &nss,
+ false);
+ if (ret) {
+ ath10k_warn(ar, "failed to get single rate for vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ } else if (ath10k_mac_bitrate_mask_get_single_nss(ar, band, mask,
+ &single_nss)) {
+ rate = WMI_FIXED_RATE_NONE;
+ nss = single_nss;
+ } else {
+ rate = WMI_FIXED_RATE_NONE;
+ nss = min(ar->num_rf_chains,
+ max(ath10k_mac_max_ht_nss(ht_mcs_mask),
+ ath10k_mac_max_vht_nss(vht_mcs_mask)));
+
+ if (!ath10k_mac_can_set_bitrate_mask(ar, band, mask,
+ allow_pfr)) {
+ u8 vht_nss;
+
+ if (!allow_pfr || vht_num_rates != 1)
+ return -EINVAL;
+
+ /* If we reach here, the firmware supports peer fixed rate and
+ * the mask contains a single VHT rate. Don't update the vif
+ * bitrate_mask, as the rate applies only to a specific peer.
+ */
+ ath10k_mac_bitrate_mask_get_single_rate(ar, band, mask,
+ &vht_pfr,
+ &vht_nss,
+ true);
+ update_bitrate_mask = false;
+ } else {
+ vht_pfr = 0;
+ }
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (update_bitrate_mask)
+ arvif->bitrate_mask = *mask;
+ arvif->vht_num_rates = vht_num_rates;
+ arvif->vht_pfr = vht_pfr;
+ ieee80211_iterate_stations_atomic(ar->hw,
+ ath10k_mac_set_bitrate_mask_iter,
+ arvif);
+
+ mutex_unlock(&ar->conf_mutex);
+ }
+
+ mutex_lock(&ar->conf_mutex);
+
+ ret = ath10k_mac_set_fixed_rate_params(arvif, rate, nss, sgi, ldpc);
+ if (ret)
+ ath10k_warn(ar, "failed to set fixed rate params on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
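+/* Called by mac80211 in atomic context when a station's bandwidth, nss or
+ * SMPS mode changes. Record the new values under data_lock and defer the
+ * actual WMI updates to ath10k_sta_rc_update_wk().
+ */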
+static void ath10k_sta_rc_update(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_link_sta *link_sta,
+ u32 changed)
+{
+ struct ieee80211_sta *sta = link_sta->sta;
+ struct ath10k *ar = hw->priv;
+ struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
+ struct ath10k_peer *peer;
+ u32 bw, smps;
+
+ spin_lock_bh(&ar->data_lock);
+
+ peer = ath10k_peer_find(ar, arvif->vdev_id, sta->addr);
+ if (!peer) {
+ spin_unlock_bh(&ar->data_lock);
+ ath10k_warn(ar, "mac sta rc update failed to find peer %pM on vdev %i\n",
+ sta->addr, arvif->vdev_id);
+ return;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_STA,
+ "mac sta rc update for %pM changed %08x bw %d nss %d smps %d\n",
+ sta->addr, changed, sta->deflink.bandwidth,
+ sta->deflink.rx_nss,
+ sta->deflink.smps_mode);
+
+ if (changed & IEEE80211_RC_BW_CHANGED) {
+ bw = WMI_PEER_CHWIDTH_20MHZ;
+
+ switch (sta->deflink.bandwidth) {
+ case IEEE80211_STA_RX_BW_20:
+ bw = WMI_PEER_CHWIDTH_20MHZ;
+ break;
+ case IEEE80211_STA_RX_BW_40:
+ bw = WMI_PEER_CHWIDTH_40MHZ;
+ break;
+ case IEEE80211_STA_RX_BW_80:
+ bw = WMI_PEER_CHWIDTH_80MHZ;
+ break;
+ case IEEE80211_STA_RX_BW_160:
+ bw = WMI_PEER_CHWIDTH_160MHZ;
+ break;
+ default:
+ ath10k_warn(ar, "Invalid bandwidth %d in rc update for %pM\n",
+ sta->deflink.bandwidth, sta->addr);
+ bw = WMI_PEER_CHWIDTH_20MHZ;
+ break;
+ }
+
+ arsta->bw = bw;
+ }
+
+ if (changed & IEEE80211_RC_NSS_CHANGED)
+ arsta->nss = sta->deflink.rx_nss;
+
+ if (changed & IEEE80211_RC_SMPS_CHANGED) {
+ smps = WMI_PEER_SMPS_PS_NONE;
+
+ switch (sta->deflink.smps_mode) {
+ case IEEE80211_SMPS_AUTOMATIC:
+ case IEEE80211_SMPS_OFF:
+ smps = WMI_PEER_SMPS_PS_NONE;
+ break;
+ case IEEE80211_SMPS_STATIC:
+ smps = WMI_PEER_SMPS_STATIC;
+ break;
+ case IEEE80211_SMPS_DYNAMIC:
+ smps = WMI_PEER_SMPS_DYNAMIC;
+ break;
+ case IEEE80211_SMPS_NUM_MODES:
+ ath10k_warn(ar, "Invalid smps %d in sta rc update for %pM\n",
+ sta->deflink.smps_mode, sta->addr);
+ smps = WMI_PEER_SMPS_PS_NONE;
+ break;
+ }
+
+ arsta->smps = smps;
+ }
+
+ arsta->changed |= changed;
+
+ spin_unlock_bh(&ar->data_lock);
+
+ ieee80211_queue_work(hw, &arsta->update_wk);
+}
+
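+/* Firmware exposes separate vdev parameters for incrementing and
+ * decrementing the TSF, so choose one based on the sign of the offset.
+ */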
+static void ath10k_offset_tsf(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, s64 tsf_offset)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
+ u32 offset, vdev_param;
+ int ret;
+
+ if (tsf_offset < 0) {
+ vdev_param = ar->wmi.vdev_param->dec_tsf;
+ offset = -tsf_offset;
+ } else {
+ vdev_param = ar->wmi.vdev_param->inc_tsf;
+ offset = tsf_offset;
+ }
+
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
+ vdev_param, offset);
+
+ if (ret && ret != -EOPNOTSUPP)
+ ath10k_warn(ar, "failed to set tsf offset %d cmd %d: %d\n",
+ offset, vdev_param, ret);
+}
+
+static int ath10k_ampdu_action(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_ampdu_params *params)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
+ struct ieee80211_sta *sta = params->sta;
+ enum ieee80211_ampdu_mlme_action action = params->action;
+ u16 tid = params->tid;
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac ampdu vdev_id %i sta %pM tid %u action %d\n",
+ arvif->vdev_id, sta->addr, tid, action);
+
+ switch (action) {
+ case IEEE80211_AMPDU_RX_START:
+ case IEEE80211_AMPDU_RX_STOP:
+ /* HTT AddBa/DelBa events trigger mac80211 Rx BA session
+ * creation/removal. Do we need to verify this?
+ */
+ return 0;
+ case IEEE80211_AMPDU_TX_START:
+ case IEEE80211_AMPDU_TX_STOP_CONT:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
+ case IEEE80211_AMPDU_TX_OPERATIONAL:
+ /* Firmware offloads Tx aggregation entirely so deny mac80211
+ * Tx aggregation requests.
+ */
+ return -EOPNOTSUPP;
+ }
+
+ return -EINVAL;
+}
+
+static void
+ath10k_mac_update_rx_channel(struct ath10k *ar,
+ struct ieee80211_chanctx_conf *ctx,
+ struct ieee80211_vif_chanctx_switch *vifs,
+ int n_vifs)
+{
+ struct cfg80211_chan_def *def = NULL;
+
+ /* Both locks are required because ar->rx_channel is modified. This
+ * allows readers to hold either lock.
+ */
+ lockdep_assert_held(&ar->conf_mutex);
+ lockdep_assert_held(&ar->data_lock);
+
+ WARN_ON(ctx && vifs);
+ WARN_ON(vifs && !n_vifs);
+
+ /* FIXME: Sort of an optimization and a workaround. Peers and vifs are
+ * on a linked list now. Doing a lookup peer -> vif -> chanctx for each
+ * ppdu on Rx may reduce performance on low-end systems. It should be
+ * possible to make tables/hashmaps to speed the lookup up (be wary of
+ * cpu data cache lines though regarding sizes) but to keep the initial
+ * implementation simple and less intrusive fall back to the slow lookup
+ * only for multi-channel cases. Single-channel cases will keep using the
+ * old channel derivation and thus performance should not be affected
+ * much.
+ */
+ rcu_read_lock();
+ if (!ctx && ath10k_mac_num_chanctxs(ar) == 1) {
+ ieee80211_iter_chan_contexts_atomic(ar->hw,
+ ath10k_mac_get_any_chandef_iter,
+ &def);
+
+ if (vifs)
+ def = &vifs[0].new_ctx->def;
+
+ ar->rx_channel = def->chan;
+ } else if ((ctx && ath10k_mac_num_chanctxs(ar) == 0) ||
+ (ctx && (ar->state == ATH10K_STATE_RESTARTED))) {
+ /* During a driver restart due to a firmware assert, mac80211
+ * already has a valid channel context for the given radio, so the
+ * channel context iteration returns num_chanctx > 0. Fix up
+ * rx_channel here while the restart is in progress.
+ */
+ ar->rx_channel = ctx->def.chan;
+ } else {
+ ar->rx_channel = NULL;
+ }
+ rcu_read_unlock();
+}
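+/* The double-lock rule above gives readers a choice: since every writer of
+ * ar->rx_channel holds both ar->conf_mutex and ar->data_lock, a reader
+ * holding either one observes a stable value. Illustrative (hypothetical)
+ * reader:
+ *
+ *   spin_lock_bh(&ar->data_lock);
+ *   ch = ar->rx_channel;
+ *   spin_unlock_bh(&ar->data_lock);
+ */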
+
+static void
+ath10k_mac_update_vif_chan(struct ath10k *ar,
+ struct ieee80211_vif_chanctx_switch *vifs,
+ int n_vifs)
+{
+ struct ath10k_vif *arvif;
+ int ret;
+ int i;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ /* First stop monitor interface. Some FW versions crash if there's a
+ * lone monitor interface.
+ */
+ if (ar->monitor_started)
+ ath10k_monitor_stop(ar);
+
+ for (i = 0; i < n_vifs; i++) {
+ arvif = (void *)vifs[i].vif->drv_priv;
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac chanctx switch vdev_id %i freq %u->%u width %d->%d\n",
+ arvif->vdev_id,
+ vifs[i].old_ctx->def.chan->center_freq,
+ vifs[i].new_ctx->def.chan->center_freq,
+ vifs[i].old_ctx->def.width,
+ vifs[i].new_ctx->def.width);
+
+ if (WARN_ON(!arvif->is_started))
+ continue;
+
+ if (WARN_ON(!arvif->is_up))
+ continue;
+
+ ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
+ if (ret) {
+ ath10k_warn(ar, "failed to down vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ continue;
+ }
+ }
+
+ /* All relevant vdevs are downed and associated channel resources
+ * should be available for the channel switch now.
+ */
+
+ spin_lock_bh(&ar->data_lock);
+ ath10k_mac_update_rx_channel(ar, NULL, vifs, n_vifs);
+ spin_unlock_bh(&ar->data_lock);
+
+ for (i = 0; i < n_vifs; i++) {
+ arvif = (void *)vifs[i].vif->drv_priv;
+
+ if (WARN_ON(!arvif->is_started))
+ continue;
+
+ if (WARN_ON(!arvif->is_up))
+ continue;
+
+ ret = ath10k_mac_setup_bcn_tmpl(arvif);
+ if (ret)
+ ath10k_warn(ar, "failed to update bcn tmpl during csa: %d\n",
+ ret);
+
+ ret = ath10k_mac_setup_prb_tmpl(arvif);
+ if (ret)
+ ath10k_warn(ar, "failed to update prb tmpl during csa: %d\n",
+ ret);
+
+ ret = ath10k_vdev_restart(arvif, &vifs[i].new_ctx->def);
+ if (ret) {
+ ath10k_warn(ar, "failed to restart vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ continue;
+ }
+
+ ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
+ arvif->bssid);
+ if (ret) {
+ ath10k_warn(ar, "failed to bring vdev up %d: %d\n",
+ arvif->vdev_id, ret);
+ continue;
+ }
+ }
+
+ ath10k_monitor_recalc(ar);
+}
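+/* For reference, the switch above is strictly ordered: stop the monitor
+ * vdev, bring all affected vdevs down, update ar->rx_channel under
+ * data_lock, then per vdev refresh the beacon/probe templates, restart the
+ * vdev on the new chandef and bring it back up again.
+ */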
+
+static int
+ath10k_mac_op_add_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct ath10k *ar = hw->priv;
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac chanctx add freq %u width %d ptr %p\n",
+ ctx->def.chan->center_freq, ctx->def.width, ctx);
+
+ mutex_lock(&ar->conf_mutex);
+
+ spin_lock_bh(&ar->data_lock);
+ ath10k_mac_update_rx_channel(ar, ctx, NULL, 0);
+ spin_unlock_bh(&ar->data_lock);
+
+ ath10k_recalc_radar_detection(ar);
+ ath10k_monitor_recalc(ar);
+
+ mutex_unlock(&ar->conf_mutex);
+
+ return 0;
+}
+
+static void
+ath10k_mac_op_remove_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct ath10k *ar = hw->priv;
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac chanctx remove freq %u width %d ptr %p\n",
+ ctx->def.chan->center_freq, ctx->def.width, ctx);
+
+ mutex_lock(&ar->conf_mutex);
+
+ spin_lock_bh(&ar->data_lock);
+ ath10k_mac_update_rx_channel(ar, NULL, NULL, 0);
+ spin_unlock_bh(&ar->data_lock);
+
+ ath10k_recalc_radar_detection(ar);
+ ath10k_monitor_recalc(ar);
+
+ mutex_unlock(&ar->conf_mutex);
+}
+
+struct ath10k_mac_change_chanctx_arg {
+ struct ieee80211_chanctx_conf *ctx;
+ struct ieee80211_vif_chanctx_switch *vifs;
+ int n_vifs;
+ int next_vif;
+};
+
+static void
+ath10k_mac_change_chanctx_cnt_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct ath10k_mac_change_chanctx_arg *arg = data;
+
+ if (rcu_access_pointer(vif->bss_conf.chanctx_conf) != arg->ctx)
+ return;
+
+ arg->n_vifs++;
+}
+
+static void
+ath10k_mac_change_chanctx_fill_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct ath10k_mac_change_chanctx_arg *arg = data;
+ struct ieee80211_chanctx_conf *ctx;
+
+ ctx = rcu_access_pointer(vif->bss_conf.chanctx_conf);
+ if (ctx != arg->ctx)
+ return;
+
+ if (WARN_ON(arg->next_vif == arg->n_vifs))
+ return;
+
+ arg->vifs[arg->next_vif].vif = vif;
+ arg->vifs[arg->next_vif].old_ctx = ctx;
+ arg->vifs[arg->next_vif].new_ctx = ctx;
+ arg->next_vif++;
+}
+
+static void
+ath10k_mac_op_change_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx,
+ u32 changed)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_mac_change_chanctx_arg arg = { .ctx = ctx };
+
+ mutex_lock(&ar->conf_mutex);
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac chanctx change freq %u width %d ptr %p changed %x\n",
+ ctx->def.chan->center_freq, ctx->def.width, ctx, changed);
+
+ /* This shouldn't really happen because channel switching should use
+ * switch_vif_chanctx().
+ */
+ if (WARN_ON(changed & IEEE80211_CHANCTX_CHANGE_CHANNEL))
+ goto unlock;
+
+ if (changed & IEEE80211_CHANCTX_CHANGE_WIDTH) {
+ ieee80211_iterate_active_interfaces_atomic(
+ hw,
+ ATH10K_ITER_NORMAL_FLAGS,
+ ath10k_mac_change_chanctx_cnt_iter,
+ &arg);
+ if (arg.n_vifs == 0)
+ goto radar;
+
+ arg.vifs = kcalloc(arg.n_vifs, sizeof(arg.vifs[0]),
+ GFP_KERNEL);
+ if (!arg.vifs)
+ goto radar;
+
+ ieee80211_iterate_active_interfaces_atomic(
+ hw,
+ ATH10K_ITER_NORMAL_FLAGS,
+ ath10k_mac_change_chanctx_fill_iter,
+ &arg);
+ ath10k_mac_update_vif_chan(ar, arg.vifs, arg.n_vifs);
+ kfree(arg.vifs);
+ }
+
+radar:
+ ath10k_recalc_radar_detection(ar);
+
+ /* FIXME: How to configure Rx chains properly? */
+
+ /* No other actions are actually necessary. Firmware maintains channel
+ * definitions per vdev internally and there's no host-side channel
+ * context abstraction to configure, e.g. channel width.
+ */
+
+unlock:
+ mutex_unlock(&ar->conf_mutex);
+}
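+/* The WIDTH path above uses a common two-pass iterator pattern: a first
+ * atomic pass only counts matching vifs so that the switch array can be
+ * allocated with GFP_KERNEL outside the atomic iteration, and a second
+ * pass fills it in. The WARN_ON() in the fill iterator guards against the
+ * interface list growing between the two passes.
+ */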
+
+static int
+ath10k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac chanctx assign ptr %p vdev_id %i\n",
+ ctx, arvif->vdev_id);
+
+ if (WARN_ON(arvif->is_started)) {
+ mutex_unlock(&ar->conf_mutex);
+ return -EBUSY;
+ }
+
+ ret = ath10k_vdev_start(arvif, &ctx->def);
+ if (ret) {
+ ath10k_warn(ar, "failed to start vdev %i addr %pM on freq %d: %d\n",
+ arvif->vdev_id, vif->addr,
+ ctx->def.chan->center_freq, ret);
+ goto err;
+ }
+
+ arvif->is_started = true;
+
+ ret = ath10k_mac_vif_setup_ps(arvif);
+ if (ret) {
+ ath10k_warn(ar, "failed to update vdev %i ps: %d\n",
+ arvif->vdev_id, ret);
+ goto err_stop;
+ }
+
+ if (vif->type == NL80211_IFTYPE_MONITOR) {
+ ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, 0, vif->addr);
+ if (ret) {
+ ath10k_warn(ar, "failed to up monitor vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ goto err_stop;
+ }
+
+ arvif->is_up = true;
+ }
+
+ if (ath10k_mac_can_set_cts_prot(arvif)) {
+ ret = ath10k_mac_set_cts_prot(arvif);
+ if (ret)
+ ath10k_warn(ar, "failed to set cts protection for vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ }
+
+ if (ath10k_peer_stats_enabled(ar) &&
+ ar->hw_params.tx_stats_over_pktlog) {
+ ar->pktlog_filter |= ATH10K_PKTLOG_PEER_STATS;
+ ret = ath10k_wmi_pdev_pktlog_enable(ar,
+ ar->pktlog_filter);
+ if (ret) {
+ ath10k_warn(ar, "failed to enable pktlog %d\n", ret);
+ goto err_stop;
+ }
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+ return 0;
+
+err_stop:
+ ath10k_vdev_stop(arvif);
+ arvif->is_started = false;
+ ath10k_mac_vif_setup_ps(arvif);
+
+err:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static void
+ath10k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac chanctx unassign ptr %p vdev_id %i\n",
+ ctx, arvif->vdev_id);
+
+ WARN_ON(!arvif->is_started);
+
+ if (vif->type == NL80211_IFTYPE_MONITOR) {
+ WARN_ON(!arvif->is_up);
+
+ ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
+ if (ret)
+ ath10k_warn(ar, "failed to down monitor vdev %i: %d\n",
+ arvif->vdev_id, ret);
+
+ arvif->is_up = false;
+ }
+
+ ret = ath10k_vdev_stop(arvif);
+ if (ret)
+ ath10k_warn(ar, "failed to stop vdev %i: %d\n",
+ arvif->vdev_id, ret);
+
+ arvif->is_started = false;
+
+ mutex_unlock(&ar->conf_mutex);
+}
+
+static int
+ath10k_mac_op_switch_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif_chanctx_switch *vifs,
+ int n_vifs,
+ enum ieee80211_chanctx_switch_mode mode)
+{
+ struct ath10k *ar = hw->priv;
+
+ mutex_lock(&ar->conf_mutex);
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac chanctx switch n_vifs %d mode %d\n",
+ n_vifs, mode);
+ ath10k_mac_update_vif_chan(ar, vifs, n_vifs);
+
+ mutex_unlock(&ar->conf_mutex);
+ return 0;
+}
+
+static void ath10k_mac_op_sta_pre_rcu_remove(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct ath10k *ar;
+ struct ath10k_peer *peer;
+
+ ar = hw->priv;
+
+ list_for_each_entry(peer, &ar->peers, list)
+ if (peer->sta == sta)
+ peer->removed = true;
+}
+
+/* HT MCS parameters with Nss = 1 */
+static const struct ath10k_index_ht_data_rate_type supported_ht_mcs_rate_nss1[] = {
+ /* MCS L20 L40 S20 S40 */
+ {0, { 65, 135, 72, 150} },
+ {1, { 130, 270, 144, 300} },
+ {2, { 195, 405, 217, 450} },
+ {3, { 260, 540, 289, 600} },
+ {4, { 390, 810, 433, 900} },
+ {5, { 520, 1080, 578, 1200} },
+ {6, { 585, 1215, 650, 1350} },
+ {7, { 650, 1350, 722, 1500} }
+};
+
+/* HT MCS parameters with Nss = 2 */
+static const struct ath10k_index_ht_data_rate_type supported_ht_mcs_rate_nss2[] = {
+ /* MCS L20 L40 S20 S40 */
+ {0, {130, 270, 144, 300} },
+ {1, {260, 540, 289, 600} },
+ {2, {390, 810, 433, 900} },
+ {3, {520, 1080, 578, 1200} },
+ {4, {780, 1620, 867, 1800} },
+ {5, {1040, 2160, 1156, 2400} },
+ {6, {1170, 2430, 1300, 2700} },
+ {7, {1300, 2700, 1444, 3000} }
+};
+
+/* VHT MCS parameters with Nss = 1 */
+static const struct ath10k_index_vht_data_rate_type supported_vht_mcs_rate_nss1[] = {
+ /* MCS L80 S80 L40 S40 L20 S20 */
+ {0, {293, 325}, {135, 150}, {65, 72} },
+ {1, {585, 650}, {270, 300}, {130, 144} },
+ {2, {878, 975}, {405, 450}, {195, 217} },
+ {3, {1170, 1300}, {540, 600}, {260, 289} },
+ {4, {1755, 1950}, {810, 900}, {390, 433} },
+ {5, {2340, 2600}, {1080, 1200}, {520, 578} },
+ {6, {2633, 2925}, {1215, 1350}, {585, 650} },
+ {7, {2925, 3250}, {1350, 1500}, {650, 722} },
+ {8, {3510, 3900}, {1620, 1800}, {780, 867} },
+ {9, {3900, 4333}, {1800, 2000}, {865, 960} }
+};
+
+/* VHT MCS parameters with Nss = 2 */
+static const struct ath10k_index_vht_data_rate_type supported_vht_mcs_rate_nss2[] = {
+ /* MCS L80 S80 L40 S40 L20 S20 */
+ {0, {585, 650}, {270, 300}, {130, 144} },
+ {1, {1170, 1300}, {540, 600}, {260, 289} },
+ {2, {1755, 1950}, {810, 900}, {390, 433} },
+ {3, {2340, 2600}, {1080, 1200}, {520, 578} },
+ {4, {3510, 3900}, {1620, 1800}, {780, 867} },
+ {5, {4680, 5200}, {2160, 2400}, {1040, 1156} },
+ {6, {5265, 5850}, {2430, 2700}, {1170, 1300} },
+ {7, {5850, 6500}, {2700, 3000}, {1300, 1444} },
+ {8, {7020, 7800}, {3240, 3600}, {1560, 1733} },
+ {9, {7800, 8667}, {3600, 4000}, {1730, 1920} }
+};
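+/* All rates in the tables above are in units of 100 kbps, matching the
+ * bitrate_kbps / 100 values they are compared against below. E.g. HT MCS7,
+ * Nss = 1, short GI, 40 MHz is listed as 1500 (150.0 Mbps) and VHT MCS9,
+ * Nss = 2, short GI, 80 MHz as 8667 (866.7 Mbps).
+ */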
+
+static void ath10k_mac_get_rate_flags_ht(struct ath10k *ar, u32 rate, u8 nss, u8 mcs,
+ u8 *flags, u8 *bw)
+{
+ struct ath10k_index_ht_data_rate_type *mcs_rate;
+ u8 index;
+ size_t len_nss1 = ARRAY_SIZE(supported_ht_mcs_rate_nss1);
+ size_t len_nss2 = ARRAY_SIZE(supported_ht_mcs_rate_nss2);
+
+ if (mcs >= (len_nss1 + len_nss2)) {
+ ath10k_warn(ar, "not supported mcs %d in current rate table", mcs);
+ return;
+ }
+
+ mcs_rate = (struct ath10k_index_ht_data_rate_type *)
+ ((nss == 1) ? &supported_ht_mcs_rate_nss1 :
+ &supported_ht_mcs_rate_nss2);
+
+ if (mcs >= len_nss1)
+ index = mcs - len_nss1;
+ else
+ index = mcs;
+
+ if (rate == mcs_rate[index].supported_rate[0]) {
+ *bw = RATE_INFO_BW_20;
+ } else if (rate == mcs_rate[index].supported_rate[1]) {
+ *bw = RATE_INFO_BW_40;
+ } else if (rate == mcs_rate[index].supported_rate[2]) {
+ *bw = RATE_INFO_BW_20;
+ *flags |= RATE_INFO_FLAGS_SHORT_GI;
+ } else if (rate == mcs_rate[index].supported_rate[3]) {
+ *bw = RATE_INFO_BW_40;
+ *flags |= RATE_INFO_FLAGS_SHORT_GI;
+ } else {
+ ath10k_warn(ar, "invalid ht params rate %d 100kbps nss %d mcs %d",
+ rate, nss, mcs);
+ }
+}
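+/* Worked example for the index math above: HT MCS 0..7 map straight into
+ * the Nss = 1 table, while MCS 8..15 (two spatial streams) wrap into the
+ * Nss = 2 table, e.g. mcs = 12 gives index = 12 - 8 = 4.
+ */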
+
+static void ath10k_mac_get_rate_flags_vht(struct ath10k *ar, u32 rate, u8 nss, u8 mcs,
+ u8 *flags, u8 *bw)
+{
+ struct ath10k_index_vht_data_rate_type *mcs_rate;
+
+ mcs_rate = (struct ath10k_index_vht_data_rate_type *)
+ ((nss == 1) ? &supported_vht_mcs_rate_nss1 :
+ &supported_vht_mcs_rate_nss2);
+
+ if (rate == mcs_rate[mcs].supported_VHT80_rate[0]) {
+ *bw = RATE_INFO_BW_80;
+ } else if (rate == mcs_rate[mcs].supported_VHT80_rate[1]) {
+ *bw = RATE_INFO_BW_80;
+ *flags |= RATE_INFO_FLAGS_SHORT_GI;
+ } else if (rate == mcs_rate[mcs].supported_VHT40_rate[0]) {
+ *bw = RATE_INFO_BW_40;
+ } else if (rate == mcs_rate[mcs].supported_VHT40_rate[1]) {
+ *bw = RATE_INFO_BW_40;
+ *flags |= RATE_INFO_FLAGS_SHORT_GI;
+ } else if (rate == mcs_rate[mcs].supported_VHT20_rate[0]) {
+ *bw = RATE_INFO_BW_20;
+ } else if (rate == mcs_rate[mcs].supported_VHT20_rate[1]) {
+ *bw = RATE_INFO_BW_20;
+ *flags |= RATE_INFO_FLAGS_SHORT_GI;
+ } else {
+ ath10k_warn(ar, "invalid vht params rate %d 100kbps nss %d mcs %d",
+ rate, nss, mcs);
+ }
+}
+
+static void ath10k_mac_get_rate_flags(struct ath10k *ar, u32 rate,
+ enum ath10k_phy_mode mode, u8 nss, u8 mcs,
+ u8 *flags, u8 *bw)
+{
+ if (mode == ATH10K_PHY_MODE_HT) {
+ *flags = RATE_INFO_FLAGS_MCS;
+ ath10k_mac_get_rate_flags_ht(ar, rate, nss, mcs, flags, bw);
+ } else if (mode == ATH10K_PHY_MODE_VHT) {
+ *flags = RATE_INFO_FLAGS_VHT_MCS;
+ ath10k_mac_get_rate_flags_vht(ar, rate, nss, mcs, flags, bw);
+ }
+}
+
+static void ath10k_mac_parse_bitrate(struct ath10k *ar, u32 rate_code,
+ u32 bitrate_kbps, struct rate_info *rate)
+{
+ enum ath10k_phy_mode mode = ATH10K_PHY_MODE_LEGACY;
+ enum wmi_rate_preamble preamble = WMI_TLV_GET_HW_RC_PREAM_V1(rate_code);
+ u8 nss = WMI_TLV_GET_HW_RC_NSS_V1(rate_code) + 1;
+ u8 mcs = WMI_TLV_GET_HW_RC_RATE_V1(rate_code);
+ u8 flags = 0, bw = 0;
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac parse rate code 0x%x bitrate %d kbps\n",
+ rate_code, bitrate_kbps);
+
+ if (preamble == WMI_RATE_PREAMBLE_HT)
+ mode = ATH10K_PHY_MODE_HT;
+ else if (preamble == WMI_RATE_PREAMBLE_VHT)
+ mode = ATH10K_PHY_MODE_VHT;
+
+ ath10k_mac_get_rate_flags(ar, bitrate_kbps / 100, mode, nss, mcs, &flags, &bw);
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac parse bitrate preamble %d mode %d nss %d mcs %d flags %x bw %d\n",
+ preamble, mode, nss, mcs, flags, bw);
+
+ rate->flags = flags;
+ rate->bw = bw;
+ rate->legacy = bitrate_kbps / 100;
+ rate->nss = nss;
+ rate->mcs = mcs;
+}
+
+static void ath10k_mac_sta_get_peer_stats_info(struct ath10k *ar,
+ struct ieee80211_sta *sta,
+ struct station_info *sinfo)
+{
+ struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+ struct ath10k_peer *peer;
+ unsigned long time_left;
+ int ret;
+
+ if (!(ar->hw_params.supports_peer_stats_info &&
+ arsta->arvif->vdev_type == WMI_VDEV_TYPE_STA))
+ return;
+
+ spin_lock_bh(&ar->data_lock);
+ peer = ath10k_peer_find(ar, arsta->arvif->vdev_id, sta->addr);
+ spin_unlock_bh(&ar->data_lock);
+ if (!peer)
+ return;
+
+ reinit_completion(&ar->peer_stats_info_complete);
+
+ ret = ath10k_wmi_request_peer_stats_info(ar,
+ arsta->arvif->vdev_id,
+ WMI_REQUEST_ONE_PEER_STATS_INFO,
+ arsta->arvif->bssid,
+ 0);
+ if (ret && ret != -EOPNOTSUPP) {
+ ath10k_warn(ar, "could not request peer stats info: %d\n", ret);
+ return;
+ }
+
+ time_left = wait_for_completion_timeout(&ar->peer_stats_info_complete, 3 * HZ);
+ if (time_left == 0) {
+ ath10k_warn(ar, "timed out waiting peer stats info\n");
+ return;
+ }
+
+ if (arsta->rx_rate_code != 0 && arsta->rx_bitrate_kbps != 0) {
+ ath10k_mac_parse_bitrate(ar, arsta->rx_rate_code,
+ arsta->rx_bitrate_kbps,
+ &sinfo->rxrate);
+
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BITRATE);
+ arsta->rx_rate_code = 0;
+ arsta->rx_bitrate_kbps = 0;
+ }
+
+ if (arsta->tx_rate_code != 0 && arsta->tx_bitrate_kbps != 0) {
+ ath10k_mac_parse_bitrate(ar, arsta->tx_rate_code,
+ arsta->tx_bitrate_kbps,
+ &sinfo->txrate);
+
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
+ arsta->tx_rate_code = 0;
+ arsta->tx_bitrate_kbps = 0;
+ }
+}
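+/* The helper above is synchronous-over-asynchronous: reinit_completion()
+ * re-arms ar->peer_stats_info_complete before the WMI request is sent so
+ * the event handler's completion cannot be missed, then the caller blocks
+ * for up to 3 s. The cached rate codes are cleared once consumed so a
+ * later call cannot report stale data.
+ */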
+
+static void ath10k_sta_statistics(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct station_info *sinfo)
+{
+ struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+ struct ath10k *ar = arsta->arvif->ar;
+
+ if (!ath10k_peer_stats_enabled(ar))
+ return;
+
+ mutex_lock(&ar->conf_mutex);
+ ath10k_debug_fw_stats_request(ar);
+ mutex_unlock(&ar->conf_mutex);
+
+ sinfo->rx_duration = arsta->rx_duration;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_DURATION);
+
+ if (arsta->txrate.legacy || arsta->txrate.nss) {
+ if (arsta->txrate.legacy) {
+ sinfo->txrate.legacy = arsta->txrate.legacy;
+ } else {
+ sinfo->txrate.mcs = arsta->txrate.mcs;
+ sinfo->txrate.nss = arsta->txrate.nss;
+ sinfo->txrate.bw = arsta->txrate.bw;
+ }
+ sinfo->txrate.flags = arsta->txrate.flags;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
+ }
+
+ if (ar->htt.disable_tx_comp) {
+ sinfo->tx_failed = arsta->tx_failed;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_FAILED);
+ }
+
+ sinfo->tx_retries = arsta->tx_retries;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_RETRIES);
+
+ ath10k_mac_sta_get_peer_stats_info(ar, sta, sinfo);
+}
+
+static int ath10k_mac_op_set_tid_config(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct cfg80211_tid_config *tid_config)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
+ struct ath10k_mac_iter_tid_conf_data data = {};
+ struct wmi_per_peer_per_tid_cfg_arg arg = {};
+ int ret, i;
+
+ mutex_lock(&ar->conf_mutex);
+ arg.vdev_id = arvif->vdev_id;
+
+ arvif->tids_rst = 0;
+ memset(arvif->tid_conf_changed, 0, sizeof(arvif->tid_conf_changed));
+
+ for (i = 0; i < tid_config->n_tid_conf; i++) {
+ ret = ath10k_mac_parse_tid_config(ar, sta, vif,
+ &tid_config->tid_conf[i],
+ &arg);
+ if (ret)
+ goto exit;
+ }
+
+ ret = 0;
+
+ if (sta)
+ goto exit;
+
+ arvif->tids_rst = 0;
+ data.curr_vif = vif;
+ data.ar = ar;
+
+ ieee80211_iterate_stations_atomic(hw, ath10k_mac_vif_stations_tid_conf,
+ &data);
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static int ath10k_mac_op_reset_tid_config(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ u8 tids)
+{
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
+ struct ath10k_mac_iter_tid_conf_data data = {};
+ struct ath10k *ar = hw->priv;
+ int ret = 0;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (sta) {
+ arvif->tids_rst = 0;
+ ret = ath10k_mac_reset_tid_config(ar, sta, arvif, tids);
+ goto exit;
+ }
+
+ arvif->tids_rst = tids;
+ data.curr_vif = vif;
+ data.ar = ar;
+ ieee80211_iterate_stations_atomic(hw, ath10k_mac_vif_stations_tid_conf,
+ &data);
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static const struct ieee80211_ops ath10k_ops = {
+ .tx = ath10k_mac_op_tx,
+ .wake_tx_queue = ath10k_mac_op_wake_tx_queue,
+ .start = ath10k_start,
+ .stop = ath10k_stop,
+ .config = ath10k_config,
+ .add_interface = ath10k_add_interface,
+ .update_vif_offload = ath10k_update_vif_offload,
+ .remove_interface = ath10k_remove_interface,
+ .configure_filter = ath10k_configure_filter,
+ .bss_info_changed = ath10k_bss_info_changed,
+ .set_coverage_class = ath10k_mac_op_set_coverage_class,
+ .hw_scan = ath10k_hw_scan,
+ .cancel_hw_scan = ath10k_cancel_hw_scan,
+ .set_key = ath10k_set_key,
+ .set_default_unicast_key = ath10k_set_default_unicast_key,
+ .sta_state = ath10k_sta_state,
+ .sta_set_txpwr = ath10k_sta_set_txpwr,
+ .conf_tx = ath10k_conf_tx,
+ .remain_on_channel = ath10k_remain_on_channel,
+ .cancel_remain_on_channel = ath10k_cancel_remain_on_channel,
+ .set_rts_threshold = ath10k_set_rts_threshold,
+ .set_frag_threshold = ath10k_mac_op_set_frag_threshold,
+ .flush = ath10k_flush,
+ .tx_last_beacon = ath10k_tx_last_beacon,
+ .set_antenna = ath10k_set_antenna,
+ .get_antenna = ath10k_get_antenna,
+ .reconfig_complete = ath10k_reconfig_complete,
+ .get_survey = ath10k_get_survey,
+ .set_bitrate_mask = ath10k_mac_op_set_bitrate_mask,
+ .link_sta_rc_update = ath10k_sta_rc_update,
+ .offset_tsf = ath10k_offset_tsf,
+ .ampdu_action = ath10k_ampdu_action,
+ .get_et_sset_count = ath10k_debug_get_et_sset_count,
+ .get_et_stats = ath10k_debug_get_et_stats,
+ .get_et_strings = ath10k_debug_get_et_strings,
+ .add_chanctx = ath10k_mac_op_add_chanctx,
+ .remove_chanctx = ath10k_mac_op_remove_chanctx,
+ .change_chanctx = ath10k_mac_op_change_chanctx,
+ .assign_vif_chanctx = ath10k_mac_op_assign_vif_chanctx,
+ .unassign_vif_chanctx = ath10k_mac_op_unassign_vif_chanctx,
+ .switch_vif_chanctx = ath10k_mac_op_switch_vif_chanctx,
+ .sta_pre_rcu_remove = ath10k_mac_op_sta_pre_rcu_remove,
+ .sta_statistics = ath10k_sta_statistics,
+ .set_tid_config = ath10k_mac_op_set_tid_config,
+ .reset_tid_config = ath10k_mac_op_reset_tid_config,
+
+ CFG80211_TESTMODE_CMD(ath10k_tm_cmd)
+
+#ifdef CONFIG_PM
+ .suspend = ath10k_wow_op_suspend,
+ .resume = ath10k_wow_op_resume,
+ .set_wakeup = ath10k_wow_op_set_wakeup,
+#endif
+#ifdef CONFIG_MAC80211_DEBUGFS
+ .sta_add_debugfs = ath10k_sta_add_debugfs,
+#endif
+ .set_sar_specs = ath10k_mac_set_sar_specs,
+};
+
+#define CHAN2G(_channel, _freq, _flags) { \
+ .band = NL80211_BAND_2GHZ, \
+ .hw_value = (_channel), \
+ .center_freq = (_freq), \
+ .flags = (_flags), \
+ .max_antenna_gain = 0, \
+ .max_power = 30, \
+}
+
+#define CHAN5G(_channel, _freq, _flags) { \
+ .band = NL80211_BAND_5GHZ, \
+ .hw_value = (_channel), \
+ .center_freq = (_freq), \
+ .flags = (_flags), \
+ .max_antenna_gain = 0, \
+ .max_power = 30, \
+}
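+/* For reference, CHAN2G(1, 2412, 0) expands to:
+ *
+ *   { .band = NL80211_BAND_2GHZ, .hw_value = 1, .center_freq = 2412,
+ *     .flags = 0, .max_antenna_gain = 0, .max_power = 30, }
+ */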
+
+static const struct ieee80211_channel ath10k_2ghz_channels[] = {
+ CHAN2G(1, 2412, 0),
+ CHAN2G(2, 2417, 0),
+ CHAN2G(3, 2422, 0),
+ CHAN2G(4, 2427, 0),
+ CHAN2G(5, 2432, 0),
+ CHAN2G(6, 2437, 0),
+ CHAN2G(7, 2442, 0),
+ CHAN2G(8, 2447, 0),
+ CHAN2G(9, 2452, 0),
+ CHAN2G(10, 2457, 0),
+ CHAN2G(11, 2462, 0),
+ CHAN2G(12, 2467, 0),
+ CHAN2G(13, 2472, 0),
+ CHAN2G(14, 2484, 0),
+};
+
+static const struct ieee80211_channel ath10k_5ghz_channels[] = {
+ CHAN5G(36, 5180, 0),
+ CHAN5G(40, 5200, 0),
+ CHAN5G(44, 5220, 0),
+ CHAN5G(48, 5240, 0),
+ CHAN5G(52, 5260, 0),
+ CHAN5G(56, 5280, 0),
+ CHAN5G(60, 5300, 0),
+ CHAN5G(64, 5320, 0),
+ CHAN5G(100, 5500, 0),
+ CHAN5G(104, 5520, 0),
+ CHAN5G(108, 5540, 0),
+ CHAN5G(112, 5560, 0),
+ CHAN5G(116, 5580, 0),
+ CHAN5G(120, 5600, 0),
+ CHAN5G(124, 5620, 0),
+ CHAN5G(128, 5640, 0),
+ CHAN5G(132, 5660, 0),
+ CHAN5G(136, 5680, 0),
+ CHAN5G(140, 5700, 0),
+ CHAN5G(144, 5720, 0),
+ CHAN5G(149, 5745, 0),
+ CHAN5G(153, 5765, 0),
+ CHAN5G(157, 5785, 0),
+ CHAN5G(161, 5805, 0),
+ CHAN5G(165, 5825, 0),
+ CHAN5G(169, 5845, 0),
+ CHAN5G(173, 5865, 0),
+ /* If you add more channels here, you may need to update
+ * ATH10K_MAX_5G_CHAN and you will definitely need to update
+ * ATH10K_NUM_CHANS in core.h.
+ */
+};
+
+struct ath10k *ath10k_mac_create(size_t priv_size)
+{
+ struct ieee80211_hw *hw;
+ struct ieee80211_ops *ops;
+ struct ath10k *ar;
+
+ ops = kmemdup(&ath10k_ops, sizeof(ath10k_ops), GFP_KERNEL);
+ if (!ops)
+ return NULL;
+
+ hw = ieee80211_alloc_hw(sizeof(struct ath10k) + priv_size, ops);
+ if (!hw) {
+ kfree(ops);
+ return NULL;
+ }
+
+ ar = hw->priv;
+ ar->hw = hw;
+ ar->ops = ops;
+
+ return ar;
+}
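+/* The ops table is kmemdup'd rather than referenced directly so that each
+ * device gets a private copy which ath10k_mac_register() can patch to
+ * match hardware/firmware capabilities, e.g. NULLing out .set_tid_config
+ * or .set_coverage_class when unsupported (see below).
+ */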
+
+void ath10k_mac_destroy(struct ath10k *ar)
+{
+ struct ieee80211_ops *ops = ar->ops;
+
+ ieee80211_free_hw(ar->hw);
+ kfree(ops);
+}
+
+static const struct ieee80211_iface_limit ath10k_if_limits[] = {
+ {
+ .max = 8,
+ .types = BIT(NL80211_IFTYPE_STATION)
+ | BIT(NL80211_IFTYPE_P2P_CLIENT)
+ },
+ {
+ .max = 3,
+ .types = BIT(NL80211_IFTYPE_P2P_GO)
+ },
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_P2P_DEVICE)
+ },
+ {
+ .max = 7,
+ .types = BIT(NL80211_IFTYPE_AP)
+#ifdef CONFIG_MAC80211_MESH
+ | BIT(NL80211_IFTYPE_MESH_POINT)
+#endif
+ },
+};
+
+static const struct ieee80211_iface_limit ath10k_10x_if_limits[] = {
+ {
+ .max = 8,
+ .types = BIT(NL80211_IFTYPE_AP)
+#ifdef CONFIG_MAC80211_MESH
+ | BIT(NL80211_IFTYPE_MESH_POINT)
+#endif
+ },
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_STATION)
+ },
+};
+
+static const struct ieee80211_iface_combination ath10k_if_comb[] = {
+ {
+ .limits = ath10k_if_limits,
+ .n_limits = ARRAY_SIZE(ath10k_if_limits),
+ .max_interfaces = 8,
+ .num_different_channels = 1,
+ .beacon_int_infra_match = true,
+ },
+};
+
+static const struct ieee80211_iface_combination ath10k_10x_if_comb[] = {
+ {
+ .limits = ath10k_10x_if_limits,
+ .n_limits = ARRAY_SIZE(ath10k_10x_if_limits),
+ .max_interfaces = 8,
+ .num_different_channels = 1,
+ .beacon_int_infra_match = true,
+ .beacon_int_min_gcd = 1,
+#ifdef CONFIG_ATH10K_DFS_CERTIFIED
+ .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
+ BIT(NL80211_CHAN_WIDTH_20) |
+ BIT(NL80211_CHAN_WIDTH_40) |
+ BIT(NL80211_CHAN_WIDTH_80),
+#endif
+ },
+};
+
+static const struct ieee80211_iface_limit ath10k_tlv_if_limit[] = {
+ {
+ .max = 2,
+ .types = BIT(NL80211_IFTYPE_STATION),
+ },
+ {
+ .max = 2,
+ .types = BIT(NL80211_IFTYPE_AP) |
+#ifdef CONFIG_MAC80211_MESH
+ BIT(NL80211_IFTYPE_MESH_POINT) |
+#endif
+ BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_P2P_GO),
+ },
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_P2P_DEVICE),
+ },
+};
+
+static const struct ieee80211_iface_limit ath10k_tlv_qcs_if_limit[] = {
+ {
+ .max = 2,
+ .types = BIT(NL80211_IFTYPE_STATION),
+ },
+ {
+ .max = 2,
+ .types = BIT(NL80211_IFTYPE_P2P_CLIENT),
+ },
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_AP) |
+#ifdef CONFIG_MAC80211_MESH
+ BIT(NL80211_IFTYPE_MESH_POINT) |
+#endif
+ BIT(NL80211_IFTYPE_P2P_GO),
+ },
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_P2P_DEVICE),
+ },
+};
+
+static const struct ieee80211_iface_limit ath10k_tlv_if_limit_ibss[] = {
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_STATION),
+ },
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_ADHOC),
+ },
+};
+
+/* FIXME: This is not thoroughly tested. These combinations may over- or
+ * underestimate hw/fw capabilities.
+ */
+static struct ieee80211_iface_combination ath10k_tlv_if_comb[] = {
+ {
+ .limits = ath10k_tlv_if_limit,
+ .num_different_channels = 1,
+ .max_interfaces = 4,
+ .n_limits = ARRAY_SIZE(ath10k_tlv_if_limit),
+ },
+ {
+ .limits = ath10k_tlv_if_limit_ibss,
+ .num_different_channels = 1,
+ .max_interfaces = 2,
+ .n_limits = ARRAY_SIZE(ath10k_tlv_if_limit_ibss),
+ },
+};
+
+static struct ieee80211_iface_combination ath10k_tlv_qcs_if_comb[] = {
+ {
+ .limits = ath10k_tlv_if_limit,
+ .num_different_channels = 1,
+ .max_interfaces = 4,
+ .n_limits = ARRAY_SIZE(ath10k_tlv_if_limit),
+ },
+ {
+ .limits = ath10k_tlv_qcs_if_limit,
+ .num_different_channels = 2,
+ .max_interfaces = 4,
+ .n_limits = ARRAY_SIZE(ath10k_tlv_qcs_if_limit),
+ },
+ {
+ .limits = ath10k_tlv_if_limit_ibss,
+ .num_different_channels = 1,
+ .max_interfaces = 2,
+ .n_limits = ARRAY_SIZE(ath10k_tlv_if_limit_ibss),
+ },
+};
+
+static const struct ieee80211_iface_limit ath10k_10_4_if_limits[] = {
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_STATION),
+ },
+ {
+ .max = 16,
+ .types = BIT(NL80211_IFTYPE_AP)
+#ifdef CONFIG_MAC80211_MESH
+ | BIT(NL80211_IFTYPE_MESH_POINT)
+#endif
+ },
+};
+
+static const struct ieee80211_iface_combination ath10k_10_4_if_comb[] = {
+ {
+ .limits = ath10k_10_4_if_limits,
+ .n_limits = ARRAY_SIZE(ath10k_10_4_if_limits),
+ .max_interfaces = 16,
+ .num_different_channels = 1,
+ .beacon_int_infra_match = true,
+ .beacon_int_min_gcd = 1,
+#ifdef CONFIG_ATH10K_DFS_CERTIFIED
+ .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
+ BIT(NL80211_CHAN_WIDTH_20) |
+ BIT(NL80211_CHAN_WIDTH_40) |
+ BIT(NL80211_CHAN_WIDTH_80) |
+ BIT(NL80211_CHAN_WIDTH_80P80) |
+ BIT(NL80211_CHAN_WIDTH_160),
+#endif
+ },
+};
+
+static const struct
+ieee80211_iface_combination ath10k_10_4_bcn_int_if_comb[] = {
+ {
+ .limits = ath10k_10_4_if_limits,
+ .n_limits = ARRAY_SIZE(ath10k_10_4_if_limits),
+ .max_interfaces = 16,
+ .num_different_channels = 1,
+ .beacon_int_infra_match = true,
+ .beacon_int_min_gcd = 100,
+#ifdef CONFIG_ATH10K_DFS_CERTIFIED
+ .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
+ BIT(NL80211_CHAN_WIDTH_20) |
+ BIT(NL80211_CHAN_WIDTH_40) |
+ BIT(NL80211_CHAN_WIDTH_80) |
+ BIT(NL80211_CHAN_WIDTH_80P80) |
+ BIT(NL80211_CHAN_WIDTH_160),
+#endif
+ },
+};
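+/* beacon_int_min_gcd = 100 relaxes mac80211's "all beacon intervals must
+ * match" rule for this combination: it then only requires the GCD of the
+ * beaconing interfaces' intervals to be at least 100 TU. This variant is
+ * selected below when the firmware advertises
+ * WMI_SERVICE_VDEV_DIFFERENT_BEACON_INTERVAL_SUPPORT.
+ */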
+
+static void ath10k_get_arvif_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct ath10k_vif_iter *arvif_iter = data;
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
+
+ if (arvif->vdev_id == arvif_iter->vdev_id)
+ arvif_iter->arvif = arvif;
+}
+
+struct ath10k_vif *ath10k_get_arvif(struct ath10k *ar, u32 vdev_id)
+{
+ struct ath10k_vif_iter arvif_iter;
+
+ memset(&arvif_iter, 0, sizeof(struct ath10k_vif_iter));
+ arvif_iter.vdev_id = vdev_id;
+
+ ieee80211_iterate_active_interfaces_atomic(ar->hw,
+ ATH10K_ITER_RESUME_FLAGS,
+ ath10k_get_arvif_iter,
+ &arvif_iter);
+ if (!arvif_iter.arvif) {
+ ath10k_warn(ar, "No VIF found for vdev %d\n", vdev_id);
+ return NULL;
+ }
+
+ return arvif_iter.arvif;
+}
+
+#define WRD_METHOD "WRDD"
+#define WRDD_WIFI (0x07)
+
+static u32 ath10k_mac_wrdd_get_mcc(struct ath10k *ar, union acpi_object *wrdd)
+{
+ union acpi_object *mcc_pkg;
+ union acpi_object *domain_type;
+ union acpi_object *mcc_value;
+ u32 i;
+
+ if (wrdd->type != ACPI_TYPE_PACKAGE ||
+ wrdd->package.count < 2 ||
+ wrdd->package.elements[0].type != ACPI_TYPE_INTEGER ||
+ wrdd->package.elements[0].integer.value != 0) {
+ ath10k_warn(ar, "ignoring malformed/unsupported wrdd structure\n");
+ return 0;
+ }
+
+ for (i = 1; i < wrdd->package.count; ++i) {
+ mcc_pkg = &wrdd->package.elements[i];
+
+ if (mcc_pkg->type != ACPI_TYPE_PACKAGE)
+ continue;
+ if (mcc_pkg->package.count < 2)
+ continue;
+ if (mcc_pkg->package.elements[0].type != ACPI_TYPE_INTEGER ||
+ mcc_pkg->package.elements[1].type != ACPI_TYPE_INTEGER)
+ continue;
+
+ domain_type = &mcc_pkg->package.elements[0];
+ if (domain_type->integer.value != WRDD_WIFI)
+ continue;
+
+ mcc_value = &mcc_pkg->package.elements[1];
+ return mcc_value->integer.value;
+ }
+ return 0;
+}
+
+static int ath10k_mac_get_wrdd_regulatory(struct ath10k *ar, u16 *rd)
+{
+ acpi_handle root_handle;
+ acpi_handle handle;
+ struct acpi_buffer wrdd = {ACPI_ALLOCATE_BUFFER, NULL};
+ acpi_status status;
+ u32 alpha2_code;
+ char alpha2[3];
+
+ root_handle = ACPI_HANDLE(ar->dev);
+ if (!root_handle)
+ return -EOPNOTSUPP;
+
+ status = acpi_get_handle(root_handle, (acpi_string)WRD_METHOD, &handle);
+ if (ACPI_FAILURE(status)) {
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "failed to get wrd method %d\n", status);
+ return -EIO;
+ }
+
+ status = acpi_evaluate_object(handle, NULL, NULL, &wrdd);
+ if (ACPI_FAILURE(status)) {
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "failed to call wrdc %d\n", status);
+ return -EIO;
+ }
+
+ alpha2_code = ath10k_mac_wrdd_get_mcc(ar, wrdd.pointer);
+ kfree(wrdd.pointer);
+ if (!alpha2_code)
+ return -EIO;
+
+ alpha2[0] = (alpha2_code >> 8) & 0xff;
+ alpha2[1] = (alpha2_code >> 0) & 0xff;
+ alpha2[2] = '\0';
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "regulatory hint from WRDD (alpha2-code): %s\n", alpha2);
+
+ *rd = ath_regd_find_country_by_name(alpha2);
+ if (*rd == 0xffff)
+ return -EIO;
+
+ *rd |= COUNTRY_ERD_FLAG;
+ return 0;
+}
+
+static int ath10k_mac_init_rd(struct ath10k *ar)
+{
+ int ret;
+ u16 rd;
+
+ ret = ath10k_mac_get_wrdd_regulatory(ar, &rd);
+ if (ret) {
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "fallback to eeprom programmed regulatory settings\n");
+ rd = ar->hw_eeprom_rd;
+ }
+
+ ar->ath_common.regulatory.current_rd = rd;
+ return 0;
+}
+
+int ath10k_mac_register(struct ath10k *ar)
+{
+ static const u32 cipher_suites[] = {
+ WLAN_CIPHER_SUITE_WEP40,
+ WLAN_CIPHER_SUITE_WEP104,
+ WLAN_CIPHER_SUITE_TKIP,
+ WLAN_CIPHER_SUITE_CCMP,
+
+ /* Do not add hardware supported ciphers before this line.
+ * Allow software encryption for all chips. Don't forget to
+ * update n_cipher_suites below.
+ */
+ WLAN_CIPHER_SUITE_AES_CMAC,
+ WLAN_CIPHER_SUITE_BIP_CMAC_256,
+ WLAN_CIPHER_SUITE_BIP_GMAC_128,
+ WLAN_CIPHER_SUITE_BIP_GMAC_256,
+
+ /* Only QCA99x0 and QCA4019 variants support GCMP-128, GCMP-256
+ * and CCMP-256 in hardware.
+ */
+ WLAN_CIPHER_SUITE_GCMP,
+ WLAN_CIPHER_SUITE_GCMP_256,
+ WLAN_CIPHER_SUITE_CCMP_256,
+ };
+ struct ieee80211_supported_band *band;
+ void *channels;
+ int ret;
+
+ if (!is_valid_ether_addr(ar->mac_addr)) {
+ ath10k_warn(ar, "invalid MAC address; choosing random\n");
+ eth_random_addr(ar->mac_addr);
+ }
+ SET_IEEE80211_PERM_ADDR(ar->hw, ar->mac_addr);
+
+ SET_IEEE80211_DEV(ar->hw, ar->dev);
+
+ BUILD_BUG_ON((ARRAY_SIZE(ath10k_2ghz_channels) +
+ ARRAY_SIZE(ath10k_5ghz_channels)) !=
+ ATH10K_NUM_CHANS);
+
+ if (ar->phy_capability & WHAL_WLAN_11G_CAPABILITY) {
+ channels = kmemdup(ath10k_2ghz_channels,
+ sizeof(ath10k_2ghz_channels),
+ GFP_KERNEL);
+ if (!channels) {
+ ret = -ENOMEM;
+ goto err_free;
+ }
+
+ band = &ar->mac.sbands[NL80211_BAND_2GHZ];
+ band->n_channels = ARRAY_SIZE(ath10k_2ghz_channels);
+ band->channels = channels;
+
+ if (ar->hw_params.cck_rate_map_rev2) {
+ band->n_bitrates = ath10k_g_rates_rev2_size;
+ band->bitrates = ath10k_g_rates_rev2;
+ } else {
+ band->n_bitrates = ath10k_g_rates_size;
+ band->bitrates = ath10k_g_rates;
+ }
+
+ ar->hw->wiphy->bands[NL80211_BAND_2GHZ] = band;
+ }
+
+ if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) {
+ channels = kmemdup(ath10k_5ghz_channels,
+ sizeof(ath10k_5ghz_channels),
+ GFP_KERNEL);
+ if (!channels) {
+ ret = -ENOMEM;
+ goto err_free;
+ }
+
+ band = &ar->mac.sbands[NL80211_BAND_5GHZ];
+ band->n_channels = ARRAY_SIZE(ath10k_5ghz_channels);
+ band->channels = channels;
+ band->n_bitrates = ath10k_a_rates_size;
+ band->bitrates = ath10k_a_rates;
+ ar->hw->wiphy->bands[NL80211_BAND_5GHZ] = band;
+ }
+
+ wiphy_read_of_freq_limits(ar->hw->wiphy);
+ ath10k_mac_setup_ht_vht_cap(ar);
+
+ ar->hw->wiphy->interface_modes =
+ BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_MESH_POINT);
+
+ ar->hw->wiphy->available_antennas_rx = ar->cfg_rx_chainmask;
+ ar->hw->wiphy->available_antennas_tx = ar->cfg_tx_chainmask;
+
+ if (!test_bit(ATH10K_FW_FEATURE_NO_P2P, ar->normal_mode_fw.fw_file.fw_features))
+ ar->hw->wiphy->interface_modes |=
+ BIT(NL80211_IFTYPE_P2P_DEVICE) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_P2P_GO);
+
+ ieee80211_hw_set(ar->hw, SIGNAL_DBM);
+
+ if (!test_bit(ATH10K_FW_FEATURE_NO_PS,
+ ar->running_fw->fw_file.fw_features)) {
+ ieee80211_hw_set(ar->hw, SUPPORTS_PS);
+ ieee80211_hw_set(ar->hw, SUPPORTS_DYNAMIC_PS);
+ }
+
+ ieee80211_hw_set(ar->hw, MFP_CAPABLE);
+ ieee80211_hw_set(ar->hw, REPORTS_TX_ACK_STATUS);
+ ieee80211_hw_set(ar->hw, HAS_RATE_CONTROL);
+ ieee80211_hw_set(ar->hw, AP_LINK_PS);
+ ieee80211_hw_set(ar->hw, SPECTRUM_MGMT);
+ ieee80211_hw_set(ar->hw, SUPPORT_FAST_XMIT);
+ ieee80211_hw_set(ar->hw, CONNECTION_MONITOR);
+ ieee80211_hw_set(ar->hw, SUPPORTS_PER_STA_GTK);
+ ieee80211_hw_set(ar->hw, WANT_MONITOR_VIF);
+ ieee80211_hw_set(ar->hw, CHANCTX_STA_CSA);
+ ieee80211_hw_set(ar->hw, QUEUE_CONTROL);
+ ieee80211_hw_set(ar->hw, SUPPORTS_TX_FRAG);
+ ieee80211_hw_set(ar->hw, REPORTS_LOW_ACK);
+
+ if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
+ ieee80211_hw_set(ar->hw, SW_CRYPTO_CONTROL);
+
+ ar->hw->wiphy->features |= NL80211_FEATURE_STATIC_SMPS;
+ ar->hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
+
+ if (ar->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS)
+ ar->hw->wiphy->features |= NL80211_FEATURE_DYNAMIC_SMPS;
+
+ if (ar->ht_cap_info & WMI_HT_CAP_ENABLED) {
+ ieee80211_hw_set(ar->hw, AMPDU_AGGREGATION);
+ ieee80211_hw_set(ar->hw, TX_AMPDU_SETUP_IN_HW);
+ }
+
+ ar->hw->wiphy->max_scan_ssids = WLAN_SCAN_PARAMS_MAX_SSID;
+ ar->hw->wiphy->max_scan_ie_len = WLAN_SCAN_PARAMS_MAX_IE_LEN;
+
+ if (test_bit(WMI_SERVICE_NLO, ar->wmi.svc_map)) {
+ ar->hw->wiphy->max_sched_scan_ssids = WMI_PNO_MAX_SUPP_NETWORKS;
+ ar->hw->wiphy->max_match_sets = WMI_PNO_MAX_SUPP_NETWORKS;
+ ar->hw->wiphy->max_sched_scan_ie_len = WMI_PNO_MAX_IE_LENGTH;
+ ar->hw->wiphy->max_sched_scan_plans = WMI_PNO_MAX_SCHED_SCAN_PLANS;
+ ar->hw->wiphy->max_sched_scan_plan_interval =
+ WMI_PNO_MAX_SCHED_SCAN_PLAN_INT;
+ ar->hw->wiphy->max_sched_scan_plan_iterations =
+ WMI_PNO_MAX_SCHED_SCAN_PLAN_ITRNS;
+ ar->hw->wiphy->features |= NL80211_FEATURE_ND_RANDOM_MAC_ADDR;
+ }
+
+ ar->hw->vif_data_size = sizeof(struct ath10k_vif);
+ ar->hw->sta_data_size = sizeof(struct ath10k_sta);
+ ar->hw->txq_data_size = sizeof(struct ath10k_txq);
+
+ ar->hw->max_listen_interval = ATH10K_MAX_HW_LISTEN_INTERVAL;
+
+ if (test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map)) {
+ ar->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
+
+ /* Firmware delivers WPS/P2P Probe Request frames to the driver
+ * so that userspace (e.g. wpa_supplicant/hostapd) can generate
+ * correct Probe Responses. This is advertised more as a
+ * workaround than as a true offload capability.
+ */
+ ar->hw->wiphy->probe_resp_offload |=
+ NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
+ NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
+ NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
+ }
+
+ if (test_bit(WMI_SERVICE_TDLS, ar->wmi.svc_map) ||
+ test_bit(WMI_SERVICE_TDLS_EXPLICIT_MODE_ONLY, ar->wmi.svc_map)) {
+ ar->hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
+ if (test_bit(WMI_SERVICE_TDLS_WIDER_BANDWIDTH, ar->wmi.svc_map))
+ ieee80211_hw_set(ar->hw, TDLS_WIDER_BW);
+ }
+
+ if (test_bit(WMI_SERVICE_TDLS_UAPSD_BUFFER_STA, ar->wmi.svc_map))
+ ieee80211_hw_set(ar->hw, SUPPORTS_TDLS_BUFFER_STA);
+
+ if (ath10k_frame_mode == ATH10K_HW_TXRX_ETHERNET) {
+ if (ar->wmi.vdev_param->tx_encap_type !=
+ WMI_VDEV_PARAM_UNSUPPORTED)
+ ieee80211_hw_set(ar->hw, SUPPORTS_TX_ENCAP_OFFLOAD);
+ }
+
+ ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
+ ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
+ ar->hw->wiphy->max_remain_on_channel_duration = 5000;
+
+ ar->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
+ ar->hw->wiphy->features |= NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE |
+ NL80211_FEATURE_AP_SCAN;
+
+ ar->hw->wiphy->max_ap_assoc_sta = ar->max_num_stations;
+
+ ret = ath10k_wow_init(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to init wow: %d\n", ret);
+ goto err_free;
+ }
+
+ wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_VHT_IBSS);
+ wiphy_ext_feature_set(ar->hw->wiphy,
+ NL80211_EXT_FEATURE_SET_SCAN_DWELL);
+ wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_AQL);
+
+ if (ar->hw_params.mcast_frame_registration)
+ wiphy_ext_feature_set(ar->hw->wiphy,
+ NL80211_EXT_FEATURE_MULTICAST_REGISTRATIONS);
+
+ if (test_bit(WMI_SERVICE_TX_DATA_ACK_RSSI, ar->wmi.svc_map) ||
+ test_bit(WMI_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS, ar->wmi.svc_map))
+ wiphy_ext_feature_set(ar->hw->wiphy,
+ NL80211_EXT_FEATURE_ACK_SIGNAL_SUPPORT);
+
+ if (ath10k_peer_stats_enabled(ar) ||
+ test_bit(WMI_SERVICE_REPORT_AIRTIME, ar->wmi.svc_map))
+ wiphy_ext_feature_set(ar->hw->wiphy,
+ NL80211_EXT_FEATURE_AIRTIME_FAIRNESS);
+
+ if (test_bit(WMI_SERVICE_RTT_RESPONDER_ROLE, ar->wmi.svc_map))
+ wiphy_ext_feature_set(ar->hw->wiphy,
+ NL80211_EXT_FEATURE_ENABLE_FTM_RESPONDER);
+
+ if (test_bit(WMI_SERVICE_TX_PWR_PER_PEER, ar->wmi.svc_map))
+ wiphy_ext_feature_set(ar->hw->wiphy,
+ NL80211_EXT_FEATURE_STA_TX_PWR);
+
+ if (test_bit(WMI_SERVICE_PEER_TID_CONFIGS_SUPPORT, ar->wmi.svc_map)) {
+ ar->hw->wiphy->tid_config_support.vif |=
+ BIT(NL80211_TID_CONFIG_ATTR_NOACK) |
+ BIT(NL80211_TID_CONFIG_ATTR_RETRY_SHORT) |
+ BIT(NL80211_TID_CONFIG_ATTR_RETRY_LONG) |
+ BIT(NL80211_TID_CONFIG_ATTR_AMPDU_CTRL) |
+ BIT(NL80211_TID_CONFIG_ATTR_TX_RATE) |
+ BIT(NL80211_TID_CONFIG_ATTR_TX_RATE_TYPE);
+
+ if (test_bit(WMI_SERVICE_EXT_PEER_TID_CONFIGS_SUPPORT,
+ ar->wmi.svc_map)) {
+ ar->hw->wiphy->tid_config_support.vif |=
+ BIT(NL80211_TID_CONFIG_ATTR_RTSCTS_CTRL);
+ }
+
+ ar->hw->wiphy->tid_config_support.peer =
+ ar->hw->wiphy->tid_config_support.vif;
+ ar->hw->wiphy->max_data_retry_count = ATH10K_MAX_RETRY_COUNT;
+ } else {
+ ar->ops->set_tid_config = NULL;
+ }
+ /* On LL (low-latency) hardware the queues are managed entirely by
+ * the firmware, so we only advertise to mac80211 that we can handle
+ * the maximum number of queues.
+ */
+ ar->hw->queues = IEEE80211_MAX_QUEUES;
+
+ /* vdev_ids are used as hw queue numbers. Make sure offchan tx queue is
+ * something that vdev_ids can't reach so that we don't stop the queue
+ * accidentally.
+ */
+ ar->hw->offchannel_tx_hw_queue = IEEE80211_MAX_QUEUES - 1;
+
+ switch (ar->running_fw->fw_file.wmi_op_version) {
+ case ATH10K_FW_WMI_OP_VERSION_MAIN:
+ ar->hw->wiphy->iface_combinations = ath10k_if_comb;
+ ar->hw->wiphy->n_iface_combinations =
+ ARRAY_SIZE(ath10k_if_comb);
+ ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
+ break;
+ case ATH10K_FW_WMI_OP_VERSION_TLV:
+ if (test_bit(WMI_SERVICE_ADAPTIVE_OCS, ar->wmi.svc_map)) {
+ ar->hw->wiphy->iface_combinations =
+ ath10k_tlv_qcs_if_comb;
+ ar->hw->wiphy->n_iface_combinations =
+ ARRAY_SIZE(ath10k_tlv_qcs_if_comb);
+ } else {
+ ar->hw->wiphy->iface_combinations = ath10k_tlv_if_comb;
+ ar->hw->wiphy->n_iface_combinations =
+ ARRAY_SIZE(ath10k_tlv_if_comb);
+ }
+ ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
+ break;
+ case ATH10K_FW_WMI_OP_VERSION_10_1:
+ case ATH10K_FW_WMI_OP_VERSION_10_2:
+ case ATH10K_FW_WMI_OP_VERSION_10_2_4:
+ ar->hw->wiphy->iface_combinations = ath10k_10x_if_comb;
+ ar->hw->wiphy->n_iface_combinations =
+ ARRAY_SIZE(ath10k_10x_if_comb);
+ break;
+ case ATH10K_FW_WMI_OP_VERSION_10_4:
+ ar->hw->wiphy->iface_combinations = ath10k_10_4_if_comb;
+ ar->hw->wiphy->n_iface_combinations =
+ ARRAY_SIZE(ath10k_10_4_if_comb);
+ if (test_bit(WMI_SERVICE_VDEV_DIFFERENT_BEACON_INTERVAL_SUPPORT,
+ ar->wmi.svc_map)) {
+ ar->hw->wiphy->iface_combinations =
+ ath10k_10_4_bcn_int_if_comb;
+ ar->hw->wiphy->n_iface_combinations =
+ ARRAY_SIZE(ath10k_10_4_bcn_int_if_comb);
+ }
+ break;
+ case ATH10K_FW_WMI_OP_VERSION_UNSET:
+ case ATH10K_FW_WMI_OP_VERSION_MAX:
+ WARN_ON(1);
+ ret = -EINVAL;
+ goto err_free;
+ }
+
+ if (ar->hw_params.dynamic_sar_support)
+ ar->hw->wiphy->sar_capa = &ath10k_sar_capa;
+
+ if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
+ ar->hw->netdev_features = NETIF_F_HW_CSUM;
+
+ if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED)) {
+ /* Init ath dfs pattern detector */
+ ar->ath_common.debug_mask = ATH_DBG_DFS;
+ ar->dfs_detector = dfs_pattern_detector_init(&ar->ath_common,
+ NL80211_DFS_UNSET);
+
+ if (!ar->dfs_detector)
+ ath10k_warn(ar, "failed to initialise DFS pattern detector\n");
+ }
+
+ ret = ath10k_mac_init_rd(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to derive regdom: %d\n", ret);
+ goto err_dfs_detector_exit;
+ }
+
+ /* Disable set_coverage_class for chipsets that do not support it. */
+ if (!ar->hw_params.hw_ops->set_coverage_class)
+ ar->ops->set_coverage_class = NULL;
+
+ ret = ath_regd_init(&ar->ath_common.regulatory, ar->hw->wiphy,
+ ath10k_reg_notifier);
+ if (ret) {
+ ath10k_err(ar, "failed to initialise regulatory: %i\n", ret);
+ goto err_dfs_detector_exit;
+ }
+
+ if (test_bit(WMI_SERVICE_SPOOF_MAC_SUPPORT, ar->wmi.svc_map)) {
+ ar->hw->wiphy->features |=
+ NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;
+ }
+
+ ar->hw->wiphy->cipher_suites = cipher_suites;
+
+ /* QCA988x and QCA6174 family chips do not support CCMP-256, GCMP-128
+ * and GCMP-256 ciphers in hardware. Fetch number of ciphers supported
+ * from chip specific hw_param table.
+ */
+ if (!ar->hw_params.n_cipher_suites ||
+ ar->hw_params.n_cipher_suites > ARRAY_SIZE(cipher_suites)) {
+ ath10k_err(ar, "invalid hw_params.n_cipher_suites %d\n",
+ ar->hw_params.n_cipher_suites);
+ ar->hw_params.n_cipher_suites = 8;
+ }
+ ar->hw->wiphy->n_cipher_suites = ar->hw_params.n_cipher_suites;
+
+ wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
+
+ ar->hw->weight_multiplier = ATH10K_AIRTIME_WEIGHT_MULTIPLIER;
+
+ ret = ieee80211_register_hw(ar->hw);
+ if (ret) {
+ ath10k_err(ar, "failed to register ieee80211: %d\n", ret);
+ goto err_dfs_detector_exit;
+ }
+
+ if (test_bit(WMI_SERVICE_PER_PACKET_SW_ENCRYPT, ar->wmi.svc_map)) {
+ ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_AP_VLAN);
+ ar->hw->wiphy->software_iftypes |= BIT(NL80211_IFTYPE_AP_VLAN);
+ }
+
+ if (!ath_is_world_regd(&ar->ath_common.reg_world_copy) &&
+ !ath_is_world_regd(&ar->ath_common.regulatory)) {
+ ret = regulatory_hint(ar->hw->wiphy,
+ ar->ath_common.regulatory.alpha2);
+ if (ret)
+ goto err_unregister;
+ }
+
+ return 0;
+
+err_unregister:
+ ieee80211_unregister_hw(ar->hw);
+
+err_dfs_detector_exit:
+ if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector)
+ ar->dfs_detector->exit(ar->dfs_detector);
+
+err_free:
+ kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
+ kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels);
+
+ SET_IEEE80211_DEV(ar->hw, NULL);
+ return ret;
+}
+
+void ath10k_mac_unregister(struct ath10k *ar)
+{
+ ieee80211_unregister_hw(ar->hw);
+
+ if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector)
+ ar->dfs_detector->exit(ar->dfs_detector);
+
+ kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
+ kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels);
+
+ SET_IEEE80211_DEV(ar->hw, NULL);
+}
diff --git a/drivers/net/wireless/ath/ath10k/mac.h b/drivers/net/wireless/ath/ath10k/mac.h
new file mode 100644
index 000000000000..98d83a26ea60
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/mac.h
@@ -0,0 +1,95 @@
+/* SPDX-License-Identifier: ISC */
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+ */
+
+#ifndef _MAC_H_
+#define _MAC_H_
+
+#include <net/mac80211.h>
+#include "core.h"
+
+#define WEP_KEYID_SHIFT 6
+
+enum wmi_tlv_tx_pause_id;
+enum wmi_tlv_tx_pause_action;
+
+struct ath10k_generic_iter {
+ struct ath10k *ar;
+ int ret;
+};
+
+struct rfc1042_hdr {
+ u8 llc_dsap;
+ u8 llc_ssap;
+ u8 llc_ctrl;
+ u8 snap_oui[3];
+ __be16 snap_type;
+} __packed;
+
+struct ath10k *ath10k_mac_create(size_t priv_size);
+void ath10k_mac_destroy(struct ath10k *ar);
+int ath10k_mac_register(struct ath10k *ar);
+void ath10k_mac_unregister(struct ath10k *ar);
+struct ath10k_vif *ath10k_get_arvif(struct ath10k *ar, u32 vdev_id);
+void __ath10k_scan_finish(struct ath10k *ar);
+void ath10k_scan_finish(struct ath10k *ar);
+void ath10k_scan_timeout_work(struct work_struct *work);
+void ath10k_offchan_tx_purge(struct ath10k *ar);
+void ath10k_offchan_tx_work(struct work_struct *work);
+void ath10k_mgmt_over_wmi_tx_purge(struct ath10k *ar);
+void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work);
+void ath10k_halt(struct ath10k *ar);
+void ath10k_mac_vif_beacon_free(struct ath10k_vif *arvif);
+void ath10k_drain_tx(struct ath10k *ar);
+bool ath10k_mac_is_peer_wep_key_set(struct ath10k *ar, const u8 *addr,
+ u8 keyidx);
+int ath10k_mac_vif_chan(struct ieee80211_vif *vif,
+ struct cfg80211_chan_def *def);
+
+void ath10k_mac_handle_beacon(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_mac_handle_beacon_miss(struct ath10k *ar, u32 vdev_id);
+void ath10k_mac_handle_tx_pause_vdev(struct ath10k *ar, u32 vdev_id,
+ enum wmi_tlv_tx_pause_id pause_id,
+ enum wmi_tlv_tx_pause_action action);
+
+u8 ath10k_mac_hw_rate_to_idx(const struct ieee80211_supported_band *sband,
+ u8 hw_rate, bool cck);
+u8 ath10k_mac_bitrate_to_idx(const struct ieee80211_supported_band *sband,
+ u32 bitrate);
+
+void ath10k_mac_tx_lock(struct ath10k *ar, int reason);
+void ath10k_mac_tx_unlock(struct ath10k *ar, int reason);
+void ath10k_mac_vif_tx_lock(struct ath10k_vif *arvif, int reason);
+void ath10k_mac_vif_tx_unlock(struct ath10k_vif *arvif, int reason);
+bool ath10k_mac_tx_frm_has_freq(struct ath10k *ar);
+void ath10k_mac_tx_push_pending(struct ath10k *ar);
+int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw,
+ struct ieee80211_txq *txq);
+struct ieee80211_txq *ath10k_mac_txq_lookup(struct ath10k *ar,
+ u16 peer_id,
+ u8 tid);
+int ath10k_mac_ext_resource_config(struct ath10k *ar, u32 val);
+void ath10k_mac_wait_tx_complete(struct ath10k *ar);
+int ath10k_mac_rfkill_enable_radio(struct ath10k *ar, bool enable);
+
+static inline void ath10k_tx_h_seq_no(struct ieee80211_vif *vif,
+ struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
+
+ if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
+ if (arvif->tx_seq_no == 0)
+ arvif->tx_seq_no = 0x1000;
+
+ if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
+ arvif->tx_seq_no += 0x10;
+ hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
+ hdr->seq_ctrl |= cpu_to_le16(arvif->tx_seq_no);
+ }
+}
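+/* The 0x10 step above follows the IEEE 802.11 seq_ctrl layout: the low
+ * 4 bits carry the fragment number and bits 4..15 the sequence number, so
+ * adding 0x10 advances the sequence number by one while the masked-in
+ * IEEE80211_SCTL_FRAG bits preserve the fragment field. E.g. tx_seq_no
+ * 0x1000 -> 0x1010 encodes sequence number 256 -> 257, fragment 0.
+ */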
+
+#endif /* _MAC_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/p2p.c b/drivers/net/wireless/ath/ath10k/p2p.c
new file mode 100644
index 000000000000..517b30f56b72
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/p2p.c
@@ -0,0 +1,145 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (c) 2015 Qualcomm Atheros, Inc.
+ */
+
+#include "core.h"
+#include "wmi.h"
+#include "mac.h"
+#include "p2p.h"
+
+static void ath10k_p2p_noa_ie_fill(u8 *data, size_t len,
+ const struct wmi_p2p_noa_info *noa)
+{
+ struct ieee80211_p2p_noa_attr *noa_attr;
+ u8 ctwindow_oppps = noa->ctwindow_oppps;
+ u8 ctwindow = ctwindow_oppps >> WMI_P2P_OPPPS_CTWINDOW_OFFSET;
+ bool oppps = !!(ctwindow_oppps & WMI_P2P_OPPPS_ENABLE_BIT);
+ __le16 *noa_attr_len;
+ u16 attr_len;
+ u8 noa_descriptors = noa->num_descriptors;
+ int i;
+
+ /* P2P IE */
+ data[0] = WLAN_EID_VENDOR_SPECIFIC;
+ data[1] = len - 2;
+ data[2] = (WLAN_OUI_WFA >> 16) & 0xff;
+ data[3] = (WLAN_OUI_WFA >> 8) & 0xff;
+ data[4] = (WLAN_OUI_WFA >> 0) & 0xff;
+ data[5] = WLAN_OUI_TYPE_WFA_P2P;
+
+ /* NOA ATTR */
+ data[6] = IEEE80211_P2P_ATTR_ABSENCE_NOTICE;
+ noa_attr_len = (__le16 *)&data[7]; /* 2 bytes */
+ noa_attr = (struct ieee80211_p2p_noa_attr *)&data[9];
+
+ noa_attr->index = noa->index;
+ noa_attr->oppps_ctwindow = ctwindow;
+ if (oppps)
+ noa_attr->oppps_ctwindow |= IEEE80211_P2P_OPPPS_ENABLE_BIT;
+
+ for (i = 0; i < noa_descriptors; i++) {
+ noa_attr->desc[i].count =
+ __le32_to_cpu(noa->descriptors[i].type_count);
+ noa_attr->desc[i].duration = noa->descriptors[i].duration;
+ noa_attr->desc[i].interval = noa->descriptors[i].interval;
+ noa_attr->desc[i].start_time = noa->descriptors[i].start_time;
+ }
+
+ attr_len = 2; /* index + oppps_ctwindow */
+ attr_len += noa_descriptors * sizeof(struct ieee80211_p2p_noa_desc);
+ *noa_attr_len = __cpu_to_le16(attr_len);
+}
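+/* Resulting IE layout, for reference (offsets into data[]):
+ *
+ *   [0]    WLAN_EID_VENDOR_SPECIFIC
+ *   [1]    IE length (len - 2, excluding the EID and length octets)
+ *   [2..4] WFA OUI (0x50 0x6f 0x9a)
+ *   [5]    WLAN_OUI_TYPE_WFA_P2P
+ *   [6]    IEEE80211_P2P_ATTR_ABSENCE_NOTICE
+ *   [7..8] attribute length (little endian)
+ *   [9..]  struct ieee80211_p2p_noa_attr payload
+ */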
+
+static size_t ath10k_p2p_noa_ie_len_compute(const struct wmi_p2p_noa_info *noa)
+{
+ size_t len = 0;
+
+ if (!noa->num_descriptors &&
+ !(noa->ctwindow_oppps & WMI_P2P_OPPPS_ENABLE_BIT))
+ return 0;
+
+ len += 1 + 1 + 4; /* EID + len + OUI */
+ len += 1 + 2; /* noa attr + attr len */
+ len += 1 + 1; /* index + oppps_ctwindow */
+ len += noa->num_descriptors * sizeof(struct ieee80211_p2p_noa_desc);
+
+ return len;
+}
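+/* Worked example: with two NoA descriptors and a packed
+ * struct ieee80211_p2p_noa_desc of 13 bytes (u8 count plus three __le32
+ * fields), the computed IE length is 6 + 3 + 2 + 2 * 13 = 37 bytes.
+ */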
+
+static void ath10k_p2p_noa_ie_assign(struct ath10k_vif *arvif, void *ie,
+ size_t len)
+{
+ struct ath10k *ar = arvif->ar;
+
+ lockdep_assert_held(&ar->data_lock);
+
+ kfree(arvif->u.ap.noa_data);
+
+ arvif->u.ap.noa_data = ie;
+ arvif->u.ap.noa_len = len;
+}
+
+static void __ath10k_p2p_noa_update(struct ath10k_vif *arvif,
+ const struct wmi_p2p_noa_info *noa)
+{
+ struct ath10k *ar = arvif->ar;
+ void *ie;
+ size_t len;
+
+ lockdep_assert_held(&ar->data_lock);
+
+ ath10k_p2p_noa_ie_assign(arvif, NULL, 0);
+
+ len = ath10k_p2p_noa_ie_len_compute(noa);
+ if (!len)
+ return;
+
+ ie = kmalloc(len, GFP_ATOMIC);
+ if (!ie)
+ return;
+
+ ath10k_p2p_noa_ie_fill(ie, len, noa);
+ ath10k_p2p_noa_ie_assign(arvif, ie, len);
+}
+
+void ath10k_p2p_noa_update(struct ath10k_vif *arvif,
+ const struct wmi_p2p_noa_info *noa)
+{
+ struct ath10k *ar = arvif->ar;
+
+ spin_lock_bh(&ar->data_lock);
+ __ath10k_p2p_noa_update(arvif, noa);
+ spin_unlock_bh(&ar->data_lock);
+}
+
+struct ath10k_p2p_noa_arg {
+ u32 vdev_id;
+ const struct wmi_p2p_noa_info *noa;
+};
+
+static void ath10k_p2p_noa_update_vdev_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
+ struct ath10k_p2p_noa_arg *arg = data;
+
+ if (arvif->vdev_id != arg->vdev_id)
+ return;
+
+ ath10k_p2p_noa_update(arvif, arg->noa);
+}
+
+void ath10k_p2p_noa_update_by_vdev_id(struct ath10k *ar, u32 vdev_id,
+ const struct wmi_p2p_noa_info *noa)
+{
+ struct ath10k_p2p_noa_arg arg = {
+ .vdev_id = vdev_id,
+ .noa = noa,
+ };
+
+ ieee80211_iterate_active_interfaces_atomic(ar->hw,
+ ATH10K_ITER_NORMAL_FLAGS,
+ ath10k_p2p_noa_update_vdev_iter,
+ &arg);
+}
diff --git a/drivers/net/wireless/ath/ath10k/p2p.h b/drivers/net/wireless/ath/ath10k/p2p.h
new file mode 100644
index 000000000000..7d7f44809fbb
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/p2p.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: ISC */
+/*
+ * Copyright (c) 2015 Qualcomm Atheros, Inc.
+ */
+
+#ifndef _P2P_H
+#define _P2P_H
+
+struct ath10k_vif;
+struct wmi_p2p_noa_info;
+
+void ath10k_p2p_noa_update(struct ath10k_vif *arvif,
+ const struct wmi_p2p_noa_info *noa);
+void ath10k_p2p_noa_update_by_vdev_id(struct ath10k *ar, u32 vdev_id,
+ const struct wmi_p2p_noa_info *noa);
+
+#endif
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
new file mode 100644
index 000000000000..97b49bf4ad80
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -0,0 +1,3855 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#include <linux/pci.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/bitops.h>
+
+#include "core.h"
+#include "debug.h"
+#include "coredump.h"
+
+#include "targaddrs.h"
+#include "bmi.h"
+
+#include "hif.h"
+#include "htc.h"
+
+#include "ce.h"
+#include "pci.h"
+
+enum ath10k_pci_reset_mode {
+ ATH10K_PCI_RESET_AUTO = 0,
+ ATH10K_PCI_RESET_WARM_ONLY = 1,
+};
+
+static unsigned int ath10k_pci_irq_mode = ATH10K_PCI_IRQ_AUTO;
+static unsigned int ath10k_pci_reset_mode = ATH10K_PCI_RESET_AUTO;
+
+module_param_named(irq_mode, ath10k_pci_irq_mode, uint, 0644);
+MODULE_PARM_DESC(irq_mode, "0: auto, 1: legacy, 2: msi (default: 0)");
+
+module_param_named(reset_mode, ath10k_pci_reset_mode, uint, 0644);
+MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)");
+
+/* how long to wait for the target to initialise, in ms */
+#define ATH10K_PCI_TARGET_WAIT 3000
+#define ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS 3
+
+/* Maximum number of bytes that can be handled atomically by
+ * diag read and write.
+ */
+#define ATH10K_DIAG_TRANSFER_LIMIT 0x5000
+
+#define QCA99X0_PCIE_BAR0_START_REG 0x81030
+#define QCA99X0_CPU_MEM_ADDR_REG 0x4d00c
+#define QCA99X0_CPU_MEM_DATA_REG 0x4d010
+
+static const struct pci_device_id ath10k_pci_id_table[] = {
+ /* PCI-E QCA988X V2 (Ubiquiti branded) */
+ { PCI_VDEVICE(UBIQUITI, QCA988X_2_0_DEVICE_ID_UBNT) },
+
+ { PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
+ { PCI_VDEVICE(ATHEROS, QCA6164_2_1_DEVICE_ID) }, /* PCI-E QCA6164 V2.1 */
+ { PCI_VDEVICE(ATHEROS, QCA6174_2_1_DEVICE_ID) }, /* PCI-E QCA6174 V2.1 */
+ { PCI_VDEVICE(ATHEROS, QCA99X0_2_0_DEVICE_ID) }, /* PCI-E QCA99X0 V2 */
+ { PCI_VDEVICE(ATHEROS, QCA9888_2_0_DEVICE_ID) }, /* PCI-E QCA9888 V2 */
+ { PCI_VDEVICE(ATHEROS, QCA9984_1_0_DEVICE_ID) }, /* PCI-E QCA9984 V1 */
+ { PCI_VDEVICE(ATHEROS, QCA9377_1_0_DEVICE_ID) }, /* PCI-E QCA9377 V1 */
+ { PCI_VDEVICE(ATHEROS, QCA9887_1_0_DEVICE_ID) }, /* PCI-E QCA9887 */
+ {}
+};
+
+static const struct ath10k_pci_supp_chip ath10k_pci_supp_chips[] = {
+ /* QCA988X pre 2.0 chips are not supported because they need some nasty
+ * hacks. ath10k doesn't implement those hacks and these devices crash
+ * horribly because of that.
+ */
+ { QCA988X_2_0_DEVICE_ID_UBNT, QCA988X_HW_2_0_CHIP_ID_REV },
+ { QCA988X_2_0_DEVICE_ID, QCA988X_HW_2_0_CHIP_ID_REV },
+
+ { QCA6164_2_1_DEVICE_ID, QCA6174_HW_2_1_CHIP_ID_REV },
+ { QCA6164_2_1_DEVICE_ID, QCA6174_HW_2_2_CHIP_ID_REV },
+ { QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_0_CHIP_ID_REV },
+ { QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_1_CHIP_ID_REV },
+ { QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV },
+
+ { QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_1_CHIP_ID_REV },
+ { QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_2_CHIP_ID_REV },
+ { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_0_CHIP_ID_REV },
+ { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_1_CHIP_ID_REV },
+ { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV },
+
+ { QCA99X0_2_0_DEVICE_ID, QCA99X0_HW_2_0_CHIP_ID_REV },
+
+ { QCA9984_1_0_DEVICE_ID, QCA9984_HW_1_0_CHIP_ID_REV },
+
+ { QCA9888_2_0_DEVICE_ID, QCA9888_HW_2_0_CHIP_ID_REV },
+
+ { QCA9377_1_0_DEVICE_ID, QCA9377_HW_1_0_CHIP_ID_REV },
+ { QCA9377_1_0_DEVICE_ID, QCA9377_HW_1_1_CHIP_ID_REV },
+
+ { QCA9887_1_0_DEVICE_ID, QCA9887_HW_1_0_CHIP_ID_REV },
+};
+
+static void ath10k_pci_buffer_cleanup(struct ath10k *ar);
+static int ath10k_pci_cold_reset(struct ath10k *ar);
+static int ath10k_pci_safe_chip_reset(struct ath10k *ar);
+static int ath10k_pci_init_irq(struct ath10k *ar);
+static int ath10k_pci_deinit_irq(struct ath10k *ar);
+static int ath10k_pci_request_irq(struct ath10k *ar);
+static void ath10k_pci_free_irq(struct ath10k *ar);
+static int ath10k_pci_bmi_wait(struct ath10k *ar,
+ struct ath10k_ce_pipe *tx_pipe,
+ struct ath10k_ce_pipe *rx_pipe,
+ struct bmi_xfer *xfer);
+static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar);
+static void ath10k_pci_htc_tx_cb(struct ath10k_ce_pipe *ce_state);
+static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
+static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state);
+static void ath10k_pci_htt_rx_cb(struct ath10k_ce_pipe *ce_state);
+static void ath10k_pci_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
+static void ath10k_pci_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state);
+
+static const struct ce_attr pci_host_ce_config_wlan[] = {
+ /* CE0: host->target HTC control and raw streams */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 16,
+ .src_sz_max = 256,
+ .dest_nentries = 0,
+ .send_cb = ath10k_pci_htc_tx_cb,
+ },
+
+ /* CE1: target->host HTT + HTC control */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 512,
+ .recv_cb = ath10k_pci_htt_htc_rx_cb,
+ },
+
+ /* CE2: target->host WMI */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 128,
+ .recv_cb = ath10k_pci_htc_rx_cb,
+ },
+
+ /* CE3: host->target WMI */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 32,
+ .src_sz_max = 2048,
+ .dest_nentries = 0,
+ .send_cb = ath10k_pci_htc_tx_cb,
+ },
+
+ /* CE4: host->target HTT */
+ {
+ .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+ .src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES,
+ .src_sz_max = 256,
+ .dest_nentries = 0,
+ .send_cb = ath10k_pci_htt_tx_cb,
+ },
+
+ /* CE5: target->host HTT (HIF->HTT) */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 512,
+ .dest_nentries = 512,
+ .recv_cb = ath10k_pci_htt_rx_cb,
+ },
+
+ /* CE6: target autonomous hif_memcpy */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 0,
+ .dest_nentries = 0,
+ },
+
+ /* CE7: ce_diag, the Diagnostic Window */
+ {
+ .flags = CE_ATTR_FLAGS | CE_ATTR_POLL,
+ .src_nentries = 2,
+ .src_sz_max = DIAG_TRANSFER_LIMIT,
+ .dest_nentries = 2,
+ },
+
+ /* CE8: target->host pktlog */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 128,
+ .recv_cb = ath10k_pci_pktlog_rx_cb,
+ },
+
+ /* CE9: target autonomous qcache memcpy */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 0,
+ .dest_nentries = 0,
+ },
+
+ /* CE10: target autonomous hif memcpy */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 0,
+ .dest_nentries = 0,
+ },
+
+ /* CE11: target autonomous hif memcpy */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 0,
+ .dest_nentries = 0,
+ },
+};
+
+/* Target firmware's Copy Engine configuration. */
+static const struct ce_pipe_config pci_target_ce_config_wlan[] = {
+ /* CE0: host->target HTC control and raw streams */
+ {
+ .pipenum = __cpu_to_le32(0),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(256),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE1: target->host HTT + HTC control */
+ {
+ .pipenum = __cpu_to_le32(1),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE2: target->host WMI */
+ {
+ .pipenum = __cpu_to_le32(2),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(64),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE3: host->target WMI */
+ {
+ .pipenum = __cpu_to_le32(3),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE4: host->target HTT */
+ {
+ .pipenum = __cpu_to_le32(4),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(256),
+ .nbytes_max = __cpu_to_le32(256),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* NB: 50% of src nentries, since tx has 2 frags */
+
+ /* CE5: target->host HTT (HIF->HTT) */
+ {
+ .pipenum = __cpu_to_le32(5),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(512),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE6: Reserved for target autonomous hif_memcpy */
+ {
+ .pipenum = __cpu_to_le32(6),
+ .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(4096),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE7 used only by Host */
+ {
+ .pipenum = __cpu_to_le32(7),
+ .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
+ .nentries = __cpu_to_le32(0),
+ .nbytes_max = __cpu_to_le32(0),
+ .flags = __cpu_to_le32(0),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE8: target->host pktlog */
+ {
+ .pipenum = __cpu_to_le32(8),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(64),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE9: target autonomous qcache memcpy */
+ {
+ .pipenum = __cpu_to_le32(9),
+ .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* It is not necessary to send the target WLAN configuration for CE10
+ * and CE11, as these CEs are not actively used by the target.
+ */
+};
+
+/*
+ * Map from service/endpoint to Copy Engine.
+ * This table is derived from the CE_PCI TABLE, above.
+ * It is passed to the Target at startup for use by firmware.
+ */
+static const struct ce_service_to_pipe pci_target_service_to_ce_map_wlan[] = {
+ {
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(0),
+ },
+ {
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(1),
+ },
+ { /* not used */
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(0),
+ },
+ { /* not used */
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(1),
+ },
+ {
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(4),
+ },
+ {
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(5),
+ },
+
+ /* (Additions here) */
+
+ { /* must be last */
+ __cpu_to_le32(0),
+ __cpu_to_le32(0),
+ __cpu_to_le32(0),
+ },
+};
+
+static bool ath10k_pci_is_awake(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ u32 val = ioread32(ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
+ RTC_STATE_ADDRESS);
+
+ return RTC_STATE_V_GET(val) == RTC_STATE_V_ON;
+}
+
+static void __ath10k_pci_wake(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+ lockdep_assert_held(&ar_pci->ps_lock);
+
+ ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps wake reg refcount %lu awake %d\n",
+ ar_pci->ps_wake_refcount, ar_pci->ps_awake);
+
+ iowrite32(PCIE_SOC_WAKE_V_MASK,
+ ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
+ PCIE_SOC_WAKE_ADDRESS);
+}
+
+static void __ath10k_pci_sleep(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+ lockdep_assert_held(&ar_pci->ps_lock);
+
+ ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps sleep reg refcount %lu awake %d\n",
+ ar_pci->ps_wake_refcount, ar_pci->ps_awake);
+
+ iowrite32(PCIE_SOC_WAKE_RESET,
+ ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
+ PCIE_SOC_WAKE_ADDRESS);
+ ar_pci->ps_awake = false;
+}
+
+static int ath10k_pci_wake_wait(struct ath10k *ar)
+{
+ int tot_delay = 0;
+ int curr_delay = 5;
+
+ while (tot_delay < PCIE_WAKE_TIMEOUT) {
+ if (ath10k_pci_is_awake(ar)) {
+ if (tot_delay > PCIE_WAKE_LATE_US)
+ ath10k_warn(ar, "device wakeup took %d ms, which is unusually long; continuing anyway\n",
+ tot_delay / 1000);
+ return 0;
+ }
+
+ udelay(curr_delay);
+ tot_delay += curr_delay;
+
+ if (curr_delay < 50)
+ curr_delay += 5;
+ }
+
+ return -ETIMEDOUT;
+}
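+
+/* A worked example of the backoff above (values taken from the loop; the
+ * timeout macro comes from pci.h): the per-iteration delay grows 5, 10,
+ * ..., 50 us and then stays at 50 us, so the first ten iterations take
+ * 275 us in total and the loop then polls every 50 us until
+ * PCIE_WAKE_TIMEOUT expires.
+ */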
+
+static int ath10k_pci_force_wake(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ unsigned long flags;
+ int ret = 0;
+
+ if (ar_pci->pci_ps)
+ return ret;
+
+ spin_lock_irqsave(&ar_pci->ps_lock, flags);
+
+ if (!ar_pci->ps_awake) {
+ iowrite32(PCIE_SOC_WAKE_V_MASK,
+ ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
+ PCIE_SOC_WAKE_ADDRESS);
+
+ ret = ath10k_pci_wake_wait(ar);
+ if (ret == 0)
+ ar_pci->ps_awake = true;
+ }
+
+ spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
+
+ return ret;
+}
+
+static void ath10k_pci_force_sleep(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ar_pci->ps_lock, flags);
+
+ iowrite32(PCIE_SOC_WAKE_RESET,
+ ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
+ PCIE_SOC_WAKE_ADDRESS);
+ ar_pci->ps_awake = false;
+
+ spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
+}
+
+static int ath10k_pci_wake(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ unsigned long flags;
+ int ret = 0;
+
+ if (ar_pci->pci_ps == 0)
+ return ret;
+
+ spin_lock_irqsave(&ar_pci->ps_lock, flags);
+
+ ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps wake refcount %lu awake %d\n",
+ ar_pci->ps_wake_refcount, ar_pci->ps_awake);
+
+ /* This function can be called very frequently. To avoid excessive
+ * CPU stalls from MMIO reads, use a cached variable to hold the
+ * device state.
+ */
+ if (!ar_pci->ps_awake) {
+ __ath10k_pci_wake(ar);
+
+ ret = ath10k_pci_wake_wait(ar);
+ if (ret == 0)
+ ar_pci->ps_awake = true;
+ }
+
+ if (ret == 0) {
+ ar_pci->ps_wake_refcount++;
+ WARN_ON(ar_pci->ps_wake_refcount == 0);
+ }
+
+ spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
+
+ return ret;
+}
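+
+/* Usage pattern, as a sketch only: every successful ath10k_pci_wake()
+ * must be balanced by an ath10k_pci_sleep() call, mirroring what
+ * ath10k_bus_pci_read32() below does. Note that ath10k_pci_sleep() only
+ * drops the refcount and arms the grace-period timer; the register write
+ * that actually puts the SoC back to sleep happens in
+ * ath10k_pci_ps_timer() below, once the refcount has reached zero:
+ *
+ *   if (ath10k_pci_wake(ar) == 0) {
+ *           val = ioread32(ar_pci->mem + offset);
+ *           ath10k_pci_sleep(ar);
+ *   }
+ */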
+
+static void ath10k_pci_sleep(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ unsigned long flags;
+
+ if (ar_pci->pci_ps == 0)
+ return;
+
+ spin_lock_irqsave(&ar_pci->ps_lock, flags);
+
+ ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps sleep refcount %lu awake %d\n",
+ ar_pci->ps_wake_refcount, ar_pci->ps_awake);
+
+ if (WARN_ON(ar_pci->ps_wake_refcount == 0))
+ goto skip;
+
+ ar_pci->ps_wake_refcount--;
+
+ mod_timer(&ar_pci->ps_timer, jiffies +
+ msecs_to_jiffies(ATH10K_PCI_SLEEP_GRACE_PERIOD_MSEC));
+
+skip:
+ spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
+}
+
+static void ath10k_pci_ps_timer(struct timer_list *t)
+{
+ struct ath10k_pci *ar_pci = timer_container_of(ar_pci, t, ps_timer);
+ struct ath10k *ar = ar_pci->ar;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ar_pci->ps_lock, flags);
+
+ ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps timer refcount %lu awake %d\n",
+ ar_pci->ps_wake_refcount, ar_pci->ps_awake);
+
+ if (ar_pci->ps_wake_refcount > 0)
+ goto skip;
+
+ __ath10k_pci_sleep(ar);
+
+skip:
+ spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
+}
+
+static void ath10k_pci_sleep_sync(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ unsigned long flags;
+
+ if (ar_pci->pci_ps == 0) {
+ ath10k_pci_force_sleep(ar);
+ return;
+ }
+
+ timer_delete_sync(&ar_pci->ps_timer);
+
+ spin_lock_irqsave(&ar_pci->ps_lock, flags);
+ WARN_ON(ar_pci->ps_wake_refcount > 0);
+ __ath10k_pci_sleep(ar);
+ spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
+}
+
+static void ath10k_bus_pci_write32(struct ath10k *ar, u32 offset, u32 value)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ int ret;
+
+ if (unlikely(offset + sizeof(value) > ar_pci->mem_len)) {
+ ath10k_warn(ar, "refusing to write mmio out of bounds at 0x%08x - 0x%08zx (max 0x%08zx)\n",
+ offset, offset + sizeof(value), ar_pci->mem_len);
+ return;
+ }
+
+ ret = ath10k_pci_wake(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to wake target for write32 of 0x%08x at 0x%08x: %d\n",
+ value, offset, ret);
+ return;
+ }
+
+ iowrite32(value, ar_pci->mem + offset);
+ ath10k_pci_sleep(ar);
+}
+
+static u32 ath10k_bus_pci_read32(struct ath10k *ar, u32 offset)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ u32 val;
+ int ret;
+
+ if (unlikely(offset + sizeof(val) > ar_pci->mem_len)) {
+ ath10k_warn(ar, "refusing to read mmio out of bounds at 0x%08x - 0x%08zx (max 0x%08zx)\n",
+ offset, offset + sizeof(val), ar_pci->mem_len);
+ return 0;
+ }
+
+ ret = ath10k_pci_wake(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to wake target for read32 at 0x%08x: %d\n",
+ offset, ret);
+ return 0xffffffff;
+ }
+
+ val = ioread32(ar_pci->mem + offset);
+ ath10k_pci_sleep(ar);
+
+ return val;
+}
+
+inline void ath10k_pci_write32(struct ath10k *ar, u32 offset, u32 value)
+{
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+
+ ce->bus_ops->write32(ar, offset, value);
+}
+
+inline u32 ath10k_pci_read32(struct ath10k *ar, u32 offset)
+{
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+
+ return ce->bus_ops->read32(ar, offset);
+}
+
+u32 ath10k_pci_soc_read32(struct ath10k *ar, u32 addr)
+{
+ return ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + addr);
+}
+
+void ath10k_pci_soc_write32(struct ath10k *ar, u32 addr, u32 val)
+{
+ ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + addr, val);
+}
+
+u32 ath10k_pci_reg_read32(struct ath10k *ar, u32 addr)
+{
+ return ath10k_pci_read32(ar, PCIE_LOCAL_BASE_ADDRESS + addr);
+}
+
+void ath10k_pci_reg_write32(struct ath10k *ar, u32 addr, u32 val)
+{
+ ath10k_pci_write32(ar, PCIE_LOCAL_BASE_ADDRESS + addr, val);
+}
+
+bool ath10k_pci_irq_pending(struct ath10k *ar)
+{
+ u32 cause;
+
+ /* Check if the shared legacy irq is for us */
+ cause = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
+ PCIE_INTR_CAUSE_ADDRESS);
+ if (cause & (PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL))
+ return true;
+
+ return false;
+}
+
+void ath10k_pci_disable_and_clear_intx_irq(struct ath10k *ar)
+{
+ /* IMPORTANT: the INTR_CLR register has to be set after
+ * INTR_ENABLE is set to 0; otherwise the interrupt cannot be
+ * cleared reliably.
+ */
+ ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
+ 0);
+ ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS,
+ PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
+
+ /* IMPORTANT: this extra read transaction is required to
+ * flush the posted write buffer.
+ */
+ (void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
+ PCIE_INTR_ENABLE_ADDRESS);
+}
+
+void ath10k_pci_enable_intx_irq(struct ath10k *ar)
+{
+ ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
+ PCIE_INTR_ENABLE_ADDRESS,
+ PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
+
+ /* IMPORTANT: this extra read transaction is required to
+ * flush the posted write buffer.
+ */
+ (void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
+ PCIE_INTR_ENABLE_ADDRESS);
+}
+
+static inline const char *ath10k_pci_get_irq_method(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+ if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_MSI)
+ return "msi";
+
+ return "legacy";
+}
+
+static int __ath10k_pci_rx_post_buf(struct ath10k_pci_pipe *pipe)
+{
+ struct ath10k *ar = pipe->hif_ce_state;
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+ struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
+ struct sk_buff *skb;
+ dma_addr_t paddr;
+ int ret;
+
+ skb = dev_alloc_skb(pipe->buf_sz);
+ if (!skb)
+ return -ENOMEM;
+
+ WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");
+
+ paddr = dma_map_single(ar->dev, skb->data,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(ar->dev, paddr))) {
+ ath10k_warn(ar, "failed to dma map pci rx buf\n");
+ dev_kfree_skb_any(skb);
+ return -EIO;
+ }
+
+ ATH10K_SKB_RXCB(skb)->paddr = paddr;
+
+ spin_lock_bh(&ce->ce_lock);
+ ret = ce_pipe->ops->ce_rx_post_buf(ce_pipe, skb, paddr);
+ spin_unlock_bh(&ce->ce_lock);
+ if (ret) {
+ dma_unmap_single(ar->dev, paddr, skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe)
+{
+ struct ath10k *ar = pipe->hif_ce_state;
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+ struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
+ int ret, num;
+
+ if (pipe->buf_sz == 0)
+ return;
+
+ if (!ce_pipe->dest_ring)
+ return;
+
+ spin_lock_bh(&ce->ce_lock);
+ num = __ath10k_ce_rx_num_free_bufs(ce_pipe);
+ spin_unlock_bh(&ce->ce_lock);
+
+ while (num >= 0) {
+ ret = __ath10k_pci_rx_post_buf(pipe);
+ if (ret) {
+ if (ret == -ENOSPC)
+ break;
+ ath10k_warn(ar, "failed to post pci rx buf: %d\n", ret);
+ mod_timer(&ar_pci->rx_post_retry, jiffies +
+ ATH10K_PCI_RX_POST_RETRY_MS);
+ break;
+ }
+ num--;
+ }
+}
+
+void ath10k_pci_rx_post(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ int i;
+
+ for (i = 0; i < CE_COUNT; i++)
+ ath10k_pci_rx_post_pipe(&ar_pci->pipe_info[i]);
+}
+
+void ath10k_pci_rx_replenish_retry(struct timer_list *t)
+{
+ struct ath10k_pci *ar_pci = timer_container_of(ar_pci, t,
+ rx_post_retry);
+ struct ath10k *ar = ar_pci->ar;
+
+ ath10k_pci_rx_post(ar);
+}
+
+static u32 ath10k_pci_qca988x_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
+{
+ u32 val = 0, region = addr & 0xfffff;
+
+ val = (ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS)
+ & 0x7ff) << 21;
+ val |= 0x100000 | region;
+ return val;
+}
+
+/* Refactored from ath10k_pci_qca988x_targ_cpu_to_ce_addr.
+ * Supports access to target space below 1M for qca6174 and qca9377.
+ * If the target address is below 1M, bit[20] of the converted CE addr
+ * is 0; otherwise bit[20] is 1.
+ */
+static u32 ath10k_pci_qca6174_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
+{
+ u32 val = 0, region = addr & 0xfffff;
+
+ val = (ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS)
+ & 0x7ff) << 21;
+ val |= ((addr >= 0x100000) ? 0x100000 : 0) | region;
+ return val;
+}
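+
+/* Worked example with a hypothetical CORE_CTRL value: if the low 11 bits
+ * of CORE_CTRL read 0x20, a target CPU address of 0x000f0000 (below 1M)
+ * converts to (0x20 << 21) | 0x000f0000 with bit[20] clear, while
+ * 0x004f0000 converts to (0x20 << 21) | 0x100000 | 0x000f0000 with
+ * bit[20] set.
+ */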
+
+static u32 ath10k_pci_qca99x0_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
+{
+ u32 val = 0, region = addr & 0xfffff;
+
+ val = ath10k_pci_read32(ar, PCIE_BAR_REG_ADDRESS);
+ val |= 0x100000 | region;
+ return val;
+}
+
+static u32 ath10k_pci_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+ if (WARN_ON_ONCE(!ar_pci->targ_cpu_to_ce_addr))
+ return -EOPNOTSUPP;
+
+ return ar_pci->targ_cpu_to_ce_addr(ar, addr);
+}
+
+/*
+ * Diagnostic read/write access is provided for startup/config/debug usage.
+ * Caller must guarantee proper alignment, when applicable, and that there
+ * is only a single user at any moment.
+ */
+static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
+ int nbytes)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ int ret = 0;
+ u32 *buf;
+ unsigned int completed_nbytes, alloc_nbytes, remaining_bytes;
+ struct ath10k_ce_pipe *ce_diag;
+ /* Host buffer address in CE space */
+ u32 ce_data;
+ dma_addr_t ce_data_base = 0;
+ void *data_buf;
+ int i;
+
+ mutex_lock(&ar_pci->ce_diag_mutex);
+ ce_diag = ar_pci->ce_diag;
+
+ /*
+ * Allocate a temporary bounce buffer to hold caller's data
+ * to be DMA'ed from Target. This guarantees
+ * 1) 4-byte alignment
+ * 2) Buffer in DMA-able space
+ */
+ alloc_nbytes = min_t(unsigned int, nbytes, DIAG_TRANSFER_LIMIT);
+
+ data_buf = dma_alloc_coherent(ar->dev, alloc_nbytes, &ce_data_base,
+ GFP_ATOMIC);
+ if (!data_buf) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ /* The address supplied by the caller is in the
+ * Target CPU virtual address space.
+ *
+ * In order to use this address with the diagnostic CE,
+ * convert it from Target CPU virtual address space
+ * to CE address space
+ */
+ address = ath10k_pci_targ_cpu_to_ce_addr(ar, address);
+
+ remaining_bytes = nbytes;
+ ce_data = ce_data_base;
+ while (remaining_bytes) {
+ nbytes = min_t(unsigned int, remaining_bytes,
+ DIAG_TRANSFER_LIMIT);
+
+ ret = ath10k_ce_rx_post_buf(ce_diag, &ce_data, ce_data);
+ if (ret != 0)
+ goto done;
+
+ /* Request CE to send from Target(!) address to Host buffer */
+ ret = ath10k_ce_send(ce_diag, NULL, (u32)address, nbytes, 0, 0);
+ if (ret)
+ goto done;
+
+ i = 0;
+ while (ath10k_ce_completed_send_next(ce_diag, NULL) != 0) {
+ udelay(DIAG_ACCESS_CE_WAIT_US);
+ i += DIAG_ACCESS_CE_WAIT_US;
+
+ if (i > DIAG_ACCESS_CE_TIMEOUT_US) {
+ ret = -EBUSY;
+ goto done;
+ }
+ }
+
+ i = 0;
+ while (ath10k_ce_completed_recv_next(ce_diag, (void **)&buf,
+ &completed_nbytes) != 0) {
+ udelay(DIAG_ACCESS_CE_WAIT_US);
+ i += DIAG_ACCESS_CE_WAIT_US;
+
+ if (i > DIAG_ACCESS_CE_TIMEOUT_US) {
+ ret = -EBUSY;
+ goto done;
+ }
+ }
+
+ if (nbytes != completed_nbytes) {
+ ret = -EIO;
+ goto done;
+ }
+
+ if (*buf != ce_data) {
+ ret = -EIO;
+ goto done;
+ }
+
+ remaining_bytes -= nbytes;
+ memcpy(data, data_buf, nbytes);
+
+ address += nbytes;
+ data += nbytes;
+ }
+
+done:
+
+ if (data_buf)
+ dma_free_coherent(ar->dev, alloc_nbytes, data_buf,
+ ce_data_base);
+
+ mutex_unlock(&ar_pci->ce_diag_mutex);
+
+ return ret;
+}
+
+static int ath10k_pci_diag_read32(struct ath10k *ar, u32 address, u32 *value)
+{
+ __le32 val = 0;
+ int ret;
+
+ ret = ath10k_pci_diag_read_mem(ar, address, &val, sizeof(val));
+ *value = __le32_to_cpu(val);
+
+ return ret;
+}
+
+static int __ath10k_pci_diag_read_hi(struct ath10k *ar, void *dest,
+ u32 src, u32 len)
+{
+ u32 host_addr, addr;
+ int ret;
+
+ host_addr = host_interest_item_address(src);
+
+ ret = ath10k_pci_diag_read32(ar, host_addr, &addr);
+ if (ret != 0) {
+ ath10k_warn(ar, "failed to get memcpy hi address for firmware address %d: %d\n",
+ src, ret);
+ return ret;
+ }
+
+ ret = ath10k_pci_diag_read_mem(ar, addr, dest, len);
+ if (ret != 0) {
+ ath10k_warn(ar, "failed to memcpy firmware memory from %d (%d B): %d\n",
+ addr, len, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+#define ath10k_pci_diag_read_hi(ar, dest, src, len) \
+ __ath10k_pci_diag_read_hi(ar, dest, HI_ITEM(src), len)
+
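+/* Usage sketch: the macro resolves the host-interest member at compile
+ * time, so e.g. ath10k_pci_diag_read_hi(ar, &state, hi_failure_state,
+ * sizeof(state)) first reads the 32-bit address stored in the
+ * hi_failure_state slot of the target's host_interest area and then
+ * copies `len` bytes from wherever it points.
+ */
+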
+int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
+ const void *data, int nbytes)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ int ret = 0;
+ u32 *buf;
+ unsigned int completed_nbytes, alloc_nbytes, remaining_bytes;
+ struct ath10k_ce_pipe *ce_diag;
+ void *data_buf;
+ dma_addr_t ce_data_base = 0;
+ int i;
+
+ mutex_lock(&ar_pci->ce_diag_mutex);
+ ce_diag = ar_pci->ce_diag;
+
+ /*
+ * Allocate a temporary bounce buffer to hold caller's data
+ * to be DMA'ed to Target. This guarantees
+ * 1) 4-byte alignment
+ * 2) Buffer in DMA-able space
+ */
+ alloc_nbytes = min_t(unsigned int, nbytes, DIAG_TRANSFER_LIMIT);
+
+ data_buf = dma_alloc_coherent(ar->dev, alloc_nbytes, &ce_data_base,
+ GFP_ATOMIC);
+ if (!data_buf) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ /*
+ * The address supplied by the caller is in the
+ * Target CPU virtual address space.
+ *
+ * In order to use this address with the diagnostic CE,
+ * convert it from
+ * Target CPU virtual address space
+ * to
+ * CE address space
+ */
+ address = ath10k_pci_targ_cpu_to_ce_addr(ar, address);
+
+ remaining_bytes = nbytes;
+ while (remaining_bytes) {
+ /* FIXME: check cast */
+ nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);
+
+ /* Copy caller's data to allocated DMA buf */
+ memcpy(data_buf, data, nbytes);
+
+ /* Set up to receive directly into Target(!) address */
+ ret = ath10k_ce_rx_post_buf(ce_diag, &address, address);
+ if (ret != 0)
+ goto done;
+
+ /*
+ * Request CE to send caller-supplied data that
+ * was copied to bounce buffer to Target(!) address.
+ */
+ ret = ath10k_ce_send(ce_diag, NULL, ce_data_base, nbytes, 0, 0);
+ if (ret != 0)
+ goto done;
+
+ i = 0;
+ while (ath10k_ce_completed_send_next(ce_diag, NULL) != 0) {
+ udelay(DIAG_ACCESS_CE_WAIT_US);
+ i += DIAG_ACCESS_CE_WAIT_US;
+
+ if (i > DIAG_ACCESS_CE_TIMEOUT_US) {
+ ret = -EBUSY;
+ goto done;
+ }
+ }
+
+ i = 0;
+ while (ath10k_ce_completed_recv_next(ce_diag, (void **)&buf,
+ &completed_nbytes) != 0) {
+ udelay(DIAG_ACCESS_CE_WAIT_US);
+ i += DIAG_ACCESS_CE_WAIT_US;
+
+ if (i > DIAG_ACCESS_CE_TIMEOUT_US) {
+ ret = -EBUSY;
+ goto done;
+ }
+ }
+
+ if (nbytes != completed_nbytes) {
+ ret = -EIO;
+ goto done;
+ }
+
+ if (*buf != address) {
+ ret = -EIO;
+ goto done;
+ }
+
+ remaining_bytes -= nbytes;
+ address += nbytes;
+ data += nbytes;
+ }
+
+done:
+ if (data_buf) {
+ dma_free_coherent(ar->dev, alloc_nbytes, data_buf,
+ ce_data_base);
+ }
+
+ if (ret != 0)
+ ath10k_warn(ar, "failed to write diag value at 0x%x: %d\n",
+ address, ret);
+
+ mutex_unlock(&ar_pci->ce_diag_mutex);
+
+ return ret;
+}
+
+static int ath10k_pci_diag_write32(struct ath10k *ar, u32 address, u32 value)
+{
+ __le32 val = __cpu_to_le32(value);
+
+ return ath10k_pci_diag_write_mem(ar, address, &val, sizeof(val));
+}
+
+/* Called by lower (CE) layer when a send to Target completes. */
+static void ath10k_pci_htc_tx_cb(struct ath10k_ce_pipe *ce_state)
+{
+ struct ath10k *ar = ce_state->ar;
+ struct sk_buff_head list;
+ struct sk_buff *skb;
+
+ __skb_queue_head_init(&list);
+ while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
+ /* no need to call tx completion for NULL pointers */
+ if (skb == NULL)
+ continue;
+
+ __skb_queue_tail(&list, skb);
+ }
+
+ while ((skb = __skb_dequeue(&list)))
+ ath10k_htc_tx_completion_handler(ar, skb);
+}
+
+static void ath10k_pci_process_rx_cb(struct ath10k_ce_pipe *ce_state,
+ void (*callback)(struct ath10k *ar,
+ struct sk_buff *skb))
+{
+ struct ath10k *ar = ce_state->ar;
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
+ struct sk_buff *skb;
+ struct sk_buff_head list;
+ void *transfer_context;
+ unsigned int nbytes, max_nbytes;
+
+ __skb_queue_head_init(&list);
+ while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
+ &nbytes) == 0) {
+ skb = transfer_context;
+ max_nbytes = skb->len + skb_tailroom(skb);
+ dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
+ max_nbytes, DMA_FROM_DEVICE);
+
+ if (unlikely(max_nbytes < nbytes)) {
+ ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)",
+ nbytes, max_nbytes);
+ dev_kfree_skb_any(skb);
+ continue;
+ }
+
+ skb_put(skb, nbytes);
+ __skb_queue_tail(&list, skb);
+ }
+
+ while ((skb = __skb_dequeue(&list))) {
+ ath10k_dbg(ar, ATH10K_DBG_PCI, "pci rx ce pipe %d len %d\n",
+ ce_state->id, skb->len);
+ ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci rx: ",
+ skb->data, skb->len);
+
+ callback(ar, skb);
+ }
+
+ ath10k_pci_rx_post_pipe(pipe_info);
+}
+
+static void ath10k_pci_process_htt_rx_cb(struct ath10k_ce_pipe *ce_state,
+ void (*callback)(struct ath10k *ar,
+ struct sk_buff *skb))
+{
+ struct ath10k *ar = ce_state->ar;
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
+ struct ath10k_ce_pipe *ce_pipe = pipe_info->ce_hdl;
+ struct sk_buff *skb;
+ struct sk_buff_head list;
+ void *transfer_context;
+ unsigned int nbytes, max_nbytes, nentries;
+ int orig_len;
+
+ /* No need to acquire ce_lock for CE5, since this is the only place CE5
+ * is processed other than init and deinit. Before releasing CE5
+ * buffers, interrupts are disabled. Thus CE5 access is serialized.
+ */
+ __skb_queue_head_init(&list);
+ while (ath10k_ce_completed_recv_next_nolock(ce_state, &transfer_context,
+ &nbytes) == 0) {
+ skb = transfer_context;
+ max_nbytes = skb->len + skb_tailroom(skb);
+
+ if (unlikely(max_nbytes < nbytes)) {
+ ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)",
+ nbytes, max_nbytes);
+ continue;
+ }
+
+ dma_sync_single_for_cpu(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
+ max_nbytes, DMA_FROM_DEVICE);
+ skb_put(skb, nbytes);
+ __skb_queue_tail(&list, skb);
+ }
+
+ nentries = skb_queue_len(&list);
+ while ((skb = __skb_dequeue(&list))) {
+ ath10k_dbg(ar, ATH10K_DBG_PCI, "pci rx ce pipe %d len %d\n",
+ ce_state->id, skb->len);
+ ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci rx: ",
+ skb->data, skb->len);
+
+ orig_len = skb->len;
+ callback(ar, skb);
+ skb_push(skb, orig_len - skb->len);
+ skb_reset_tail_pointer(skb);
+ skb_trim(skb, 0);
+
+ /* let the device regain ownership of the buffer */
+ dma_sync_single_for_device(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+ }
+ ath10k_ce_rx_update_write_idx(ce_pipe, nentries);
+}
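+
+/* The push/trim dance in the loop above recycles the very same skb back
+ * to the device: skb_push() undoes whatever the HTT callback consumed,
+ * skb_reset_tail_pointer()/skb_trim() empty the buffer again, and the
+ * dma_sync_single_for_device() call plus the final write-index update
+ * hand it back to the RX ring without a fresh allocation or DMA mapping.
+ */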
+
+/* Called by lower (CE) layer when data is received from the Target. */
+static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
+{
+ ath10k_pci_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
+}
+
+static void ath10k_pci_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
+{
+ /* CE4 polling needs to be done whenever CE pipe which transports
+ * HTT Rx (target->host) is processed.
+ */
+ ath10k_ce_per_engine_service(ce_state->ar, 4);
+
+ ath10k_pci_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
+}
+
+/* Called by lower (CE) layer when data is received from the Target.
+ * Only 10.4 firmware uses separate CE to transfer pktlog data.
+ */
+static void ath10k_pci_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state)
+{
+ ath10k_pci_process_rx_cb(ce_state,
+ ath10k_htt_rx_pktlog_completion_handler);
+}
+
+/* Called by lower (CE) layer when a send to HTT Target completes. */
+static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state)
+{
+ struct ath10k *ar = ce_state->ar;
+ struct sk_buff *skb;
+
+ while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
+ /* no need to call tx completion for NULL pointers */
+ if (!skb)
+ continue;
+
+ dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
+ skb->len, DMA_TO_DEVICE);
+ ath10k_htt_hif_tx_complete(ar, skb);
+ }
+}
+
+static void ath10k_pci_htt_rx_deliver(struct ath10k *ar, struct sk_buff *skb)
+{
+ skb_pull(skb, sizeof(struct ath10k_htc_hdr));
+ ath10k_htt_t2h_msg_handler(ar, skb);
+}
+
+/* Called by lower (CE) layer when HTT data is received from the Target. */
+static void ath10k_pci_htt_rx_cb(struct ath10k_ce_pipe *ce_state)
+{
+ /* CE4 polling needs to be done whenever CE pipe which transports
+ * HTT Rx (target->host) is processed.
+ */
+ ath10k_ce_per_engine_service(ce_state->ar, 4);
+
+ ath10k_pci_process_htt_rx_cb(ce_state, ath10k_pci_htt_rx_deliver);
+}
+
+int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
+ struct ath10k_hif_sg_item *items, int n_items)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+ struct ath10k_pci_pipe *pci_pipe = &ar_pci->pipe_info[pipe_id];
+ struct ath10k_ce_pipe *ce_pipe = pci_pipe->ce_hdl;
+ struct ath10k_ce_ring *src_ring = ce_pipe->src_ring;
+ unsigned int nentries_mask;
+ unsigned int sw_index;
+ unsigned int write_index;
+ int err, i = 0;
+
+ spin_lock_bh(&ce->ce_lock);
+
+ nentries_mask = src_ring->nentries_mask;
+ sw_index = src_ring->sw_index;
+ write_index = src_ring->write_index;
+
+ if (unlikely(CE_RING_DELTA(nentries_mask,
+ write_index, sw_index - 1) < n_items)) {
+ err = -ENOBUFS;
+ goto err;
+ }
+
+ for (i = 0; i < n_items - 1; i++) {
+ ath10k_dbg(ar, ATH10K_DBG_PCI,
+ "pci tx item %d paddr %pad len %d n_items %d\n",
+ i, &items[i].paddr, items[i].len, n_items);
+ ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ",
+ items[i].vaddr, items[i].len);
+
+ err = ath10k_ce_send_nolock(ce_pipe,
+ items[i].transfer_context,
+ items[i].paddr,
+ items[i].len,
+ items[i].transfer_id,
+ CE_SEND_FLAG_GATHER);
+ if (err)
+ goto err;
+ }
+
+ /* `i` is equal to `n_items - 1` after the loop above */
+
+ ath10k_dbg(ar, ATH10K_DBG_PCI,
+ "pci tx item %d paddr %pad len %d n_items %d\n",
+ i, &items[i].paddr, items[i].len, n_items);
+ ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ",
+ items[i].vaddr, items[i].len);
+
+ err = ath10k_ce_send_nolock(ce_pipe,
+ items[i].transfer_context,
+ items[i].paddr,
+ items[i].len,
+ items[i].transfer_id,
+ 0);
+ if (err)
+ goto err;
+
+ spin_unlock_bh(&ce->ce_lock);
+ return 0;
+
+err:
+ for (; i > 0; i--)
+ __ath10k_ce_send_revert(ce_pipe);
+
+ spin_unlock_bh(&ce->ce_lock);
+ return err;
+}
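+
+/* The send loop above relies on CE gather semantics: every item except
+ * the last is queued with CE_SEND_FLAG_GATHER, so the hardware treats
+ * the items as one logical transfer, and only the final, flag-less send
+ * makes the whole scatter-gather list visible to the target. A
+ * hypothetical two-fragment frame would thus be posted as:
+ *
+ *   ath10k_ce_send_nolock(ce_pipe, ctx, frag0_paddr, len0, id,
+ *                         CE_SEND_FLAG_GATHER);
+ *   ath10k_ce_send_nolock(ce_pipe, ctx, frag1_paddr, len1, id, 0);
+ */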
+
+int ath10k_pci_hif_diag_read(struct ath10k *ar, u32 address, void *buf,
+ size_t buf_len)
+{
+ return ath10k_pci_diag_read_mem(ar, address, buf, buf_len);
+}
+
+u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+ ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get free queue number\n");
+
+ return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl);
+}
+
+static void ath10k_pci_dump_registers(struct ath10k *ar,
+ struct ath10k_fw_crash_data *crash_data)
+{
+ __le32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
+ int i, ret;
+
+ lockdep_assert_held(&ar->dump_mutex);
+
+ ret = ath10k_pci_diag_read_hi(ar, &reg_dump_values[0],
+ hi_failure_state,
+ REG_DUMP_COUNT_QCA988X * sizeof(__le32));
+ if (ret) {
+ ath10k_err(ar, "failed to read firmware dump area: %d\n", ret);
+ return;
+ }
+
+ BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4);
+
+ ath10k_err(ar, "firmware register dump:\n");
+ for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4)
+ ath10k_err(ar, "[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
+ i,
+ __le32_to_cpu(reg_dump_values[i]),
+ __le32_to_cpu(reg_dump_values[i + 1]),
+ __le32_to_cpu(reg_dump_values[i + 2]),
+ __le32_to_cpu(reg_dump_values[i + 3]));
+
+ if (!crash_data)
+ return;
+
+ for (i = 0; i < REG_DUMP_COUNT_QCA988X; i++)
+ crash_data->registers[i] = reg_dump_values[i];
+}
+
+static int ath10k_pci_dump_memory_section(struct ath10k *ar,
+ const struct ath10k_mem_region *mem_region,
+ u8 *buf, size_t buf_len)
+{
+ const struct ath10k_mem_section *cur_section, *next_section;
+ unsigned int count, section_size, skip_size;
+ int ret, i, j;
+
+ if (!mem_region || !buf)
+ return 0;
+
+ cur_section = &mem_region->section_table.sections[0];
+
+ if (mem_region->start > cur_section->start) {
+ ath10k_warn(ar, "incorrect memdump region 0x%x with section start address 0x%x.\n",
+ mem_region->start, cur_section->start);
+ return 0;
+ }
+
+ skip_size = cur_section->start - mem_region->start;
+
+ /* fill the gap between the first register section and register
+ * start address
+ */
+ for (i = 0; i < skip_size; i++) {
+ *buf = ATH10K_MAGIC_NOT_COPIED;
+ buf++;
+ }
+
+ count = 0;
+
+ for (i = 0; cur_section != NULL; i++) {
+ section_size = cur_section->end - cur_section->start;
+
+ if (section_size <= 0) {
+ ath10k_warn(ar, "incorrect ramdump format with start address 0x%x and stop address 0x%x\n",
+ cur_section->start,
+ cur_section->end);
+ break;
+ }
+
+ if ((i + 1) == mem_region->section_table.size) {
+ /* last section */
+ next_section = NULL;
+ skip_size = 0;
+ } else {
+ next_section = cur_section + 1;
+
+ if (cur_section->end > next_section->start) {
+ ath10k_warn(ar, "next ramdump section 0x%x is smaller than current end address 0x%x\n",
+ next_section->start,
+ cur_section->end);
+ break;
+ }
+
+ skip_size = next_section->start - cur_section->end;
+ }
+
+ if (buf_len < (skip_size + section_size)) {
+ ath10k_warn(ar, "ramdump buffer is too small: %zu\n", buf_len);
+ break;
+ }
+
+ buf_len -= skip_size + section_size;
+
+ /* read section to dest memory */
+ ret = ath10k_pci_diag_read_mem(ar, cur_section->start,
+ buf, section_size);
+ if (ret) {
+ ath10k_warn(ar, "failed to read ramdump from section 0x%x: %d\n",
+ cur_section->start, ret);
+ break;
+ }
+
+ buf += section_size;
+ count += section_size;
+
+ /* fill in the gap between this section and the next */
+ for (j = 0; j < skip_size; j++) {
+ *buf = ATH10K_MAGIC_NOT_COPIED;
+ buf++;
+ }
+
+ count += skip_size;
+
+ if (!next_section)
+ /* this was the last section */
+ break;
+
+ cur_section = next_section;
+ }
+
+ return count;
+}
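+
+/* Example with hypothetical addresses: for a region starting at 0x400000
+ * whose first section starts at 0x400100, the 0x100-byte gap is filled
+ * with ATH10K_MAGIC_NOT_COPIED before any section data is copied, so
+ * offsets within the dump stay aligned with target addresses.
+ */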
+
+static int ath10k_pci_set_ram_config(struct ath10k *ar, u32 config)
+{
+ u32 val;
+
+ ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
+ FW_RAM_CONFIG_ADDRESS, config);
+
+ val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
+ FW_RAM_CONFIG_ADDRESS);
+ if (val != config) {
+ ath10k_warn(ar, "failed to set RAM config from 0x%x to 0x%x\n",
+ val, config);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/* Always returns the length */
+static int ath10k_pci_dump_memory_sram(struct ath10k *ar,
+ const struct ath10k_mem_region *region,
+ u8 *buf)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ u32 base_addr, i;
+
+ base_addr = ioread32(ar_pci->mem + QCA99X0_PCIE_BAR0_START_REG);
+ base_addr += region->start;
+
+ for (i = 0; i < region->len; i += 4) {
+ iowrite32(base_addr + i, ar_pci->mem + QCA99X0_CPU_MEM_ADDR_REG);
+ *(u32 *)(buf + i) = ioread32(ar_pci->mem + QCA99X0_CPU_MEM_DATA_REG);
+ }
+
+ return region->len;
+}
+
+/* if an error happened returns < 0, otherwise the length */
+static int ath10k_pci_dump_memory_reg(struct ath10k *ar,
+ const struct ath10k_mem_region *region,
+ u8 *buf)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ u32 i;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+ if (ar->state != ATH10K_STATE_ON) {
+ ath10k_warn(ar, "Skipping pci_dump_memory_reg invalid state\n");
+ ret = -EIO;
+ goto done;
+ }
+
+ for (i = 0; i < region->len; i += 4)
+ *(u32 *)(buf + i) = ioread32(ar_pci->mem + region->start + i);
+
+ ret = region->len;
+done:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+/* if an error happened returns < 0, otherwise the length */
+static int ath10k_pci_dump_memory_generic(struct ath10k *ar,
+ const struct ath10k_mem_region *current_region,
+ u8 *buf)
+{
+ int ret;
+
+ if (current_region->section_table.size > 0)
+ /* Copy each section individually. */
+ return ath10k_pci_dump_memory_section(ar,
+ current_region,
+ buf,
+ current_region->len);
+
+ /* No individual memory sections defined so we can
+ * copy the entire memory region.
+ */
+ ret = ath10k_pci_diag_read_mem(ar,
+ current_region->start,
+ buf,
+ current_region->len);
+ if (ret) {
+ ath10k_warn(ar, "failed to copy ramdump region %s: %d\n",
+ current_region->name, ret);
+ return ret;
+ }
+
+ return current_region->len;
+}
+
+static void ath10k_pci_dump_memory(struct ath10k *ar,
+ struct ath10k_fw_crash_data *crash_data)
+{
+ const struct ath10k_hw_mem_layout *mem_layout;
+ const struct ath10k_mem_region *current_region;
+ struct ath10k_dump_ram_data_hdr *hdr;
+ u32 count, shift;
+ size_t buf_len;
+ int ret, i;
+ u8 *buf;
+
+ lockdep_assert_held(&ar->dump_mutex);
+
+ if (!crash_data)
+ return;
+
+ mem_layout = ath10k_coredump_get_mem_layout(ar);
+ if (!mem_layout)
+ return;
+
+ current_region = &mem_layout->region_table.regions[0];
+
+ buf = crash_data->ramdump_buf;
+ buf_len = crash_data->ramdump_buf_len;
+
+ memset(buf, 0, buf_len);
+
+ for (i = 0; i < mem_layout->region_table.size; i++) {
+ count = 0;
+
+ if (current_region->len > buf_len) {
+ ath10k_warn(ar, "memory region %s size %d is larger that remaining ramdump buffer size %zu\n",
+ current_region->name,
+ current_region->len,
+ buf_len);
+ break;
+ }
+
+ /* To get an IRAM dump, the host driver needs to switch the target
+ * RAM config from DRAM to IRAM.
+ */
+ if (current_region->type == ATH10K_MEM_REGION_TYPE_IRAM1 ||
+ current_region->type == ATH10K_MEM_REGION_TYPE_IRAM2) {
+ shift = current_region->start >> 20;
+
+ ret = ath10k_pci_set_ram_config(ar, shift);
+ if (ret) {
+ ath10k_warn(ar, "failed to switch ram config to IRAM for section %s: %d\n",
+ current_region->name, ret);
+ break;
+ }
+ }
+
+ /* Reserve space for the header. */
+ hdr = (void *)buf;
+ buf += sizeof(*hdr);
+ buf_len -= sizeof(*hdr);
+
+ switch (current_region->type) {
+ case ATH10K_MEM_REGION_TYPE_IOSRAM:
+ count = ath10k_pci_dump_memory_sram(ar, current_region, buf);
+ break;
+ case ATH10K_MEM_REGION_TYPE_IOREG:
+ ret = ath10k_pci_dump_memory_reg(ar, current_region, buf);
+ if (ret < 0)
+ break;
+
+ count = ret;
+ break;
+ default:
+ ret = ath10k_pci_dump_memory_generic(ar, current_region, buf);
+ if (ret < 0)
+ break;
+
+ count = ret;
+ break;
+ }
+
+ hdr->region_type = cpu_to_le32(current_region->type);
+ hdr->start = cpu_to_le32(current_region->start);
+ hdr->length = cpu_to_le32(count);
+
+ if (count == 0)
+ /* Note: the header remains, just with zero length. */
+ break;
+
+ buf += count;
+ buf_len -= count;
+
+ current_region++;
+ }
+}
+
+static void ath10k_pci_fw_dump_work(struct work_struct *work)
+{
+ struct ath10k_pci *ar_pci = container_of(work, struct ath10k_pci,
+ dump_work);
+ struct ath10k_fw_crash_data *crash_data;
+ struct ath10k *ar = ar_pci->ar;
+ char guid[UUID_STRING_LEN + 1];
+
+ mutex_lock(&ar->dump_mutex);
+
+ spin_lock_bh(&ar->data_lock);
+ ar->stats.fw_crash_counter++;
+ spin_unlock_bh(&ar->data_lock);
+
+ crash_data = ath10k_coredump_new(ar);
+
+ if (crash_data)
+ scnprintf(guid, sizeof(guid), "%pUl", &crash_data->guid);
+ else
+ scnprintf(guid, sizeof(guid), "n/a");
+
+ ath10k_err(ar, "firmware crashed! (guid %s)\n", guid);
+ ath10k_print_driver_info(ar);
+ ath10k_pci_dump_registers(ar, crash_data);
+ ath10k_ce_dump_registers(ar, crash_data);
+ ath10k_pci_dump_memory(ar, crash_data);
+
+ mutex_unlock(&ar->dump_mutex);
+
+ ath10k_core_start_recovery(ar);
+}
+
+static void ath10k_pci_fw_crashed_dump(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+ queue_work(ar->workqueue, &ar_pci->dump_work);
+}
+
+void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
+ int force)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+ ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif send complete check\n");
+
+ if (!force) {
+ int resources;
+ /*
+ * Decide whether to actually poll for completions, or just
+ * wait for a later chance.
+ * If there seem to be plenty of resources left, then just wait
+ * since checking involves reading a CE register, which is a
+ * relatively expensive operation.
+ */
+ resources = ath10k_pci_hif_get_free_queue_number(ar, pipe);
+
+ /*
+ * If at least 50% of the total resources are still available,
+ * don't bother checking again yet.
+ */
+ if (resources > (ar_pci->attr[pipe].src_nentries >> 1))
+ return;
+ }
+ ath10k_ce_per_engine_service(ar, pipe);
+}
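+
+/* For example, with CE3's src_nentries of 32 in pci_host_ce_config_wlan
+ * above, the (relatively expensive) CE register poll is skipped as long
+ * as more than 16 source ring entries are still free.
+ */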
+
+static void ath10k_pci_rx_retry_sync(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+ timer_delete_sync(&ar_pci->rx_post_retry);
+}
+
+int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar, u16 service_id,
+ u8 *ul_pipe, u8 *dl_pipe)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ const struct ce_service_to_pipe *entry;
+ bool ul_set = false, dl_set = false;
+ int i;
+
+ ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif map service\n");
+
+ for (i = 0; i < ARRAY_SIZE(pci_target_service_to_ce_map_wlan); i++) {
+ entry = &ar_pci->serv_to_pipe[i];
+
+ if (__le32_to_cpu(entry->service_id) != service_id)
+ continue;
+
+ switch (__le32_to_cpu(entry->pipedir)) {
+ case PIPEDIR_NONE:
+ break;
+ case PIPEDIR_IN:
+ WARN_ON(dl_set);
+ *dl_pipe = __le32_to_cpu(entry->pipenum);
+ dl_set = true;
+ break;
+ case PIPEDIR_OUT:
+ WARN_ON(ul_set);
+ *ul_pipe = __le32_to_cpu(entry->pipenum);
+ ul_set = true;
+ break;
+ case PIPEDIR_INOUT:
+ WARN_ON(dl_set);
+ WARN_ON(ul_set);
+ *dl_pipe = __le32_to_cpu(entry->pipenum);
+ *ul_pipe = __le32_to_cpu(entry->pipenum);
+ dl_set = true;
+ ul_set = true;
+ break;
+ }
+ }
+
+ if (!ul_set || !dl_set)
+ return -ENOENT;
+
+ return 0;
+}
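+
+/* Example, per pci_target_service_to_ce_map_wlan above: for
+ * ATH10K_HTC_SVC_ID_WMI_CONTROL this resolves to *ul_pipe = 3
+ * (host->target) and *dl_pipe = 2 (target->host).
+ */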
+
+void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
+ u8 *ul_pipe, u8 *dl_pipe)
+{
+ ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get default pipe\n");
+
+ (void)ath10k_pci_hif_map_service_to_pipe(ar,
+ ATH10K_HTC_SVC_ID_RSVD_CTRL,
+ ul_pipe, dl_pipe);
+}
+
+void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar)
+{
+ u32 val;
+
+ switch (ar->hw_rev) {
+ case ATH10K_HW_QCA988X:
+ case ATH10K_HW_QCA9887:
+ case ATH10K_HW_QCA6174:
+ case ATH10K_HW_QCA9377:
+ val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
+ CORE_CTRL_ADDRESS);
+ val &= ~CORE_CTRL_PCIE_REG_31_MASK;
+ ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
+ CORE_CTRL_ADDRESS, val);
+ break;
+ case ATH10K_HW_QCA99X0:
+ case ATH10K_HW_QCA9984:
+ case ATH10K_HW_QCA9888:
+ case ATH10K_HW_QCA4019:
+ /* TODO: Find appropriate register configuration for QCA99X0
+ * to mask irq/MSI.
+ */
+ break;
+ case ATH10K_HW_WCN3990:
+ break;
+ }
+}
+
+static void ath10k_pci_irq_msi_fw_unmask(struct ath10k *ar)
+{
+ u32 val;
+
+ switch (ar->hw_rev) {
+ case ATH10K_HW_QCA988X:
+ case ATH10K_HW_QCA9887:
+ case ATH10K_HW_QCA6174:
+ case ATH10K_HW_QCA9377:
+ val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
+ CORE_CTRL_ADDRESS);
+ val |= CORE_CTRL_PCIE_REG_31_MASK;
+ ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
+ CORE_CTRL_ADDRESS, val);
+ break;
+ case ATH10K_HW_QCA99X0:
+ case ATH10K_HW_QCA9984:
+ case ATH10K_HW_QCA9888:
+ case ATH10K_HW_QCA4019:
+ /* TODO: Find appropriate register configuration for QCA99X0
+ * to unmask irq/MSI.
+ */
+ break;
+ case ATH10K_HW_WCN3990:
+ break;
+ }
+}
+
+static void ath10k_pci_irq_disable(struct ath10k *ar)
+{
+ ath10k_ce_disable_interrupts(ar);
+ ath10k_pci_disable_and_clear_intx_irq(ar);
+ ath10k_pci_irq_msi_fw_mask(ar);
+}
+
+static void ath10k_pci_irq_sync(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+ synchronize_irq(ar_pci->pdev->irq);
+}
+
+static void ath10k_pci_irq_enable(struct ath10k *ar)
+{
+ ath10k_ce_enable_interrupts(ar);
+ ath10k_pci_enable_intx_irq(ar);
+ ath10k_pci_irq_msi_fw_unmask(ar);
+}
+
+static int ath10k_pci_hif_start(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n");
+
+ ath10k_core_napi_enable(ar);
+
+ ath10k_pci_irq_enable(ar);
+ ath10k_pci_rx_post(ar);
+
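+ /* Restore the ASPM configuration captured in ar_pci->link_ctl,
+ * presumably saved before firmware boot, where ASPM is disabled.
+ */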
+ pcie_capability_clear_and_set_word(ar_pci->pdev, PCI_EXP_LNKCTL,
+ PCI_EXP_LNKCTL_ASPMC,
+ ar_pci->link_ctl & PCI_EXP_LNKCTL_ASPMC);
+
+ return 0;
+}
+
+static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe)
+{
+ struct ath10k *ar;
+ struct ath10k_ce_pipe *ce_pipe;
+ struct ath10k_ce_ring *ce_ring;
+ struct sk_buff *skb;
+ int i;
+
+ ar = pci_pipe->hif_ce_state;
+ ce_pipe = pci_pipe->ce_hdl;
+ ce_ring = ce_pipe->dest_ring;
+
+ if (!ce_ring)
+ return;
+
+ if (!pci_pipe->buf_sz)
+ return;
+
+ for (i = 0; i < ce_ring->nentries; i++) {
+ skb = ce_ring->per_transfer_context[i];
+ if (!skb)
+ continue;
+
+ ce_ring->per_transfer_context[i] = NULL;
+
+ dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
+ }
+}
+
+static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe)
+{
+ struct ath10k *ar;
+ struct ath10k_ce_pipe *ce_pipe;
+ struct ath10k_ce_ring *ce_ring;
+ struct sk_buff *skb;
+ int i;
+
+ ar = pci_pipe->hif_ce_state;
+ ce_pipe = pci_pipe->ce_hdl;
+ ce_ring = ce_pipe->src_ring;
+
+ if (!ce_ring)
+ return;
+
+ if (!pci_pipe->buf_sz)
+ return;
+
+ for (i = 0; i < ce_ring->nentries; i++) {
+ skb = ce_ring->per_transfer_context[i];
+ if (!skb)
+ continue;
+
+ ce_ring->per_transfer_context[i] = NULL;
+
+ ath10k_htc_tx_completion_handler(ar, skb);
+ }
+}
+
+/*
+ * Cleanup residual buffers for device shutdown:
+ * buffers that were enqueued for receive
+ * buffers that were to be sent
+ * Note: Buffers that had completed but which were
+ * not yet processed are on a completion queue. They
+ * are handled when the completion thread shuts down.
+ */
+static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ int pipe_num;
+
+ for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
+ struct ath10k_pci_pipe *pipe_info;
+
+ pipe_info = &ar_pci->pipe_info[pipe_num];
+ ath10k_pci_rx_pipe_cleanup(pipe_info);
+ ath10k_pci_tx_pipe_cleanup(pipe_info);
+ }
+}
+
+void ath10k_pci_ce_deinit(struct ath10k *ar)
+{
+ int i;
+
+ for (i = 0; i < CE_COUNT; i++)
+ ath10k_ce_deinit_pipe(ar, i);
+}
+
+void ath10k_pci_flush(struct ath10k *ar)
+{
+ ath10k_pci_rx_retry_sync(ar);
+ ath10k_pci_buffer_cleanup(ar);
+}
+
+static void ath10k_pci_hif_stop(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ unsigned long flags;
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif stop\n");
+
+ ath10k_pci_irq_disable(ar);
+ ath10k_pci_irq_sync(ar);
+
+ ath10k_core_napi_sync_disable(ar);
+
+ cancel_work_sync(&ar_pci->dump_work);
+
+ /* Most likely the device has the HTT Rx ring configured. The only way
+ * to prevent the device from accessing (and possibly corrupting) host
+ * memory is to reset the chip now.
+ *
+ * There's also no known way of masking MSI interrupts on the device.
+ * With ranged MSI the CE-related interrupts can be masked, but
+ * regardless of how many MSI interrupts are assigned, the first one
+ * is always used for firmware indications (crashes) and cannot be
+ * masked. To prevent the device from asserting the interrupt, reset it
+ * before proceeding with cleanup.
+ */
+ ath10k_pci_safe_chip_reset(ar);
+
+ ath10k_pci_flush(ar);
+
+ spin_lock_irqsave(&ar_pci->ps_lock, flags);
+ WARN_ON(ar_pci->ps_wake_refcount > 0);
+ spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
+}
+
+int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
+ void *req, u32 req_len,
+ void *resp, u32 *resp_len)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
+ struct ath10k_pci_pipe *pci_rx = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
+ struct ath10k_ce_pipe *ce_tx = pci_tx->ce_hdl;
+ struct ath10k_ce_pipe *ce_rx = pci_rx->ce_hdl;
+ dma_addr_t req_paddr = 0;
+ dma_addr_t resp_paddr = 0;
+ struct bmi_xfer xfer = {};
+ void *treq, *tresp = NULL;
+ int ret = 0;
+
+ might_sleep();
+
+ if (resp && !resp_len)
+ return -EINVAL;
+
+ if (resp && resp_len && *resp_len == 0)
+ return -EINVAL;
+
+ treq = kmemdup(req, req_len, GFP_KERNEL);
+ if (!treq)
+ return -ENOMEM;
+
+ req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE);
+ ret = dma_mapping_error(ar->dev, req_paddr);
+ if (ret) {
+ ret = -EIO;
+ goto err_dma;
+ }
+
+ if (resp && resp_len) {
+ tresp = kzalloc(*resp_len, GFP_KERNEL);
+ if (!tresp) {
+ ret = -ENOMEM;
+ goto err_req;
+ }
+
+ resp_paddr = dma_map_single(ar->dev, tresp, *resp_len,
+ DMA_FROM_DEVICE);
+ ret = dma_mapping_error(ar->dev, resp_paddr);
+ if (ret) {
+ ret = -EIO;
+ goto err_req;
+ }
+
+ xfer.wait_for_resp = true;
+ xfer.resp_len = 0;
+
+ ath10k_ce_rx_post_buf(ce_rx, &xfer, resp_paddr);
+ }
+
+ ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0);
+ if (ret)
+ goto err_resp;
+
+ ret = ath10k_pci_bmi_wait(ar, ce_tx, ce_rx, &xfer);
+ if (ret) {
+ dma_addr_t unused_buffer;
+ unsigned int unused_nbytes;
+ unsigned int unused_id;
+
+ ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer,
+ &unused_nbytes, &unused_id);
+ } else {
+ /* the transfer completed before the timeout */
+ ret = 0;
+ }
+
+err_resp:
+ if (resp) {
+ dma_addr_t unused_buffer;
+
+ ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer);
+ dma_unmap_single(ar->dev, resp_paddr,
+ *resp_len, DMA_FROM_DEVICE);
+ }
+err_req:
+ dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE);
+
+ if (ret == 0 && resp_len) {
+ *resp_len = min(*resp_len, xfer.resp_len);
+ memcpy(resp, tresp, *resp_len);
+ }
+err_dma:
+ kfree(treq);
+ kfree(tresp);
+
+ return ret;
+}
+
+static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state)
+{
+ struct bmi_xfer *xfer;
+
+ if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer))
+ return;
+
+ xfer->tx_done = true;
+}
+
+static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
+{
+ struct ath10k *ar = ce_state->ar;
+ struct bmi_xfer *xfer;
+ unsigned int nbytes;
+
+ if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer,
+ &nbytes))
+ return;
+
+ if (WARN_ON_ONCE(!xfer))
+ return;
+
+ if (!xfer->wait_for_resp) {
+ ath10k_warn(ar, "unexpected: BMI data received; ignoring\n");
+ return;
+ }
+
+ xfer->resp_len = nbytes;
+ xfer->rx_done = true;
+}
+
+static int ath10k_pci_bmi_wait(struct ath10k *ar,
+ struct ath10k_ce_pipe *tx_pipe,
+ struct ath10k_ce_pipe *rx_pipe,
+ struct bmi_xfer *xfer)
+{
+ unsigned long timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;
+ unsigned long started = jiffies;
+ unsigned long dur;
+ int ret;
+
+ while (time_before_eq(jiffies, timeout)) {
+ ath10k_pci_bmi_send_done(tx_pipe);
+ ath10k_pci_bmi_recv_data(rx_pipe);
+
+ if (xfer->tx_done && (xfer->rx_done == xfer->wait_for_resp)) {
+ ret = 0;
+ goto out;
+ }
+
+ schedule();
+ }
+
+ ret = -ETIMEDOUT;
+
+out:
+ dur = jiffies - started;
+ if (dur > HZ)
+ ath10k_dbg(ar, ATH10K_DBG_BMI,
+ "bmi cmd took %lu jiffies hz %d ret %d\n",
+ dur, HZ, ret);
+ return ret;
+}
+
+/*
+ * Send an interrupt to the device to wake up the Target CPU
+ * so it has an opportunity to notice any changed state.
+ */
+static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
+{
+ u32 addr, val;
+
+ addr = SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS;
+ val = ath10k_pci_read32(ar, addr);
+ val |= CORE_CTRL_CPU_INTR_MASK;
+ ath10k_pci_write32(ar, addr, val);
+
+ return 0;
+}
+
+static int ath10k_pci_get_num_banks(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+ switch (ar_pci->pdev->device) {
+ case QCA988X_2_0_DEVICE_ID_UBNT:
+ case QCA988X_2_0_DEVICE_ID:
+ case QCA99X0_2_0_DEVICE_ID:
+ case QCA9888_2_0_DEVICE_ID:
+ case QCA9984_1_0_DEVICE_ID:
+ case QCA9887_1_0_DEVICE_ID:
+ return 1;
+ case QCA6164_2_1_DEVICE_ID:
+ case QCA6174_2_1_DEVICE_ID:
+ switch (MS(ar->bus_param.chip_id, SOC_CHIP_ID_REV)) {
+ case QCA6174_HW_1_0_CHIP_ID_REV:
+ case QCA6174_HW_1_1_CHIP_ID_REV:
+ case QCA6174_HW_2_1_CHIP_ID_REV:
+ case QCA6174_HW_2_2_CHIP_ID_REV:
+ return 3;
+ case QCA6174_HW_1_3_CHIP_ID_REV:
+ return 2;
+ case QCA6174_HW_3_0_CHIP_ID_REV:
+ case QCA6174_HW_3_1_CHIP_ID_REV:
+ case QCA6174_HW_3_2_CHIP_ID_REV:
+ return 9;
+ }
+ break;
+ case QCA9377_1_0_DEVICE_ID:
+ return 9;
+ }
+
+ ath10k_warn(ar, "unknown number of banks, assuming 1\n");
+ return 1;
+}
+
+static int ath10k_bus_get_num_banks(struct ath10k *ar)
+{
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+
+ return ce->bus_ops->get_num_banks(ar);
+}
+
+int ath10k_pci_init_config(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ u32 interconnect_targ_addr;
+ u32 pcie_state_targ_addr = 0;
+ u32 pipe_cfg_targ_addr = 0;
+ u32 svc_to_pipe_map = 0;
+ u32 pcie_config_flags = 0;
+ u32 ealloc_value;
+ u32 ealloc_targ_addr;
+ u32 flag2_value;
+ u32 flag2_targ_addr;
+ int ret = 0;
+
+ /* Download to Target the CE Config and the service-to-CE map */
+ interconnect_targ_addr =
+ host_interest_item_address(HI_ITEM(hi_interconnect_state));
+
+ /* Supply Target-side CE configuration */
+ ret = ath10k_pci_diag_read32(ar, interconnect_targ_addr,
+ &pcie_state_targ_addr);
+ if (ret != 0) {
+ ath10k_err(ar, "Failed to get pcie state addr: %d\n", ret);
+ return ret;
+ }
+
+ if (pcie_state_targ_addr == 0) {
+ ret = -EIO;
+ ath10k_err(ar, "Invalid pcie state addr\n");
+ return ret;
+ }
+
+ ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
+ offsetof(struct pcie_state,
+ pipe_cfg_addr)),
+ &pipe_cfg_targ_addr);
+ if (ret != 0) {
+ ath10k_err(ar, "Failed to get pipe cfg addr: %d\n", ret);
+ return ret;
+ }
+
+ if (pipe_cfg_targ_addr == 0) {
+ ret = -EIO;
+ ath10k_err(ar, "Invalid pipe cfg addr\n");
+ return ret;
+ }
+
+ ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
+ ar_pci->pipe_config,
+ sizeof(struct ce_pipe_config) *
+ NUM_TARGET_CE_CONFIG_WLAN);
+
+ if (ret != 0) {
+ ath10k_err(ar, "Failed to write pipe cfg: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
+ offsetof(struct pcie_state,
+ svc_to_pipe_map)),
+ &svc_to_pipe_map);
+ if (ret != 0) {
+ ath10k_err(ar, "Failed to get svc/pipe map: %d\n", ret);
+ return ret;
+ }
+
+ if (svc_to_pipe_map == 0) {
+ ret = -EIO;
+ ath10k_err(ar, "Invalid svc_to_pipe map\n");
+ return ret;
+ }
+
+ ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map,
+ ar_pci->serv_to_pipe,
+ sizeof(pci_target_service_to_ce_map_wlan));
+ if (ret != 0) {
+ ath10k_err(ar, "Failed to write svc/pipe map: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
+ offsetof(struct pcie_state,
+ config_flags)),
+ &pcie_config_flags);
+ if (ret != 0) {
+ ath10k_err(ar, "Failed to get pcie config_flags: %d\n", ret);
+ return ret;
+ }
+
+ pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;
+
+ ret = ath10k_pci_diag_write32(ar, (pcie_state_targ_addr +
+ offsetof(struct pcie_state,
+ config_flags)),
+ pcie_config_flags);
+ if (ret != 0) {
+ ath10k_err(ar, "Failed to write pcie config_flags: %d\n", ret);
+ return ret;
+ }
+
+ /* configure early allocation */
+ ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc));
+
+ ret = ath10k_pci_diag_read32(ar, ealloc_targ_addr, &ealloc_value);
+ if (ret != 0) {
+ ath10k_err(ar, "Failed to get early alloc val: %d\n", ret);
+ return ret;
+ }
+
+ /* first bank is switched to IRAM */
+ ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
+ HI_EARLY_ALLOC_MAGIC_MASK);
+ ealloc_value |= ((ath10k_bus_get_num_banks(ar) <<
+ HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
+ HI_EARLY_ALLOC_IRAM_BANKS_MASK);
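+ /* Each HI_EARLY_ALLOC field is packed as (value << SHIFT) & MASK.
+ * Illustrative sketch only (the field position here is an assumption,
+ * not the real register layout): with a 4-bit banks field at bits
+ * [7:4], two IRAM banks would encode as (2 << 4) & 0xf0 = 0x20.
+ */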
+
+ ret = ath10k_pci_diag_write32(ar, ealloc_targ_addr, ealloc_value);
+ if (ret != 0) {
+ ath10k_err(ar, "Failed to set early alloc val: %d\n", ret);
+ return ret;
+ }
+
+ /* Tell Target to proceed with initialization */
+ flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2));
+
+ ret = ath10k_pci_diag_read32(ar, flag2_targ_addr, &flag2_value);
+ if (ret != 0) {
+ ath10k_err(ar, "Failed to get option val: %d\n", ret);
+ return ret;
+ }
+
+ flag2_value |= HI_OPTION_EARLY_CFG_DONE;
+
+ ret = ath10k_pci_diag_write32(ar, flag2_targ_addr, flag2_value);
+ if (ret != 0) {
+ ath10k_err(ar, "Failed to set option val: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void ath10k_pci_override_ce_config(struct ath10k *ar)
+{
+ struct ce_attr *attr;
+ struct ce_pipe_config *config;
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+ /* For QCA6174 we're overriding the Copy Engine 5 configuration,
+ * since it is currently used for another feature.
+ */
+
+ /* Override Host's Copy Engine 5 configuration */
+ attr = &ar_pci->attr[5];
+ attr->src_sz_max = 0;
+ attr->dest_nentries = 0;
+
+ /* Override Target firmware's Copy Engine configuration */
+ config = &ar_pci->pipe_config[5];
+ config->pipedir = __cpu_to_le32(PIPEDIR_OUT);
+ config->nbytes_max = __cpu_to_le32(2048);
+
+ /* Map from service/endpoint to Copy Engine */
+ ar_pci->serv_to_pipe[15].pipenum = __cpu_to_le32(1);
+}
+
+int ath10k_pci_alloc_pipes(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct ath10k_pci_pipe *pipe;
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+ int i, ret;
+
+ for (i = 0; i < CE_COUNT; i++) {
+ pipe = &ar_pci->pipe_info[i];
+ pipe->ce_hdl = &ce->ce_states[i];
+ pipe->pipe_num = i;
+ pipe->hif_ce_state = ar;
+
+ ret = ath10k_ce_alloc_pipe(ar, i, &ar_pci->attr[i]);
+ if (ret) {
+ ath10k_err(ar, "failed to allocate copy engine pipe %d: %d\n",
+ i, ret);
+ return ret;
+ }
+
+ /* Last CE is Diagnostic Window */
+ if (i == CE_DIAG_PIPE) {
+ ar_pci->ce_diag = pipe->ce_hdl;
+ continue;
+ }
+
+ pipe->buf_sz = (size_t)(ar_pci->attr[i].src_sz_max);
+ }
+
+ return 0;
+}
+
+void ath10k_pci_free_pipes(struct ath10k *ar)
+{
+ int i;
+
+ for (i = 0; i < CE_COUNT; i++)
+ ath10k_ce_free_pipe(ar, i);
+}
+
+int ath10k_pci_init_pipes(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ int i, ret;
+
+ for (i = 0; i < CE_COUNT; i++) {
+ ret = ath10k_ce_init_pipe(ar, i, &ar_pci->attr[i]);
+ if (ret) {
+ ath10k_err(ar, "failed to initialize copy engine pipe %d: %d\n",
+ i, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static bool ath10k_pci_has_fw_crashed(struct ath10k *ar)
+{
+ return ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS) &
+ FW_IND_EVENT_PENDING;
+}
+
+static void ath10k_pci_fw_crashed_clear(struct ath10k *ar)
+{
+ u32 val;
+
+ val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
+ val &= ~FW_IND_EVENT_PENDING;
+ ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, val);
+}
+
+static bool ath10k_pci_has_device_gone(struct ath10k *ar)
+{
+ u32 val;
+
+ val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
+ return (val == 0xffffffff);
+}
+
+/* this function effectively clears target memory controller assert line */
+static void ath10k_pci_warm_reset_si0(struct ath10k *ar)
+{
+ u32 val;
+
+ val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
+ ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
+ val | SOC_RESET_CONTROL_SI0_RST_MASK);
+ val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
+
+ msleep(10);
+
+ val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
+ ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
+ val & ~SOC_RESET_CONTROL_SI0_RST_MASK);
+ val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
+
+ msleep(10);
+}
+
+static void ath10k_pci_warm_reset_cpu(struct ath10k *ar)
+{
+ u32 val;
+
+ ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, 0);
+
+ val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
+ ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
+ val | SOC_RESET_CONTROL_CPU_WARM_RST_MASK);
+}
+
+static void ath10k_pci_warm_reset_ce(struct ath10k *ar)
+{
+ u32 val;
+
+ val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
+
+ ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
+ val | SOC_RESET_CONTROL_CE_RST_MASK);
+ msleep(10);
+ ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
+ val & ~SOC_RESET_CONTROL_CE_RST_MASK);
+}
+
+static void ath10k_pci_warm_reset_clear_lf(struct ath10k *ar)
+{
+ u32 val;
+
+ val = ath10k_pci_soc_read32(ar, SOC_LF_TIMER_CONTROL0_ADDRESS);
+ ath10k_pci_soc_write32(ar, SOC_LF_TIMER_CONTROL0_ADDRESS,
+ val & ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK);
+}
+
+static int ath10k_pci_warm_reset(struct ath10k *ar)
+{
+ int ret;
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset\n");
+
+ spin_lock_bh(&ar->data_lock);
+ ar->stats.fw_warm_reset_counter++;
+ spin_unlock_bh(&ar->data_lock);
+
+ ath10k_pci_irq_disable(ar);
+
+ /* Make sure the target CPU is not doing anything dangerous; e.g. if it
+ * were to access the copy engine while the host performs a copy engine
+ * reset, the device could confuse the PCIe controller to the point of
+ * bringing the host system to a complete stop (i.e. a hang).
+ */
+ ath10k_pci_warm_reset_si0(ar);
+ ath10k_pci_warm_reset_cpu(ar);
+ ath10k_pci_init_pipes(ar);
+ ath10k_pci_wait_for_target_init(ar);
+
+ ath10k_pci_warm_reset_clear_lf(ar);
+ ath10k_pci_warm_reset_ce(ar);
+ ath10k_pci_warm_reset_cpu(ar);
+ ath10k_pci_init_pipes(ar);
+
+ ret = ath10k_pci_wait_for_target_init(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to wait for target init: %d\n", ret);
+ return ret;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset complete\n");
+
+ return 0;
+}
+
+static int ath10k_pci_qca99x0_soft_chip_reset(struct ath10k *ar)
+{
+ ath10k_pci_irq_disable(ar);
+ return ath10k_pci_qca99x0_chip_reset(ar);
+}
+
+static int ath10k_pci_safe_chip_reset(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+ if (!ar_pci->pci_soft_reset)
+ return -EOPNOTSUPP;
+
+ return ar_pci->pci_soft_reset(ar);
+}
+
+static int ath10k_pci_qca988x_chip_reset(struct ath10k *ar)
+{
+ int i, ret;
+ u32 val;
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot 988x chip reset\n");
+
+ /* Some hardware revisions (e.g. CUS223v2) have issues with cold reset.
+ * It is thus preferred to use warm reset, which is safer but may not be
+ * able to recover the device from all possible failure scenarios.
+ *
+ * Warm reset doesn't always work on the first try, so attempt it a few
+ * times before giving up.
+ */
+ for (i = 0; i < ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS; i++) {
+ ret = ath10k_pci_warm_reset(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to warm reset attempt %d of %d: %d\n",
+ i + 1, ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS,
+ ret);
+ continue;
+ }
+
+ /* FIXME: Sometimes copy engine doesn't recover after warm
+ * reset. In most cases this needs cold reset. In some of these
+ * cases the device is in such a state that a cold reset may
+ * lock up the host.
+ *
+ * Reading any host interest register via the copy engine is
+ * sufficient to verify that the device is capable of booting
+ * the firmware blob.
+ */
+ ret = ath10k_pci_init_pipes(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to init copy engine: %d\n",
+ ret);
+ continue;
+ }
+
+ ret = ath10k_pci_diag_read32(ar, QCA988X_HOST_INTEREST_ADDRESS,
+ &val);
+ if (ret) {
+ ath10k_warn(ar, "failed to poke copy engine: %d\n",
+ ret);
+ continue;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot chip reset complete (warm)\n");
+ return 0;
+ }
+
+ if (ath10k_pci_reset_mode == ATH10K_PCI_RESET_WARM_ONLY) {
+ ath10k_warn(ar, "refusing cold reset as requested\n");
+ return -EPERM;
+ }
+
+ ret = ath10k_pci_cold_reset(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to cold reset: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath10k_pci_wait_for_target_init(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
+ ret);
+ return ret;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca988x chip reset complete (cold)\n");
+
+ return 0;
+}
+
+static int ath10k_pci_qca6174_chip_reset(struct ath10k *ar)
+{
+ int ret;
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca6174 chip reset\n");
+
+ /* FIXME: QCA6174 requires cold + warm reset to work. */
+
+ ret = ath10k_pci_cold_reset(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to cold reset: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath10k_pci_wait_for_target_init(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = ath10k_pci_warm_reset(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to warm reset: %d\n", ret);
+ return ret;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca6174 chip reset complete (cold)\n");
+
+ return 0;
+}
+
+static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar)
+{
+ int ret;
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca99x0 chip reset\n");
+
+ ret = ath10k_pci_cold_reset(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to cold reset: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath10k_pci_wait_for_target_init(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
+ ret);
+ return ret;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca99x0 chip reset complete (cold)\n");
+
+ return 0;
+}
+
+static int ath10k_pci_chip_reset(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+ if (WARN_ON(!ar_pci->pci_hard_reset))
+ return -EOPNOTSUPP;
+
+ return ar_pci->pci_hard_reset(ar);
+}
+
+static int ath10k_pci_hif_power_up(struct ath10k *ar,
+ enum ath10k_firmware_mode fw_mode)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ int ret;
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power up\n");
+
+ pcie_capability_read_word(ar_pci->pdev, PCI_EXP_LNKCTL,
+ &ar_pci->link_ctl);
+ pcie_capability_clear_word(ar_pci->pdev, PCI_EXP_LNKCTL,
+ PCI_EXP_LNKCTL_ASPMC);
+
+ /*
+ * Bring the target up cleanly.
+ *
+ * The target may be in an undefined state with an AUX-powered Target
+ * and a Host in WoW mode. If the Host crashes, loses power, or is
+ * restarted (without unloading the driver) then the Target is left
+ * (aux) powered and running. On a subsequent driver load, the Target
+ * is in an unexpected state. We try to catch that here in order to
+ * reset the Target and retry the probe.
+ */
+ ret = ath10k_pci_chip_reset(ar);
+ if (ret) {
+ if (ath10k_pci_has_fw_crashed(ar)) {
+ ath10k_warn(ar, "firmware crashed during chip reset\n");
+ ath10k_pci_fw_crashed_clear(ar);
+ ath10k_pci_fw_crashed_dump(ar);
+ }
+
+ ath10k_err(ar, "failed to reset chip: %d\n", ret);
+ goto err_sleep;
+ }
+
+ ret = ath10k_pci_init_pipes(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to initialize CE: %d\n", ret);
+ goto err_sleep;
+ }
+
+ ret = ath10k_pci_init_config(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to setup init config: %d\n", ret);
+ goto err_ce;
+ }
+
+ ret = ath10k_pci_wake_target_cpu(ar);
+ if (ret) {
+ ath10k_err(ar, "could not wake up target CPU: %d\n", ret);
+ goto err_ce;
+ }
+
+ return 0;
+
+err_ce:
+ ath10k_pci_ce_deinit(ar);
+
+err_sleep:
+ return ret;
+}
+
+void ath10k_pci_hif_power_down(struct ath10k *ar)
+{
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power down\n");
+
+ /* Currently hif_power_up performs effectively a reset and hif_stop
+ * resets the chip as well so there's no point in resetting here.
+ */
+}
+
+static int ath10k_pci_hif_suspend(struct ath10k *ar)
+{
+ /* Nothing to do; the important stuff is in the driver suspend. */
+ return 0;
+}
+
+static int ath10k_pci_suspend(struct ath10k *ar)
+{
+ /* The grace timer can still be counting down and ar->ps_awake may still
+ * be true. It is known that the device may be asleep after resuming
+ * regardless of the SoC powersave state before suspending. Hence make
+ * sure the device is asleep before proceeding.
+ */
+ ath10k_pci_sleep_sync(ar);
+
+ return 0;
+}
+
+static int ath10k_pci_hif_resume(struct ath10k *ar)
+{
+ /* Nothing to do; the important stuff is in the driver resume. */
+ return 0;
+}
+
+static int ath10k_pci_resume(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct pci_dev *pdev = ar_pci->pdev;
+ u32 val;
+ int ret = 0;
+
+ ret = ath10k_pci_force_wake(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to wake up target: %d\n", ret);
+ return ret;
+ }
+
+ /* Suspend/Resume resets the PCI configuration space, so we have to
+ * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
+ * from interfering with C3 CPU state. pci_restore_state won't help
+ * here since it only restores the first 64 bytes of the PCI config header.
+ */
+ pci_read_config_dword(pdev, 0x40, &val);
+ if ((val & 0x0000ff00) != 0)
+ pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
+
+ return ret;
+}
+
+static bool ath10k_pci_validate_cal(void *data, size_t size)
+{
+ __le16 *cal_words = data;
+ u16 checksum = 0;
+ size_t i;
+
+ if (size % 2 != 0)
+ return false;
+
+ for (i = 0; i < size / 2; i++)
+ checksum ^= le16_to_cpu(cal_words[i]);
+
+ return checksum == 0xffff;
+}
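+/* The calibration image is valid when the XOR of all its 16-bit words
+ * equals 0xffff. Illustrative example (made-up data, not a real cal file):
+ * for data words 0x1234 and 0x00ff, 0x1234 ^ 0x00ff = 0x12cb, so a stored
+ * checksum word of 0x12cb ^ 0xffff = 0xed34 makes the total XOR 0xffff.
+ */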
+
+static void ath10k_pci_enable_eeprom(struct ath10k *ar)
+{
+ /* Enable SI clock */
+ ath10k_pci_soc_write32(ar, CLOCK_CONTROL_OFFSET, 0x0);
+
+ /* Configure GPIOs for I2C operation */
+ ath10k_pci_write32(ar,
+ GPIO_BASE_ADDRESS + GPIO_PIN0_OFFSET +
+ 4 * QCA9887_1_0_I2C_SDA_GPIO_PIN,
+ SM(QCA9887_1_0_I2C_SDA_PIN_CONFIG,
+ GPIO_PIN0_CONFIG) |
+ SM(1, GPIO_PIN0_PAD_PULL));
+
+ ath10k_pci_write32(ar,
+ GPIO_BASE_ADDRESS + GPIO_PIN0_OFFSET +
+ 4 * QCA9887_1_0_SI_CLK_GPIO_PIN,
+ SM(QCA9887_1_0_SI_CLK_PIN_CONFIG, GPIO_PIN0_CONFIG) |
+ SM(1, GPIO_PIN0_PAD_PULL));
+
+ ath10k_pci_write32(ar,
+ GPIO_BASE_ADDRESS +
+ QCA9887_1_0_GPIO_ENABLE_W1TS_LOW_ADDRESS,
+ 1u << QCA9887_1_0_SI_CLK_GPIO_PIN);
+
+ /* In Swift ASIC - EEPROM clock will be (110MHz/512) = 214KHz */
+ ath10k_pci_write32(ar,
+ SI_BASE_ADDRESS + SI_CONFIG_OFFSET,
+ SM(1, SI_CONFIG_ERR_INT) |
+ SM(1, SI_CONFIG_BIDIR_OD_DATA) |
+ SM(1, SI_CONFIG_I2C) |
+ SM(1, SI_CONFIG_POS_SAMPLE) |
+ SM(1, SI_CONFIG_INACTIVE_DATA) |
+ SM(1, SI_CONFIG_INACTIVE_CLK) |
+ SM(8, SI_CONFIG_DIVIDER));
+}
+
+static int ath10k_pci_read_eeprom(struct ath10k *ar, u16 addr, u8 *out)
+{
+ u32 reg;
+ int wait_limit;
+
+ /* set the device select byte and the read operation */
+ reg = QCA9887_EEPROM_SELECT_READ |
+ SM(addr, QCA9887_EEPROM_ADDR_LO) |
+ SM(addr >> 8, QCA9887_EEPROM_ADDR_HI);
+ ath10k_pci_write32(ar, SI_BASE_ADDRESS + SI_TX_DATA0_OFFSET, reg);
+
+ /* write transmit data, transfer length, and START bit */
+ ath10k_pci_write32(ar, SI_BASE_ADDRESS + SI_CS_OFFSET,
+ SM(1, SI_CS_START) | SM(1, SI_CS_RX_CNT) |
+ SM(4, SI_CS_TX_CNT));
+
+ /* wait max 1 sec */
+ wait_limit = 100000;
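+ /* 100000 polls x 10 us udelay per iteration matches the 1 sec budget */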
+
+ /* wait for SI_CS_DONE_INT */
+ do {
+ reg = ath10k_pci_read32(ar, SI_BASE_ADDRESS + SI_CS_OFFSET);
+ if (MS(reg, SI_CS_DONE_INT))
+ break;
+
+ wait_limit--;
+ udelay(10);
+ } while (wait_limit > 0);
+
+ if (!MS(reg, SI_CS_DONE_INT)) {
+ ath10k_err(ar, "timeout while reading device EEPROM at %04x\n",
+ addr);
+ return -ETIMEDOUT;
+ }
+
+ /* clear SI_CS_DONE_INT */
+ ath10k_pci_write32(ar, SI_BASE_ADDRESS + SI_CS_OFFSET, reg);
+
+ if (MS(reg, SI_CS_DONE_ERR)) {
+ ath10k_err(ar, "failed to read device EEPROM at %04x\n", addr);
+ return -EIO;
+ }
+
+ /* extract receive data */
+ reg = ath10k_pci_read32(ar, SI_BASE_ADDRESS + SI_RX_DATA0_OFFSET);
+ *out = reg;
+
+ return 0;
+}
+
+static int ath10k_pci_hif_fetch_cal_eeprom(struct ath10k *ar, void **data,
+ size_t *data_len)
+{
+ u8 *caldata = NULL;
+ size_t calsize, i;
+ int ret;
+
+ if (!QCA_REV_9887(ar))
+ return -EOPNOTSUPP;
+
+ calsize = ar->hw_params.cal_data_len;
+ caldata = kmalloc(calsize, GFP_KERNEL);
+ if (!caldata)
+ return -ENOMEM;
+
+ ath10k_pci_enable_eeprom(ar);
+
+ for (i = 0; i < calsize; i++) {
+ ret = ath10k_pci_read_eeprom(ar, i, &caldata[i]);
+ if (ret)
+ goto err_free;
+ }
+
+ if (!ath10k_pci_validate_cal(caldata, calsize))
+ goto err_free;
+
+ *data = caldata;
+ *data_len = calsize;
+
+ return 0;
+
+err_free:
+ kfree(caldata);
+
+ return -EINVAL;
+}
+
+static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
+ .tx_sg = ath10k_pci_hif_tx_sg,
+ .diag_read = ath10k_pci_hif_diag_read,
+ .diag_write = ath10k_pci_diag_write_mem,
+ .exchange_bmi_msg = ath10k_pci_hif_exchange_bmi_msg,
+ .start = ath10k_pci_hif_start,
+ .stop = ath10k_pci_hif_stop,
+ .map_service_to_pipe = ath10k_pci_hif_map_service_to_pipe,
+ .get_default_pipe = ath10k_pci_hif_get_default_pipe,
+ .send_complete_check = ath10k_pci_hif_send_complete_check,
+ .get_free_queue_number = ath10k_pci_hif_get_free_queue_number,
+ .power_up = ath10k_pci_hif_power_up,
+ .power_down = ath10k_pci_hif_power_down,
+ .read32 = ath10k_pci_read32,
+ .write32 = ath10k_pci_write32,
+ .suspend = ath10k_pci_hif_suspend,
+ .resume = ath10k_pci_hif_resume,
+ .fetch_cal_eeprom = ath10k_pci_hif_fetch_cal_eeprom,
+};
+
+/*
+ * Top-level interrupt handler for all PCI interrupts from a Target.
+ * When a block of MSI interrupts is allocated, this top-level handler
+ * is not used; instead, we directly call the correct sub-handler.
+ */
+static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
+{
+ struct ath10k *ar = arg;
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ int ret;
+
+ if (ath10k_pci_has_device_gone(ar))
+ return IRQ_NONE;
+
+ ret = ath10k_pci_force_wake(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to wake device up on irq: %d\n", ret);
+ return IRQ_NONE;
+ }
+
+ if ((ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_INTX) &&
+ !ath10k_pci_irq_pending(ar))
+ return IRQ_NONE;
+
+ ath10k_pci_disable_and_clear_intx_irq(ar);
+ ath10k_pci_irq_msi_fw_mask(ar);
+ napi_schedule(&ar->napi);
+
+ return IRQ_HANDLED;
+}
+
+static int ath10k_pci_napi_poll(struct napi_struct *ctx, int budget)
+{
+ struct ath10k *ar = container_of(ctx, struct ath10k, napi);
+ int done = 0;
+
+ if (ath10k_pci_has_fw_crashed(ar)) {
+ ath10k_pci_fw_crashed_clear(ar);
+ ath10k_pci_fw_crashed_dump(ar);
+ napi_complete(ctx);
+ return done;
+ }
+
+ ath10k_ce_per_engine_service_any(ar);
+
+ done = ath10k_htt_txrx_compl_task(ar, budget);
+
+ if (done < budget) {
+ napi_complete_done(ctx, done);
+ /* In case of MSI, interrupts may be received while the NAPI poll
+ * is in progress, so pending interrupts that arrive after all copy
+ * engine pipes have been processed would otherwise not be handled
+ * again. This has been seen to prevent the boot sequence from
+ * completing on x86 platforms. So before enabling interrupts it is
+ * safer to check for pending interrupts and service them
+ * immediately.
+ */
+ if (ath10k_ce_interrupt_summary(ar)) {
+ napi_schedule(ctx);
+ goto out;
+ }
+ ath10k_pci_enable_intx_irq(ar);
+ ath10k_pci_irq_msi_fw_unmask(ar);
+ }
+
+out:
+ return done;
+}
+
+static int ath10k_pci_request_irq_msi(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ int ret;
+
+ ret = request_irq(ar_pci->pdev->irq,
+ ath10k_pci_interrupt_handler,
+ IRQF_SHARED, "ath10k_pci", ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to request MSI irq %d: %d\n",
+ ar_pci->pdev->irq, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath10k_pci_request_irq_intx(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ int ret;
+
+ ret = request_irq(ar_pci->pdev->irq,
+ ath10k_pci_interrupt_handler,
+ IRQF_SHARED, "ath10k_pci", ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to request legacy irq %d: %d\n",
+ ar_pci->pdev->irq, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath10k_pci_request_irq(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+ switch (ar_pci->oper_irq_mode) {
+ case ATH10K_PCI_IRQ_INTX:
+ return ath10k_pci_request_irq_intx(ar);
+ case ATH10K_PCI_IRQ_MSI:
+ return ath10k_pci_request_irq_msi(ar);
+ default:
+ return -EINVAL;
+ }
+}
+
+static void ath10k_pci_free_irq(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+ free_irq(ar_pci->pdev->irq, ar);
+}
+
+void ath10k_pci_init_napi(struct ath10k *ar)
+{
+ netif_napi_add(ar->napi_dev, &ar->napi, ath10k_pci_napi_poll);
+}
+
+static int ath10k_pci_init_irq(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ int ret;
+
+ ath10k_pci_init_napi(ar);
+
+ if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_AUTO)
+ ath10k_info(ar, "limiting irq mode to: %d\n",
+ ath10k_pci_irq_mode);
+
+ /* Try MSI */
+ if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_INTX) {
+ ar_pci->oper_irq_mode = ATH10K_PCI_IRQ_MSI;
+ ret = pci_enable_msi(ar_pci->pdev);
+ if (ret == 0)
+ return 0;
+
+ /* MSI failed, try legacy irq next */
+ }
+
+ /* Try legacy irq
+ *
+ * A potential race occurs here: the CORE_BASE write depends on the
+ * target correctly decoding the AXI address, but the host won't know
+ * when the target writes BAR to CORE_CTRL. This write might get lost
+ * if the target has NOT written BAR. For now, fix the race by
+ * repeating the write in the synchronization check below.
+ */
+ ar_pci->oper_irq_mode = ATH10K_PCI_IRQ_INTX;
+
+ ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
+ PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
+
+ return 0;
+}
+
+static void ath10k_pci_deinit_irq_intx(struct ath10k *ar)
+{
+ ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
+ 0);
+}
+
+static int ath10k_pci_deinit_irq(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+ switch (ar_pci->oper_irq_mode) {
+ case ATH10K_PCI_IRQ_INTX:
+ ath10k_pci_deinit_irq_intx(ar);
+ break;
+ default:
+ pci_disable_msi(ar_pci->pdev);
+ break;
+ }
+
+ return 0;
+}
+
+int ath10k_pci_wait_for_target_init(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ unsigned long timeout;
+ u32 val;
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot waiting target to initialise\n");
+
+ timeout = jiffies + msecs_to_jiffies(ATH10K_PCI_TARGET_WAIT);
+
+ do {
+ val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target indicator %x\n",
+ val);
+
+ /* target should never return this */
+ if (val == 0xffffffff)
+ continue;
+
+ /* the device has crashed so don't bother trying anymore */
+ if (val & FW_IND_EVENT_PENDING)
+ break;
+
+ if (val & FW_IND_INITIALIZED)
+ break;
+
+ if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_INTX)
+ /* Fix potential race by repeating CORE_BASE writes */
+ ath10k_pci_enable_intx_irq(ar);
+
+ mdelay(10);
+ } while (time_before(jiffies, timeout));
+
+ ath10k_pci_disable_and_clear_intx_irq(ar);
+ ath10k_pci_irq_msi_fw_mask(ar);
+
+ if (val == 0xffffffff) {
+ ath10k_err(ar, "failed to read device register, device is gone\n");
+ return -EIO;
+ }
+
+ if (val & FW_IND_EVENT_PENDING) {
+ ath10k_warn(ar, "device has crashed during init\n");
+ return -ECOMM;
+ }
+
+ if (!(val & FW_IND_INITIALIZED)) {
+ ath10k_err(ar, "failed to receive initialized event from target: %08x\n",
+ val);
+ return -ETIMEDOUT;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target initialised\n");
+ return 0;
+}
+
+static int ath10k_pci_cold_reset(struct ath10k *ar)
+{
+ u32 val;
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset\n");
+
+ spin_lock_bh(&ar->data_lock);
+
+ ar->stats.fw_cold_reset_counter++;
+
+ spin_unlock_bh(&ar->data_lock);
+
+ /* Put Target, including PCIe, into RESET. */
+ val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS);
+ val |= 1;
+ ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
+
+ /* After writing into SOC_GLOBAL_RESET to put the device into
+ * reset (and likewise after pulling it out of reset), PCIe may
+ * not be stable enough for immediate register access and can
+ * cause bus errors, so add a delay before any PCIe access.
+ */
+ msleep(20);
+
+ /* Pull Target, including PCIe, out of RESET. */
+ val &= ~1;
+ ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
+
+ msleep(20);
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset complete\n");
+
+ return 0;
+}
+
+static int ath10k_pci_claim(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct pci_dev *pdev = ar_pci->pdev;
+ int ret;
+
+ pci_set_drvdata(pdev, ar);
+
+ ret = pci_enable_device(pdev);
+ if (ret) {
+ ath10k_err(ar, "failed to enable pci device: %d\n", ret);
+ return ret;
+ }
+
+ ret = pci_request_region(pdev, BAR_NUM, "ath");
+ if (ret) {
+ ath10k_err(ar, "failed to request region BAR%d: %d\n", BAR_NUM,
+ ret);
+ goto err_device;
+ }
+
+ /* Target expects 32 bit DMA. Enforce it. */
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret) {
+ ath10k_err(ar, "failed to set dma mask to 32-bit: %d\n", ret);
+ goto err_region;
+ }
+
+ pci_set_master(pdev);
+
+ /* Arrange for access to Target SoC registers. */
+ ar_pci->mem_len = pci_resource_len(pdev, BAR_NUM);
+ ar_pci->mem = pci_iomap(pdev, BAR_NUM, 0);
+ if (!ar_pci->mem) {
+ ath10k_err(ar, "failed to iomap BAR%d\n", BAR_NUM);
+ ret = -EIO;
+ goto err_region;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem);
+ return 0;
+
+err_region:
+ pci_release_region(pdev, BAR_NUM);
+
+err_device:
+ pci_disable_device(pdev);
+
+ return ret;
+}
+
+static void ath10k_pci_release(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct pci_dev *pdev = ar_pci->pdev;
+
+ pci_iounmap(pdev, ar_pci->mem);
+ pci_release_region(pdev, BAR_NUM);
+ pci_disable_device(pdev);
+}
+
+static bool ath10k_pci_chip_is_supported(u32 dev_id, u32 chip_id)
+{
+ const struct ath10k_pci_supp_chip *supp_chip;
+ int i;
+ u32 rev_id = MS(chip_id, SOC_CHIP_ID_REV);
+
+ for (i = 0; i < ARRAY_SIZE(ath10k_pci_supp_chips); i++) {
+ supp_chip = &ath10k_pci_supp_chips[i];
+
+ if (supp_chip->dev_id == dev_id &&
+ supp_chip->rev_id == rev_id)
+ return true;
+ }
+
+ return false;
+}
+
+int ath10k_pci_setup_resource(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+ int ret;
+
+ spin_lock_init(&ce->ce_lock);
+ spin_lock_init(&ar_pci->ps_lock);
+ mutex_init(&ar_pci->ce_diag_mutex);
+
+ INIT_WORK(&ar_pci->dump_work, ath10k_pci_fw_dump_work);
+
+ timer_setup(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry, 0);
+
+ ar_pci->attr = kmemdup(pci_host_ce_config_wlan,
+ sizeof(pci_host_ce_config_wlan),
+ GFP_KERNEL);
+ if (!ar_pci->attr)
+ return -ENOMEM;
+
+ ar_pci->pipe_config = kmemdup(pci_target_ce_config_wlan,
+ sizeof(pci_target_ce_config_wlan),
+ GFP_KERNEL);
+ if (!ar_pci->pipe_config) {
+ ret = -ENOMEM;
+ goto err_free_attr;
+ }
+
+ ar_pci->serv_to_pipe = kmemdup(pci_target_service_to_ce_map_wlan,
+ sizeof(pci_target_service_to_ce_map_wlan),
+ GFP_KERNEL);
+ if (!ar_pci->serv_to_pipe) {
+ ret = -ENOMEM;
+ goto err_free_pipe_config;
+ }
+
+ if (QCA_REV_6174(ar) || QCA_REV_9377(ar))
+ ath10k_pci_override_ce_config(ar);
+
+ ret = ath10k_pci_alloc_pipes(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to allocate copy engine pipes: %d\n",
+ ret);
+ goto err_free_serv_to_pipe;
+ }
+
+ return 0;
+
+err_free_serv_to_pipe:
+ kfree(ar_pci->serv_to_pipe);
+err_free_pipe_config:
+ kfree(ar_pci->pipe_config);
+err_free_attr:
+ kfree(ar_pci->attr);
+ return ret;
+}
+
+void ath10k_pci_release_resource(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+ ath10k_pci_rx_retry_sync(ar);
+ netif_napi_del(&ar->napi);
+ ath10k_pci_ce_deinit(ar);
+ ath10k_pci_free_pipes(ar);
+ kfree(ar_pci->attr);
+ kfree(ar_pci->pipe_config);
+ kfree(ar_pci->serv_to_pipe);
+}
+
+static const struct ath10k_bus_ops ath10k_pci_bus_ops = {
+ .read32 = ath10k_bus_pci_read32,
+ .write32 = ath10k_bus_pci_write32,
+ .get_num_banks = ath10k_pci_get_num_banks,
+};
+
+static int ath10k_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *pci_dev)
+{
+ int ret = 0;
+ struct ath10k *ar;
+ struct ath10k_pci *ar_pci;
+ enum ath10k_hw_rev hw_rev;
+ struct ath10k_bus_params bus_params = {};
+ bool pci_ps, is_qca988x = false;
+ int (*pci_soft_reset)(struct ath10k *ar);
+ int (*pci_hard_reset)(struct ath10k *ar);
+ u32 (*targ_cpu_to_ce_addr)(struct ath10k *ar, u32 addr);
+
+ switch (pci_dev->device) {
+ case QCA988X_2_0_DEVICE_ID_UBNT:
+ case QCA988X_2_0_DEVICE_ID:
+ hw_rev = ATH10K_HW_QCA988X;
+ pci_ps = false;
+ is_qca988x = true;
+ pci_soft_reset = ath10k_pci_warm_reset;
+ pci_hard_reset = ath10k_pci_qca988x_chip_reset;
+ targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr;
+ break;
+ case QCA9887_1_0_DEVICE_ID:
+ hw_rev = ATH10K_HW_QCA9887;
+ pci_ps = false;
+ pci_soft_reset = ath10k_pci_warm_reset;
+ pci_hard_reset = ath10k_pci_qca988x_chip_reset;
+ targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr;
+ break;
+ case QCA6164_2_1_DEVICE_ID:
+ case QCA6174_2_1_DEVICE_ID:
+ hw_rev = ATH10K_HW_QCA6174;
+ pci_ps = true;
+ pci_soft_reset = ath10k_pci_warm_reset;
+ pci_hard_reset = ath10k_pci_qca6174_chip_reset;
+ targ_cpu_to_ce_addr = ath10k_pci_qca6174_targ_cpu_to_ce_addr;
+ break;
+ case QCA99X0_2_0_DEVICE_ID:
+ hw_rev = ATH10K_HW_QCA99X0;
+ pci_ps = false;
+ pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset;
+ pci_hard_reset = ath10k_pci_qca99x0_chip_reset;
+ targ_cpu_to_ce_addr = ath10k_pci_qca99x0_targ_cpu_to_ce_addr;
+ break;
+ case QCA9984_1_0_DEVICE_ID:
+ hw_rev = ATH10K_HW_QCA9984;
+ pci_ps = false;
+ pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset;
+ pci_hard_reset = ath10k_pci_qca99x0_chip_reset;
+ targ_cpu_to_ce_addr = ath10k_pci_qca99x0_targ_cpu_to_ce_addr;
+ break;
+ case QCA9888_2_0_DEVICE_ID:
+ hw_rev = ATH10K_HW_QCA9888;
+ pci_ps = false;
+ pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset;
+ pci_hard_reset = ath10k_pci_qca99x0_chip_reset;
+ targ_cpu_to_ce_addr = ath10k_pci_qca99x0_targ_cpu_to_ce_addr;
+ break;
+ case QCA9377_1_0_DEVICE_ID:
+ hw_rev = ATH10K_HW_QCA9377;
+ pci_ps = true;
+ pci_soft_reset = ath10k_pci_warm_reset;
+ pci_hard_reset = ath10k_pci_qca6174_chip_reset;
+ targ_cpu_to_ce_addr = ath10k_pci_qca6174_targ_cpu_to_ce_addr;
+ break;
+ default:
+ WARN_ON(1);
+ return -EOPNOTSUPP;
+ }
+
+ ar = ath10k_core_create(sizeof(*ar_pci), &pdev->dev, ATH10K_BUS_PCI,
+ hw_rev, &ath10k_pci_hif_ops);
+ if (!ar) {
+ dev_err(&pdev->dev, "failed to allocate core\n");
+ return -ENOMEM;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "pci probe %04x:%04x %04x:%04x\n",
+ pdev->vendor, pdev->device,
+ pdev->subsystem_vendor, pdev->subsystem_device);
+
+ ar_pci = ath10k_pci_priv(ar);
+ ar_pci->pdev = pdev;
+ ar_pci->dev = &pdev->dev;
+ ar_pci->ar = ar;
+ ar->dev_id = pci_dev->device;
+ ar_pci->pci_ps = pci_ps;
+ ar_pci->ce.bus_ops = &ath10k_pci_bus_ops;
+ ar_pci->pci_soft_reset = pci_soft_reset;
+ ar_pci->pci_hard_reset = pci_hard_reset;
+ ar_pci->targ_cpu_to_ce_addr = targ_cpu_to_ce_addr;
+ ar->ce_priv = &ar_pci->ce;
+
+ ar->id.vendor = pdev->vendor;
+ ar->id.device = pdev->device;
+ ar->id.subsystem_vendor = pdev->subsystem_vendor;
+ ar->id.subsystem_device = pdev->subsystem_device;
+
+ timer_setup(&ar_pci->ps_timer, ath10k_pci_ps_timer, 0);
+
+ ret = ath10k_pci_setup_resource(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to setup resource: %d\n", ret);
+ goto err_core_destroy;
+ }
+
+ ret = ath10k_pci_claim(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to claim device: %d\n", ret);
+ goto err_free_pipes;
+ }
+
+ ret = ath10k_pci_force_wake(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to wake up device : %d\n", ret);
+ goto err_sleep;
+ }
+
+ ath10k_pci_ce_deinit(ar);
+ ath10k_pci_irq_disable(ar);
+
+ ret = ath10k_pci_init_irq(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to init irqs: %d\n", ret);
+ goto err_sleep;
+ }
+
+ ath10k_info(ar, "pci irq %s oper_irq_mode %d irq_mode %d reset_mode %d\n",
+ ath10k_pci_get_irq_method(ar), ar_pci->oper_irq_mode,
+ ath10k_pci_irq_mode, ath10k_pci_reset_mode);
+
+ ret = ath10k_pci_request_irq(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to request irqs: %d\n", ret);
+ goto err_deinit_irq;
+ }
+
+ bus_params.dev_type = ATH10K_DEV_TYPE_LL;
+ bus_params.link_can_suspend = true;
+ /* Read CHIP_ID before reset to catch QCA9880-AR1A v1 devices that
+ * fall off the bus during chip_reset. These chips have the same PCI
+ * device id as the QCA9880 BR4A or 2R4E, hence this check.
+ */
+ if (is_qca988x) {
+ bus_params.chip_id =
+ ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
+ if (bus_params.chip_id != 0xffffffff) {
+ if (!ath10k_pci_chip_is_supported(pdev->device,
+ bus_params.chip_id)) {
+ ret = -ENODEV;
+ goto err_unsupported;
+ }
+ }
+ }
+
+ ret = ath10k_pci_chip_reset(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to reset chip: %d\n", ret);
+ goto err_free_irq;
+ }
+
+ bus_params.chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
+ if (bus_params.chip_id == 0xffffffff) {
+ ret = -ENODEV;
+ goto err_unsupported;
+ }
+
+ if (!ath10k_pci_chip_is_supported(pdev->device, bus_params.chip_id)) {
+ ret = -ENODEV;
+ goto err_unsupported;
+ }
+
+ ret = ath10k_core_register(ar, &bus_params);
+ if (ret) {
+ ath10k_err(ar, "failed to register driver core: %d\n", ret);
+ goto err_free_irq;
+ }
+
+ return 0;
+
+err_unsupported:
+ ath10k_err(ar, "device %04x with chip_id %08x isn't supported\n",
+ pdev->device, bus_params.chip_id);
+
+err_free_irq:
+ ath10k_pci_free_irq(ar);
+
+err_deinit_irq:
+ ath10k_pci_release_resource(ar);
+
+err_sleep:
+ ath10k_pci_sleep_sync(ar);
+ ath10k_pci_release(ar);
+
+err_free_pipes:
+ ath10k_pci_free_pipes(ar);
+
+err_core_destroy:
+ ath10k_core_destroy(ar);
+
+ return ret;
+}
+
+static void ath10k_pci_remove(struct pci_dev *pdev)
+{
+ struct ath10k *ar = pci_get_drvdata(pdev);
+
+ ath10k_dbg(ar, ATH10K_DBG_PCI, "pci remove\n");
+
+ if (!ar)
+ return;
+
+ ath10k_core_unregister(ar);
+ ath10k_pci_free_irq(ar);
+ ath10k_pci_deinit_irq(ar);
+ ath10k_pci_release_resource(ar);
+ ath10k_pci_sleep_sync(ar);
+ ath10k_pci_release(ar);
+ ath10k_core_destroy(ar);
+}
+
+MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);
+
+static __maybe_unused int ath10k_pci_pm_suspend(struct device *dev)
+{
+ struct ath10k *ar = dev_get_drvdata(dev);
+ int ret;
+
+ ret = ath10k_pci_suspend(ar);
+ if (ret)
+ ath10k_warn(ar, "failed to suspend hif: %d\n", ret);
+
+ return ret;
+}
+
+static __maybe_unused int ath10k_pci_pm_resume(struct device *dev)
+{
+ struct ath10k *ar = dev_get_drvdata(dev);
+ int ret;
+
+ ret = ath10k_pci_resume(ar);
+ if (ret)
+ ath10k_warn(ar, "failed to resume hif: %d\n", ret);
+
+ return ret;
+}
+
+static SIMPLE_DEV_PM_OPS(ath10k_pci_pm_ops,
+ ath10k_pci_pm_suspend,
+ ath10k_pci_pm_resume);
+
+static struct pci_driver ath10k_pci_driver = {
+ .name = "ath10k_pci",
+ .id_table = ath10k_pci_id_table,
+ .probe = ath10k_pci_probe,
+ .remove = ath10k_pci_remove,
+#ifdef CONFIG_PM
+ .driver.pm = &ath10k_pci_pm_ops,
+#endif
+};
+
+static int __init ath10k_pci_init(void)
+{
+ int ret1, ret2;
+
+ ret1 = pci_register_driver(&ath10k_pci_driver);
+ if (ret1)
+ printk(KERN_ERR "failed to register ath10k pci driver: %d\n",
+ ret1);
+
+ ret2 = ath10k_ahb_init();
+ if (ret2)
+ printk(KERN_ERR "ahb init failed: %d\n", ret2);
+
+ if (ret1 && ret2)
+ return ret1;
+
+ /* registered to at least one bus */
+ return 0;
+}
+module_init(ath10k_pci_init);
+
+static void __exit ath10k_pci_exit(void)
+{
+ pci_unregister_driver(&ath10k_pci_driver);
+ ath10k_ahb_exit();
+}
+
+module_exit(ath10k_pci_exit);
+
+MODULE_AUTHOR("Qualcomm Atheros");
+MODULE_DESCRIPTION("Driver support for Qualcomm Atheros PCIe/AHB 802.11ac WLAN devices");
+MODULE_LICENSE("Dual BSD/GPL");
+
+/* QCA988x 2.0 firmware files */
+MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API2_FILE);
+MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API3_FILE);
+MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API4_FILE);
+MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API5_FILE);
+MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_BOARD_DATA_FILE);
+MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_BOARD_API2_FILE);
+
+/* QCA9887 1.0 firmware files */
+MODULE_FIRMWARE(QCA9887_HW_1_0_FW_DIR "/" ATH10K_FW_API5_FILE);
+MODULE_FIRMWARE(QCA9887_HW_1_0_FW_DIR "/" ATH10K_BOARD_DATA_FILE);
+MODULE_FIRMWARE(QCA9887_HW_1_0_FW_DIR "/" ATH10K_BOARD_API2_FILE);
+
+/* QCA6174 2.1 firmware files */
+MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API4_FILE);
+MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API5_FILE);
+MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_BOARD_DATA_FILE);
+MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_BOARD_API2_FILE);
+
+/* QCA6174 3.1 firmware files */
+MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API4_FILE);
+MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API5_FILE);
+MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API6_FILE);
+MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_BOARD_DATA_FILE);
+MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_BOARD_API2_FILE);
+
+/* QCA9377 1.0 firmware files */
+MODULE_FIRMWARE(QCA9377_HW_1_0_FW_DIR "/" ATH10K_FW_API6_FILE);
+MODULE_FIRMWARE(QCA9377_HW_1_0_FW_DIR "/" ATH10K_FW_API5_FILE);
+MODULE_FIRMWARE(QCA9377_HW_1_0_FW_DIR "/" ATH10K_BOARD_DATA_FILE);
diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h
new file mode 100644
index 000000000000..4c3f536f2ea1
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/pci.h
@@ -0,0 +1,260 @@
+/* SPDX-License-Identifier: ISC */
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _PCI_H_
+#define _PCI_H_
+
+#include <linux/interrupt.h>
+#include <linux/mutex.h>
+
+#include "hw.h"
+#include "ce.h"
+#include "ahb.h"
+
+/*
+ * maximum number of bytes that can be
+ * handled atomically by DiagRead/DiagWrite
+ */
+#define DIAG_TRANSFER_LIMIT 2048
+
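+/* Tracks one BMI request/response exchange; tx_done and rx_done are set
+ * from the CE send/recv completion callbacks and polled by
+ * ath10k_pci_bmi_wait().
+ */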
+struct bmi_xfer {
+ bool tx_done;
+ bool rx_done;
+ bool wait_for_resp;
+ u32 resp_len;
+};
+
+/*
+ * PCI-specific Target state
+ *
+ * NOTE: Structure is shared between Host software and Target firmware!
+ *
+ * Much of this may be of interest to the Host so
+ * HOST_INTEREST->hi_interconnect_state points here
+ * (and all members are 32-bit quantities in order to
+ * facilitate Host access). In particular, Host software is
+ * required to initialize pipe_cfg_addr and svc_to_pipe_map.
+ */
+struct pcie_state {
+ /* Pipe configuration Target address */
+ /* NB: ce_pipe_config[CE_COUNT] */
+ u32 pipe_cfg_addr;
+
+ /* Service to pipe map Target address */
+ /* NB: service_to_pipe[PIPE_TO_CE_MAP_CN] */
+ u32 svc_to_pipe_map;
+
+ /* number of MSI interrupts requested */
+ u32 msi_requested;
+
+ /* number of MSI interrupts granted */
+ u32 msi_granted;
+
+ /* Message Signalled Interrupt address */
+ u32 msi_addr;
+
+ /* Base data */
+ u32 msi_data;
+
+ /*
+ * Data for firmware interrupt;
+ * MSI data for other interrupts are
+ * in various SoC registers
+ */
+ u32 msi_fw_intr_data;
+
+ /* PCIE_PWR_METHOD_* */
+ u32 power_mgmt_method;
+
+ /* PCIE_CONFIG_FLAG_* */
+ u32 config_flags;
+};
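+/* The host reads and writes these fields over the diagnostic window using
+ * their offsets within the shared struct, as ath10k_pci_init_config() does
+ * in pci.c, e.g.:
+ *
+ *	ath10k_pci_diag_read32(ar, pcie_state_targ_addr +
+ *			       offsetof(struct pcie_state, pipe_cfg_addr),
+ *			       &pipe_cfg_targ_addr);
+ */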
+
+/* PCIE_CONFIG_FLAG definitions */
+#define PCIE_CONFIG_FLAG_ENABLE_L1 0x0000001
+
+/* Per-pipe state. */
+struct ath10k_pci_pipe {
+ /* Handle of underlying Copy Engine */
+ struct ath10k_ce_pipe *ce_hdl;
+
+ /* Our pipe number; facilitates use of pipe_info ptrs. */
+ u8 pipe_num;
+
+ /* Convenience back pointer to hif_ce_state. */
+ struct ath10k *hif_ce_state;
+
+ size_t buf_sz;
+
+ /* protects compl_free and num_send_allowed */
+ spinlock_t pipe_lock;
+};
+
+struct ath10k_pci_supp_chip {
+ u32 dev_id;
+ u32 rev_id;
+};
+
+enum ath10k_pci_irq_mode {
+ ATH10K_PCI_IRQ_AUTO = 0,
+ ATH10K_PCI_IRQ_INTX = 1,
+ ATH10K_PCI_IRQ_MSI = 2,
+};
+
+struct ath10k_pci {
+ struct pci_dev *pdev;
+ struct device *dev;
+ struct ath10k *ar;
+ void __iomem *mem;
+ size_t mem_len;
+
+ /* Operating interrupt mode */
+ enum ath10k_pci_irq_mode oper_irq_mode;
+
+ struct ath10k_pci_pipe pipe_info[CE_COUNT_MAX];
+
+ /* Copy Engine used for Diagnostic Accesses */
+ struct ath10k_ce_pipe *ce_diag;
+ /* For protecting ce_diag */
+ struct mutex ce_diag_mutex;
+
+ struct work_struct dump_work;
+
+ struct ath10k_ce ce;
+ struct timer_list rx_post_retry;
+
+ /* Due to HW quirks it is recommended to disable ASPM during device
+ * bootup. To do that the original PCI-E Link Control is stored before
+ * device bootup is executed and re-programmed later.
+ */
+ u16 link_ctl;
+
+ /* Protects ps_awake and ps_wake_refcount */
+ spinlock_t ps_lock;
+
+ /* The device has a special powersave-oriented register. When the device
+ * is considered asleep it drains less power and the driver is forbidden
+ * from accessing most MMIO registers. If the host were to access them
+ * without waking the device up, it might scribble over host memory or
+ * return 0xdeadbeef readouts.
+ */
+ unsigned long ps_wake_refcount;
+
+ /* Waking up takes some time (up to 2ms in some cases) so it can be bad
+ * for latency. To mitigate this the device isn't immediately allowed
+ * to sleep after all references are undone - instead there's a grace
+ * period after which the powersave register is updated unless some
+ * activity to/from device happened in the meantime.
+ *
+ * Also see comments on ATH10K_PCI_SLEEP_GRACE_PERIOD_MSEC.
+ */
+ struct timer_list ps_timer;
+
+ /* MMIO registers are used to communicate with the device. With
+ * intensive traffic accessing powersave register would be a bit
+ * wasteful overhead and would needlessly stall CPU. It is far more
+ * efficient to rely on a variable in RAM and update it only upon
+ * powersave register state changes.
+ */
+ bool ps_awake;
+
+ /* PCI power save, disabled for QCA988X and QCA99X0.
+ * Writing 'false' to this variable avoids frequent locking
+ * on MMIO read/write.
+ */
+ bool pci_ps;
+
+ /* Chip specific pci reset routine used to do a safe reset */
+ int (*pci_soft_reset)(struct ath10k *ar);
+
+ /* Chip specific pci full reset function */
+ int (*pci_hard_reset)(struct ath10k *ar);
+
+ /* chip specific methods for converting target CPU virtual address
+ * space to CE address space
+ */
+ u32 (*targ_cpu_to_ce_addr)(struct ath10k *ar, u32 addr);
+
+ struct ce_attr *attr;
+ struct ce_pipe_config *pipe_config;
+ struct ce_service_to_pipe *serv_to_pipe;
+
+ /* Keep this entry last; memory for struct ath10k_ahb is allocated
+ * (when AHB support is enabled) as a continuation of this struct.
+ */
+ struct ath10k_ahb ahb[];
+
+};
+
+static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar)
+{
+ return (struct ath10k_pci *)ar->drv_priv;
+}
+
+#define ATH10K_PCI_RX_POST_RETRY_MS 50
+#define ATH_PCI_RESET_WAIT_MAX 10 /* ms */
+#define PCIE_WAKE_TIMEOUT 30000 /* 30ms */
+#define PCIE_WAKE_LATE_US 10000 /* 10ms */
+
+#define BAR_NUM 0
+
+#define CDC_WAR_MAGIC_STR 0xceef0000
+#define CDC_WAR_DATA_CE 4
+
+/* Wait up to this many microseconds for a Diagnostic Access CE operation to complete */
+#define DIAG_ACCESS_CE_TIMEOUT_US 10000 /* 10 ms */
+#define DIAG_ACCESS_CE_WAIT_US 50
+
+void ath10k_pci_write32(struct ath10k *ar, u32 offset, u32 value);
+void ath10k_pci_soc_write32(struct ath10k *ar, u32 addr, u32 val);
+void ath10k_pci_reg_write32(struct ath10k *ar, u32 addr, u32 val);
+
+u32 ath10k_pci_read32(struct ath10k *ar, u32 offset);
+u32 ath10k_pci_soc_read32(struct ath10k *ar, u32 addr);
+u32 ath10k_pci_reg_read32(struct ath10k *ar, u32 addr);
+
+int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
+ struct ath10k_hif_sg_item *items, int n_items);
+int ath10k_pci_hif_diag_read(struct ath10k *ar, u32 address, void *buf,
+ size_t buf_len);
+int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
+ const void *data, int nbytes);
+int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar, void *req, u32 req_len,
+ void *resp, u32 *resp_len);
+int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar, u16 service_id,
+ u8 *ul_pipe, u8 *dl_pipe);
+void ath10k_pci_hif_get_default_pipe(struct ath10k *ar, u8 *ul_pipe,
+ u8 *dl_pipe);
+void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
+ int force);
+u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe);
+void ath10k_pci_hif_power_down(struct ath10k *ar);
+int ath10k_pci_alloc_pipes(struct ath10k *ar);
+void ath10k_pci_free_pipes(struct ath10k *ar);
+void ath10k_pci_rx_replenish_retry(struct timer_list *t);
+void ath10k_pci_ce_deinit(struct ath10k *ar);
+void ath10k_pci_init_napi(struct ath10k *ar);
+int ath10k_pci_init_pipes(struct ath10k *ar);
+int ath10k_pci_init_config(struct ath10k *ar);
+void ath10k_pci_rx_post(struct ath10k *ar);
+void ath10k_pci_flush(struct ath10k *ar);
+void ath10k_pci_enable_intx_irq(struct ath10k *ar);
+bool ath10k_pci_irq_pending(struct ath10k *ar);
+void ath10k_pci_disable_and_clear_intx_irq(struct ath10k *ar);
+void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar);
+int ath10k_pci_wait_for_target_init(struct ath10k *ar);
+int ath10k_pci_setup_resource(struct ath10k *ar);
+void ath10k_pci_release_resource(struct ath10k *ar);
+
+/* QCA6174 is known to have Tx/Rx issues when SOC_WAKE register is poked too
+ * frequently. To avoid this put SoC to sleep after a very conservative grace
+ * period. Adjust with great care.
+ */
+#define ATH10K_PCI_SLEEP_GRACE_PERIOD_MSEC 60
+
+#endif /* _PCI_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/qmi.c b/drivers/net/wireless/ath/ath10k/qmi.c
new file mode 100644
index 000000000000..8275345631a0
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/qmi.c
@@ -0,0 +1,1140 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (c) 2018 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/debugfs.h>
+#include <linux/idr.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/module.h>
+#include <linux/net.h>
+#include <linux/platform_device.h>
+#include <linux/firmware/qcom/qcom_scm.h>
+#include <linux/soc/qcom/smem.h>
+#include <linux/string.h>
+#include <net/sock.h>
+
+#include "debug.h"
+#include "snoc.h"
+
+#define ATH10K_QMI_CLIENT_ID 0x4b4e454c
+#define ATH10K_QMI_TIMEOUT 30
+#define SMEM_IMAGE_VERSION_TABLE 469
+#define SMEM_IMAGE_TABLE_CNSS_INDEX 13
+#define SMEM_IMAGE_VERSION_ENTRY_SIZE 128
+#define SMEM_IMAGE_VERSION_NAME_SIZE 75
+
+static int ath10k_qmi_map_msa_permission(struct ath10k_qmi *qmi,
+ struct ath10k_msa_mem_info *mem_info)
+{
+ struct qcom_scm_vmperm dst_perms[3];
+ struct ath10k *ar = qmi->ar;
+ u64 src_perms;
+ u32 perm_count;
+ int ret;
+
+ src_perms = BIT(QCOM_SCM_VMID_HLOS);
+
+ dst_perms[0].vmid = QCOM_SCM_VMID_MSS_MSA;
+ dst_perms[0].perm = QCOM_SCM_PERM_RW;
+ dst_perms[1].vmid = QCOM_SCM_VMID_WLAN;
+ dst_perms[1].perm = QCOM_SCM_PERM_RW;
+
+ if (mem_info->secure) {
+ perm_count = 2;
+ } else {
+ dst_perms[2].vmid = QCOM_SCM_VMID_WLAN_CE;
+ dst_perms[2].perm = QCOM_SCM_PERM_RW;
+ perm_count = 3;
+ }
+
+ ret = qcom_scm_assign_mem(mem_info->addr, mem_info->size,
+ &src_perms, dst_perms, perm_count);
+ if (ret < 0)
+ ath10k_err(ar, "failed to assign msa map permissions: %d\n", ret);
+
+ return ret;
+}
+
+static int ath10k_qmi_unmap_msa_permission(struct ath10k_qmi *qmi,
+ struct ath10k_msa_mem_info *mem_info)
+{
+ struct qcom_scm_vmperm dst_perms;
+ struct ath10k *ar = qmi->ar;
+ u64 src_perms;
+ int ret;
+
+ src_perms = BIT(QCOM_SCM_VMID_MSS_MSA) | BIT(QCOM_SCM_VMID_WLAN);
+
+ if (!mem_info->secure)
+ src_perms |= BIT(QCOM_SCM_VMID_WLAN_CE);
+
+ dst_perms.vmid = QCOM_SCM_VMID_HLOS;
+ dst_perms.perm = QCOM_SCM_PERM_RW;
+
+ ret = qcom_scm_assign_mem(mem_info->addr, mem_info->size,
+ &src_perms, &dst_perms, 1);
+ if (ret < 0)
+ ath10k_err(ar, "failed to unmap msa permissions: %d\n", ret);
+
+ return ret;
+}
+
+static int ath10k_qmi_setup_msa_permissions(struct ath10k_qmi *qmi)
+{
+ int ret;
+ int i;
+
+ if (qmi->msa_fixed_perm)
+ return 0;
+
+ for (i = 0; i < qmi->nr_mem_region; i++) {
+ ret = ath10k_qmi_map_msa_permission(qmi, &qmi->mem_region[i]);
+ if (ret)
+ goto err_unmap;
+ }
+
+ return 0;
+
+err_unmap:
+ for (i--; i >= 0; i--)
+ ath10k_qmi_unmap_msa_permission(qmi, &qmi->mem_region[i]);
+ return ret;
+}
+
+static void ath10k_qmi_remove_msa_permission(struct ath10k_qmi *qmi)
+{
+ int i;
+
+ if (qmi->msa_fixed_perm)
+ return;
+
+ for (i = 0; i < qmi->nr_mem_region; i++)
+ ath10k_qmi_unmap_msa_permission(qmi, &qmi->mem_region[i]);
+}
+
+static int ath10k_qmi_msa_mem_info_send_sync_msg(struct ath10k_qmi *qmi)
+{
+ struct wlfw_msa_info_resp_msg_v01 resp = {};
+ struct wlfw_msa_info_req_msg_v01 req = {};
+ struct ath10k *ar = qmi->ar;
+ phys_addr_t max_mapped_addr;
+ struct qmi_txn txn;
+ int ret;
+ int i;
+
+ req.msa_addr = ar->msa.paddr;
+ req.size = ar->msa.mem_size;
+
+ ret = qmi_txn_init(&qmi->qmi_hdl, &txn,
+ wlfw_msa_info_resp_msg_v01_ei, &resp);
+ if (ret < 0)
+ goto out;
+
+ ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
+ QMI_WLFW_MSA_INFO_REQ_V01,
+ WLFW_MSA_INFO_REQ_MSG_V01_MAX_MSG_LEN,
+ wlfw_msa_info_req_msg_v01_ei, &req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ ath10k_err(ar, "failed to send msa mem info req: %d\n", ret);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);
+ if (ret < 0)
+ goto out;
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ ath10k_err(ar, "msa info req rejected: %d\n", resp.resp.error);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (resp.mem_region_info_len > QMI_WLFW_MAX_MEM_REG_V01) {
+ ath10k_err(ar, "invalid memory region length received: %d\n",
+ resp.mem_region_info_len);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ max_mapped_addr = ar->msa.paddr + ar->msa.mem_size;
+ qmi->nr_mem_region = resp.mem_region_info_len;
+ for (i = 0; i < resp.mem_region_info_len; i++) {
+ if (resp.mem_region_info[i].size > ar->msa.mem_size ||
+ resp.mem_region_info[i].region_addr > max_mapped_addr ||
+ resp.mem_region_info[i].region_addr < ar->msa.paddr ||
+ resp.mem_region_info[i].size +
+ resp.mem_region_info[i].region_addr > max_mapped_addr) {
+ ath10k_err(ar, "received out of range memory region address 0x%llx with size 0x%x, aborting\n",
+ resp.mem_region_info[i].region_addr,
+ resp.mem_region_info[i].size);
+ ret = -EINVAL;
+ goto fail_unwind;
+ }
+ qmi->mem_region[i].addr = resp.mem_region_info[i].region_addr;
+ qmi->mem_region[i].size = resp.mem_region_info[i].size;
+ qmi->mem_region[i].secure = resp.mem_region_info[i].secure_flag;
+ ath10k_dbg(ar, ATH10K_DBG_QMI,
+ "qmi msa mem region %d addr 0x%pa size 0x%x flag 0x%08x\n",
+ i, &qmi->mem_region[i].addr,
+ qmi->mem_region[i].size,
+ qmi->mem_region[i].secure);
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi msa mem info request completed\n");
+ return 0;
+
+fail_unwind:
+ memset(&qmi->mem_region[0], 0, sizeof(qmi->mem_region[0]) * i);
+out:
+ return ret;
+}
+
+static int ath10k_qmi_msa_ready_send_sync_msg(struct ath10k_qmi *qmi)
+{
+ struct wlfw_msa_ready_resp_msg_v01 resp = {};
+ struct wlfw_msa_ready_req_msg_v01 req = {};
+ struct ath10k *ar = qmi->ar;
+ struct qmi_txn txn;
+ int ret;
+
+ ret = qmi_txn_init(&qmi->qmi_hdl, &txn,
+ wlfw_msa_ready_resp_msg_v01_ei, &resp);
+ if (ret < 0)
+ goto out;
+
+ ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
+ QMI_WLFW_MSA_READY_REQ_V01,
+ WLFW_MSA_READY_REQ_MSG_V01_MAX_MSG_LEN,
+ wlfw_msa_ready_req_msg_v01_ei, &req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ ath10k_err(ar, "failed to send msa mem ready request: %d\n", ret);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);
+ if (ret < 0)
+ goto out;
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ ath10k_err(ar, "msa ready request rejected: %d\n", resp.resp.error);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi msa mem ready request completed\n");
+ return 0;
+
+out:
+ return ret;
+}
+
+static int ath10k_qmi_bdf_dnld_send_sync(struct ath10k_qmi *qmi)
+{
+ struct wlfw_bdf_download_resp_msg_v01 resp = {};
+ struct wlfw_bdf_download_req_msg_v01 *req;
+ struct ath10k *ar = qmi->ar;
+ unsigned int remaining;
+ struct qmi_txn txn;
+ const u8 *temp;
+ int ret;
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ temp = ar->normal_mode_fw.board_data;
+ remaining = ar->normal_mode_fw.board_len;
+
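+ /* Send the board file in segments of at most QMI_WLFW_MAX_DATA_SIZE_V01
+ * bytes; the final segment sets end = 1, which triggers the firmware-side
+ * CRC check (see the note on QMI_ERR_MALFORMED_MSG_V01 below).
+ */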
+ while (remaining) {
+ req->valid = 1;
+ req->file_id_valid = 1;
+ req->file_id = 0;
+ req->total_size_valid = 1;
+ req->total_size = ar->normal_mode_fw.board_len;
+ req->seg_id_valid = 1;
+ req->data_valid = 1;
+ req->end_valid = 1;
+
+ if (remaining > QMI_WLFW_MAX_DATA_SIZE_V01) {
+ req->data_len = QMI_WLFW_MAX_DATA_SIZE_V01;
+ } else {
+ req->data_len = remaining;
+ req->end = 1;
+ }
+
+ memcpy(req->data, temp, req->data_len);
+
+ ret = qmi_txn_init(&qmi->qmi_hdl, &txn,
+ wlfw_bdf_download_resp_msg_v01_ei,
+ &resp);
+ if (ret < 0)
+ goto out;
+
+ ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
+ QMI_WLFW_BDF_DOWNLOAD_REQ_V01,
+ WLFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_MSG_LEN,
+ wlfw_bdf_download_req_msg_v01_ei, req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);
+
+ if (ret < 0)
+ goto out;
+
+ /* end = 1 triggers a CRC check on the BDF. If this fails, we
+ * get a QMI_ERR_MALFORMED_MSG_V01 error, but the FW is still
+ * willing to use the BDF. For some platforms, all the valid
+ * released BDFs fail this CRC check, so attempt to detect this
+ * scenario and treat it as non-fatal.
+ */
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01 &&
+ !(req->end == 1 &&
+ resp.resp.result == QMI_ERR_MALFORMED_MSG_V01)) {
+ ath10k_err(ar, "failed to download board data file: %d\n",
+ resp.resp.error);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ remaining -= req->data_len;
+ temp += req->data_len;
+ req->seg_id++;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi bdf download request completed\n");
+
+ kfree(req);
+ return 0;
+
+out:
+ kfree(req);
+ return ret;
+}
+
+static int ath10k_qmi_send_cal_report_req(struct ath10k_qmi *qmi)
+{
+ struct wlfw_cal_report_resp_msg_v01 resp = {};
+ struct wlfw_cal_report_req_msg_v01 req = {};
+ struct ath10k *ar = qmi->ar;
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ struct qmi_txn txn;
+ int i, j = 0;
+ int ret;
+
+ if (ar_snoc->xo_cal_supported) {
+ req.xo_cal_data_valid = 1;
+ req.xo_cal_data = ar_snoc->xo_cal_data;
+ }
+
+ ret = qmi_txn_init(&qmi->qmi_hdl, &txn, wlfw_cal_report_resp_msg_v01_ei,
+ &resp);
+ if (ret < 0)
+ goto out;
+
+ for (i = 0; i < QMI_WLFW_MAX_NUM_CAL_V01; i++) {
+ if (qmi->cal_data[i].total_size &&
+ qmi->cal_data[i].data) {
+ req.meta_data[j] = qmi->cal_data[i].cal_id;
+ j++;
+ }
+ }
+ req.meta_data_len = j;
+
+ ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
+ QMI_WLFW_CAL_REPORT_REQ_V01,
+ WLFW_CAL_REPORT_REQ_MSG_V01_MAX_MSG_LEN,
+ wlfw_cal_report_req_msg_v01_ei, &req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ ath10k_err(ar, "failed to send calibration request: %d\n", ret);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);
+ if (ret < 0)
+ goto out;
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ ath10k_err(ar, "calibration request rejected: %d\n", resp.resp.error);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi cal report request completed\n");
+ return 0;
+
+out:
+ return ret;
+}
+
+static int
+ath10k_qmi_mode_send_sync_msg(struct ath10k *ar, enum wlfw_driver_mode_enum_v01 mode)
+{
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ struct ath10k_qmi *qmi = ar_snoc->qmi;
+ struct wlfw_wlan_mode_resp_msg_v01 resp = {};
+ struct wlfw_wlan_mode_req_msg_v01 req = {};
+ struct qmi_txn txn;
+ int ret;
+
+ ret = qmi_txn_init(&qmi->qmi_hdl, &txn,
+ wlfw_wlan_mode_resp_msg_v01_ei,
+ &resp);
+ if (ret < 0)
+ goto out;
+
+ req.mode = mode;
+ req.hw_debug_valid = 1;
+ req.hw_debug = 0;
+
+ ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
+ QMI_WLFW_WLAN_MODE_REQ_V01,
+ WLFW_WLAN_MODE_REQ_MSG_V01_MAX_MSG_LEN,
+ wlfw_wlan_mode_req_msg_v01_ei, &req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ ath10k_err(ar, "failed to send wlan mode %d request: %d\n", mode, ret);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);
+ if (ret < 0)
+ goto out;
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ ath10k_err(ar, "more request rejected: %d\n", resp.resp.error);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi wlan mode req completed: %d\n", mode);
+ return 0;
+
+out:
+ return ret;
+}
+
+static int
+ath10k_qmi_cfg_send_sync_msg(struct ath10k *ar,
+ struct ath10k_qmi_wlan_enable_cfg *config,
+ const char *version)
+{
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ struct ath10k_qmi *qmi = ar_snoc->qmi;
+ struct wlfw_wlan_cfg_resp_msg_v01 resp = {};
+ struct wlfw_wlan_cfg_req_msg_v01 *req;
+ struct qmi_txn txn;
+ int ret;
+ u32 i;
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ ret = qmi_txn_init(&qmi->qmi_hdl, &txn,
+ wlfw_wlan_cfg_resp_msg_v01_ei,
+ &resp);
+ if (ret < 0)
+ goto out;
+
+ req->host_version_valid = 0;
+
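+ /*
+ * Copy the CE target/service pipe and shadow register tables into
+ * the request, clamping each length to what the QMI message can
+ * carry.
+ */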
+ req->tgt_cfg_valid = 1;
+ if (config->num_ce_tgt_cfg > QMI_WLFW_MAX_NUM_CE_V01)
+ req->tgt_cfg_len = QMI_WLFW_MAX_NUM_CE_V01;
+ else
+ req->tgt_cfg_len = config->num_ce_tgt_cfg;
+ for (i = 0; i < req->tgt_cfg_len; i++) {
+ req->tgt_cfg[i].pipe_num = config->ce_tgt_cfg[i].pipe_num;
+ req->tgt_cfg[i].pipe_dir = config->ce_tgt_cfg[i].pipe_dir;
+ req->tgt_cfg[i].nentries = config->ce_tgt_cfg[i].nentries;
+ req->tgt_cfg[i].nbytes_max = config->ce_tgt_cfg[i].nbytes_max;
+ req->tgt_cfg[i].flags = config->ce_tgt_cfg[i].flags;
+ }
+
+ req->svc_cfg_valid = 1;
+ if (config->num_ce_svc_pipe_cfg > QMI_WLFW_MAX_NUM_SVC_V01)
+ req->svc_cfg_len = QMI_WLFW_MAX_NUM_SVC_V01;
+ else
+ req->svc_cfg_len = config->num_ce_svc_pipe_cfg;
+ for (i = 0; i < req->svc_cfg_len; i++) {
+ req->svc_cfg[i].service_id = config->ce_svc_cfg[i].service_id;
+ req->svc_cfg[i].pipe_dir = config->ce_svc_cfg[i].pipe_dir;
+ req->svc_cfg[i].pipe_num = config->ce_svc_cfg[i].pipe_num;
+ }
+
+ req->shadow_reg_valid = 1;
+ if (config->num_shadow_reg_cfg >
+ QMI_WLFW_MAX_NUM_SHADOW_REG_V01)
+ req->shadow_reg_len = QMI_WLFW_MAX_NUM_SHADOW_REG_V01;
+ else
+ req->shadow_reg_len = config->num_shadow_reg_cfg;
+
+ memcpy(req->shadow_reg, config->shadow_reg_cfg,
+ sizeof(struct wlfw_shadow_reg_cfg_s_v01) * req->shadow_reg_len);
+
+ ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
+ QMI_WLFW_WLAN_CFG_REQ_V01,
+ WLFW_WLAN_CFG_REQ_MSG_V01_MAX_MSG_LEN,
+ wlfw_wlan_cfg_req_msg_v01_ei, req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ ath10k_err(ar, "failed to send config request: %d\n", ret);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);
+ if (ret < 0)
+ goto out;
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ ath10k_err(ar, "config request rejected: %d\n", resp.resp.error);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi config request completed\n");
+ kfree(req);
+ return 0;
+
+out:
+ kfree(req);
+ return ret;
+}
+
+int ath10k_qmi_wlan_enable(struct ath10k *ar,
+ struct ath10k_qmi_wlan_enable_cfg *config,
+ enum wlfw_driver_mode_enum_v01 mode,
+ const char *version)
+{
+ int ret;
+
+ ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi mode %d config %p\n",
+ mode, config);
+
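+ /* send the pipe/service configuration first, then the mode request */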
+ ret = ath10k_qmi_cfg_send_sync_msg(ar, config, version);
+ if (ret) {
+ ath10k_err(ar, "failed to send qmi config: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath10k_qmi_mode_send_sync_msg(ar, mode);
+ if (ret) {
+ ath10k_err(ar, "failed to send qmi mode: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+int ath10k_qmi_wlan_disable(struct ath10k *ar)
+{
+ return ath10k_qmi_mode_send_sync_msg(ar, QMI_WLFW_OFF_V01);
+}
+
+static void ath10k_qmi_add_wlan_ver_smem(struct ath10k *ar, const char *fw_build_id)
+{
+ u8 *table_ptr;
+ size_t smem_item_size;
+ const u32 smem_img_idx_wlan = SMEM_IMAGE_TABLE_CNSS_INDEX *
+ SMEM_IMAGE_VERSION_ENTRY_SIZE;
+
+ table_ptr = qcom_smem_get(QCOM_SMEM_HOST_ANY,
+ SMEM_IMAGE_VERSION_TABLE,
+ &smem_item_size);
+
+ if (IS_ERR(table_ptr)) {
+ ath10k_err(ar, "smem image version table not found\n");
+ return;
+ }
+
+ if (smem_img_idx_wlan + SMEM_IMAGE_VERSION_ENTRY_SIZE >
+ smem_item_size) {
+ ath10k_err(ar, "smem block size too small: %zu\n",
+ smem_item_size);
+ return;
+ }
+
+ strscpy(table_ptr + smem_img_idx_wlan, fw_build_id,
+ SMEM_IMAGE_VERSION_NAME_SIZE);
+}
+
+static int ath10k_qmi_cap_send_sync_msg(struct ath10k_qmi *qmi)
+{
+ struct wlfw_cap_resp_msg_v01 *resp;
+ struct wlfw_cap_req_msg_v01 req = {};
+ struct ath10k *ar = qmi->ar;
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ struct qmi_txn txn;
+ int ret;
+
+ resp = kzalloc(sizeof(*resp), GFP_KERNEL);
+ if (!resp)
+ return -ENOMEM;
+
+ ret = qmi_txn_init(&qmi->qmi_hdl, &txn, wlfw_cap_resp_msg_v01_ei, resp);
+ if (ret < 0)
+ goto out;
+
+ ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
+ QMI_WLFW_CAP_REQ_V01,
+ WLFW_CAP_REQ_MSG_V01_MAX_MSG_LEN,
+ wlfw_cap_req_msg_v01_ei, &req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ ath10k_err(ar, "failed to send capability request: %d\n", ret);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);
+ if (ret < 0)
+ goto out;
+
+ if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
+ ath10k_err(ar, "capability req rejected: %d\n", resp->resp.error);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (resp->chip_info_valid) {
+ qmi->chip_info.chip_id = resp->chip_info.chip_id;
+ qmi->chip_info.chip_family = resp->chip_info.chip_family;
+ } else {
+ qmi->chip_info.chip_id = 0xFF;
+ }
+
+ if (resp->board_info_valid)
+ qmi->board_info.board_id = resp->board_info.board_id;
+ else
+ qmi->board_info.board_id = 0xFF;
+
+ if (resp->soc_info_valid)
+ qmi->soc_info.soc_id = resp->soc_info.soc_id;
+
+ if (resp->fw_version_info_valid) {
+ qmi->fw_version = resp->fw_version_info.fw_version;
+ strscpy(qmi->fw_build_timestamp, resp->fw_version_info.fw_build_timestamp,
+ sizeof(qmi->fw_build_timestamp));
+ }
+
+ if (resp->fw_build_id_valid)
+ strscpy(qmi->fw_build_id, resp->fw_build_id,
+ MAX_BUILD_ID_LEN + 1);
+
+ if (!test_bit(ATH10K_SNOC_FLAG_REGISTERED, &ar_snoc->flags)) {
+ ath10k_info(ar, "qmi chip_id 0x%x chip_family 0x%x board_id 0x%x soc_id 0x%x\n",
+ qmi->chip_info.chip_id, qmi->chip_info.chip_family,
+ qmi->board_info.board_id, qmi->soc_info.soc_id);
+ ath10k_info(ar, "qmi fw_version 0x%x fw_build_timestamp %s fw_build_id %s\n",
+ qmi->fw_version, qmi->fw_build_timestamp, qmi->fw_build_id);
+ }
+
+ if (resp->fw_build_id_valid)
+ ath10k_qmi_add_wlan_ver_smem(ar, qmi->fw_build_id);
+
+ kfree(resp);
+ return 0;
+
+out:
+ kfree(resp);
+ return ret;
+}
+
+static int ath10k_qmi_host_cap_send_sync(struct ath10k_qmi *qmi)
+{
+ struct wlfw_host_cap_resp_msg_v01 resp = {};
+ struct wlfw_host_cap_req_msg_v01 req = {};
+ const struct qmi_elem_info *req_ei;
+ struct ath10k *ar = qmi->ar;
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ struct qmi_txn txn;
+ int ret;
+
+ req.daemon_support_valid = 1;
+ req.daemon_support = 0;
+
+ ret = qmi_txn_init(&qmi->qmi_hdl, &txn, wlfw_host_cap_resp_msg_v01_ei,
+ &resp);
+ if (ret < 0)
+ goto out;
+
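+ /* some firmware builds encode the host cap fields as 8-bit
+ * values, hence the alternate element-info table for the quirk
+ */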
+ if (test_bit(ATH10K_SNOC_FLAG_8BIT_HOST_CAP_QUIRK, &ar_snoc->flags))
+ req_ei = wlfw_host_cap_8bit_req_msg_v01_ei;
+ else
+ req_ei = wlfw_host_cap_req_msg_v01_ei;
+
+ ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
+ QMI_WLFW_HOST_CAP_REQ_V01,
+ WLFW_HOST_CAP_REQ_MSG_V01_MAX_MSG_LEN,
+ req_ei, &req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ ath10k_err(ar, "failed to send host capability request: %d\n", ret);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);
+ if (ret < 0)
+ goto out;
+
+ /* older FW didn't support this request, which is not fatal */
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01 &&
+ resp.resp.error != QMI_ERR_NOT_SUPPORTED_V01) {
+ ath10k_err(ar, "host capability request rejected: %d\n", resp.resp.error);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi host capability request completed\n");
+ return 0;
+
+out:
+ return ret;
+}
+
+int ath10k_qmi_set_fw_log_mode(struct ath10k *ar, u8 fw_log_mode)
+{
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ struct wlfw_ini_resp_msg_v01 resp = {};
+ struct ath10k_qmi *qmi = ar_snoc->qmi;
+ struct wlfw_ini_req_msg_v01 req = {};
+ struct qmi_txn txn;
+ int ret;
+
+ req.enablefwlog_valid = 1;
+ req.enablefwlog = fw_log_mode;
+
+ ret = qmi_txn_init(&qmi->qmi_hdl, &txn, wlfw_ini_resp_msg_v01_ei,
+ &resp);
+ if (ret < 0)
+ goto out;
+
+ ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
+ QMI_WLFW_INI_REQ_V01,
+ WLFW_INI_REQ_MSG_V01_MAX_MSG_LEN,
+ wlfw_ini_req_msg_v01_ei, &req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ ath10k_err(ar, "failed to send fw log request: %d\n", ret);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);
+ if (ret < 0)
+ goto out;
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ ath10k_err(ar, "fw log request rejected: %d\n",
+ resp.resp.error);
+ ret = -EINVAL;
+ goto out;
+ }
+ ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi fw log request completed, mode: %d\n",
+ fw_log_mode);
+ return 0;
+
+out:
+ return ret;
+}
+
+static int
+ath10k_qmi_ind_register_send_sync_msg(struct ath10k_qmi *qmi)
+{
+ struct wlfw_ind_register_resp_msg_v01 resp = {};
+ struct wlfw_ind_register_req_msg_v01 req = {};
+ struct ath10k *ar = qmi->ar;
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ struct qmi_txn txn;
+ int ret;
+
+ req.client_id_valid = 1;
+ req.client_id = ATH10K_QMI_CLIENT_ID;
+ req.fw_ready_enable_valid = 1;
+ req.fw_ready_enable = 1;
+ req.msa_ready_enable_valid = 1;
+ req.msa_ready_enable = 1;
+
+ if (ar_snoc->xo_cal_supported) {
+ req.xo_cal_enable_valid = 1;
+ req.xo_cal_enable = 1;
+ }
+
+ ret = qmi_txn_init(&qmi->qmi_hdl, &txn,
+ wlfw_ind_register_resp_msg_v01_ei, &resp);
+ if (ret < 0)
+ goto out;
+
+ ret = qmi_send_request(&qmi->qmi_hdl, NULL, &txn,
+ QMI_WLFW_IND_REGISTER_REQ_V01,
+ WLFW_IND_REGISTER_REQ_MSG_V01_MAX_MSG_LEN,
+ wlfw_ind_register_req_msg_v01_ei, &req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ ath10k_err(ar, "failed to send indication registered request: %d\n", ret);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, ATH10K_QMI_TIMEOUT * HZ);
+ if (ret < 0)
+ goto out;
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ ath10k_err(ar, "indication request rejected: %d\n", resp.resp.error);
+ ret = -EINVAL;
+ goto out;
+ }
+
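+ /* the firmware may already be up at registration time; latch its
+ * state from the response
+ */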
+ if (resp.fw_status_valid) {
+ if (resp.fw_status & QMI_WLFW_FW_READY_V01)
+ qmi->fw_ready = true;
+ }
+ ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi indication register request completed\n");
+ return 0;
+
+out:
+ return ret;
+}
+
+static void ath10k_qmi_event_server_arrive(struct ath10k_qmi *qmi)
+{
+ struct ath10k *ar = qmi->ar;
+ int ret;
+
+ ret = ath10k_qmi_ind_register_send_sync_msg(qmi);
+ if (ret)
+ return;
+
+ if (qmi->fw_ready) {
+ ath10k_snoc_fw_indication(ar, ATH10K_QMI_EVENT_FW_READY_IND);
+ return;
+ }
+
+ ret = ath10k_qmi_host_cap_send_sync(qmi);
+ if (ret)
+ return;
+
+ ret = ath10k_qmi_msa_mem_info_send_sync_msg(qmi);
+ if (ret)
+ return;
+
+ /*
+ * HACK: sleep for a while between receiving the msa info response
+ * and the XPU update to prevent SDM845 from crashing due to a security
+ * violation, when running MPSS.AT.4.0.c2-01184-SDM845_GEN_PACK-1.
+ */
+ msleep(20);
+
+ ret = ath10k_qmi_setup_msa_permissions(qmi);
+ if (ret)
+ return;
+
+ ret = ath10k_qmi_msa_ready_send_sync_msg(qmi);
+ if (ret)
+ goto err_setup_msa;
+
+ ret = ath10k_qmi_cap_send_sync_msg(qmi);
+ if (ret)
+ goto err_setup_msa;
+
+ return;
+
+err_setup_msa:
+ ath10k_qmi_remove_msa_permission(qmi);
+}
+
+static int ath10k_qmi_fetch_board_file(struct ath10k_qmi *qmi)
+{
+ struct ath10k *ar = qmi->ar;
+ int ret;
+
+ ar->hif.bus = ATH10K_BUS_SNOC;
+ ar->id.qmi_ids_valid = true;
+ ar->id.qmi_board_id = qmi->board_info.board_id;
+ ar->id.qmi_chip_id = qmi->chip_info.chip_id;
+ ar->hw_params.fw.dir = WCN3990_HW_1_0_FW_DIR;
+
+ ret = ath10k_core_check_dt(ar);
+ if (ret)
+ ath10k_dbg(ar, ATH10K_DBG_QMI, "DT bdf variant name not set.\n");
+
+ return ath10k_core_fetch_board_file(qmi->ar, ATH10K_BD_IE_BOARD);
+}
+
+static int
+ath10k_qmi_driver_event_post(struct ath10k_qmi *qmi,
+ enum ath10k_qmi_driver_event_type type,
+ void *data)
+{
+ struct ath10k_qmi_driver_event *event;
+
+ event = kzalloc(sizeof(*event), GFP_ATOMIC);
+ if (!event)
+ return -ENOMEM;
+
+ event->type = type;
+ event->data = data;
+
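+ /* queue the event; ath10k_qmi_driver_event_work() does the real work */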
+ spin_lock(&qmi->event_lock);
+ list_add_tail(&event->list, &qmi->event_list);
+ spin_unlock(&qmi->event_lock);
+
+ queue_work(qmi->event_wq, &qmi->event_work);
+
+ return 0;
+}
+
+static void ath10k_qmi_event_server_exit(struct ath10k_qmi *qmi)
+{
+ struct ath10k *ar = qmi->ar;
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+
+ ath10k_qmi_remove_msa_permission(qmi);
+ ath10k_core_free_board_files(ar);
+ if (!test_bit(ATH10K_SNOC_FLAG_UNREGISTERING, &ar_snoc->flags) &&
+ !test_bit(ATH10K_SNOC_FLAG_MODEM_STOPPED, &ar_snoc->flags))
+ ath10k_snoc_fw_crashed_dump(ar);
+
+ ath10k_snoc_fw_indication(ar, ATH10K_QMI_EVENT_FW_DOWN_IND);
+ ath10k_dbg(ar, ATH10K_DBG_QMI, "wifi fw qmi service disconnected\n");
+}
+
+static void ath10k_qmi_event_msa_ready(struct ath10k_qmi *qmi)
+{
+ int ret;
+
+ ret = ath10k_qmi_fetch_board_file(qmi);
+ if (ret)
+ goto out;
+
+ ret = ath10k_qmi_bdf_dnld_send_sync(qmi);
+ if (ret)
+ goto out;
+
+ ret = ath10k_qmi_send_cal_report_req(qmi);
+
+out:
+ return;
+}
+
+static int ath10k_qmi_event_fw_ready_ind(struct ath10k_qmi *qmi)
+{
+ struct ath10k *ar = qmi->ar;
+
+ ath10k_dbg(ar, ATH10K_DBG_QMI, "wifi fw ready event received\n");
+ ath10k_snoc_fw_indication(ar, ATH10K_QMI_EVENT_FW_READY_IND);
+
+ return 0;
+}
+
+static void ath10k_qmi_fw_ready_ind(struct qmi_handle *qmi_hdl,
+ struct sockaddr_qrtr *sq,
+ struct qmi_txn *txn, const void *data)
+{
+ struct ath10k_qmi *qmi = container_of(qmi_hdl, struct ath10k_qmi, qmi_hdl);
+
+ ath10k_qmi_driver_event_post(qmi, ATH10K_QMI_EVENT_FW_READY_IND, NULL);
+}
+
+static void ath10k_qmi_msa_ready_ind(struct qmi_handle *qmi_hdl,
+ struct sockaddr_qrtr *sq,
+ struct qmi_txn *txn, const void *data)
+{
+ struct ath10k_qmi *qmi = container_of(qmi_hdl, struct ath10k_qmi, qmi_hdl);
+
+ ath10k_qmi_driver_event_post(qmi, ATH10K_QMI_EVENT_MSA_READY_IND, NULL);
+}
+
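+/* indication handlers only post events to the driver workqueue; the
+ * actual processing happens in ath10k_qmi_driver_event_work()
+ */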
+static const struct qmi_msg_handler qmi_msg_handler[] = {
+ {
+ .type = QMI_INDICATION,
+ .msg_id = QMI_WLFW_FW_READY_IND_V01,
+ .ei = wlfw_fw_ready_ind_msg_v01_ei,
+ .decoded_size = sizeof(struct wlfw_fw_ready_ind_msg_v01),
+ .fn = ath10k_qmi_fw_ready_ind,
+ },
+ {
+ .type = QMI_INDICATION,
+ .msg_id = QMI_WLFW_MSA_READY_IND_V01,
+ .ei = wlfw_msa_ready_ind_msg_v01_ei,
+ .decoded_size = sizeof(struct wlfw_msa_ready_ind_msg_v01),
+ .fn = ath10k_qmi_msa_ready_ind,
+ },
+ {}
+};
+
+static int ath10k_qmi_new_server(struct qmi_handle *qmi_hdl,
+ struct qmi_service *service)
+{
+ struct ath10k_qmi *qmi = container_of(qmi_hdl, struct ath10k_qmi, qmi_hdl);
+ struct sockaddr_qrtr *sq = &qmi->sq;
+ struct ath10k *ar = qmi->ar;
+ int ret;
+
+ sq->sq_family = AF_QIPCRTR;
+ sq->sq_node = service->node;
+ sq->sq_port = service->port;
+
+ ath10k_dbg(ar, ATH10K_DBG_QMI, "wifi fw qmi service found\n");
+
+ ret = kernel_connect(qmi_hdl->sock, (struct sockaddr_unsized *)&qmi->sq,
+ sizeof(qmi->sq), 0);
+ if (ret) {
+ ath10k_err(ar, "failed to connect to a remote QMI service port\n");
+ return ret;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi wifi fw qmi service connected\n");
+ ath10k_qmi_driver_event_post(qmi, ATH10K_QMI_EVENT_SERVER_ARRIVE, NULL);
+
+ return ret;
+}
+
+static void ath10k_qmi_del_server(struct qmi_handle *qmi_hdl,
+ struct qmi_service *service)
+{
+ struct ath10k_qmi *qmi =
+ container_of(qmi_hdl, struct ath10k_qmi, qmi_hdl);
+
+ qmi->fw_ready = false;
+
+ /*
+ * Process the del_server event only when it comes from the QMI
+ * server itself. The QMI infrastructure also emits del_server
+ * when any client releases the qmi handle; that event must not
+ * be handled here.
+ */
+ if (qmi->state == ATH10K_QMI_STATE_INIT_DONE)
+ ath10k_qmi_driver_event_post(qmi, ATH10K_QMI_EVENT_SERVER_EXIT,
+ NULL);
+}
+
+static const struct qmi_ops ath10k_qmi_ops = {
+ .new_server = ath10k_qmi_new_server,
+ .del_server = ath10k_qmi_del_server,
+};
+
+static void ath10k_qmi_driver_event_work(struct work_struct *work)
+{
+ struct ath10k_qmi *qmi = container_of(work, struct ath10k_qmi,
+ event_work);
+ struct ath10k_qmi_driver_event *event;
+ struct ath10k *ar = qmi->ar;
+
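+ /* drop the lock while handling each event so the handlers, which
+ * may sleep, run outside the spinlock and new events can be posted
+ * meanwhile
+ */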
+ spin_lock(&qmi->event_lock);
+ while (!list_empty(&qmi->event_list)) {
+ event = list_first_entry(&qmi->event_list,
+ struct ath10k_qmi_driver_event, list);
+ list_del(&event->list);
+ spin_unlock(&qmi->event_lock);
+
+ switch (event->type) {
+ case ATH10K_QMI_EVENT_SERVER_ARRIVE:
+ ath10k_qmi_event_server_arrive(qmi);
+ if (qmi->no_msa_ready_indicator) {
+ ath10k_info(ar, "qmi not waiting for msa_ready indicator");
+ ath10k_qmi_event_msa_ready(qmi);
+ }
+ break;
+ case ATH10K_QMI_EVENT_SERVER_EXIT:
+ ath10k_qmi_event_server_exit(qmi);
+ break;
+ case ATH10K_QMI_EVENT_FW_READY_IND:
+ ath10k_qmi_event_fw_ready_ind(qmi);
+ break;
+ case ATH10K_QMI_EVENT_MSA_READY_IND:
+ if (qmi->no_msa_ready_indicator) {
+ ath10k_warn(ar, "qmi unexpected msa_ready indicator");
+ break;
+ }
+ ath10k_qmi_event_msa_ready(qmi);
+ break;
+ default:
+ ath10k_warn(ar, "invalid event type: %d", event->type);
+ break;
+ }
+ kfree(event);
+ spin_lock(&qmi->event_lock);
+ }
+ spin_unlock(&qmi->event_lock);
+}
+
+int ath10k_qmi_init(struct ath10k *ar, u32 msa_size)
+{
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ struct device *dev = ar->dev;
+ struct ath10k_qmi *qmi;
+ int ret;
+
+ qmi = kzalloc(sizeof(*qmi), GFP_KERNEL);
+ if (!qmi)
+ return -ENOMEM;
+
+ qmi->ar = ar;
+ ar_snoc->qmi = qmi;
+
+ if (of_property_read_bool(dev->of_node, "qcom,msa-fixed-perm"))
+ qmi->msa_fixed_perm = true;
+
+ if (of_property_read_bool(dev->of_node, "qcom,no-msa-ready-indicator"))
+ qmi->no_msa_ready_indicator = true;
+
+ ret = qmi_handle_init(&qmi->qmi_hdl,
+ WLFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_MSG_LEN,
+ &ath10k_qmi_ops, qmi_msg_handler);
+ if (ret)
+ goto err;
+
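+ /* an ordered workqueue processes posted events strictly in order */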
+ qmi->event_wq = alloc_ordered_workqueue("ath10k_qmi_driver_event", 0);
+ if (!qmi->event_wq) {
+ ath10k_err(ar, "failed to allocate workqueue\n");
+ ret = -ENOMEM;
+ goto err_release_qmi_handle;
+ }
+
+ INIT_LIST_HEAD(&qmi->event_list);
+ spin_lock_init(&qmi->event_lock);
+ INIT_WORK(&qmi->event_work, ath10k_qmi_driver_event_work);
+
+ ret = qmi_add_lookup(&qmi->qmi_hdl, WLFW_SERVICE_ID_V01,
+ WLFW_SERVICE_VERS_V01, 0);
+ if (ret)
+ goto err_qmi_lookup;
+
+ qmi->state = ATH10K_QMI_STATE_INIT_DONE;
+ return 0;
+
+err_qmi_lookup:
+ destroy_workqueue(qmi->event_wq);
+
+err_release_qmi_handle:
+ qmi_handle_release(&qmi->qmi_hdl);
+
+err:
+ kfree(qmi);
+ return ret;
+}
+
+int ath10k_qmi_deinit(struct ath10k *ar)
+{
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ struct ath10k_qmi *qmi = ar_snoc->qmi;
+
+ qmi->state = ATH10K_QMI_STATE_DEINIT;
+ qmi_handle_release(&qmi->qmi_hdl);
+ cancel_work_sync(&qmi->event_work);
+ destroy_workqueue(qmi->event_wq);
+ kfree(qmi);
+ ar_snoc->qmi = NULL;
+
+ return 0;
+}
diff --git a/drivers/net/wireless/ath/ath10k/qmi.h b/drivers/net/wireless/ath/ath10k/qmi.h
new file mode 100644
index 000000000000..0816eb4e4a18
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/qmi.h
@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: ISC */
+/*
+ * Copyright (c) 2018 The Linux Foundation. All rights reserved.
+ */
+#ifndef _ATH10K_QMI_H_
+#define _ATH10K_QMI_H_
+
+#include <linux/soc/qcom/qmi.h>
+#include <linux/qrtr.h>
+#include "qmi_wlfw_v01.h"
+
+#define MAX_NUM_MEMORY_REGIONS 2
+#define MAX_TIMESTAMP_LEN 32
+#define MAX_BUILD_ID_LEN 128
+#define MAX_NUM_CAL_V01 5
+
+enum ath10k_qmi_driver_event_type {
+ ATH10K_QMI_EVENT_SERVER_ARRIVE,
+ ATH10K_QMI_EVENT_SERVER_EXIT,
+ ATH10K_QMI_EVENT_FW_READY_IND,
+ ATH10K_QMI_EVENT_FW_DOWN_IND,
+ ATH10K_QMI_EVENT_MSA_READY_IND,
+ ATH10K_QMI_EVENT_MAX,
+};
+
+struct ath10k_msa_mem_info {
+ phys_addr_t addr;
+ u32 size;
+ bool secure;
+};
+
+struct ath10k_qmi_chip_info {
+ u32 chip_id;
+ u32 chip_family;
+};
+
+struct ath10k_qmi_board_info {
+ u32 board_id;
+};
+
+struct ath10k_qmi_soc_info {
+ u32 soc_id;
+};
+
+struct ath10k_qmi_cal_data {
+ u32 cal_id;
+ u32 total_size;
+ u8 *data;
+};
+
+struct ath10k_tgt_pipe_cfg {
+ __le32 pipe_num;
+ __le32 pipe_dir;
+ __le32 nentries;
+ __le32 nbytes_max;
+ __le32 flags;
+ __le32 reserved;
+};
+
+struct ath10k_svc_pipe_cfg {
+ __le32 service_id;
+ __le32 pipe_dir;
+ __le32 pipe_num;
+};
+
+struct ath10k_shadow_reg_cfg {
+ __le16 ce_id;
+ __le16 reg_offset;
+};
+
+struct ath10k_qmi_wlan_enable_cfg {
+ u32 num_ce_tgt_cfg;
+ struct ath10k_tgt_pipe_cfg *ce_tgt_cfg;
+ u32 num_ce_svc_pipe_cfg;
+ struct ath10k_svc_pipe_cfg *ce_svc_cfg;
+ u32 num_shadow_reg_cfg;
+ struct ath10k_shadow_reg_cfg *shadow_reg_cfg;
+};
+
+struct ath10k_qmi_driver_event {
+ struct list_head list;
+ enum ath10k_qmi_driver_event_type type;
+ void *data;
+};
+
+enum ath10k_qmi_state {
+ ATH10K_QMI_STATE_INIT_DONE,
+ ATH10K_QMI_STATE_DEINIT,
+};
+
+struct ath10k_qmi {
+ struct ath10k *ar;
+ struct qmi_handle qmi_hdl;
+ struct sockaddr_qrtr sq;
+ struct work_struct event_work;
+ struct workqueue_struct *event_wq;
+ struct list_head event_list;
+ spinlock_t event_lock; /* spinlock for qmi event list */
+ u32 nr_mem_region;
+ struct ath10k_msa_mem_info mem_region[MAX_NUM_MEMORY_REGIONS];
+ struct ath10k_qmi_chip_info chip_info;
+ struct ath10k_qmi_board_info board_info;
+ struct ath10k_qmi_soc_info soc_info;
+ char fw_build_id[MAX_BUILD_ID_LEN + 1];
+ u32 fw_version;
+ bool fw_ready;
+ char fw_build_timestamp[MAX_TIMESTAMP_LEN + 1];
+ struct ath10k_qmi_cal_data cal_data[MAX_NUM_CAL_V01];
+ bool msa_fixed_perm;
+ bool no_msa_ready_indicator;
+ enum ath10k_qmi_state state;
+};
+
+int ath10k_qmi_wlan_enable(struct ath10k *ar,
+ struct ath10k_qmi_wlan_enable_cfg *config,
+ enum wlfw_driver_mode_enum_v01 mode,
+ const char *version);
+int ath10k_qmi_wlan_disable(struct ath10k *ar);
+int ath10k_qmi_init(struct ath10k *ar, u32 msa_size);
+int ath10k_qmi_deinit(struct ath10k *ar);
+int ath10k_qmi_set_fw_log_mode(struct ath10k *ar, u8 fw_log_mode);
+
+#endif /* _ATH10K_QMI_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.c b/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.c
new file mode 100644
index 000000000000..0e85c75d2278
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.c
@@ -0,0 +1,2309 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (c) 2018 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/soc/qcom/qmi.h>
+#include <linux/types.h>
+#include "qmi_wlfw_v01.h"
+
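+/*
+ * Each qmi_elem_info array below describes the TLV wire format of one
+ * QMI message: data_type, tlv_type and offset bind a struct field to
+ * its TLV, elem_len bounds arrays, and an all-zero entry terminates
+ * the table.
+ */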
+static const struct qmi_elem_info wlfw_ce_tgt_pipe_cfg_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_ce_tgt_pipe_cfg_s_v01,
+ pipe_num),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum wlfw_pipedir_enum_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_ce_tgt_pipe_cfg_s_v01,
+ pipe_dir),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_ce_tgt_pipe_cfg_s_v01,
+ nentries),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_ce_tgt_pipe_cfg_s_v01,
+ nbytes_max),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_ce_tgt_pipe_cfg_s_v01,
+ flags),
+ },
+ {}
+};
+
+static const struct qmi_elem_info wlfw_ce_svc_pipe_cfg_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_ce_svc_pipe_cfg_s_v01,
+ service_id),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum wlfw_pipedir_enum_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_ce_svc_pipe_cfg_s_v01,
+ pipe_dir),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_ce_svc_pipe_cfg_s_v01,
+ pipe_num),
+ },
+ {}
+};
+
+static const struct qmi_elem_info wlfw_shadow_reg_cfg_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_2_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_shadow_reg_cfg_s_v01,
+ id),
+ },
+ {
+ .data_type = QMI_UNSIGNED_2_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_shadow_reg_cfg_s_v01,
+ offset),
+ },
+ {}
+};
+
+static const struct qmi_elem_info wlfw_shadow_reg_v2_cfg_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_shadow_reg_v2_cfg_s_v01,
+ addr),
+ },
+ {}
+};
+
+static const struct qmi_elem_info wlfw_memory_region_info_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_memory_region_info_s_v01,
+ region_addr),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_memory_region_info_s_v01,
+ size),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_memory_region_info_s_v01,
+ secure_flag),
+ },
+ {}
+};
+
+static const struct qmi_elem_info wlfw_mem_cfg_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_mem_cfg_s_v01,
+ offset),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_mem_cfg_s_v01,
+ size),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_mem_cfg_s_v01,
+ secure_flag),
+ },
+ {}
+};
+
+static const struct qmi_elem_info wlfw_mem_seg_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_mem_seg_s_v01,
+ size),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum wlfw_mem_type_enum_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_mem_seg_s_v01,
+ type),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_mem_seg_s_v01,
+ mem_cfg_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_WLFW_MAX_NUM_MEM_CFG_V01,
+ .elem_size = sizeof(struct wlfw_mem_cfg_s_v01),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_mem_seg_s_v01,
+ mem_cfg),
+ .ei_array = wlfw_mem_cfg_s_v01_ei,
+ },
+ {}
+};
+
+static const struct qmi_elem_info wlfw_mem_seg_resp_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_mem_seg_resp_s_v01,
+ addr),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_mem_seg_resp_s_v01,
+ size),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum wlfw_mem_type_enum_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_mem_seg_resp_s_v01,
+ type),
+ },
+ {}
+};
+
+static const struct qmi_elem_info wlfw_rf_chip_info_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_rf_chip_info_s_v01,
+ chip_id),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_rf_chip_info_s_v01,
+ chip_family),
+ },
+ {}
+};
+
+static const struct qmi_elem_info wlfw_rf_board_info_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_rf_board_info_s_v01,
+ board_id),
+ },
+ {}
+};
+
+static const struct qmi_elem_info wlfw_soc_info_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_soc_info_s_v01,
+ soc_id),
+ },
+ {}
+};
+
+static const struct qmi_elem_info wlfw_fw_version_info_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_fw_version_info_s_v01,
+ fw_version),
+ },
+ {
+ .data_type = QMI_STRING,
+ .elem_len = QMI_WLFW_MAX_TIMESTAMP_LEN_V01 + 1,
+ .elem_size = sizeof(char),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_fw_version_info_s_v01,
+ fw_build_timestamp),
+ },
+ {}
+};
+
+const struct qmi_elem_info wlfw_ind_register_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ fw_ready_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ fw_ready_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ initiate_cal_download_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ initiate_cal_download_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ initiate_cal_update_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ initiate_cal_update_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ msa_ready_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ msa_ready_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ pin_connect_result_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ pin_connect_result_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ client_id_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ client_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x16,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ request_mem_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x16,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ request_mem_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x17,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ mem_ready_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x17,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ mem_ready_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x18,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ fw_init_done_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x18,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ fw_init_done_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x19,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ rejuvenate_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x19,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ rejuvenate_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1A,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ xo_cal_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1A,
+ .offset = offsetof(struct wlfw_ind_register_req_msg_v01,
+ xo_cal_enable),
+ },
+ {}
+};
+
+const struct qmi_elem_info wlfw_ind_register_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_ind_register_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_ind_register_resp_msg_v01,
+ fw_status_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_ind_register_resp_msg_v01,
+ fw_status),
+ },
+ {}
+};
+
+const struct qmi_elem_info wlfw_fw_ready_ind_msg_v01_ei[] = {
+ {}
+};
+
+const struct qmi_elem_info wlfw_msa_ready_ind_msg_v01_ei[] = {
+ {}
+};
+
+const struct qmi_elem_info wlfw_pin_connect_result_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_pin_connect_result_ind_msg_v01,
+ pwr_pin_result_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_pin_connect_result_ind_msg_v01,
+ pwr_pin_result),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_pin_connect_result_ind_msg_v01,
+ phy_io_pin_result_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_pin_connect_result_ind_msg_v01,
+ phy_io_pin_result),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct wlfw_pin_connect_result_ind_msg_v01,
+ rf_pin_result_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct wlfw_pin_connect_result_ind_msg_v01,
+ rf_pin_result),
+ },
+ {}
+};
+
+const struct qmi_elem_info wlfw_wlan_mode_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum wlfw_driver_mode_enum_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct wlfw_wlan_mode_req_msg_v01,
+ mode),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_wlan_mode_req_msg_v01,
+ hw_debug_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_wlan_mode_req_msg_v01,
+ hw_debug),
+ },
+ {}
+};
+
+const struct qmi_elem_info wlfw_wlan_mode_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_wlan_mode_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {}
+};
+
+const struct qmi_elem_info wlfw_wlan_cfg_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+ host_version_valid),
+ },
+ {
+ .data_type = QMI_STRING,
+ .elem_len = QMI_WLFW_MAX_STR_LEN_V01 + 1,
+ .elem_size = sizeof(char),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+ host_version),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+ tgt_cfg_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+ tgt_cfg_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_WLFW_MAX_NUM_CE_V01,
+ .elem_size = sizeof(struct wlfw_ce_tgt_pipe_cfg_s_v01),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+ tgt_cfg),
+ .ei_array = wlfw_ce_tgt_pipe_cfg_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+ svc_cfg_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+ svc_cfg_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_WLFW_MAX_NUM_SVC_V01,
+ .elem_size = sizeof(struct wlfw_ce_svc_pipe_cfg_s_v01),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+ svc_cfg),
+ .ei_array = wlfw_ce_svc_pipe_cfg_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+ shadow_reg_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+ shadow_reg_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_WLFW_MAX_NUM_SHADOW_REG_V01,
+ .elem_size = sizeof(struct wlfw_shadow_reg_cfg_s_v01),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+ shadow_reg),
+ .ei_array = wlfw_shadow_reg_cfg_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+ shadow_reg_v2_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+ shadow_reg_v2_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_WLFW_MAX_SHADOW_REG_V2,
+ .elem_size = sizeof(struct wlfw_shadow_reg_v2_cfg_s_v01),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
+ shadow_reg_v2),
+ .ei_array = wlfw_shadow_reg_v2_cfg_s_v01_ei,
+ },
+ {}
+};
+
+const struct qmi_elem_info wlfw_wlan_cfg_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_wlan_cfg_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {}
+};
+
+const struct qmi_elem_info wlfw_cap_req_msg_v01_ei[] = {
+ {}
+};
+
+const struct qmi_elem_info wlfw_cap_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_cap_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_cap_resp_msg_v01,
+ chip_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct wlfw_rf_chip_info_s_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_cap_resp_msg_v01,
+ chip_info),
+ .ei_array = wlfw_rf_chip_info_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_cap_resp_msg_v01,
+ board_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct wlfw_rf_board_info_s_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_cap_resp_msg_v01,
+ board_info),
+ .ei_array = wlfw_rf_board_info_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct wlfw_cap_resp_msg_v01,
+ soc_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct wlfw_soc_info_s_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct wlfw_cap_resp_msg_v01,
+ soc_info),
+ .ei_array = wlfw_soc_info_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_cap_resp_msg_v01,
+ fw_version_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct wlfw_fw_version_info_s_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_cap_resp_msg_v01,
+ fw_version_info),
+ .ei_array = wlfw_fw_version_info_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct wlfw_cap_resp_msg_v01,
+ fw_build_id_valid),
+ },
+ {
+ .data_type = QMI_STRING,
+ .elem_len = QMI_WLFW_MAX_BUILD_ID_LEN_V01 + 1,
+ .elem_size = sizeof(char),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct wlfw_cap_resp_msg_v01,
+ fw_build_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct wlfw_cap_resp_msg_v01,
+ num_macs_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct wlfw_cap_resp_msg_v01,
+ num_macs),
+ },
+ {}
+};
+
+const struct qmi_elem_info wlfw_bdf_download_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
+ valid),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
+ file_id_valid),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum wlfw_cal_temp_id_enum_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
+ file_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
+ total_size_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
+ total_size),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
+ seg_id_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
+ seg_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
+ data_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
+ data_len),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = QMI_WLFW_MAX_DATA_SIZE_V01,
+ .elem_size = sizeof(u8),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
+ data),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
+ end_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
+ end),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
+ bdf_type_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
+ bdf_type),
+ },
+ {}
+};
+
+const struct qmi_elem_info wlfw_bdf_download_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_bdf_download_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {}
+};
+
+const struct qmi_elem_info wlfw_cal_report_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct wlfw_cal_report_req_msg_v01,
+ meta_data_len),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = QMI_WLFW_MAX_NUM_CAL_V01,
+ .elem_size = sizeof(enum wlfw_cal_temp_id_enum_v01),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct wlfw_cal_report_req_msg_v01,
+ meta_data),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_cal_report_req_msg_v01,
+ xo_cal_data_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_cal_report_req_msg_v01,
+ xo_cal_data),
+ },
+ {}
+};
+
+const struct qmi_elem_info wlfw_cal_report_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_cal_report_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {}
+};
+
+const struct qmi_elem_info wlfw_initiate_cal_download_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum wlfw_cal_temp_id_enum_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct wlfw_initiate_cal_download_ind_msg_v01,
+ cal_id),
+ },
+ {}
+};
+
+const struct qmi_elem_info wlfw_cal_download_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct wlfw_cal_download_req_msg_v01,
+ valid),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_cal_download_req_msg_v01,
+ file_id_valid),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum wlfw_cal_temp_id_enum_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_cal_download_req_msg_v01,
+ file_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_cal_download_req_msg_v01,
+ total_size_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_cal_download_req_msg_v01,
+ total_size),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct wlfw_cal_download_req_msg_v01,
+ seg_id_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct wlfw_cal_download_req_msg_v01,
+ seg_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_cal_download_req_msg_v01,
+ data_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_cal_download_req_msg_v01,
+ data_len),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = QMI_WLFW_MAX_DATA_SIZE_V01,
+ .elem_size = sizeof(u8),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_cal_download_req_msg_v01,
+ data),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct wlfw_cal_download_req_msg_v01,
+ end_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct wlfw_cal_download_req_msg_v01,
+ end),
+ },
+ {}
+};
+
+const struct qmi_elem_info wlfw_cal_download_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_cal_download_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {}
+};
+
+const struct qmi_elem_info wlfw_initiate_cal_update_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum wlfw_cal_temp_id_enum_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct wlfw_initiate_cal_update_ind_msg_v01,
+ cal_id),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_initiate_cal_update_ind_msg_v01,
+ total_size),
+ },
+ {}
+};
+
+const struct qmi_elem_info wlfw_cal_update_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum wlfw_cal_temp_id_enum_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct wlfw_cal_update_req_msg_v01,
+ cal_id),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_cal_update_req_msg_v01,
+ seg_id),
+ },
+ {}
+};
+
+const struct qmi_elem_info wlfw_cal_update_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_cal_update_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_cal_update_resp_msg_v01,
+ file_id_valid),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum wlfw_cal_temp_id_enum_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_cal_update_resp_msg_v01,
+ file_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_cal_update_resp_msg_v01,
+ total_size_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_cal_update_resp_msg_v01,
+ total_size),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct wlfw_cal_update_resp_msg_v01,
+ seg_id_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct wlfw_cal_update_resp_msg_v01,
+ seg_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_cal_update_resp_msg_v01,
+ data_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_cal_update_resp_msg_v01,
+ data_len),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = QMI_WLFW_MAX_DATA_SIZE_V01,
+ .elem_size = sizeof(u8),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_cal_update_resp_msg_v01,
+ data),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct wlfw_cal_update_resp_msg_v01,
+ end_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct wlfw_cal_update_resp_msg_v01,
+ end),
+ },
+ {}
+};
+
+const struct qmi_elem_info wlfw_msa_info_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct wlfw_msa_info_req_msg_v01,
+ msa_addr),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_msa_info_req_msg_v01,
+ size),
+ },
+ {}
+};
+
+const struct qmi_elem_info wlfw_msa_info_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_msa_info_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x03,
+ .offset = offsetof(struct wlfw_msa_info_resp_msg_v01,
+ mem_region_info_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_WLFW_MAX_MEM_REG_V01,
+ .elem_size = sizeof(struct wlfw_memory_region_info_s_v01),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x03,
+ .offset = offsetof(struct wlfw_msa_info_resp_msg_v01,
+ mem_region_info),
+ .ei_array = wlfw_memory_region_info_s_v01_ei,
+ },
+ {}
+};
+
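+/* Messages that carry no TLVs at all (the "placeholder" requests and
+ * indications declared in qmi_wlfw_v01.h) still get an element table,
+ * consisting of only the terminating entry.
+ */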
+const struct qmi_elem_info wlfw_msa_ready_req_msg_v01_ei[] = {
+ {}
+};
+
+const struct qmi_elem_info wlfw_msa_ready_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_msa_ready_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {}
+};
+
+const struct qmi_elem_info wlfw_ini_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_ini_req_msg_v01,
+ enablefwlog_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_ini_req_msg_v01,
+ enablefwlog),
+ },
+ {}
+};
+
+const struct qmi_elem_info wlfw_ini_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_ini_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {}
+};
+
+const struct qmi_elem_info wlfw_athdiag_read_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct wlfw_athdiag_read_req_msg_v01,
+ offset),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_athdiag_read_req_msg_v01,
+ mem_type),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x03,
+ .offset = offsetof(struct wlfw_athdiag_read_req_msg_v01,
+ data_len),
+ },
+ {}
+};
+
+const struct qmi_elem_info wlfw_athdiag_read_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_athdiag_read_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_athdiag_read_resp_msg_v01,
+ data_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_athdiag_read_resp_msg_v01,
+ data_len),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = QMI_WLFW_MAX_ATHDIAG_DATA_SIZE_V01,
+ .elem_size = sizeof(u8),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_athdiag_read_resp_msg_v01,
+ data),
+ },
+ {}
+};
+
+const struct qmi_elem_info wlfw_athdiag_write_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct wlfw_athdiag_write_req_msg_v01,
+ offset),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_athdiag_write_req_msg_v01,
+ mem_type),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x03,
+ .offset = offsetof(struct wlfw_athdiag_write_req_msg_v01,
+ data_len),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = QMI_WLFW_MAX_ATHDIAG_DATA_SIZE_V01,
+ .elem_size = sizeof(u8),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x03,
+ .offset = offsetof(struct wlfw_athdiag_write_req_msg_v01,
+ data),
+ },
+ {}
+};
+
+const struct qmi_elem_info wlfw_athdiag_write_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_athdiag_write_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {}
+};
+
+const struct qmi_elem_info wlfw_vbatt_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct wlfw_vbatt_req_msg_v01,
+ voltage_uv),
+ },
+ {}
+};
+
+const struct qmi_elem_info wlfw_vbatt_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_vbatt_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {}
+};
+
+const struct qmi_elem_info wlfw_mac_addr_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_mac_addr_req_msg_v01,
+ mac_addr_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = QMI_WLFW_MAC_ADDR_SIZE_V01,
+ .elem_size = sizeof(u8),
+ .array_type = STATIC_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_mac_addr_req_msg_v01,
+ mac_addr),
+ },
+ {}
+};
+
+const struct qmi_elem_info wlfw_mac_addr_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_mac_addr_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {}
+};
+
+const struct qmi_elem_info wlfw_host_cap_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+ daemon_support_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+ daemon_support),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+ wake_msi_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+ wake_msi),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+ gpios_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+ gpios_len),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = QMI_WLFW_MAX_NUM_GPIO_V01,
+ .elem_size = sizeof(u32),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+ gpios),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+ nm_modem_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+ nm_modem),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+ bdf_support_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+ bdf_support),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+ bdf_cache_support_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+ bdf_cache_support),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x16,
+ .offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+ m3_support_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x16,
+ .offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+ m3_support),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x17,
+ .offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+ m3_cache_support_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x17,
+ .offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+ m3_cache_support),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x18,
+ .offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+ cal_filesys_support_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x18,
+ .offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+ cal_filesys_support),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x19,
+ .offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+ cal_cache_support_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x19,
+ .offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+ cal_cache_support),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1A,
+ .offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+ cal_done_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1A,
+ .offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+ cal_done),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1B,
+ .offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+ mem_bucket_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1B,
+ .offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+ mem_bucket),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1C,
+ .offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+ mem_cfg_mode_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1C,
+ .offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+ mem_cfg_mode),
+ },
+ {}
+};
+
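+/* Alternate host capabilities table, apparently intended for firmware that
+ * expects the daemon_support TLV (0x10) encoded as a single byte: it reuses
+ * struct wlfw_host_cap_req_msg_v01 but carries only daemon_support, emitted
+ * as QMI_UNSIGNED_1_BYTE in place of the QMI_UNSIGNED_4_BYTE element above.
+ */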
+const struct qmi_elem_info wlfw_host_cap_8bit_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+ daemon_support_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+ daemon_support),
+ },
+ {}
+};
+
+const struct qmi_elem_info wlfw_host_cap_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_host_cap_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {}
+};
+
+const struct qmi_elem_info wlfw_request_mem_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct wlfw_request_mem_ind_msg_v01,
+ mem_seg_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_WLFW_MAX_NUM_MEM_SEG_V01,
+ .elem_size = sizeof(struct wlfw_mem_seg_s_v01),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct wlfw_request_mem_ind_msg_v01,
+ mem_seg),
+ .ei_array = wlfw_mem_seg_s_v01_ei,
+ },
+ {}
+};
+
+const struct qmi_elem_info wlfw_respond_mem_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct wlfw_respond_mem_req_msg_v01,
+ mem_seg_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_WLFW_MAX_NUM_MEM_SEG_V01,
+ .elem_size = sizeof(struct wlfw_mem_seg_resp_s_v01),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct wlfw_respond_mem_req_msg_v01,
+ mem_seg),
+ .ei_array = wlfw_mem_seg_resp_s_v01_ei,
+ },
+ {}
+};
+
+const struct qmi_elem_info wlfw_respond_mem_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_respond_mem_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {}
+};
+
+const struct qmi_elem_info wlfw_mem_ready_ind_msg_v01_ei[] = {
+ {}
+};
+
+const struct qmi_elem_info wlfw_fw_init_done_ind_msg_v01_ei[] = {
+ {}
+};
+
+const struct qmi_elem_info wlfw_rejuvenate_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_rejuvenate_ind_msg_v01,
+ cause_for_rejuvenation_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_rejuvenate_ind_msg_v01,
+ cause_for_rejuvenation),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_rejuvenate_ind_msg_v01,
+ requesting_sub_system_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_rejuvenate_ind_msg_v01,
+ requesting_sub_system),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct wlfw_rejuvenate_ind_msg_v01,
+ line_number_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_2_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct wlfw_rejuvenate_ind_msg_v01,
+ line_number),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_rejuvenate_ind_msg_v01,
+ function_name_valid),
+ },
+ {
+ .data_type = QMI_STRING,
+ .elem_len = QMI_WLFW_FUNCTION_NAME_LEN_V01 + 1,
+ .elem_size = sizeof(char),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct wlfw_rejuvenate_ind_msg_v01,
+ function_name),
+ },
+ {}
+};
+
+const struct qmi_elem_info wlfw_rejuvenate_ack_req_msg_v01_ei[] = {
+ {}
+};
+
+const struct qmi_elem_info wlfw_rejuvenate_ack_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_rejuvenate_ack_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {}
+};
+
+const struct qmi_elem_info wlfw_dynamic_feature_mask_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_dynamic_feature_mask_req_msg_v01,
+ mask_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_dynamic_feature_mask_req_msg_v01,
+ mask),
+ },
+ {}
+};
+
+const struct qmi_elem_info wlfw_dynamic_feature_mask_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_dynamic_feature_mask_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_dynamic_feature_mask_resp_msg_v01,
+ prev_mask_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_dynamic_feature_mask_resp_msg_v01,
+ prev_mask),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_dynamic_feature_mask_resp_msg_v01,
+ curr_mask_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_dynamic_feature_mask_resp_msg_v01,
+ curr_mask),
+ },
+ {}
+};
+
+const struct qmi_elem_info wlfw_m3_info_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct wlfw_m3_info_req_msg_v01,
+ addr),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_m3_info_req_msg_v01,
+ size),
+ },
+ {}
+};
+
+const struct qmi_elem_info wlfw_m3_info_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct wlfw_m3_info_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {}
+};
+
+const struct qmi_elem_info wlfw_xo_cal_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct wlfw_xo_cal_ind_msg_v01,
+ xo_cal_data),
+ },
+ {}
+};
diff --git a/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.h b/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.h
new file mode 100644
index 000000000000..9f311f3bc9e7
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.h
@@ -0,0 +1,694 @@
+/* SPDX-License-Identifier: ISC */
+/*
+ * Copyright (c) 2018 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef WCN3990_QMI_SVC_V01_H
+#define WCN3990_QMI_SVC_V01_H
+
+#define WLFW_SERVICE_ID_V01 0x45
+#define WLFW_SERVICE_VERS_V01 0x01
+
+#define QMI_WLFW_BDF_DOWNLOAD_REQ_V01 0x0025
+#define QMI_WLFW_MEM_READY_IND_V01 0x0037
+#define QMI_WLFW_DYNAMIC_FEATURE_MASK_RESP_V01 0x003B
+#define QMI_WLFW_INITIATE_CAL_UPDATE_IND_V01 0x002A
+#define QMI_WLFW_HOST_CAP_REQ_V01 0x0034
+#define QMI_WLFW_M3_INFO_REQ_V01 0x003C
+#define QMI_WLFW_CAP_REQ_V01 0x0024
+#define QMI_WLFW_FW_INIT_DONE_IND_V01 0x0038
+#define QMI_WLFW_CAL_REPORT_REQ_V01 0x0026
+#define QMI_WLFW_M3_INFO_RESP_V01 0x003C
+#define QMI_WLFW_CAL_UPDATE_RESP_V01 0x0029
+#define QMI_WLFW_CAL_DOWNLOAD_RESP_V01 0x0027
+#define QMI_WLFW_XO_CAL_IND_V01 0x003D
+#define QMI_WLFW_INI_RESP_V01 0x002F
+#define QMI_WLFW_CAL_REPORT_RESP_V01 0x0026
+#define QMI_WLFW_MAC_ADDR_RESP_V01 0x0033
+#define QMI_WLFW_INITIATE_CAL_DOWNLOAD_IND_V01 0x0028
+#define QMI_WLFW_HOST_CAP_RESP_V01 0x0034
+#define QMI_WLFW_MSA_READY_IND_V01 0x002B
+#define QMI_WLFW_ATHDIAG_WRITE_RESP_V01 0x0031
+#define QMI_WLFW_WLAN_MODE_REQ_V01 0x0022
+#define QMI_WLFW_IND_REGISTER_REQ_V01 0x0020
+#define QMI_WLFW_WLAN_CFG_RESP_V01 0x0023
+#define QMI_WLFW_REQUEST_MEM_IND_V01 0x0035
+#define QMI_WLFW_REJUVENATE_IND_V01 0x0039
+#define QMI_WLFW_DYNAMIC_FEATURE_MASK_REQ_V01 0x003B
+#define QMI_WLFW_ATHDIAG_WRITE_REQ_V01 0x0031
+#define QMI_WLFW_WLAN_MODE_RESP_V01 0x0022
+#define QMI_WLFW_RESPOND_MEM_REQ_V01 0x0036
+#define QMI_WLFW_PIN_CONNECT_RESULT_IND_V01 0x002C
+#define QMI_WLFW_FW_READY_IND_V01 0x0021
+#define QMI_WLFW_MSA_READY_RESP_V01 0x002E
+#define QMI_WLFW_CAL_UPDATE_REQ_V01 0x0029
+#define QMI_WLFW_INI_REQ_V01 0x002F
+#define QMI_WLFW_BDF_DOWNLOAD_RESP_V01 0x0025
+#define QMI_WLFW_REJUVENATE_ACK_RESP_V01 0x003A
+#define QMI_WLFW_MSA_INFO_RESP_V01 0x002D
+#define QMI_WLFW_MSA_READY_REQ_V01 0x002E
+#define QMI_WLFW_CAP_RESP_V01 0x0024
+#define QMI_WLFW_REJUVENATE_ACK_REQ_V01 0x003A
+#define QMI_WLFW_ATHDIAG_READ_RESP_V01 0x0030
+#define QMI_WLFW_VBATT_REQ_V01 0x0032
+#define QMI_WLFW_MAC_ADDR_REQ_V01 0x0033
+#define QMI_WLFW_RESPOND_MEM_RESP_V01 0x0036
+#define QMI_WLFW_VBATT_RESP_V01 0x0032
+#define QMI_WLFW_MSA_INFO_REQ_V01 0x002D
+#define QMI_WLFW_CAL_DOWNLOAD_REQ_V01 0x0027
+#define QMI_WLFW_ATHDIAG_READ_REQ_V01 0x0030
+#define QMI_WLFW_WLAN_CFG_REQ_V01 0x0023
+#define QMI_WLFW_IND_REGISTER_RESP_V01 0x0020
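+
+/* A request and its response share one message ID (e.g. both
+ * QMI_WLFW_M3_INFO_REQ_V01 and QMI_WLFW_M3_INFO_RESP_V01 are 0x003C);
+ * the message type field in the QMI header distinguishes them.
+ */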
+
+#define QMI_WLFW_MAX_MEM_REG_V01 2
+#define QMI_WLFW_MAX_NUM_MEM_SEG_V01 16
+#define QMI_WLFW_MAX_NUM_CAL_V01 5
+#define QMI_WLFW_MAX_DATA_SIZE_V01 6144
+#define QMI_WLFW_FUNCTION_NAME_LEN_V01 128
+#define QMI_WLFW_MAX_NUM_CE_V01 12
+#define QMI_WLFW_MAX_TIMESTAMP_LEN_V01 32
+#define QMI_WLFW_MAX_ATHDIAG_DATA_SIZE_V01 6144
+#define QMI_WLFW_MAX_NUM_GPIO_V01 32
+#define QMI_WLFW_MAX_BUILD_ID_LEN_V01 128
+#define QMI_WLFW_MAX_NUM_MEM_CFG_V01 2
+#define QMI_WLFW_MAX_STR_LEN_V01 16
+#define QMI_WLFW_MAX_NUM_SHADOW_REG_V01 24
+#define QMI_WLFW_MAC_ADDR_SIZE_V01 6
+#define QMI_WLFW_MAX_SHADOW_REG_V2 36
+#define QMI_WLFW_MAX_NUM_SVC_V01 24
+
+enum wlfw_driver_mode_enum_v01 {
+ QMI_WLFW_MISSION_V01 = 0,
+ QMI_WLFW_FTM_V01 = 1,
+ QMI_WLFW_EPPING_V01 = 2,
+ QMI_WLFW_WALTEST_V01 = 3,
+ QMI_WLFW_OFF_V01 = 4,
+ QMI_WLFW_CCPM_V01 = 5,
+ QMI_WLFW_QVIT_V01 = 6,
+ QMI_WLFW_CALIBRATION_V01 = 7,
+};
+
+enum wlfw_cal_temp_id_enum_v01 {
+ QMI_WLFW_CAL_TEMP_IDX_0_V01 = 0,
+ QMI_WLFW_CAL_TEMP_IDX_1_V01 = 1,
+ QMI_WLFW_CAL_TEMP_IDX_2_V01 = 2,
+ QMI_WLFW_CAL_TEMP_IDX_3_V01 = 3,
+ QMI_WLFW_CAL_TEMP_IDX_4_V01 = 4,
+};
+
+enum wlfw_pipedir_enum_v01 {
+ QMI_WLFW_PIPEDIR_NONE_V01 = 0,
+ QMI_WLFW_PIPEDIR_IN_V01 = 1,
+ QMI_WLFW_PIPEDIR_OUT_V01 = 2,
+ QMI_WLFW_PIPEDIR_INOUT_V01 = 3,
+};
+
+enum wlfw_mem_type_enum_v01 {
+ QMI_WLFW_MEM_TYPE_MSA_V01 = 0,
+ QMI_WLFW_MEM_TYPE_DDR_V01 = 1,
+};
+
+#define QMI_WLFW_CE_ATTR_FLAGS_V01 ((u32)0x00)
+#define QMI_WLFW_CE_ATTR_NO_SNOOP_V01 ((u32)0x01)
+#define QMI_WLFW_CE_ATTR_BYTE_SWAP_DATA_V01 ((u32)0x02)
+#define QMI_WLFW_CE_ATTR_SWIZZLE_DESCRIPTORS_V01 ((u32)0x04)
+#define QMI_WLFW_CE_ATTR_DISABLE_INTR_V01 ((u32)0x08)
+#define QMI_WLFW_CE_ATTR_ENABLE_POLL_V01 ((u32)0x10)
+
+#define QMI_WLFW_ALREADY_REGISTERED_V01 ((u64)0x01ULL)
+#define QMI_WLFW_FW_READY_V01 ((u64)0x02ULL)
+#define QMI_WLFW_MSA_READY_V01 ((u64)0x04ULL)
+#define QMI_WLFW_MEM_READY_V01 ((u64)0x08ULL)
+#define QMI_WLFW_FW_INIT_DONE_V01 ((u64)0x10ULL)
+
+#define QMI_WLFW_FW_REJUVENATE_V01 ((u64)0x01ULL)
+
+struct wlfw_ce_tgt_pipe_cfg_s_v01 {
+ __le32 pipe_num;
+ __le32 pipe_dir;
+ __le32 nentries;
+ __le32 nbytes_max;
+ __le32 flags;
+};
+
+struct wlfw_ce_svc_pipe_cfg_s_v01 {
+ __le32 service_id;
+ __le32 pipe_dir;
+ __le32 pipe_num;
+};
+
+struct wlfw_shadow_reg_cfg_s_v01 {
+ u16 id;
+ u16 offset;
+};
+
+struct wlfw_shadow_reg_v2_cfg_s_v01 {
+ u32 addr;
+};
+
+struct wlfw_memory_region_info_s_v01 {
+ u64 region_addr;
+ u32 size;
+ u8 secure_flag;
+};
+
+struct wlfw_mem_cfg_s_v01 {
+ u64 offset;
+ u32 size;
+ u8 secure_flag;
+};
+
+struct wlfw_mem_seg_s_v01 {
+ u32 size;
+ enum wlfw_mem_type_enum_v01 type;
+ u32 mem_cfg_len;
+ struct wlfw_mem_cfg_s_v01 mem_cfg[QMI_WLFW_MAX_NUM_MEM_CFG_V01];
+};
+
+struct wlfw_mem_seg_resp_s_v01 {
+ u64 addr;
+ u32 size;
+ enum wlfw_mem_type_enum_v01 type;
+};
+
+struct wlfw_rf_chip_info_s_v01 {
+ u32 chip_id;
+ u32 chip_family;
+};
+
+struct wlfw_rf_board_info_s_v01 {
+ u32 board_id;
+};
+
+struct wlfw_soc_info_s_v01 {
+ u32 soc_id;
+};
+
+struct wlfw_fw_version_info_s_v01 {
+ u32 fw_version;
+ char fw_build_timestamp[QMI_WLFW_MAX_TIMESTAMP_LEN_V01 + 1];
+};
+
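+/* Optional members follow the usual QMI convention: a u8 <name>_valid
+ * presence flag precedes each optional value, maps to a QMI_OPT_FLAG
+ * element in the tables above, and the value TLV is only encoded or
+ * decoded when the flag is non-zero.
+ */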
+struct wlfw_ind_register_req_msg_v01 {
+ u8 fw_ready_enable_valid;
+ u8 fw_ready_enable;
+ u8 initiate_cal_download_enable_valid;
+ u8 initiate_cal_download_enable;
+ u8 initiate_cal_update_enable_valid;
+ u8 initiate_cal_update_enable;
+ u8 msa_ready_enable_valid;
+ u8 msa_ready_enable;
+ u8 pin_connect_result_enable_valid;
+ u8 pin_connect_result_enable;
+ u8 client_id_valid;
+ u32 client_id;
+ u8 request_mem_enable_valid;
+ u8 request_mem_enable;
+ u8 mem_ready_enable_valid;
+ u8 mem_ready_enable;
+ u8 fw_init_done_enable_valid;
+ u8 fw_init_done_enable;
+ u8 rejuvenate_enable_valid;
+ u32 rejuvenate_enable;
+ u8 xo_cal_enable_valid;
+ u8 xo_cal_enable;
+};
+
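+/* Each *_MAX_MSG_LEN below is the worst-case encoded length of the
+ * message's TLV payload, excluding the QMI header, and is used when sizing
+ * message buffers. For example, the 18 bytes of the ind_register response
+ * cover a 7-byte result TLV (3-byte TLV header plus 4-byte
+ * qmi_response_type_v01) and an 11-byte optional fw_status TLV.
+ */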
+#define WLFW_IND_REGISTER_REQ_MSG_V01_MAX_MSG_LEN 50
+extern const struct qmi_elem_info wlfw_ind_register_req_msg_v01_ei[];
+
+struct wlfw_ind_register_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+ u8 fw_status_valid;
+ u64 fw_status;
+};
+
+#define WLFW_IND_REGISTER_RESP_MSG_V01_MAX_MSG_LEN 18
+extern const struct qmi_elem_info wlfw_ind_register_resp_msg_v01_ei[];
+
+struct wlfw_fw_ready_ind_msg_v01 {
+ char placeholder;
+};
+
+#define WLFW_FW_READY_IND_MSG_V01_MAX_MSG_LEN 0
+extern const struct qmi_elem_info wlfw_fw_ready_ind_msg_v01_ei[];
+
+struct wlfw_msa_ready_ind_msg_v01 {
+ char placeholder;
+};
+
+#define WLFW_MSA_READY_IND_MSG_V01_MAX_MSG_LEN 0
+extern const struct qmi_elem_info wlfw_msa_ready_ind_msg_v01_ei[];
+
+struct wlfw_pin_connect_result_ind_msg_v01 {
+ u8 pwr_pin_result_valid;
+ u32 pwr_pin_result;
+ u8 phy_io_pin_result_valid;
+ u32 phy_io_pin_result;
+ u8 rf_pin_result_valid;
+ u32 rf_pin_result;
+};
+
+#define WLFW_PIN_CONNECT_RESULT_IND_MSG_V01_MAX_MSG_LEN 21
+extern const struct qmi_elem_info wlfw_pin_connect_result_ind_msg_v01_ei[];
+
+struct wlfw_wlan_mode_req_msg_v01 {
+ enum wlfw_driver_mode_enum_v01 mode;
+ u8 hw_debug_valid;
+ u8 hw_debug;
+};
+
+#define WLFW_WLAN_MODE_REQ_MSG_V01_MAX_MSG_LEN 11
+extern const struct qmi_elem_info wlfw_wlan_mode_req_msg_v01_ei[];
+
+struct wlfw_wlan_mode_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_WLAN_MODE_RESP_MSG_V01_MAX_MSG_LEN 7
+extern const struct qmi_elem_info wlfw_wlan_mode_resp_msg_v01_ei[];
+
+struct wlfw_wlan_cfg_req_msg_v01 {
+ u8 host_version_valid;
+ char host_version[QMI_WLFW_MAX_STR_LEN_V01 + 1];
+ u8 tgt_cfg_valid;
+ u32 tgt_cfg_len;
+ struct wlfw_ce_tgt_pipe_cfg_s_v01 tgt_cfg[QMI_WLFW_MAX_NUM_CE_V01];
+ u8 svc_cfg_valid;
+ u32 svc_cfg_len;
+ struct wlfw_ce_svc_pipe_cfg_s_v01 svc_cfg[QMI_WLFW_MAX_NUM_SVC_V01];
+ u8 shadow_reg_valid;
+ u32 shadow_reg_len;
+ struct wlfw_shadow_reg_cfg_s_v01 shadow_reg[QMI_WLFW_MAX_NUM_SHADOW_REG_V01];
+ u8 shadow_reg_v2_valid;
+ u32 shadow_reg_v2_len;
+ struct wlfw_shadow_reg_v2_cfg_s_v01 shadow_reg_v2[QMI_WLFW_MAX_SHADOW_REG_V2];
+};
+
+#define WLFW_WLAN_CFG_REQ_MSG_V01_MAX_MSG_LEN 803
+extern const struct qmi_elem_info wlfw_wlan_cfg_req_msg_v01_ei[];
+
+struct wlfw_wlan_cfg_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_WLAN_CFG_RESP_MSG_V01_MAX_MSG_LEN 7
+extern const struct qmi_elem_info wlfw_wlan_cfg_resp_msg_v01_ei[];
+
+struct wlfw_cap_req_msg_v01 {
+ char placeholder;
+};
+
+#define WLFW_CAP_REQ_MSG_V01_MAX_MSG_LEN 0
+extern const struct qmi_elem_info wlfw_cap_req_msg_v01_ei[];
+
+struct wlfw_cap_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+ u8 chip_info_valid;
+ struct wlfw_rf_chip_info_s_v01 chip_info;
+ u8 board_info_valid;
+ struct wlfw_rf_board_info_s_v01 board_info;
+ u8 soc_info_valid;
+ struct wlfw_soc_info_s_v01 soc_info;
+ u8 fw_version_info_valid;
+ struct wlfw_fw_version_info_s_v01 fw_version_info;
+ u8 fw_build_id_valid;
+ char fw_build_id[QMI_WLFW_MAX_BUILD_ID_LEN_V01 + 1];
+ u8 num_macs_valid;
+ u8 num_macs;
+};
+
+#define WLFW_CAP_RESP_MSG_V01_MAX_MSG_LEN 207
+extern const struct qmi_elem_info wlfw_cap_resp_msg_v01_ei[];
+
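+/* Board data files larger than QMI_WLFW_MAX_DATA_SIZE_V01 are sent as a
+ * sequence of segments: each request carries a seg_id and up to data_len
+ * bytes of payload, with the end flag set on the final segment.
+ */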
+struct wlfw_bdf_download_req_msg_v01 {
+ u8 valid;
+ u8 file_id_valid;
+ enum wlfw_cal_temp_id_enum_v01 file_id;
+ u8 total_size_valid;
+ u32 total_size;
+ u8 seg_id_valid;
+ u32 seg_id;
+ u8 data_valid;
+ u32 data_len;
+ u8 data[QMI_WLFW_MAX_DATA_SIZE_V01];
+ u8 end_valid;
+ u8 end;
+ u8 bdf_type_valid;
+ u8 bdf_type;
+};
+
+#define WLFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_MSG_LEN 6182
+extern const struct qmi_elem_info wlfw_bdf_download_req_msg_v01_ei[];
+
+struct wlfw_bdf_download_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_BDF_DOWNLOAD_RESP_MSG_V01_MAX_MSG_LEN 7
+extern const struct qmi_elem_info wlfw_bdf_download_resp_msg_v01_ei[];
+
+struct wlfw_cal_report_req_msg_v01 {
+ u32 meta_data_len;
+ enum wlfw_cal_temp_id_enum_v01 meta_data[QMI_WLFW_MAX_NUM_CAL_V01];
+ u8 xo_cal_data_valid;
+ u8 xo_cal_data;
+};
+
+#define WLFW_CAL_REPORT_REQ_MSG_V01_MAX_MSG_LEN 28
+extern const struct qmi_elem_info wlfw_cal_report_req_msg_v01_ei[];
+
+struct wlfw_cal_report_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_CAL_REPORT_RESP_MSG_V01_MAX_MSG_LEN 7
+extern const struct qmi_elem_info wlfw_cal_report_resp_msg_v01_ei[];
+
+struct wlfw_initiate_cal_download_ind_msg_v01 {
+ enum wlfw_cal_temp_id_enum_v01 cal_id;
+};
+
+#define WLFW_INITIATE_CAL_DOWNLOAD_IND_MSG_V01_MAX_MSG_LEN 7
+extern const struct qmi_elem_info wlfw_initiate_cal_download_ind_msg_v01_ei[];
+
+struct wlfw_cal_download_req_msg_v01 {
+ u8 valid;
+ u8 file_id_valid;
+ enum wlfw_cal_temp_id_enum_v01 file_id;
+ u8 total_size_valid;
+ u32 total_size;
+ u8 seg_id_valid;
+ u32 seg_id;
+ u8 data_valid;
+ u32 data_len;
+ u8 data[QMI_WLFW_MAX_DATA_SIZE_V01];
+ u8 end_valid;
+ u8 end;
+};
+
+#define WLFW_CAL_DOWNLOAD_REQ_MSG_V01_MAX_MSG_LEN 6178
+extern const struct qmi_elem_info wlfw_cal_download_req_msg_v01_ei[];
+
+struct wlfw_cal_download_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_CAL_DOWNLOAD_RESP_MSG_V01_MAX_MSG_LEN 7
+extern const struct qmi_elem_info wlfw_cal_download_resp_msg_v01_ei[];
+
+struct wlfw_initiate_cal_update_ind_msg_v01 {
+ enum wlfw_cal_temp_id_enum_v01 cal_id;
+ u32 total_size;
+};
+
+#define WLFW_INITIATE_CAL_UPDATE_IND_MSG_V01_MAX_MSG_LEN 14
+extern const struct qmi_elem_info wlfw_initiate_cal_update_ind_msg_v01_ei[];
+
+struct wlfw_cal_update_req_msg_v01 {
+ enum wlfw_cal_temp_id_enum_v01 cal_id;
+ u32 seg_id;
+};
+
+#define WLFW_CAL_UPDATE_REQ_MSG_V01_MAX_MSG_LEN 14
+extern const struct qmi_elem_info wlfw_cal_update_req_msg_v01_ei[];
+
+struct wlfw_cal_update_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+ u8 file_id_valid;
+ enum wlfw_cal_temp_id_enum_v01 file_id;
+ u8 total_size_valid;
+ u32 total_size;
+ u8 seg_id_valid;
+ u32 seg_id;
+ u8 data_valid;
+ u32 data_len;
+ u8 data[QMI_WLFW_MAX_DATA_SIZE_V01];
+ u8 end_valid;
+ u8 end;
+};
+
+#define WLFW_CAL_UPDATE_RESP_MSG_V01_MAX_MSG_LEN 6181
+extern const struct qmi_elem_info wlfw_cal_update_resp_msg_v01_ei[];
+
+struct wlfw_msa_info_req_msg_v01 {
+ u64 msa_addr;
+ u32 size;
+};
+
+#define WLFW_MSA_INFO_REQ_MSG_V01_MAX_MSG_LEN 18
+extern const struct qmi_elem_info wlfw_msa_info_req_msg_v01_ei[];
+
+struct wlfw_msa_info_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+ u32 mem_region_info_len;
+ struct wlfw_memory_region_info_s_v01 mem_region_info[QMI_WLFW_MAX_MEM_REG_V01];
+};
+
+#define WLFW_MSA_INFO_RESP_MSG_V01_MAX_MSG_LEN 37
+extern const struct qmi_elem_info wlfw_msa_info_resp_msg_v01_ei[];
+
+struct wlfw_msa_ready_req_msg_v01 {
+ char placeholder;
+};
+
+#define WLFW_MSA_READY_REQ_MSG_V01_MAX_MSG_LEN 0
+extern const struct qmi_elem_info wlfw_msa_ready_req_msg_v01_ei[];
+
+struct wlfw_msa_ready_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_MSA_READY_RESP_MSG_V01_MAX_MSG_LEN 7
+extern const struct qmi_elem_info wlfw_msa_ready_resp_msg_v01_ei[];
+
+struct wlfw_ini_req_msg_v01 {
+ u8 enablefwlog_valid;
+ u8 enablefwlog;
+};
+
+#define WLFW_INI_REQ_MSG_V01_MAX_MSG_LEN 4
+extern const struct qmi_elem_info wlfw_ini_req_msg_v01_ei[];
+
+struct wlfw_ini_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_INI_RESP_MSG_V01_MAX_MSG_LEN 7
+extern const struct qmi_elem_info wlfw_ini_resp_msg_v01_ei[];
+
+struct wlfw_athdiag_read_req_msg_v01 {
+ u32 offset;
+ u32 mem_type;
+ u32 data_len;
+};
+
+#define WLFW_ATHDIAG_READ_REQ_MSG_V01_MAX_MSG_LEN 21
+extern const struct qmi_elem_info wlfw_athdiag_read_req_msg_v01_ei[];
+
+struct wlfw_athdiag_read_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+ u8 data_valid;
+ u32 data_len;
+ u8 data[QMI_WLFW_MAX_ATHDIAG_DATA_SIZE_V01];
+};
+
+#define WLFW_ATHDIAG_READ_RESP_MSG_V01_MAX_MSG_LEN 6156
+extern const struct qmi_elem_info wlfw_athdiag_read_resp_msg_v01_ei[];
+
+struct wlfw_athdiag_write_req_msg_v01 {
+ u32 offset;
+ u32 mem_type;
+ u32 data_len;
+ u8 data[QMI_WLFW_MAX_ATHDIAG_DATA_SIZE_V01];
+};
+
+#define WLFW_ATHDIAG_WRITE_REQ_MSG_V01_MAX_MSG_LEN 6163
+extern const struct qmi_elem_info wlfw_athdiag_write_req_msg_v01_ei[];
+
+struct wlfw_athdiag_write_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_ATHDIAG_WRITE_RESP_MSG_V01_MAX_MSG_LEN 7
+extern const struct qmi_elem_info wlfw_athdiag_write_resp_msg_v01_ei[];
+
+struct wlfw_vbatt_req_msg_v01 {
+ u64 voltage_uv;
+};
+
+#define WLFW_VBATT_REQ_MSG_V01_MAX_MSG_LEN 11
+extern const struct qmi_elem_info wlfw_vbatt_req_msg_v01_ei[];
+
+struct wlfw_vbatt_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_VBATT_RESP_MSG_V01_MAX_MSG_LEN 7
+extern const struct qmi_elem_info wlfw_vbatt_resp_msg_v01_ei[];
+
+struct wlfw_mac_addr_req_msg_v01 {
+ u8 mac_addr_valid;
+ u8 mac_addr[QMI_WLFW_MAC_ADDR_SIZE_V01];
+};
+
+#define WLFW_MAC_ADDR_REQ_MSG_V01_MAX_MSG_LEN 9
+extern const struct qmi_elem_info wlfw_mac_addr_req_msg_v01_ei[];
+
+struct wlfw_mac_addr_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_MAC_ADDR_RESP_MSG_V01_MAX_MSG_LEN 7
+extern const struct qmi_elem_info wlfw_mac_addr_resp_msg_v01_ei[];
+
+struct wlfw_host_cap_req_msg_v01 {
+ u8 daemon_support_valid;
+ u32 daemon_support;
+ u8 wake_msi_valid;
+ u32 wake_msi;
+ u8 gpios_valid;
+ u32 gpios_len;
+ u32 gpios[QMI_WLFW_MAX_NUM_GPIO_V01];
+ u8 nm_modem_valid;
+ u8 nm_modem;
+ u8 bdf_support_valid;
+ u8 bdf_support;
+ u8 bdf_cache_support_valid;
+ u8 bdf_cache_support;
+ u8 m3_support_valid;
+ u8 m3_support;
+ u8 m3_cache_support_valid;
+ u8 m3_cache_support;
+ u8 cal_filesys_support_valid;
+ u8 cal_filesys_support;
+ u8 cal_cache_support_valid;
+ u8 cal_cache_support;
+ u8 cal_done_valid;
+ u8 cal_done;
+ u8 mem_bucket_valid;
+ u32 mem_bucket;
+ u8 mem_cfg_mode_valid;
+ u8 mem_cfg_mode;
+};
+
+#define WLFW_HOST_CAP_REQ_MSG_V01_MAX_MSG_LEN 189
+extern const struct qmi_elem_info wlfw_host_cap_req_msg_v01_ei[];
+extern const struct qmi_elem_info wlfw_host_cap_8bit_req_msg_v01_ei[];
+
+struct wlfw_host_cap_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_HOST_CAP_RESP_MSG_V01_MAX_MSG_LEN 7
+extern const struct qmi_elem_info wlfw_host_cap_resp_msg_v01_ei[];
+
+struct wlfw_request_mem_ind_msg_v01 {
+ u32 mem_seg_len;
+ struct wlfw_mem_seg_s_v01 mem_seg[QMI_WLFW_MAX_NUM_MEM_SEG_V01];
+};
+
+#define WLFW_REQUEST_MEM_IND_MSG_V01_MAX_MSG_LEN 564
+extern const struct qmi_elem_info wlfw_request_mem_ind_msg_v01_ei[];
+
+struct wlfw_respond_mem_req_msg_v01 {
+ u32 mem_seg_len;
+ struct wlfw_mem_seg_resp_s_v01 mem_seg[QMI_WLFW_MAX_NUM_MEM_SEG_V01];
+};
+
+#define WLFW_RESPOND_MEM_REQ_MSG_V01_MAX_MSG_LEN 260
+extern const struct qmi_elem_info wlfw_respond_mem_req_msg_v01_ei[];
+
+struct wlfw_respond_mem_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_RESPOND_MEM_RESP_MSG_V01_MAX_MSG_LEN 7
+extern const struct qmi_elem_info wlfw_respond_mem_resp_msg_v01_ei[];
+
+struct wlfw_mem_ready_ind_msg_v01 {
+ char placeholder;
+};
+
+#define WLFW_MEM_READY_IND_MSG_V01_MAX_MSG_LEN 0
+extern const struct qmi_elem_info wlfw_mem_ready_ind_msg_v01_ei[];
+
+struct wlfw_fw_init_done_ind_msg_v01 {
+ char placeholder;
+};
+
+#define WLFW_FW_INIT_DONE_IND_MSG_V01_MAX_MSG_LEN 0
+extern const struct qmi_elem_info wlfw_fw_init_done_ind_msg_v01_ei[];
+
+struct wlfw_rejuvenate_ind_msg_v01 {
+ u8 cause_for_rejuvenation_valid;
+ u8 cause_for_rejuvenation;
+ u8 requesting_sub_system_valid;
+ u8 requesting_sub_system;
+ u8 line_number_valid;
+ u16 line_number;
+ u8 function_name_valid;
+ char function_name[QMI_WLFW_FUNCTION_NAME_LEN_V01 + 1];
+};
+
+#define WLFW_REJUVENATE_IND_MSG_V01_MAX_MSG_LEN 144
+extern const struct qmi_elem_info wlfw_rejuvenate_ind_msg_v01_ei[];
+
+struct wlfw_rejuvenate_ack_req_msg_v01 {
+ char placeholder;
+};
+
+#define WLFW_REJUVENATE_ACK_REQ_MSG_V01_MAX_MSG_LEN 0
+extern const struct qmi_elem_info wlfw_rejuvenate_ack_req_msg_v01_ei[];
+
+struct wlfw_rejuvenate_ack_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_REJUVENATE_ACK_RESP_MSG_V01_MAX_MSG_LEN 7
+extern const struct qmi_elem_info wlfw_rejuvenate_ack_resp_msg_v01_ei[];
+
+struct wlfw_dynamic_feature_mask_req_msg_v01 {
+ u8 mask_valid;
+ u64 mask;
+};
+
+#define WLFW_DYNAMIC_FEATURE_MASK_REQ_MSG_V01_MAX_MSG_LEN 11
+extern const struct qmi_elem_info wlfw_dynamic_feature_mask_req_msg_v01_ei[];
+
+struct wlfw_dynamic_feature_mask_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+ u8 prev_mask_valid;
+ u64 prev_mask;
+ u8 curr_mask_valid;
+ u64 curr_mask;
+};
+
+#define WLFW_DYNAMIC_FEATURE_MASK_RESP_MSG_V01_MAX_MSG_LEN 29
+extern const struct qmi_elem_info wlfw_dynamic_feature_mask_resp_msg_v01_ei[];
+
+struct wlfw_m3_info_req_msg_v01 {
+ u64 addr;
+ u32 size;
+};
+
+#define WLFW_M3_INFO_REQ_MSG_V01_MAX_MSG_LEN 18
+extern const struct qmi_elem_info wlfw_m3_info_req_msg_v01_ei[];
+
+struct wlfw_m3_info_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define WLFW_M3_INFO_RESP_MSG_V01_MAX_MSG_LEN 7
+extern const struct qmi_elem_info wlfw_m3_info_resp_msg_v01_ei[];
+
+struct wlfw_xo_cal_ind_msg_v01 {
+ u8 xo_cal_data;
+};
+
+#define WLFW_XO_CAL_IND_MSG_V01_MAX_MSG_LEN 4
+extern const struct qmi_elem_info wlfw_xo_cal_ind_msg_v01_ei[];
+
+#endif
diff --git a/drivers/net/wireless/ath/ath10k/rx_desc.h b/drivers/net/wireless/ath/ath10k/rx_desc.h
new file mode 100644
index 000000000000..564293df1e9a
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/rx_desc.h
@@ -0,0 +1,1336 @@
+/* SPDX-License-Identifier: ISC */
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _RX_DESC_H_
+#define _RX_DESC_H_
+
+#include <linux/bitops.h>
+
+enum rx_attention_flags {
+ RX_ATTENTION_FLAGS_FIRST_MPDU = BIT(0),
+ RX_ATTENTION_FLAGS_LAST_MPDU = BIT(1),
+ RX_ATTENTION_FLAGS_MCAST_BCAST = BIT(2),
+ RX_ATTENTION_FLAGS_PEER_IDX_INVALID = BIT(3),
+ RX_ATTENTION_FLAGS_PEER_IDX_TIMEOUT = BIT(4),
+ RX_ATTENTION_FLAGS_POWER_MGMT = BIT(5),
+ RX_ATTENTION_FLAGS_NON_QOS = BIT(6),
+ RX_ATTENTION_FLAGS_NULL_DATA = BIT(7),
+ RX_ATTENTION_FLAGS_MGMT_TYPE = BIT(8),
+ RX_ATTENTION_FLAGS_CTRL_TYPE = BIT(9),
+ RX_ATTENTION_FLAGS_MORE_DATA = BIT(10),
+ RX_ATTENTION_FLAGS_EOSP = BIT(11),
+ RX_ATTENTION_FLAGS_U_APSD_TRIGGER = BIT(12),
+ RX_ATTENTION_FLAGS_FRAGMENT = BIT(13),
+ RX_ATTENTION_FLAGS_ORDER = BIT(14),
+ RX_ATTENTION_FLAGS_CLASSIFICATION = BIT(15),
+ RX_ATTENTION_FLAGS_OVERFLOW_ERR = BIT(16),
+ RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR = BIT(17),
+ RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL = BIT(18),
+ RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL = BIT(19),
+ RX_ATTENTION_FLAGS_SA_IDX_INVALID = BIT(20),
+ RX_ATTENTION_FLAGS_DA_IDX_INVALID = BIT(21),
+ RX_ATTENTION_FLAGS_SA_IDX_TIMEOUT = BIT(22),
+ RX_ATTENTION_FLAGS_DA_IDX_TIMEOUT = BIT(23),
+ RX_ATTENTION_FLAGS_ENCRYPT_REQUIRED = BIT(24),
+ RX_ATTENTION_FLAGS_DIRECTED = BIT(25),
+ RX_ATTENTION_FLAGS_BUFFER_FRAGMENT = BIT(26),
+ RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR = BIT(27),
+ RX_ATTENTION_FLAGS_TKIP_MIC_ERR = BIT(28),
+ RX_ATTENTION_FLAGS_DECRYPT_ERR = BIT(29),
+ RX_ATTENTION_FLAGS_FCS_ERR = BIT(30),
+ RX_ATTENTION_FLAGS_MSDU_DONE = BIT(31),
+};
+
+struct rx_attention {
+ __le32 flags; /* %RX_ATTENTION_FLAGS_ */
+} __packed;
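+
+/* Minimal usage sketch (the "attention" pointer to a struct rx_attention is
+ * illustrative, not part of this header):
+ *
+ *	u32 attn = __le32_to_cpu(attention->flags);
+ *
+ *	if (attn & RX_ATTENTION_FLAGS_MSDU_DONE)
+ *		... all rx descriptor fields are valid ...
+ */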
+
+/*
+ * first_mpdu
+ * Indicates the first MSDU of the PPDU. If both first_mpdu
+ * and last_mpdu are set in the MSDU then this is not an
+ * A-MPDU frame but a stand-alone MPDU. Interior MPDUs in an
+ * A-MPDU shall have both first_mpdu and last_mpdu bits set to
+ * 0. The PPDU start status will only be valid when this bit
+ * is set.
+ *
+ * last_mpdu
+ * Indicates the last MSDU of the last MPDU of the PPDU. The
+ * PPDU end status will only be valid when this bit is set.
+ *
+ * mcast_bcast
+ * Multicast / broadcast indicator. Only set when the MAC
+ * address 1 bit 0 is set indicating mcast/bcast and the BSSID
+ * matches one of the 4 BSSID registers. Only set when
+ * first_msdu is set.
+ *
+ * peer_idx_invalid
+ * Indicates no matching entries within the max search
+ * count. Only set when first_msdu is set.
+ *
+ * peer_idx_timeout
+ * Indicates an unsuccessful search for the peer index due to
+ * timeout. Only set when first_msdu is set.
+ *
+ * power_mgmt
+ * Power management bit set in the 802.11 header. Only set
+ * when first_msdu is set.
+ *
+ * non_qos
+ * Set if packet is a non-QoS data frame. Only set when
+ * first_msdu is set.
+ *
+ * null_data
+ * Set if frame type indicates either null data or QoS null
+ * data format. Only set when first_msdu is set.
+ *
+ * mgmt_type
+ * Set if packet is a management packet. Only set when
+ * first_msdu is set.
+ *
+ * ctrl_type
+ * Set if packet is a control packet. Only set when first_msdu
+ * is set.
+ *
+ * more_data
+ * Set if more bit in frame control is set. Only set when
+ * first_msdu is set.
+ *
+ * eosp
+ * Set if the EOSP (end of service period) bit in the QoS
+ * control field is set. Only set when first_msdu is set.
+ *
+ * u_apsd_trigger
+ * Set if packet is U-APSD trigger. Key table will have bits
+ * per TID to indicate U-APSD trigger.
+ *
+ * fragment
+ * Indicates that this is an 802.11 fragment frame. This is
+ * set when either the more_frag bit is set in the frame
+ * control or the fragment number is not zero. Only set when
+ * first_msdu is set.
+ *
+ * order
+ * Set if the order bit in the frame control is set. Only set
+ * when first_msdu is set.
+ *
+ * classification
+ * Indicates that this status has a corresponding MSDU that
+ * requires FW processing. The OLE will have classification
+ * ring mask registers which will indicate the ring(s) for
+ * packets and descriptors which need FW attention.
+ *
+ * overflow_err
+ * PCU Receive FIFO does not have enough space to store the
+ * full receive packet. Enough space is reserved in the
+ * receive FIFO for the status to be written. The remaining
+ * packets of this PPDU will be filtered and no Ack response
+ * will be transmitted.
+ *
+ * msdu_length_err
+ * Indicates that the MSDU length from the 802.3 encapsulated
+ * length field extends beyond the MPDU boundary.
+ *
+ * tcp_udp_chksum_fail
+ * Indicates that the computed checksum (tcp_udp_chksum) did
+ * not match the checksum in the TCP/UDP header.
+ *
+ * ip_chksum_fail
+ * Indicates that the computed checksum did not match the
+ * checksum in the IP header.
+ *
+ * sa_idx_invalid
+ * Indicates no matching entry was found in the address search
+ * table for the source MAC address.
+ *
+ * da_idx_invalid
+ * Indicates no matching entry was found in the address search
+ * table for the destination MAC address.
+ *
+ * sa_idx_timeout
+ * Indicates an unsuccessful search for the source MAC address
+ * due to the expiring of the search timer.
+ *
+ * da_idx_timeout
+ * Indicates an unsuccessful search for the destination MAC
+ * address due to the expiring of the search timer.
+ *
+ * encrypt_required
+ * Indicates that this data frame is not encrypted even though
+ * the policy for this MPDU requires encryption, as indicated
+ * by the key type in the peer table.
+ *
+ * directed
+ * MPDU is a directed packet which means that the RA matched
+ * our STA addresses. In proxySTA it means that the TA matched
+ * an entry in our address search table with the corresponding
+ * 'no_ack' bit in the address search entry cleared.
+ *
+ * buffer_fragment
+ * Indicates that at least one of the rx buffers has been
+ * fragmented. If set the FW should look at the rx_frag_info
+ * descriptor described below.
+ *
+ * mpdu_length_err
+ * Indicates that the MPDU was prematurely terminated
+ * resulting in a truncated MPDU. Don't trust the MPDU length
+ * field.
+ *
+ * tkip_mic_err
+ * Indicates that the MPDU Michael integrity check failed
+ *
+ * decrypt_err
+ * Indicates that the MPDU decrypt integrity check failed
+ *
+ * fcs_err
+ * Indicates that the MPDU FCS check failed
+ *
+ * msdu_done
+ * If set indicates that the RX packet data, RX header data, RX
+ * PPDU start descriptor, RX MPDU start/end descriptor, RX MSDU
+ * start/end descriptors and RX Attention descriptor are all
+ * valid. This bit must be in the last octet of the
+ * descriptor.
+ */
+
+struct rx_frag_info_common {
+ u8 ring0_more_count;
+ u8 ring1_more_count;
+ u8 ring2_more_count;
+ u8 ring3_more_count;
+} __packed;
+
+struct rx_frag_info_wcn3990 {
+ u8 ring4_more_count;
+ u8 ring5_more_count;
+ u8 ring6_more_count;
+ u8 ring7_more_count;
+} __packed;
+
+struct rx_frag_info {
+ struct rx_frag_info_common common;
+ union {
+ struct rx_frag_info_wcn3990 wcn3990;
+ } __packed;
+} __packed;
+
+struct rx_frag_info_v1 {
+ struct rx_frag_info_common common;
+} __packed;
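+
+/* The common part covers RX DMA rings 0-3; WCN3990 also exposes rings 4-7,
+ * so struct rx_frag_info carries the extra counts in a union while
+ * rx_frag_info_v1 keeps the legacy four-ring layout.
+ */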
+
+/*
+ * ring0_more_count
+ * Indicates the number of more buffers associated with RX DMA
+ * ring 0. Field is filled in by the RX_DMA.
+ *
+ * ring1_more_count
+ * Indicates the number of more buffers associated with RX DMA
+ * ring 1. Field is filled in by the RX_DMA.
+ *
+ * ring2_more_count
+ * Indicates the number of more buffers associated with RX DMA
+ * ring 2. Field is filled in by the RX_DMA.
+ *
+ * ring3_more_count
+ * Indicates the number of more buffers associated with RX DMA
+ * ring 3. Field is filled in by the RX_DMA.
+ */
+
+enum htt_rx_mpdu_encrypt_type {
+ HTT_RX_MPDU_ENCRYPT_WEP40 = 0,
+ HTT_RX_MPDU_ENCRYPT_WEP104 = 1,
+ HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC = 2,
+ HTT_RX_MPDU_ENCRYPT_WEP128 = 3,
+ HTT_RX_MPDU_ENCRYPT_TKIP_WPA = 4,
+ HTT_RX_MPDU_ENCRYPT_WAPI = 5,
+ HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2 = 6,
+ HTT_RX_MPDU_ENCRYPT_NONE = 7,
+ HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2 = 8,
+ HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2 = 9,
+ HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2 = 10,
+};
+
+#define RX_MPDU_START_INFO0_PEER_IDX_MASK 0x000007ff
+#define RX_MPDU_START_INFO0_PEER_IDX_LSB 0
+#define RX_MPDU_START_INFO0_SEQ_NUM_MASK 0x0fff0000
+#define RX_MPDU_START_INFO0_SEQ_NUM_LSB 16
+#define RX_MPDU_START_INFO0_ENCRYPT_TYPE_MASK 0xf0000000
+#define RX_MPDU_START_INFO0_ENCRYPT_TYPE_LSB 28
+#define RX_MPDU_START_INFO0_FROM_DS BIT(11)
+#define RX_MPDU_START_INFO0_TO_DS BIT(12)
+#define RX_MPDU_START_INFO0_ENCRYPTED BIT(13)
+#define RX_MPDU_START_INFO0_RETRY BIT(14)
+#define RX_MPDU_START_INFO0_TXBF_H_INFO BIT(15)
+
+#define RX_MPDU_START_INFO1_TID_MASK 0xf0000000
+#define RX_MPDU_START_INFO1_TID_LSB 28
+#define RX_MPDU_START_INFO1_DIRECTED BIT(16)
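+
+/* Multi-bit fields are recovered from the little-endian info words as
+ * (__le32_to_cpu(info) & <FIELD>_MASK) >> <FIELD>_LSB; single-bit fields
+ * are tested directly against their BIT() define.
+ */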
+
+struct rx_mpdu_start {
+ __le32 info0;
+ union {
+ struct {
+ __le32 pn31_0;
+ __le32 info1; /* %RX_MPDU_START_INFO1_ */
+ } __packed;
+ struct {
+ u8 pn[6];
+ } __packed;
+ } __packed;
+} __packed;
+
+/*
+ * peer_idx
+ * The index of the address search table which associated with
+ * the peer table entry corresponding to this MPDU. Only valid
+ * when first_msdu is set.
+ *
+ * fr_ds
+ * Set if the from DS bit is set in the frame control. Only
+ * valid when first_msdu is set.
+ *
+ * to_ds
+ * Set if the to DS bit is set in the frame control. Only
+ * valid when first_msdu is set.
+ *
+ * encrypted
+ * Protected bit from the frame control. Only valid when
+ * first_msdu is set.
+ *
+ * retry
+ * Retry bit from the frame control. Only valid when
+ * first_msdu is set.
+ *
+ * txbf_h_info
+ * The MPDU data will contain H information. Primarily used
+ * for debug.
+ *
+ * seq_num
+ * The sequence number from the 802.11 header. Only valid when
+ * first_msdu is set.
+ *
+ * encrypt_type
+ * Indicates type of decrypt cipher used (as defined in the
+ * peer table)
+ * 0: WEP40
+ * 1: WEP104
+ * 2: TKIP without MIC
+ * 3: WEP128
+ * 4: TKIP (WPA)
+ * 5: WAPI
+ * 6: AES-CCM (WPA2)
+ * 7: No cipher
+ * 8: AES-CCM-256 (WPA2)
+ * 9: AES-GCMP (WPA2)
+ * 10: AES-GCMP-256 (WPA2)
+ * Only valid when first_msdu is set
+ *
+ * pn_31_0
+ * Bits [31:0] of the PN number extracted from the IV field
+ * WEP: IV = {key_id_octet, pn2, pn1, pn0}. Only pn[23:0] is
+ * valid.
+ * TKIP: IV = {pn5, pn4, pn3, pn2, key_id_octet, pn0,
+ * WEPSeed[1], pn1}. Only pn[47:0] is valid.
+ * AES-CCM: IV = {pn5, pn4, pn3, pn2, key_id_octet, 0x0, pn1,
+ * pn0}. Only pn[47:0] is valid.
+ * WAPI: IV = {key_id_octet, 0x0, pn15, pn14, pn13, pn12, pn11,
+ * pn10, pn9, pn8, pn7, pn6, pn5, pn4, pn3, pn2, pn1, pn0}.
+ * The ext_wapi_pn[127:48] in the rx_msdu_misc descriptor and
+ * pn[47:0] are valid.
+ * Only valid when first_msdu is set.
+ *
+ * pn_47_32
+ * Bits [47:32] of the PN number. See description for
+ * pn_31_0. The remaining PN fields are in the rx_msdu_end
+ * descriptor
+ *
+ * pn
+ * Use this field to access the pn without worrying about
+ * byte-order and bitmasking/bitshifting.
+ *
+ * directed
+ * See definition in RX attention descriptor
+ *
+ * reserved_2
+ * Reserved: HW should fill with zero. FW should ignore.
+ *
+ * tid
+ * The TID field in the QoS control field
+ */
+
+#define RX_MPDU_END_INFO0_RESERVED_0_MASK 0x00001fff
+#define RX_MPDU_END_INFO0_RESERVED_0_LSB 0
+#define RX_MPDU_END_INFO0_POST_DELIM_CNT_MASK 0x0fff0000
+#define RX_MPDU_END_INFO0_POST_DELIM_CNT_LSB 16
+#define RX_MPDU_END_INFO0_OVERFLOW_ERR BIT(13)
+#define RX_MPDU_END_INFO0_LAST_MPDU BIT(14)
+#define RX_MPDU_END_INFO0_POST_DELIM_ERR BIT(15)
+#define RX_MPDU_END_INFO0_MPDU_LENGTH_ERR BIT(28)
+#define RX_MPDU_END_INFO0_TKIP_MIC_ERR BIT(29)
+#define RX_MPDU_END_INFO0_DECRYPT_ERR BIT(30)
+#define RX_MPDU_END_INFO0_FCS_ERR BIT(31)
+
+struct rx_mpdu_end {
+ __le32 info0;
+} __packed;
+
+/*
+ * reserved_0
+ * Reserved
+ *
+ * overflow_err
+ * PCU Receive FIFO does not have enough space to store the
+ * full receive packet. Enough space is reserved in the
+ * receive FIFO for the status to be written. The remaining
+ * packets of this PPDU will be filtered and no Ack response
+ * will be transmitted.
+ *
+ * last_mpdu
+ * Indicates that this is the last MPDU of a PPDU.
+ *
+ * post_delim_err
+ * Indicates that a delimiter FCS error occurred after this
+ * MPDU before the next MPDU. Only valid when last_msdu is
+ * set.
+ *
+ * post_delim_cnt
+ * Count of the delimiters after this MPDU. This requires the
+ * last MPDU to be held until all the EOF descriptors have been
+ * received. This may be inefficient in the future when
+ * ML-MIMO is used. Only valid when last_mpdu is set.
+ *
+ * mpdu_length_err
+ * See definition in RX attention descriptor
+ *
+ * tkip_mic_err
+ * See definition in RX attention descriptor
+ *
+ * decrypt_err
+ * See definition in RX attention descriptor
+ *
+ * fcs_err
+ * See definition in RX attention descriptor
+ */
+
+#define RX_MSDU_START_INFO0_MSDU_LENGTH_MASK 0x00003fff
+#define RX_MSDU_START_INFO0_MSDU_LENGTH_LSB 0
+#define RX_MSDU_START_INFO0_IP_OFFSET_MASK 0x000fc000
+#define RX_MSDU_START_INFO0_IP_OFFSET_LSB 14
+#define RX_MSDU_START_INFO0_RING_MASK_MASK 0x00f00000
+#define RX_MSDU_START_INFO0_RING_MASK_LSB 20
+#define RX_MSDU_START_INFO0_TCP_UDP_OFFSET_MASK 0x7f000000
+#define RX_MSDU_START_INFO0_TCP_UDP_OFFSET_LSB 24
+
+#define RX_MSDU_START_INFO1_MSDU_NUMBER_MASK 0x000000ff
+#define RX_MSDU_START_INFO1_MSDU_NUMBER_LSB 0
+#define RX_MSDU_START_INFO1_DECAP_FORMAT_MASK 0x00000300
+#define RX_MSDU_START_INFO1_DECAP_FORMAT_LSB 8
+#define RX_MSDU_START_INFO1_SA_IDX_MASK 0x07ff0000
+#define RX_MSDU_START_INFO1_SA_IDX_LSB 16
+#define RX_MSDU_START_INFO1_IPV4_PROTO BIT(10)
+#define RX_MSDU_START_INFO1_IPV6_PROTO BIT(11)
+#define RX_MSDU_START_INFO1_TCP_PROTO BIT(12)
+#define RX_MSDU_START_INFO1_UDP_PROTO BIT(13)
+#define RX_MSDU_START_INFO1_IP_FRAG BIT(14)
+#define RX_MSDU_START_INFO1_TCP_ONLY_ACK BIT(15)
+
+#define RX_MSDU_START_INFO2_DA_IDX_MASK 0x000007ff
+#define RX_MSDU_START_INFO2_DA_IDX_LSB 0
+#define RX_MSDU_START_INFO2_IP_PROTO_FIELD_MASK 0x00ff0000
+#define RX_MSDU_START_INFO2_IP_PROTO_FIELD_LSB 16
+#define RX_MSDU_START_INFO2_DA_BCAST_MCAST BIT(11)
+
+/* The decapped header (rx_hdr_status) contains the following:
+ * a) 802.11 header
+ * [padding to 4 bytes]
+ * b) HW crypto parameter
+ * - 0 bytes for no security
+ * - 4 bytes for WEP
+ * - 8 bytes for TKIP, AES
+ * [padding to 4 bytes]
+ * c) A-MSDU subframe header (14 bytes) if applicable
+ * d) LLC/SNAP (RFC1042, 8 bytes)
+ *
+ * In case of A-MSDU only first frame in sequence contains (a) and (b).
+ */
+enum rx_msdu_decap_format {
+ RX_MSDU_DECAP_RAW = 0,
+
+ /* Note: QoS frames are reported as non-QoS. The rx_hdr_status in
+ * htt_rx_desc contains the original decapped 802.11 header.
+ */
+ RX_MSDU_DECAP_NATIVE_WIFI = 1,
+
+ /* Payload contains an ethernet header (struct ethhdr). */
+ RX_MSDU_DECAP_ETHERNET2_DIX = 2,
+
+ /* Payload contains two 48-bit addresses and 2-byte length (14 bytes
+ * total), followed by an RFC1042 header (8 bytes).
+ */
+ RX_MSDU_DECAP_8023_SNAP_LLC = 3
+};
+
+struct rx_msdu_start_common {
+ __le32 info0; /* %RX_MSDU_START_INFO0_ */
+ __le32 flow_id_crc;
+ __le32 info1; /* %RX_MSDU_START_INFO1_ */
+} __packed;
+
+struct rx_msdu_start_qca99x0 {
+ __le32 info2; /* %RX_MSDU_START_INFO2_ */
+} __packed;
+
+struct rx_msdu_start_wcn3990 {
+ __le32 info2; /* %RX_MSDU_START_INFO2_ */
+ __le32 info3; /* %RX_MSDU_START_INFO3_ */
+} __packed;
+
+struct rx_msdu_start {
+ struct rx_msdu_start_common common;
+ union {
+ struct rx_msdu_start_wcn3990 wcn3990;
+ } __packed;
+} __packed;
+
+struct rx_msdu_start_v1 {
+ struct rx_msdu_start_common common;
+ union {
+ struct rx_msdu_start_qca99x0 qca99x0;
+ } __packed;
+} __packed;
+
+/*
+ * msdu_length
+ * MSDU length in bytes after decapsulation. This field is
+ * still valid for MPDU frames without an A-MSDU and still
+ * represents the MSDU length after decapsulation.
+ *
+ * ip_offset
+ * Indicates the IP offset in bytes from the start of the
+ * packet after decapsulation. Only valid if ipv4_proto or
+ * ipv6_proto is set.
+ *
+ * ring_mask
+ * Indicates the destination RX rings for this MSDU.
+ *
+ * tcp_udp_offset
+ * Indicates the offset in bytes to the start of TCP or UDP
+ * header from the start of the IP header after decapsulation.
+ * Only valid if tcp_prot or udp_prot is set. The value 0
+ * indicates that the offset is longer than 127 bytes.
+ *
+ * reserved_0c
+ * Reserved: HW should fill with zero. FW should ignore.
+ *
+ * flow_id_crc
+ * The flow_id_crc runs CRC32 on the following information:
+ * IPv4 option: dest_addr[31:0], src_addr [31:0], {24'b0,
+ * protocol[7:0]}.
+ * IPv6 option: dest_addr[127:0], src_addr [127:0], {24'b0,
+ * next_header[7:0]}
+ * UDP case: src_port[15:0], dest_port[15:0]
+ * TCP case: src_port[15:0], dest_port[15:0],
+ * {header_length[3:0], 6'b0, flags[5:0], window_size[15:0]},
+ * {16'b0, urgent_ptr[15:0]}, all options except 32-bit
+ * timestamp.
+ *
+ * msdu_number
+ * Indicates the MSDU number within an MPDU. This value is
+ * reset to zero at the start of each MPDU. If the number of
+ * MSDUs exceeds 255, this number wraps modulo 256.
+ *
+ * decap_format
+ * Indicates the format after decapsulation:
+ * 0: RAW: No decapsulation
+ * 1: Native WiFi
+ * 2: Ethernet 2 (DIX)
+ * 3: 802.3 (SNAP/LLC)
+ *
+ * ipv4_proto
+ * Set if L2 layer indicates IPv4 protocol.
+ *
+ * ipv6_proto
+ * Set if L2 layer indicates IPv6 protocol.
+ *
+ * tcp_proto
+ * Set if ipv4_proto or ipv6_proto is set and the IP
+ * protocol indicates TCP.
+ *
+ * udp_proto
+ * Set if ipv4_proto or ipv6_proto is set and the IP
+ * protocol indicates UDP.
+ *
+ * ip_frag
+ * Indicates that either the IP More frag bit is set or IP frag
+ * number is non-zero. If set indicates that this is a
+ * fragmented IP packet.
+ *
+ * tcp_only_ack
+ * Set if only the TCP Ack bit is set in the TCP flags and if
+ * the TCP payload is 0.
+ *
+ * sa_idx
+ * The offset in the address table which matches the MAC source
+ * address.
+ *
+ * reserved_2b
+ * Reserved: HW should fill with zero. FW should ignore.
+ */
+
+#define RX_MSDU_END_INFO0_REPORTED_MPDU_LENGTH_MASK 0x00003fff
+#define RX_MSDU_END_INFO0_REPORTED_MPDU_LENGTH_LSB 0
+#define RX_MSDU_END_INFO0_FIRST_MSDU BIT(14)
+#define RX_MSDU_END_INFO0_LAST_MSDU BIT(15)
+#define RX_MSDU_END_INFO0_MSDU_LIMIT_ERR BIT(18)
+#define RX_MSDU_END_INFO0_PRE_DELIM_ERR BIT(30)
+#define RX_MSDU_END_INFO0_RESERVED_3B BIT(31)
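+
+/* Illustrative sketch (hypothetical helper, not part of the driver): per
+ * the first_msdu/last_msdu description further below, a non-aggregated
+ * frame carries both flags in info0.
+ */
+static inline bool ath10k_rx_desc_example_is_non_aggregated(__le32 info0)
+{
+	u32 v = le32_to_cpu(info0);
+
+	return (v & RX_MSDU_END_INFO0_FIRST_MSDU) &&
+	       (v & RX_MSDU_END_INFO0_LAST_MSDU);
+}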
+
+struct rx_msdu_end_common {
+ __le16 ip_hdr_cksum;
+ __le16 tcp_hdr_cksum;
+ u8 key_id_octet;
+ u8 classification_filter;
+ u8 wapi_pn[10];
+ __le32 info0;
+} __packed;
+
+#define RX_MSDU_END_INFO1_TCP_FLAG_MASK 0x000001ff
+#define RX_MSDU_END_INFO1_TCP_FLAG_LSB 0
+#define RX_MSDU_END_INFO1_L3_HDR_PAD_MASK 0x00001c00
+#define RX_MSDU_END_INFO1_L3_HDR_PAD_LSB 10
+#define RX_MSDU_END_INFO1_WINDOW_SIZE_MASK 0xffff0000
+#define RX_MSDU_END_INFO1_WINDOW_SIZE_LSB 16
+#define RX_MSDU_END_INFO1_IRO_ELIGIBLE BIT(9)
+
+#define RX_MSDU_END_INFO2_DA_OFFSET_MASK 0x0000003f
+#define RX_MSDU_END_INFO2_DA_OFFSET_LSB 0
+#define RX_MSDU_END_INFO2_SA_OFFSET_MASK 0x00000fc0
+#define RX_MSDU_END_INFO2_SA_OFFSET_LSB 6
+#define RX_MSDU_END_INFO2_TYPE_OFFSET_MASK 0x0003f000
+#define RX_MSDU_END_INFO2_TYPE_OFFSET_LSB 12
+
+struct rx_msdu_end_qca99x0 {
+ __le32 ipv6_crc;
+ __le32 tcp_seq_no;
+ __le32 tcp_ack_no;
+ __le32 info1;
+ __le32 info2;
+} __packed;
+
+struct rx_msdu_end_wcn3990 {
+ __le32 ipv6_crc;
+ __le32 tcp_seq_no;
+ __le32 tcp_ack_no;
+ __le32 info1;
+ __le32 info2;
+ __le32 rule_indication_0;
+ __le32 rule_indication_1;
+ __le32 rule_indication_2;
+ __le32 rule_indication_3;
+} __packed;
+
+struct rx_msdu_end {
+ struct rx_msdu_end_common common;
+ union {
+ struct rx_msdu_end_wcn3990 wcn3990;
+ } __packed;
+} __packed;
+
+struct rx_msdu_end_v1 {
+ struct rx_msdu_end_common common;
+ union {
+ struct rx_msdu_end_qca99x0 qca99x0;
+ } __packed;
+} __packed;
+
+/*
+ * ip_hdr_chksum
+ * This can include the IP header checksum or the pseudo header
+ * checksum used by TCP/UDP checksum.
+ *
+ * tcp_udp_chksum
+ * The value of the computed TCP/UDP checksum. A mode bit
+ * selects whether this checksum is the full checksum or the
+ * partial checksum which does not include the pseudo header.
+ *
+ * key_id_octet
+ * The key ID octet from the IV. Only valid when first_msdu is
+ * set.
+ *
+ * classification_filter
+ * Indicates the classification filter rule number.
+ *
+ * ext_wapi_pn_63_48
+ * Extension PN (packet number) which is only used by WAPI.
+ * This corresponds to WAPI PN bits [63:48] (pn6 and pn7). The
+ * WAPI PN bits [63:0] are in the pn field of the rx_mpdu_start
+ * descriptor.
+ *
+ * ext_wapi_pn_95_64
+ * Extension PN (packet number) which is only used by WAPI.
+ * This corresponds to WAPI PN bits [95:64] (pn8, pn9, pn10 and
+ * pn11).
+ *
+ * ext_wapi_pn_127_96
+ * Extension PN (packet number) which is only used by WAPI.
+ * This corresponds to WAPI PN bits [127:96] (pn12, pn13, pn14,
+ * pn15).
+ *
+ * reported_mpdu_length
+ * MPDU length before decapsulation. Only valid when
+ * first_msdu is set. This field is taken directly from the
+ * length field of the A-MPDU delimiter or the preamble length
+ * field for non-A-MPDU frames.
+ *
+ * first_msdu
+ * Indicates the first MSDU of an A-MSDU. If both first_msdu
+ * and last_msdu are set in the MSDU then this is a
+ * non-aggregated MSDU frame: a normal MPDU. Interior MSDUs in
+ * an A-MSDU shall have both the first_msdu and last_msdu bits
+ * set to 0.
+ *
+ * last_msdu
+ * Indicates the last MSDU of the A-MSDU. MPDU end status is
+ * only valid when last_msdu is set.
+ *
+ * msdu_limit_error
+ * Indicates that the MSDU threshold was exceeded and thus
+ * all the rest of the MSDUs will not be scattered and
+ * will not be decapsulated but will be received in RAW format
+ * as a single MSDU buffer.
+ *
+ * reserved_3a
+ * Reserved: HW should fill with zero. FW should ignore.
+ *
+ * pre_delim_err
+ * Indicates that the first delimiter had an FCS failure. Only
+ * valid when first_mpdu and first_msdu are set.
+ *
+ * reserved_3b
+ * Reserved: HW should fill with zero. FW should ignore.
+ */
+
+#define HTT_RX_PPDU_START_PREAMBLE_LEGACY 0x04
+#define HTT_RX_PPDU_START_PREAMBLE_HT 0x08
+#define HTT_RX_PPDU_START_PREAMBLE_HT_WITH_TXBF 0x09
+#define HTT_RX_PPDU_START_PREAMBLE_VHT 0x0C
+#define HTT_RX_PPDU_START_PREAMBLE_VHT_WITH_TXBF 0x0D
+
+#define RX_PPDU_START_INFO0_IS_GREENFIELD BIT(0)
+
+#define RX_PPDU_START_INFO1_L_SIG_RATE_MASK 0x0000000f
+#define RX_PPDU_START_INFO1_L_SIG_RATE_LSB 0
+#define RX_PPDU_START_INFO1_L_SIG_LENGTH_MASK 0x0001ffe0
+#define RX_PPDU_START_INFO1_L_SIG_LENGTH_LSB 5
+#define RX_PPDU_START_INFO1_L_SIG_TAIL_MASK 0x00fc0000
+#define RX_PPDU_START_INFO1_L_SIG_TAIL_LSB 18
+#define RX_PPDU_START_INFO1_PREAMBLE_TYPE_MASK 0xff000000
+#define RX_PPDU_START_INFO1_PREAMBLE_TYPE_LSB 24
+#define RX_PPDU_START_INFO1_L_SIG_RATE_SELECT BIT(4)
+#define RX_PPDU_START_INFO1_L_SIG_PARITY BIT(17)
+
+#define RX_PPDU_START_INFO2_HT_SIG_VHT_SIG_A_1_MASK 0x00ffffff
+#define RX_PPDU_START_INFO2_HT_SIG_VHT_SIG_A_1_LSB 0
+
+#define RX_PPDU_START_INFO3_HT_SIG_VHT_SIG_A_2_MASK 0x00ffffff
+#define RX_PPDU_START_INFO3_HT_SIG_VHT_SIG_A_2_LSB 0
+#define RX_PPDU_START_INFO3_TXBF_H_INFO BIT(24)
+
+#define RX_PPDU_START_INFO4_VHT_SIG_B_MASK 0x1fffffff
+#define RX_PPDU_START_INFO4_VHT_SIG_B_LSB 0
+
+#define RX_PPDU_START_INFO5_SERVICE_MASK 0x0000ffff
+#define RX_PPDU_START_INFO5_SERVICE_LSB 0
+
+/* No idea what this flag means. It seems to be always set in rate. */
+#define RX_PPDU_START_RATE_FLAG BIT(3)
+
+struct rx_ppdu_start {
+ struct {
+ u8 pri20_mhz;
+ u8 ext20_mhz;
+ u8 ext40_mhz;
+ u8 ext80_mhz;
+ } rssi_chains[4];
+ u8 rssi_comb;
+ __le16 rsvd0;
+ u8 info0; /* %RX_PPDU_START_INFO0_ */
+ __le32 info1; /* %RX_PPDU_START_INFO1_ */
+ __le32 info2; /* %RX_PPDU_START_INFO2_ */
+ __le32 info3; /* %RX_PPDU_START_INFO3_ */
+ __le32 info4; /* %RX_PPDU_START_INFO4_ */
+ __le32 info5; /* %RX_PPDU_START_INFO5_ */
+} __packed;
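+
+/* Illustrative sketch (hypothetical helper and constant, not part of the
+ * driver): every RSSI field in rx_ppdu_start uses 0x80 as the "invalid"
+ * marker, so a reader checks for it before trusting the value.
+ */
+#define ATH10K_RX_DESC_EXAMPLE_RSSI_INVALID 0x80
+
+static inline bool ath10k_rx_desc_example_rssi_valid(u8 rssi)
+{
+	return rssi != ATH10K_RX_DESC_EXAMPLE_RSSI_INVALID;
+}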
+
+/*
+ * rssi_chain0_pri20
+ * RSSI of RX PPDU on chain 0 of primary 20 MHz bandwidth.
+ * Value of 0x80 indicates invalid.
+ *
+ * rssi_chain0_sec20
+ * RSSI of RX PPDU on chain 0 of secondary 20 MHz bandwidth.
+ * Value of 0x80 indicates invalid.
+ *
+ * rssi_chain0_sec40
+ * RSSI of RX PPDU on chain 0 of secondary 40 MHz bandwidth.
+ * Value of 0x80 indicates invalid.
+ *
+ * rssi_chain0_sec80
+ * RSSI of RX PPDU on chain 0 of secondary 80 MHz bandwidth.
+ * Value of 0x80 indicates invalid.
+ *
+ * rssi_chain1_pri20
+ * RSSI of RX PPDU on chain 1 of primary 20 MHz bandwidth.
+ * Value of 0x80 indicates invalid.
+ *
+ * rssi_chain1_sec20
+ * RSSI of RX PPDU on chain 1 of secondary 20 MHz bandwidth.
+ * Value of 0x80 indicates invalid.
+ *
+ * rssi_chain1_sec40
+ * RSSI of RX PPDU on chain 1 of secondary 40 MHz bandwidth.
+ * Value of 0x80 indicates invalid.
+ *
+ * rssi_chain1_sec80
+ * RSSI of RX PPDU on chain 1 of secondary 80 MHz bandwidth.
+ * Value of 0x80 indicates invalid.
+ *
+ * rssi_chain2_pri20
+ * RSSI of RX PPDU on chain 2 of primary 20 MHz bandwidth.
+ * Value of 0x80 indicates invalid.
+ *
+ * rssi_chain2_sec20
+ * RSSI of RX PPDU on chain 2 of secondary 20 MHz bandwidth.
+ * Value of 0x80 indicates invalid.
+ *
+ * rssi_chain2_sec40
+ * RSSI of RX PPDU on chain 2 of secondary 40 MHz bandwidth.
+ * Value of 0x80 indicates invalid.
+ *
+ * rssi_chain2_sec80
+ * RSSI of RX PPDU on chain 2 of secondary 80 MHz bandwidth.
+ * Value of 0x80 indicates invalid.
+ *
+ * rssi_chain3_pri20
+ * RSSI of RX PPDU on chain 3 of primary 20 MHz bandwidth.
+ * Value of 0x80 indicates invalid.
+ *
+ * rssi_chain3_sec20
+ * RSSI of RX PPDU on chain 3 of secondary 20 MHz bandwidth.
+ * Value of 0x80 indicates invalid.
+ *
+ * rssi_chain3_sec40
+ * RSSI of RX PPDU on chain 3 of secondary 40 MHz bandwidth.
+ * Value of 0x80 indicates invalid.
+ *
+ * rssi_chain3_sec80
+ * RSSI of RX PPDU on chain 3 of secondary 80 MHz bandwidth.
+ * Value of 0x80 indicates invalid.
+ *
+ * rssi_comb
+ * The combined RSSI of RX PPDU of all active chains and
+ * bandwidths. Value of 0x80 indicates invalid.
+ *
+ * reserved_4a
+ * Reserved: HW should fill with 0, FW should ignore.
+ *
+ * is_greenfield
+ * Do we really support this?
+ *
+ * reserved_4b
+ * Reserved: HW should fill with 0, FW should ignore.
+ *
+ * l_sig_rate
+ * If l_sig_rate_select is 0:
+ * 0x8: OFDM 48 Mbps
+ * 0x9: OFDM 24 Mbps
+ * 0xA: OFDM 12 Mbps
+ * 0xB: OFDM 6 Mbps
+ * 0xC: OFDM 54 Mbps
+ * 0xD: OFDM 36 Mbps
+ * 0xE: OFDM 18 Mbps
+ * 0xF: OFDM 9 Mbps
+ * If l_sig_rate_select is 1:
+ * 0x8: CCK 11 Mbps long preamble
+ * 0x9: CCK 5.5 Mbps long preamble
+ * 0xA: CCK 2 Mbps long preamble
+ * 0xB: CCK 1 Mbps long preamble
+ * 0xC: CCK 11 Mbps short preamble
+ * 0xD: CCK 5.5 Mbps short preamble
+ * 0xE: CCK 2 Mbps short preamble
+ *
+ * l_sig_rate_select
+ * Legacy signal rate select. If set then l_sig_rate indicates
+ * CCK rates. If clear then l_sig_rate indicates OFDM rates.
+ *
+ * l_sig_length
+ * Length of legacy frame in octets.
+ *
+ * l_sig_parity
+ * Odd parity over l_sig_rate and l_sig_length
+ *
+ * l_sig_tail
+ * Tail bits for Viterbi decoder
+ *
+ * preamble_type
+ * Indicates the type of preamble ahead:
+ * 0x4: Legacy (OFDM/CCK)
+ * 0x8: HT
+ * 0x9: HT with TxBF
+ * 0xC: VHT
+ * 0xD: VHT with TxBF
+ * 0x80 - 0xFF: Reserved for special baseband data types such
+ * as radar and spectral scan.
+ *
+ * ht_sig_vht_sig_a_1
+ * If preamble_type == 0x8 or 0x9
+ * HT-SIG (first 24 bits)
+ * If preamble_type == 0xC or 0xD
+ * VHT-SIG A (first 24 bits)
+ * Else
+ * Reserved
+ *
+ * reserved_6
+ * Reserved: HW should fill with 0, FW should ignore.
+ *
+ * ht_sig_vht_sig_a_2
+ * If preamble_type == 0x8 or 0x9
+ * HT-SIG (last 24 bits)
+ * If preamble_type == 0xC or 0xD
+ * VHT-SIG A (last 24 bits)
+ * Else
+ * Reserved
+ *
+ * txbf_h_info
+ * Indicates that the packet data carries H information which
+ * is used for TxBF debug.
+ *
+ * reserved_7
+ * Reserved: HW should fill with 0, FW should ignore.
+ *
+ * vht_sig_b
+ * WiFi 1.0 and WiFi 2.0 will likely have this field set to all
+ * 0s since the BB does not plan on decoding VHT SIG-B.
+ *
+ * reserved_8
+ * Reserved: HW should fill with 0, FW should ignore.
+ *
+ * service
+ * Service field from BB for OFDM, HT and VHT packets. CCK
+ * packets will have service field of 0.
+ *
+ * reserved_9
+ * Reserved: HW should fill with 0, FW should ignore.
+ */
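+
+/* Illustrative sketch (hypothetical helper, not part of the driver):
+ * decoding the l_sig_rate table above for the OFDM case
+ * (l_sig_rate_select == 0). Returns the rate in Mbps, or 0 for
+ * out-of-range encodings.
+ */
+static inline u32 ath10k_rx_desc_example_ofdm_rate_mbps(u32 l_sig_rate)
+{
+	static const u32 mbps[] = { 48, 24, 12, 6, 54, 36, 18, 9 };
+
+	if (l_sig_rate < 0x8 || l_sig_rate > 0xf)
+		return 0;
+
+	return mbps[l_sig_rate - 0x8];
+}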
+
+#define RX_PPDU_END_FLAGS_PHY_ERR BIT(0)
+#define RX_PPDU_END_FLAGS_RX_LOCATION BIT(1)
+#define RX_PPDU_END_FLAGS_TXBF_H_INFO BIT(2)
+
+#define RX_PPDU_END_INFO0_RX_ANTENNA_MASK 0x00ffffff
+#define RX_PPDU_END_INFO0_RX_ANTENNA_LSB 0
+#define RX_PPDU_END_INFO0_FLAGS_TX_HT_VHT_ACK BIT(24)
+#define RX_PPDU_END_INFO0_BB_CAPTURED_CHANNEL BIT(25)
+
+#define RX_PPDU_END_INFO1_PEER_IDX_MASK 0x1ffc
+#define RX_PPDU_END_INFO1_PEER_IDX_LSB 2
+#define RX_PPDU_END_INFO1_BB_DATA BIT(0)
+#define RX_PPDU_END_INFO1_PEER_IDX_VALID BIT(1)
+#define RX_PPDU_END_INFO1_PPDU_DONE BIT(15)
+
+struct rx_ppdu_end_common {
+ __le32 evm_p0;
+ __le32 evm_p1;
+ __le32 evm_p2;
+ __le32 evm_p3;
+ __le32 evm_p4;
+ __le32 evm_p5;
+ __le32 evm_p6;
+ __le32 evm_p7;
+ __le32 evm_p8;
+ __le32 evm_p9;
+ __le32 evm_p10;
+ __le32 evm_p11;
+ __le32 evm_p12;
+ __le32 evm_p13;
+ __le32 evm_p14;
+ __le32 evm_p15;
+ __le32 tsf_timestamp;
+ __le32 wb_timestamp;
+} __packed;
+
+struct rx_ppdu_end_qca988x {
+ u8 locationing_timestamp;
+ u8 phy_err_code;
+ __le16 flags; /* %RX_PPDU_END_FLAGS_ */
+ __le32 info0; /* %RX_PPDU_END_INFO0_ */
+ __le16 bb_length;
+ __le16 info1; /* %RX_PPDU_END_INFO1_ */
+} __packed;
+
+#define RX_PPDU_END_RTT_CORRELATION_VALUE_MASK 0x00ffffff
+#define RX_PPDU_END_RTT_CORRELATION_VALUE_LSB 0
+#define RX_PPDU_END_RTT_UNUSED_MASK 0x7f000000
+#define RX_PPDU_END_RTT_UNUSED_LSB 24
+#define RX_PPDU_END_RTT_NORMAL_MODE BIT(31)
+
+struct rx_ppdu_end_qca6174 {
+ u8 locationing_timestamp;
+ u8 phy_err_code;
+ __le16 flags; /* %RX_PPDU_END_FLAGS_ */
+ __le32 info0; /* %RX_PPDU_END_INFO0_ */
+ __le32 rtt; /* %RX_PPDU_END_RTT_ */
+ __le16 bb_length;
+ __le16 info1; /* %RX_PPDU_END_INFO1_ */
+} __packed;
+
+#define RX_PKT_END_INFO0_RX_SUCCESS BIT(0)
+#define RX_PKT_END_INFO0_ERR_TX_INTERRUPT_RX BIT(3)
+#define RX_PKT_END_INFO0_ERR_OFDM_POWER_DROP BIT(4)
+#define RX_PKT_END_INFO0_ERR_OFDM_RESTART BIT(5)
+#define RX_PKT_END_INFO0_ERR_CCK_POWER_DROP BIT(6)
+#define RX_PKT_END_INFO0_ERR_CCK_RESTART BIT(7)
+
+#define RX_LOCATION_INFO_RTT_CORR_VAL_MASK 0x0001ffff
+#define RX_LOCATION_INFO_RTT_CORR_VAL_LSB 0
+#define RX_LOCATION_INFO_FAC_STATUS_MASK 0x000c0000
+#define RX_LOCATION_INFO_FAC_STATUS_LSB 18
+#define RX_LOCATION_INFO_PKT_BW_MASK 0x00700000
+#define RX_LOCATION_INFO_PKT_BW_LSB 20
+#define RX_LOCATION_INFO_RTT_TX_FRAME_PHASE_MASK 0x01800000
+#define RX_LOCATION_INFO_RTT_TX_FRAME_PHASE_LSB 23
+#define RX_LOCATION_INFO_CIR_STATUS BIT(17)
+#define RX_LOCATION_INFO_RTT_MAC_PHY_PHASE BIT(25)
+#define RX_LOCATION_INFO_RTT_TX_DATA_START_X BIT(26)
+#define RX_LOCATION_INFO_HW_IFFT_MODE BIT(30)
+#define RX_LOCATION_INFO_RX_LOCATION_VALID BIT(31)
+
+struct rx_pkt_end {
+ __le32 info0; /* %RX_PKT_END_INFO0_ */
+ __le32 phy_timestamp_1;
+ __le32 phy_timestamp_2;
+} __packed;
+
+struct rx_pkt_end_wcn3990 {
+ __le32 info0; /* %RX_PKT_END_INFO0_ */
+ __le64 phy_timestamp_1;
+ __le64 phy_timestamp_2;
+} __packed;
+
+#define RX_LOCATION_INFO0_RTT_FAC_LEGACY_MASK 0x00003fff
+#define RX_LOCATION_INFO0_RTT_FAC_LEGACY_LSB 0
+#define RX_LOCATION_INFO0_RTT_FAC_VHT_MASK 0x1fff8000
+#define RX_LOCATION_INFO0_RTT_FAC_VHT_LSB 15
+#define RX_LOCATION_INFO0_RTT_STRONGEST_CHAIN_MASK 0xc0000000
+#define RX_LOCATION_INFO0_RTT_STRONGEST_CHAIN_LSB 30
+#define RX_LOCATION_INFO0_RTT_FAC_LEGACY_STATUS BIT(14)
+#define RX_LOCATION_INFO0_RTT_FAC_VHT_STATUS BIT(29)
+
+#define RX_LOCATION_INFO1_RTT_PREAMBLE_TYPE_MASK 0x0000000c
+#define RX_LOCATION_INFO1_RTT_PREAMBLE_TYPE_LSB 2
+#define RX_LOCATION_INFO1_PKT_BW_MASK 0x00000030
+#define RX_LOCATION_INFO1_PKT_BW_LSB 4
+#define RX_LOCATION_INFO1_SKIP_P_SKIP_BTCF_MASK 0x0000ff00
+#define RX_LOCATION_INFO1_SKIP_P_SKIP_BTCF_LSB 8
+#define RX_LOCATION_INFO1_RTT_MSC_RATE_MASK 0x000f0000
+#define RX_LOCATION_INFO1_RTT_MSC_RATE_LSB 16
+#define RX_LOCATION_INFO1_RTT_PBD_LEG_BW_MASK 0x00300000
+#define RX_LOCATION_INFO1_RTT_PBD_LEG_BW_LSB 20
+#define RX_LOCATION_INFO1_TIMING_BACKOFF_MASK 0x07c00000
+#define RX_LOCATION_INFO1_TIMING_BACKOFF_LSB 22
+#define RX_LOCATION_INFO1_RTT_TX_FRAME_PHASE_MASK 0x18000000
+#define RX_LOCATION_INFO1_RTT_TX_FRAME_PHASE_LSB 27
+#define RX_LOCATION_INFO1_RTT_CFR_STATUS BIT(0)
+#define RX_LOCATION_INFO1_RTT_CIR_STATUS BIT(1)
+#define RX_LOCATION_INFO1_RTT_GI_TYPE BIT(7)
+#define RX_LOCATION_INFO1_RTT_MAC_PHY_PHASE BIT(29)
+#define RX_LOCATION_INFO1_RTT_TX_DATA_START_X_PHASE BIT(30)
+#define RX_LOCATION_INFO1_RX_LOCATION_VALID BIT(31)
+
+struct rx_location_info {
+ __le32 rx_location_info0; /* %RX_LOCATION_INFO0_ */
+ __le32 rx_location_info1; /* %RX_LOCATION_INFO1_ */
+} __packed;
+
+struct rx_location_info_wcn3990 {
+ __le32 rx_location_info0; /* %RX_LOCATION_INFO0_ */
+ __le32 rx_location_info1; /* %RX_LOCATION_INFO1_ */
+ __le32 rx_location_info2; /* %RX_LOCATION_INFO2_ */
+} __packed;
+
+enum rx_phy_ppdu_end_info0 {
+ RX_PHY_PPDU_END_INFO0_ERR_RADAR = BIT(2),
+ RX_PHY_PPDU_END_INFO0_ERR_RX_ABORT = BIT(3),
+ RX_PHY_PPDU_END_INFO0_ERR_RX_NAP = BIT(4),
+ RX_PHY_PPDU_END_INFO0_ERR_OFDM_TIMING = BIT(5),
+ RX_PHY_PPDU_END_INFO0_ERR_OFDM_PARITY = BIT(6),
+ RX_PHY_PPDU_END_INFO0_ERR_OFDM_RATE = BIT(7),
+ RX_PHY_PPDU_END_INFO0_ERR_OFDM_LENGTH = BIT(8),
+ RX_PHY_PPDU_END_INFO0_ERR_OFDM_RESTART = BIT(9),
+ RX_PHY_PPDU_END_INFO0_ERR_OFDM_SERVICE = BIT(10),
+ RX_PHY_PPDU_END_INFO0_ERR_OFDM_POWER_DROP = BIT(11),
+ RX_PHY_PPDU_END_INFO0_ERR_CCK_BLOCKER = BIT(12),
+ RX_PHY_PPDU_END_INFO0_ERR_CCK_TIMING = BIT(13),
+ RX_PHY_PPDU_END_INFO0_ERR_CCK_HEADER_CRC = BIT(14),
+ RX_PHY_PPDU_END_INFO0_ERR_CCK_RATE = BIT(15),
+ RX_PHY_PPDU_END_INFO0_ERR_CCK_LENGTH = BIT(16),
+ RX_PHY_PPDU_END_INFO0_ERR_CCK_RESTART = BIT(17),
+ RX_PHY_PPDU_END_INFO0_ERR_CCK_SERVICE = BIT(18),
+ RX_PHY_PPDU_END_INFO0_ERR_CCK_POWER_DROP = BIT(19),
+ RX_PHY_PPDU_END_INFO0_ERR_HT_CRC = BIT(20),
+ RX_PHY_PPDU_END_INFO0_ERR_HT_LENGTH = BIT(21),
+ RX_PHY_PPDU_END_INFO0_ERR_HT_RATE = BIT(22),
+ RX_PHY_PPDU_END_INFO0_ERR_HT_ZLF = BIT(23),
+ RX_PHY_PPDU_END_INFO0_ERR_FALSE_RADAR_EXT = BIT(24),
+ RX_PHY_PPDU_END_INFO0_ERR_GREEN_FIELD = BIT(25),
+ RX_PHY_PPDU_END_INFO0_ERR_SPECTRAL_SCAN = BIT(26),
+ RX_PHY_PPDU_END_INFO0_ERR_RX_DYN_BW = BIT(27),
+ RX_PHY_PPDU_END_INFO0_ERR_LEG_HT_MISMATCH = BIT(28),
+ RX_PHY_PPDU_END_INFO0_ERR_VHT_CRC = BIT(29),
+ RX_PHY_PPDU_END_INFO0_ERR_VHT_SIGA = BIT(30),
+ RX_PHY_PPDU_END_INFO0_ERR_VHT_LSIG = BIT(31),
+};
+
+enum rx_phy_ppdu_end_info1 {
+ RX_PHY_PPDU_END_INFO1_ERR_VHT_NDP = BIT(0),
+ RX_PHY_PPDU_END_INFO1_ERR_VHT_NSYM = BIT(1),
+ RX_PHY_PPDU_END_INFO1_ERR_VHT_RX_EXT_SYM = BIT(2),
+ RX_PHY_PPDU_END_INFO1_ERR_VHT_RX_SKIP_ID0 = BIT(3),
+ RX_PHY_PPDU_END_INFO1_ERR_VHT_RX_SKIP_ID1_62 = BIT(4),
+ RX_PHY_PPDU_END_INFO1_ERR_VHT_RX_SKIP_ID63 = BIT(5),
+ RX_PHY_PPDU_END_INFO1_ERR_OFDM_LDPC_DECODER = BIT(6),
+ RX_PHY_PPDU_END_INFO1_ERR_DEFER_NAP = BIT(7),
+ RX_PHY_PPDU_END_INFO1_ERR_FDOMAIN_TIMEOUT = BIT(8),
+ RX_PHY_PPDU_END_INFO1_ERR_LSIG_REL_CHECK = BIT(9),
+ RX_PHY_PPDU_END_INFO1_ERR_BT_COLLISION = BIT(10),
+ RX_PHY_PPDU_END_INFO1_ERR_MU_FEEDBACK = BIT(11),
+ RX_PHY_PPDU_END_INFO1_ERR_TX_INTERRUPT_RX = BIT(12),
+ RX_PHY_PPDU_END_INFO1_ERR_RX_CBF = BIT(13),
+};
+
+struct rx_phy_ppdu_end {
+ __le32 info0; /* %RX_PHY_PPDU_END_INFO0_ */
+ __le32 info1; /* %RX_PHY_PPDU_END_INFO1_ */
+} __packed;
+
+#define RX_PPDU_END_RX_TIMING_OFFSET_MASK 0x00000fff
+#define RX_PPDU_END_RX_TIMING_OFFSET_LSB 0
+
+#define RX_PPDU_END_RX_INFO_RX_ANTENNA_MASK 0x00ffffff
+#define RX_PPDU_END_RX_INFO_RX_ANTENNA_LSB 0
+#define RX_PPDU_END_RX_INFO_TX_HT_VHT_ACK BIT(24)
+#define RX_PPDU_END_RX_INFO_RX_PKT_END_VALID BIT(25)
+#define RX_PPDU_END_RX_INFO_RX_PHY_PPDU_END_VALID BIT(26)
+#define RX_PPDU_END_RX_INFO_RX_TIMING_OFFSET_VALID BIT(27)
+#define RX_PPDU_END_RX_INFO_BB_CAPTURED_CHANNEL BIT(28)
+#define RX_PPDU_END_RX_INFO_UNSUPPORTED_MU_NC BIT(29)
+#define RX_PPDU_END_RX_INFO_OTP_TXBF_DISABLE BIT(30)
+
+struct rx_ppdu_end_qca99x0 {
+ struct rx_pkt_end rx_pkt_end;
+ __le32 rx_location_info; /* %RX_LOCATION_INFO_ */
+ struct rx_phy_ppdu_end rx_phy_ppdu_end;
+ __le32 rx_timing_offset; /* %RX_PPDU_END_RX_TIMING_OFFSET_ */
+ __le32 rx_info; /* %RX_PPDU_END_RX_INFO_ */
+ __le16 bb_length;
+ __le16 info1; /* %RX_PPDU_END_INFO1_ */
+} __packed;
+
+struct rx_ppdu_end_qca9984 {
+ struct rx_pkt_end rx_pkt_end;
+ struct rx_location_info rx_location_info;
+ struct rx_phy_ppdu_end rx_phy_ppdu_end;
+ __le32 rx_timing_offset; /* %RX_PPDU_END_RX_TIMING_OFFSET_ */
+ __le32 rx_info; /* %RX_PPDU_END_RX_INFO_ */
+ __le16 bb_length;
+ __le16 info1; /* %RX_PPDU_END_INFO1_ */
+} __packed;
+
+struct rx_ppdu_end_wcn3990 {
+ struct rx_pkt_end_wcn3990 rx_pkt_end;
+ struct rx_location_info_wcn3990 rx_location_info;
+ struct rx_phy_ppdu_end rx_phy_ppdu_end;
+ __le32 rx_timing_offset;
+ __le32 reserved_info_0;
+ __le32 reserved_info_1;
+ __le32 rx_antenna_info;
+ __le32 rx_coex_info;
+ __le32 rx_mpdu_cnt_info;
+ __le64 phy_timestamp_tx;
+ __le32 rx_bb_length;
+} __packed;
+
+struct rx_ppdu_end {
+ struct rx_ppdu_end_common common;
+ union {
+ struct rx_ppdu_end_wcn3990 wcn3990;
+ } __packed;
+} __packed;
+
+struct rx_ppdu_end_v1 {
+ struct rx_ppdu_end_common common;
+ union {
+ struct rx_ppdu_end_qca988x qca988x;
+ struct rx_ppdu_end_qca6174 qca6174;
+ struct rx_ppdu_end_qca99x0 qca99x0;
+ struct rx_ppdu_end_qca9984 qca9984;
+ } __packed;
+} __packed;
+
+/*
+ * evm_p0
+ * EVM for pilot 0. Contains EVM for streams: 0, 1, 2 and 3.
+ *
+ * evm_p1
+ * EVM for pilot 1. Contains EVM for streams: 0, 1, 2 and 3.
+ *
+ * evm_p2
+ * EVM for pilot 2. Contains EVM for streams: 0, 1, 2 and 3.
+ *
+ * evm_p3
+ * EVM for pilot 3. Contains EVM for streams: 0, 1, 2 and 3.
+ *
+ * evm_p4
+ * EVM for pilot 4. Contains EVM for streams: 0, 1, 2 and 3.
+ *
+ * evm_p5
+ * EVM for pilot 5. Contains EVM for streams: 0, 1, 2 and 3.
+ *
+ * evm_p6
+ * EVM for pilot 6. Contains EVM for streams: 0, 1, 2 and 3.
+ *
+ * evm_p7
+ * EVM for pilot 7. Contains EVM for streams: 0, 1, 2 and 3.
+ *
+ * evm_p8
+ * EVM for pilot 8. Contains EVM for streams: 0, 1, 2 and 3.
+ *
+ * evm_p9
+ * EVM for pilot 9. Contains EVM for streams: 0, 1, 2 and 3.
+ *
+ * evm_p10
+ * EVM for pilot 10. Contains EVM for streams: 0, 1, 2 and 3.
+ *
+ * evm_p11
+ * EVM for pilot 11. Contains EVM for streams: 0, 1, 2 and 3.
+ *
+ * evm_p12
+ * EVM for pilot 12. Contains EVM for streams: 0, 1, 2 and 3.
+ *
+ * evm_p13
+ * EVM for pilot 13. Contains EVM for streams: 0, 1, 2 and 3.
+ *
+ * evm_p14
+ * EVM for pilot 14. Contains EVM for streams: 0, 1, 2 and 3.
+ *
+ * evm_p15
+ * EVM for pilot 15. Contains EVM for streams: 0, 1, 2 and 3.
+ *
+ * tsf_timestamp
+ * Receive TSF timestamp sampled on the rising edge of
+ * rx_clear. For PHY errors this may be the current TSF when
+ * phy_error is asserted if the rx_clear does not assert before
+ * the end of the PHY error.
+ *
+ * wb_timestamp
+ * WLAN/BT timestamp is a 1 usec resolution timestamp which
+ * does not get updated based on receive beacon like TSF. The
+ * same rules for capturing tsf_timestamp are used to capture
+ * the wb_timestamp.
+ *
+ * locationing_timestamp
+ * Timestamp used for locationing. This timestamp is used to
+ * indicate fractions of usec. For example if the MAC clock is
+ * running at 80 MHz, the timestamp will increment every 12.5
+ * nsec. The value starts at 0 and increments to 79 and
+ * returns to 0 and repeats. This information is valid for
+ * every PPDU. This information can be used in conjunction
+ * with wb_timestamp to capture large delta times.
+ *
+ * phy_err_code
+ * See section 1.10.8.1.2 for the list of PHY error codes.
+ *
+ * phy_err
+ * Indicates a PHY error was detected for this PPDU.
+ *
+ * rx_location
+ * Indicates that location information was requested.
+ *
+ * txbf_h_info
+ * Indicates that the packet data carries H information which
+ * is used for TxBF debug.
+ *
+ * reserved_18
+ * Reserved: HW should fill with 0, FW should ignore.
+ *
+ * rx_antenna
+ * Receive antenna value
+ *
+ * tx_ht_vht_ack
+ * Indicates that a HT or VHT Ack/BA frame was transmitted in
+ * response to this receive packet.
+ *
+ * bb_captured_channel
+ * Indicates that the BB has captured a channel dump. FW can
+ * then read the channel dump memory. This may indicate that
+ * the channel was captured either due to the PCU setting the
+ * capture_channel bit in the BB descriptor or the FW setting
+ * the capture_channel mode bit.
+ *
+ * reserved_19
+ * Reserved: HW should fill with 0, FW should ignore.
+ *
+ * bb_length
+ * Indicates the number of bytes of baseband information for
+ * PPDUs where the BB descriptor preamble type is 0x80 to 0xFF,
+ * which indicates that this is not a normal PPDU but rather
+ * contains baseband debug information.
+ *
+ * reserved_20
+ * Reserved: HW should fill with 0, FW should ignore.
+ *
+ * ppdu_done
+ * PPDU end status is only valid when the ppdu_done bit is set.
+ * Every time HW sets this bit in memory, FW/SW must clear it
+ * in memory. FW will initialize all the ppdu_done dwords to
+ * 0.
+ */
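+
+/* Illustrative sketch (hypothetical helper, not part of the driver): per
+ * the ppdu_done description above, a reader polls the done bit in info1
+ * before trusting the rest of the PPDU end status, and clears it afterwards
+ * so the next PPDU can be detected.
+ */
+static inline bool ath10k_rx_desc_example_ppdu_done(__le16 info1)
+{
+	return le16_to_cpu(info1) & RX_PPDU_END_INFO1_PPDU_DONE;
+}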
+
+#define FW_RX_DESC_INFO0_DISCARD BIT(0)
+#define FW_RX_DESC_INFO0_FORWARD BIT(1)
+#define FW_RX_DESC_INFO0_INSPECT BIT(5)
+#define FW_RX_DESC_INFO0_EXT_MASK 0xC0
+#define FW_RX_DESC_INFO0_EXT_LSB 6
+
+struct fw_rx_desc_base {
+ u8 info0;
+} __packed;
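+
+/* Illustrative sketch (hypothetical helper): recovering the 2-bit extension
+ * field from info0 with the EXT mask/LSB pair above.
+ */
+static inline u8 ath10k_rx_desc_example_fw_ext(const struct fw_rx_desc_base *fw_desc)
+{
+	return (fw_desc->info0 & FW_RX_DESC_INFO0_EXT_MASK) >>
+	       FW_RX_DESC_INFO0_EXT_LSB;
+}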
+
+#define FW_RX_DESC_FLAGS_FIRST_MSDU (1 << 0)
+#define FW_RX_DESC_FLAGS_LAST_MSDU (1 << 1)
+#define FW_RX_DESC_C3_FAILED (1 << 2)
+#define FW_RX_DESC_C4_FAILED (1 << 3)
+#define FW_RX_DESC_IPV6 (1 << 4)
+#define FW_RX_DESC_TCP (1 << 5)
+#define FW_RX_DESC_UDP (1 << 6)
+
+struct fw_rx_desc_hl {
+ union {
+ struct {
+ u8 discard:1,
+ forward:1,
+ any_err:1,
+ dup_err:1,
+ reserved:1,
+ inspect:1,
+ extension:2;
+ } bits;
+ u8 info0;
+ } u;
+
+ u8 version;
+ u8 len;
+ u8 flags;
+} __packed;
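+
+/* Note: the bits/info0 union above provides two views of the same octet.
+ * Masks like FW_RX_DESC_INFO0_DISCARD operate on u.info0 and do not depend
+ * on how the compiler allocates the bitfield members.
+ */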
+
+#endif /* _RX_DESC_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/sdio.c b/drivers/net/wireless/ath/ath10k/sdio.c
new file mode 100644
index 000000000000..c06d50db40b8
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/sdio.c
@@ -0,0 +1,2678 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (c) 2004-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2012,2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2016-2017 Erik Stromdahl <erik.stromdahl@gmail.com>
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/mmc/sdio_ids.h>
+#include <linux/mmc/sdio.h>
+#include <linux/mmc/sd.h>
+#include <linux/bitfield.h>
+#include "core.h"
+#include "bmi.h"
+#include "debug.h"
+#include "hif.h"
+#include "htc.h"
+#include "mac.h"
+#include "targaddrs.h"
+#include "trace.h"
+#include "sdio.h"
+#include "coredump.h"
+
+void ath10k_sdio_fw_crashed_dump(struct ath10k *ar);
+
+#define ATH10K_SDIO_VSG_BUF_SIZE (64 * 1024)
+
+/* inlined helper functions */
+
+static inline int ath10k_sdio_calc_txrx_padded_len(struct ath10k_sdio *ar_sdio,
+ size_t len)
+{
+ return __ALIGN_MASK((len), ar_sdio->mbox_info.block_mask);
+}
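+
+/* For example (illustrative arithmetic only): with a 256 byte block size the
+ * block_mask is 0xff, so a 300 byte message is padded up to the next block
+ * multiple: (300 + 0xff) & ~0xff == 512.
+ */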
+
+static inline enum ath10k_htc_ep_id pipe_id_to_eid(u8 pipe_id)
+{
+ return (enum ath10k_htc_ep_id)pipe_id;
+}
+
+static inline void ath10k_sdio_mbox_free_rx_pkt(struct ath10k_sdio_rx_data *pkt)
+{
+ dev_kfree_skb(pkt->skb);
+ pkt->skb = NULL;
+ pkt->alloc_len = 0;
+ pkt->act_len = 0;
+ pkt->trailer_only = false;
+}
+
+static inline int ath10k_sdio_mbox_alloc_rx_pkt(struct ath10k_sdio_rx_data *pkt,
+ size_t act_len, size_t full_len,
+ bool part_of_bundle,
+ bool last_in_bundle)
+{
+ pkt->skb = dev_alloc_skb(full_len);
+ if (!pkt->skb)
+ return -ENOMEM;
+
+ pkt->act_len = act_len;
+ pkt->alloc_len = full_len;
+ pkt->part_of_bundle = part_of_bundle;
+ pkt->last_in_bundle = last_in_bundle;
+ pkt->trailer_only = false;
+
+ return 0;
+}
+
+static inline bool is_trailer_only_msg(struct ath10k_sdio_rx_data *pkt)
+{
+ bool trailer_only = false;
+ struct ath10k_htc_hdr *htc_hdr =
+ (struct ath10k_htc_hdr *)pkt->skb->data;
+ u16 len = __le16_to_cpu(htc_hdr->len);
+
+ if (len == htc_hdr->trailer_len)
+ trailer_only = true;
+
+ return trailer_only;
+}
+
+/* sdio/mmc functions */
+
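+/* Summary (for reference): the argument assembled below follows the
+ * SD_IO_RW_DIRECT (CMD52) format: bit 31 selects write, bit 27 the
+ * read-after-write (RAW) mode, bits 25:9 the register address and bits 7:0
+ * the data byte. Bits 30:28 (the function number) are left at zero, so
+ * these helpers always target function 0 (the CCCR area).
+ */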
+static inline void ath10k_sdio_set_cmd52_arg(u32 *arg, u8 write, u8 raw,
+ unsigned int address,
+ unsigned char val)
+{
+ *arg = FIELD_PREP(BIT(31), write) |
+ FIELD_PREP(BIT(27), raw) |
+ FIELD_PREP(BIT(26), 1) |
+ FIELD_PREP(GENMASK(25, 9), address) |
+ FIELD_PREP(BIT(8), 1) |
+ FIELD_PREP(GENMASK(7, 0), val);
+}
+
+static int ath10k_sdio_func0_cmd52_wr_byte(struct mmc_card *card,
+ unsigned int address,
+ unsigned char byte)
+{
+ struct mmc_command io_cmd;
+
+ memset(&io_cmd, 0, sizeof(io_cmd));
+ ath10k_sdio_set_cmd52_arg(&io_cmd.arg, 1, 0, address, byte);
+ io_cmd.opcode = SD_IO_RW_DIRECT;
+ io_cmd.flags = MMC_RSP_R5 | MMC_CMD_AC;
+
+ return mmc_wait_for_cmd(card->host, &io_cmd, 0);
+}
+
+static int ath10k_sdio_func0_cmd52_rd_byte(struct mmc_card *card,
+ unsigned int address,
+ unsigned char *byte)
+{
+ struct mmc_command io_cmd;
+ int ret;
+
+ memset(&io_cmd, 0, sizeof(io_cmd));
+ ath10k_sdio_set_cmd52_arg(&io_cmd.arg, 0, 0, address, 0);
+ io_cmd.opcode = SD_IO_RW_DIRECT;
+ io_cmd.flags = MMC_RSP_R5 | MMC_CMD_AC;
+
+ ret = mmc_wait_for_cmd(card->host, &io_cmd, 0);
+ if (!ret)
+ *byte = io_cmd.resp[0];
+
+ return ret;
+}
+
+static int ath10k_sdio_config(struct ath10k *ar)
+{
+ struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+ struct sdio_func *func = ar_sdio->func;
+ unsigned char byte, asyncintdelay = 2;
+ int ret;
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "sdio configuration\n");
+
+ sdio_claim_host(func);
+
+ byte = 0;
+ ret = ath10k_sdio_func0_cmd52_rd_byte(func->card,
+ SDIO_CCCR_DRIVE_STRENGTH,
+ &byte);
+
+ byte &= ~ATH10K_SDIO_DRIVE_DTSX_MASK;
+ byte |= FIELD_PREP(ATH10K_SDIO_DRIVE_DTSX_MASK,
+ ATH10K_SDIO_DRIVE_DTSX_TYPE_D);
+
+ ret = ath10k_sdio_func0_cmd52_wr_byte(func->card,
+ SDIO_CCCR_DRIVE_STRENGTH,
+ byte);
+
+ byte = 0;
+ ret = ath10k_sdio_func0_cmd52_rd_byte(
+ func->card,
+ CCCR_SDIO_DRIVER_STRENGTH_ENABLE_ADDR,
+ &byte);
+
+ byte |= (CCCR_SDIO_DRIVER_STRENGTH_ENABLE_A |
+ CCCR_SDIO_DRIVER_STRENGTH_ENABLE_C |
+ CCCR_SDIO_DRIVER_STRENGTH_ENABLE_D);
+
+ ret = ath10k_sdio_func0_cmd52_wr_byte(func->card,
+ CCCR_SDIO_DRIVER_STRENGTH_ENABLE_ADDR,
+ byte);
+ if (ret) {
+ ath10k_warn(ar, "failed to enable driver strength: %d\n", ret);
+ goto out;
+ }
+
+ byte = 0;
+ ret = ath10k_sdio_func0_cmd52_rd_byte(func->card,
+ CCCR_SDIO_IRQ_MODE_REG_SDIO3,
+ &byte);
+
+ byte |= SDIO_IRQ_MODE_ASYNC_4BIT_IRQ_SDIO3;
+
+ ret = ath10k_sdio_func0_cmd52_wr_byte(func->card,
+ CCCR_SDIO_IRQ_MODE_REG_SDIO3,
+ byte);
+ if (ret) {
+ ath10k_warn(ar, "failed to enable 4-bit async irq mode: %d\n",
+ ret);
+ goto out;
+ }
+
+ byte = 0;
+ ret = ath10k_sdio_func0_cmd52_rd_byte(func->card,
+ CCCR_SDIO_ASYNC_INT_DELAY_ADDRESS,
+ &byte);
+
+ byte &= ~CCCR_SDIO_ASYNC_INT_DELAY_MASK;
+ byte |= FIELD_PREP(CCCR_SDIO_ASYNC_INT_DELAY_MASK, asyncintdelay);
+
+ ret = ath10k_sdio_func0_cmd52_wr_byte(func->card,
+ CCCR_SDIO_ASYNC_INT_DELAY_ADDRESS,
+ byte);
+
+ /* give us some time to enable, in ms */
+ func->enable_timeout = 100;
+
+ ret = sdio_set_block_size(func, ar_sdio->mbox_info.block_size);
+ if (ret) {
+ ath10k_warn(ar, "failed to set sdio block size to %d: %d\n",
+ ar_sdio->mbox_info.block_size, ret);
+ goto out;
+ }
+
+out:
+ sdio_release_host(func);
+ return ret;
+}
+
+static int ath10k_sdio_write32(struct ath10k *ar, u32 addr, u32 val)
+{
+ struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+ struct sdio_func *func = ar_sdio->func;
+ int ret;
+
+ sdio_claim_host(func);
+
+ sdio_writel(func, val, addr, &ret);
+ if (ret) {
+ ath10k_warn(ar, "failed to write 0x%x to address 0x%x: %d\n",
+ val, addr, ret);
+ goto out;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio write32 addr 0x%x val 0x%x\n",
+ addr, val);
+
+out:
+ sdio_release_host(func);
+
+ return ret;
+}
+
+static int ath10k_sdio_writesb32(struct ath10k *ar, u32 addr, u32 val)
+{
+ struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+ struct sdio_func *func = ar_sdio->func;
+ __le32 *buf;
+ int ret;
+
+ buf = kzalloc(sizeof(*buf), GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ *buf = cpu_to_le32(val);
+
+ sdio_claim_host(func);
+
+ ret = sdio_writesb(func, addr, buf, sizeof(*buf));
+ if (ret) {
+ ath10k_warn(ar, "failed to write value 0x%x to fixed sb address 0x%x: %d\n",
+ val, addr, ret);
+ goto out;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio writesb32 addr 0x%x val 0x%x\n",
+ addr, val);
+
+out:
+ sdio_release_host(func);
+
+ kfree(buf);
+
+ return ret;
+}
+
+static int ath10k_sdio_read32(struct ath10k *ar, u32 addr, u32 *val)
+{
+ struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+ struct sdio_func *func = ar_sdio->func;
+ int ret;
+
+ sdio_claim_host(func);
+ *val = sdio_readl(func, addr, &ret);
+ if (ret) {
+ ath10k_warn(ar, "failed to read from address 0x%x: %d\n",
+ addr, ret);
+ goto out;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio read32 addr 0x%x val 0x%x\n",
+ addr, *val);
+
+out:
+ sdio_release_host(func);
+
+ return ret;
+}
+
+static int ath10k_sdio_read(struct ath10k *ar, u32 addr, void *buf, size_t len)
+{
+ struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+ struct sdio_func *func = ar_sdio->func;
+ int ret;
+
+ sdio_claim_host(func);
+
+ ret = sdio_memcpy_fromio(func, buf, addr, len);
+ if (ret) {
+ ath10k_warn(ar, "failed to read from address 0x%x: %d\n",
+ addr, ret);
+ goto out;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio read addr 0x%x buf 0x%p len %zu\n",
+ addr, buf, len);
+ ath10k_dbg_dump(ar, ATH10K_DBG_SDIO_DUMP, NULL, "sdio read ", buf, len);
+
+out:
+ sdio_release_host(func);
+
+ return ret;
+}
+
+static int ath10k_sdio_write(struct ath10k *ar, u32 addr, const void *buf, size_t len)
+{
+ struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+ struct sdio_func *func = ar_sdio->func;
+ int ret;
+
+ sdio_claim_host(func);
+
+ /* For some reason toio() doesn't have const for the buffer, need
+ * an ugly hack to work around that.
+ */
+ ret = sdio_memcpy_toio(func, addr, (void *)buf, len);
+ if (ret) {
+ ath10k_warn(ar, "failed to write to address 0x%x: %d\n",
+ addr, ret);
+ goto out;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio write addr 0x%x buf 0x%p len %zu\n",
+ addr, buf, len);
+ ath10k_dbg_dump(ar, ATH10K_DBG_SDIO_DUMP, NULL, "sdio write ", buf, len);
+
+out:
+ sdio_release_host(func);
+
+ return ret;
+}
+
+static int ath10k_sdio_readsb(struct ath10k *ar, u32 addr, void *buf, size_t len)
+{
+ struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+ struct sdio_func *func = ar_sdio->func;
+ int ret;
+
+ sdio_claim_host(func);
+
+ len = round_down(len, ar_sdio->mbox_info.block_size);
+
+ ret = sdio_readsb(func, buf, addr, len);
+ if (ret) {
+ ath10k_warn(ar, "failed to read from fixed (sb) address 0x%x: %d\n",
+ addr, ret);
+ goto out;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio readsb addr 0x%x buf 0x%p len %zu\n",
+ addr, buf, len);
+ ath10k_dbg_dump(ar, ATH10K_DBG_SDIO_DUMP, NULL, "sdio readsb ", buf, len);
+
+out:
+ sdio_release_host(func);
+
+ return ret;
+}
+
+/* HIF mbox functions */
+
+static int ath10k_sdio_mbox_rx_process_packet(struct ath10k *ar,
+ struct ath10k_sdio_rx_data *pkt,
+ u32 *lookaheads,
+ int *n_lookaheads)
+{
+ struct ath10k_htc *htc = &ar->htc;
+ struct sk_buff *skb = pkt->skb;
+ struct ath10k_htc_hdr *htc_hdr = (struct ath10k_htc_hdr *)skb->data;
+ bool trailer_present = htc_hdr->flags & ATH10K_HTC_FLAG_TRAILER_PRESENT;
+ enum ath10k_htc_ep_id eid;
+ u8 *trailer;
+ int ret;
+
+ if (trailer_present) {
+ trailer = skb->data + skb->len - htc_hdr->trailer_len;
+
+ eid = pipe_id_to_eid(htc_hdr->eid);
+
+ ret = ath10k_htc_process_trailer(htc,
+ trailer,
+ htc_hdr->trailer_len,
+ eid,
+ lookaheads,
+ n_lookaheads);
+ if (ret)
+ return ret;
+
+ if (is_trailer_only_msg(pkt))
+ pkt->trailer_only = true;
+
+ skb_trim(skb, skb->len - htc_hdr->trailer_len);
+ }
+
+ skb_pull(skb, sizeof(*htc_hdr));
+
+ return 0;
+}
+
+static int ath10k_sdio_mbox_rx_process_packets(struct ath10k *ar,
+ u32 lookaheads[],
+ int *n_lookahead)
+{
+ struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+ struct ath10k_htc *htc = &ar->htc;
+ struct ath10k_sdio_rx_data *pkt;
+ struct ath10k_htc_ep *ep;
+ struct ath10k_skb_rxcb *cb;
+ enum ath10k_htc_ep_id id;
+ int ret, i, *n_lookahead_local;
+ u32 *lookaheads_local;
+ int lookahead_idx = 0;
+
+ for (i = 0; i < ar_sdio->n_rx_pkts; i++) {
+ lookaheads_local = lookaheads;
+ n_lookahead_local = n_lookahead;
+
+ id = ((struct ath10k_htc_hdr *)
+ &lookaheads[lookahead_idx++])->eid;
+
+ if (id >= ATH10K_HTC_EP_COUNT) {
+ ath10k_warn(ar, "invalid endpoint in look-ahead: %d\n",
+ id);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ep = &htc->endpoint[id];
+
+ if (ep->service_id == 0) {
+ ath10k_warn(ar, "ep %d is not connected\n", id);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ pkt = &ar_sdio->rx_pkts[i];
+
+ if (pkt->part_of_bundle && !pkt->last_in_bundle) {
+ /* Only read lookaheads from RX trailers
+ * for the last packet in a bundle.
+ */
+ lookahead_idx--;
+ lookaheads_local = NULL;
+ n_lookahead_local = NULL;
+ }
+
+ ret = ath10k_sdio_mbox_rx_process_packet(ar,
+ pkt,
+ lookaheads_local,
+ n_lookahead_local);
+ if (ret)
+ goto out;
+
+ if (!pkt->trailer_only) {
+ cb = ATH10K_SKB_RXCB(pkt->skb);
+ cb->eid = id;
+
+ skb_queue_tail(&ar_sdio->rx_head, pkt->skb);
+ queue_work(ar->workqueue_aux,
+ &ar_sdio->async_work_rx);
+ } else {
+ kfree_skb(pkt->skb);
+ }
+
+ /* The RX complete handler now owns the skb...*/
+ pkt->skb = NULL;
+ pkt->alloc_len = 0;
+ }
+
+ ret = 0;
+
+out:
+ /* Free all packets that were not passed on to the RX completion
+ * handler.
+ */
+ for (; i < ar_sdio->n_rx_pkts; i++)
+ ath10k_sdio_mbox_free_rx_pkt(&ar_sdio->rx_pkts[i]);
+
+ return ret;
+}
+
+static int ath10k_sdio_mbox_alloc_bundle(struct ath10k *ar,
+ struct ath10k_sdio_rx_data *rx_pkts,
+ struct ath10k_htc_hdr *htc_hdr,
+ size_t full_len, size_t act_len,
+ size_t *bndl_cnt)
+{
+ int ret, i;
+ u8 max_msgs = ar->htc.max_msgs_per_htc_bundle;
+
+ *bndl_cnt = ath10k_htc_get_bundle_count(max_msgs, htc_hdr->flags);
+
+ if (*bndl_cnt > max_msgs) {
+ ath10k_warn(ar,
+ "HTC bundle length %u exceeds maximum %u\n",
+ le16_to_cpu(htc_hdr->len),
+ max_msgs);
+ return -ENOMEM;
+ }
+
+ /* Allocate bndl_cnt extra skbs for the bundle. The packet
+ * containing the ATH10K_HTC_FLAG_BUNDLE_MASK flag is not
+ * included in bndl_cnt; the skb for that packet is allocated
+ * separately.
+ */
+ for (i = 0; i < *bndl_cnt; i++) {
+ ret = ath10k_sdio_mbox_alloc_rx_pkt(&rx_pkts[i],
+ act_len,
+ full_len,
+ true,
+ false);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath10k_sdio_mbox_rx_alloc(struct ath10k *ar,
+ u32 lookaheads[], int n_lookaheads)
+{
+ struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+ struct ath10k_htc_hdr *htc_hdr;
+ size_t full_len, act_len;
+ bool last_in_bundle;
+ int ret, i;
+ int pkt_cnt = 0;
+
+ if (n_lookaheads > ATH10K_SDIO_MAX_RX_MSGS) {
+ ath10k_warn(ar, "the total number of pkts to be fetched (%u) exceeds maximum %u\n",
+ n_lookaheads, ATH10K_SDIO_MAX_RX_MSGS);
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ for (i = 0; i < n_lookaheads; i++) {
+ htc_hdr = (struct ath10k_htc_hdr *)&lookaheads[i];
+ last_in_bundle = false;
+
+ if (le16_to_cpu(htc_hdr->len) > ATH10K_HTC_MBOX_MAX_PAYLOAD_LENGTH) {
+ ath10k_warn(ar, "payload length %d exceeds max htc length: %zu\n",
+ le16_to_cpu(htc_hdr->len),
+ ATH10K_HTC_MBOX_MAX_PAYLOAD_LENGTH);
+ ret = -ENOMEM;
+
+ ath10k_core_start_recovery(ar);
+ ath10k_warn(ar, "exceeds length, start recovery\n");
+
+ goto err;
+ }
+
+ act_len = le16_to_cpu(htc_hdr->len) + sizeof(*htc_hdr);
+ full_len = ath10k_sdio_calc_txrx_padded_len(ar_sdio, act_len);
+
+ if (full_len > ATH10K_SDIO_MAX_BUFFER_SIZE) {
+ ath10k_warn(ar, "rx buffer requested with invalid htc_hdr length (%d, 0x%x): %d\n",
+ htc_hdr->eid, htc_hdr->flags,
+ le16_to_cpu(htc_hdr->len));
+ ret = -EINVAL;
+ goto err;
+ }
+
+ if (ath10k_htc_get_bundle_count(
+ ar->htc.max_msgs_per_htc_bundle, htc_hdr->flags)) {
+ /* HTC header indicates that every packet to follow
+ * has the same padded length so that it can be
+ * optimally fetched as a full bundle.
+ */
+ size_t bndl_cnt;
+
+ ret = ath10k_sdio_mbox_alloc_bundle(ar,
+ &ar_sdio->rx_pkts[pkt_cnt],
+ htc_hdr,
+ full_len,
+ act_len,
+ &bndl_cnt);
+
+ if (ret) {
+ ath10k_warn(ar, "failed to allocate a bundle: %d\n",
+ ret);
+ goto err;
+ }
+
+ pkt_cnt += bndl_cnt;
+
+ /* next buffer will be the last in the bundle */
+ last_in_bundle = true;
+ }
+
+ /* Allocate an skb for the packet. If the packet had the
+ * ATH10K_HTC_FLAG_BUNDLE_MASK flag set, all bundled
+ * packet skbs have been allocated in the previous step.
+ */
+ if (htc_hdr->flags & ATH10K_HTC_FLAGS_RECV_1MORE_BLOCK)
+ full_len += ATH10K_HIF_MBOX_BLOCK_SIZE;
+
+ ret = ath10k_sdio_mbox_alloc_rx_pkt(&ar_sdio->rx_pkts[pkt_cnt],
+ act_len,
+ full_len,
+ last_in_bundle,
+ last_in_bundle);
+ if (ret) {
+ ath10k_warn(ar, "alloc_rx_pkt error %d\n", ret);
+ goto err;
+ }
+
+ pkt_cnt++;
+ }
+
+ ar_sdio->n_rx_pkts = pkt_cnt;
+
+ return 0;
+
+err:
+ for (i = 0; i < ATH10K_SDIO_MAX_RX_MSGS; i++) {
+ if (!ar_sdio->rx_pkts[i].alloc_len)
+ break;
+ ath10k_sdio_mbox_free_rx_pkt(&ar_sdio->rx_pkts[i]);
+ }
+
+ return ret;
+}
+
+static int ath10k_sdio_mbox_rx_fetch(struct ath10k *ar)
+{
+ struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+ struct ath10k_sdio_rx_data *pkt = &ar_sdio->rx_pkts[0];
+ struct sk_buff *skb = pkt->skb;
+ struct ath10k_htc_hdr *htc_hdr;
+ int ret;
+
+ ret = ath10k_sdio_readsb(ar, ar_sdio->mbox_info.htc_addr,
+ skb->data, pkt->alloc_len);
+ if (ret)
+ goto err;
+
+ htc_hdr = (struct ath10k_htc_hdr *)skb->data;
+ pkt->act_len = le16_to_cpu(htc_hdr->len) + sizeof(*htc_hdr);
+
+ if (pkt->act_len > pkt->alloc_len) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ skb_put(skb, pkt->act_len);
+ return 0;
+
+err:
+ ar_sdio->n_rx_pkts = 0;
+ ath10k_sdio_mbox_free_rx_pkt(pkt);
+
+ return ret;
+}
+
+static int ath10k_sdio_mbox_rx_fetch_bundle(struct ath10k *ar)
+{
+ struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+ struct ath10k_sdio_rx_data *pkt;
+ struct ath10k_htc_hdr *htc_hdr;
+ int ret, i;
+ u32 pkt_offset, virt_pkt_len;
+
+ virt_pkt_len = 0;
+ for (i = 0; i < ar_sdio->n_rx_pkts; i++)
+ virt_pkt_len += ar_sdio->rx_pkts[i].alloc_len;
+
+ if (virt_pkt_len > ATH10K_SDIO_VSG_BUF_SIZE) {
+ ath10k_warn(ar, "sdio vsg buffer size limit: %d\n", virt_pkt_len);
+ ret = -E2BIG;
+ goto err;
+ }
+
+ ret = ath10k_sdio_readsb(ar, ar_sdio->mbox_info.htc_addr,
+ ar_sdio->vsg_buffer, virt_pkt_len);
+ if (ret) {
+ ath10k_warn(ar, "failed to read bundle packets: %d", ret);
+ goto err;
+ }
+
+ pkt_offset = 0;
+ for (i = 0; i < ar_sdio->n_rx_pkts; i++) {
+ pkt = &ar_sdio->rx_pkts[i];
+ htc_hdr = (struct ath10k_htc_hdr *)(ar_sdio->vsg_buffer + pkt_offset);
+ pkt->act_len = le16_to_cpu(htc_hdr->len) + sizeof(*htc_hdr);
+
+ if (pkt->act_len > pkt->alloc_len) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ skb_put_data(pkt->skb, htc_hdr, pkt->act_len);
+ pkt_offset += pkt->alloc_len;
+ }
+
+ return 0;
+
+err:
+ /* Free all packets that were not successfully fetched. */
+ for (i = 0; i < ar_sdio->n_rx_pkts; i++)
+ ath10k_sdio_mbox_free_rx_pkt(&ar_sdio->rx_pkts[i]);
+
+ ar_sdio->n_rx_pkts = 0;
+
+ return ret;
+}
+
+/* This is the timeout for mailbox processing done in the sdio irq
+ * handler. The timeout is deliberately set quite high since SDIO dump logs
+ * over the serial port (if enabled) can add substantial overhead to the
+ * processing.
+ */
+#define SDIO_MBOX_PROCESSING_TIMEOUT_HZ (20 * HZ)
+
+static int ath10k_sdio_mbox_rxmsg_pending_handler(struct ath10k *ar,
+ u32 msg_lookahead, bool *done)
+{
+ struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+ u32 lookaheads[ATH10K_SDIO_MAX_RX_MSGS];
+ int n_lookaheads = 1;
+ unsigned long timeout;
+ int ret;
+
+ *done = true;
+
+ /* Copy the lookahead obtained from the HTC register table into our
+ * temp array as a start value.
+ */
+ lookaheads[0] = msg_lookahead;
+
+ timeout = jiffies + SDIO_MBOX_PROCESSING_TIMEOUT_HZ;
+ do {
+ /* Try to allocate as many HTC RX packets as indicated by
+ * n_lookaheads.
+ */
+ ret = ath10k_sdio_mbox_rx_alloc(ar, lookaheads,
+ n_lookaheads);
+ if (ret)
+ break;
+
+ if (ar_sdio->n_rx_pkts >= 2)
+ /* A recv bundle was detected; force the IRQ status
+ * to be re-checked.
+ */
+ *done = false;
+
+ if (ar_sdio->n_rx_pkts > 1)
+ ret = ath10k_sdio_mbox_rx_fetch_bundle(ar);
+ else
+ ret = ath10k_sdio_mbox_rx_fetch(ar);
+
+ /* Process fetched packets. This will potentially update
+ * n_lookaheads depending on if the packets contain lookahead
+ * reports.
+ */
+ n_lookaheads = 0;
+ ret = ath10k_sdio_mbox_rx_process_packets(ar,
+ lookaheads,
+ &n_lookaheads);
+
+ if (!n_lookaheads || ret)
+ break;
+
+ /* For SYNCH processing, if we get here, we are running
+ * through the loop again due to updated lookaheads. Set
+ * the flag that we should re-check IRQ status registers
+ * again before leaving IRQ processing; this can net better
+ * performance in high throughput situations.
+ */
+ *done = false;
+ } while (time_before(jiffies, timeout));
+
+ if (ret && (ret != -ECANCELED))
+ ath10k_warn(ar, "failed to get pending recv messages: %d\n",
+ ret);
+
+ return ret;
+}
+
+static int ath10k_sdio_mbox_proc_dbg_intr(struct ath10k *ar)
+{
+ u32 val;
+ int ret;
+
+ /* TODO: Add firmware crash handling */
+ ath10k_warn(ar, "firmware crashed\n");
+
+ /* read counter to clear the interrupt, the debug error interrupt is
+ * counter 0.
+ */
+ ret = ath10k_sdio_read32(ar, MBOX_COUNT_DEC_ADDRESS, &val);
+ if (ret)
+ ath10k_warn(ar, "failed to clear debug interrupt: %d\n", ret);
+
+ return ret;
+}
+
+static int ath10k_sdio_mbox_proc_counter_intr(struct ath10k *ar)
+{
+ struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+ struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
+ u8 counter_int_status;
+ int ret;
+
+ mutex_lock(&irq_data->mtx);
+ counter_int_status = irq_data->irq_proc_reg->counter_int_status &
+ irq_data->irq_en_reg->cntr_int_status_en;
+
+ /* NOTE: other modules like GMBOX may use the counter interrupt for
+ * credit flow control on other counters; we only need to check for
+ * the debug assertion counter interrupt.
+ */
+ if (counter_int_status & ATH10K_SDIO_TARGET_DEBUG_INTR_MASK)
+ ret = ath10k_sdio_mbox_proc_dbg_intr(ar);
+ else
+ ret = 0;
+
+ mutex_unlock(&irq_data->mtx);
+
+ return ret;
+}
+
+static int ath10k_sdio_mbox_proc_err_intr(struct ath10k *ar)
+{
+ struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+ struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
+ u8 error_int_status;
+ int ret;
+
+ ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio error interrupt\n");
+
+ error_int_status = irq_data->irq_proc_reg->error_int_status & 0x0F;
+ if (!error_int_status) {
+ ath10k_warn(ar, "invalid error interrupt status: 0x%x\n",
+ error_int_status);
+ return -EIO;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_SDIO,
+ "sdio error_int_status 0x%x\n", error_int_status);
+
+ if (FIELD_GET(MBOX_ERROR_INT_STATUS_WAKEUP_MASK,
+ error_int_status))
+ ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio interrupt error wakeup\n");
+
+ if (FIELD_GET(MBOX_ERROR_INT_STATUS_RX_UNDERFLOW_MASK,
+ error_int_status))
+ ath10k_warn(ar, "rx underflow interrupt error\n");
+
+ if (FIELD_GET(MBOX_ERROR_INT_STATUS_TX_OVERFLOW_MASK,
+ error_int_status))
+ ath10k_warn(ar, "tx overflow interrupt error\n");
+
+ /* Clear the interrupt */
+ irq_data->irq_proc_reg->error_int_status &= ~error_int_status;
+
+ /* Set the W1C value to clear the interrupt; this hits the register first */
+ ret = ath10k_sdio_writesb32(ar, MBOX_ERROR_INT_STATUS_ADDRESS,
+ error_int_status);
+ if (ret) {
+ ath10k_warn(ar, "unable to write to error int status address: %d\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath10k_sdio_mbox_proc_cpu_intr(struct ath10k *ar)
+{
+ struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+ struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
+ u8 cpu_int_status;
+ int ret;
+
+ mutex_lock(&irq_data->mtx);
+ cpu_int_status = irq_data->irq_proc_reg->cpu_int_status &
+ irq_data->irq_en_reg->cpu_int_status_en;
+ if (!cpu_int_status) {
+ ath10k_warn(ar, "CPU interrupt status is zero\n");
+ ret = -EIO;
+ goto out;
+ }
+
+ /* Clear the interrupt */
+ irq_data->irq_proc_reg->cpu_int_status &= ~cpu_int_status;
+
+ /* Set up the register transfer buffer to hit the register 4 times;
+ * this is done to make the access 4-byte aligned to mitigate issues
+ * with host bus interconnects that restrict bus transfer lengths to
+ * a multiple of 4 bytes.
+ *
+ * Set the W1C value to clear the interrupt; this hits the register
+ * first.
+ */
+ ret = ath10k_sdio_writesb32(ar, MBOX_CPU_INT_STATUS_ADDRESS,
+ cpu_int_status);
+ if (ret) {
+ ath10k_warn(ar, "unable to write to cpu interrupt status address: %d\n",
+ ret);
+ goto out;
+ }
+
+out:
+ mutex_unlock(&irq_data->mtx);
+ if (cpu_int_status & MBOX_CPU_STATUS_ENABLE_ASSERT_MASK)
+ ath10k_sdio_fw_crashed_dump(ar);
+
+ return ret;
+}
+
+static int ath10k_sdio_mbox_read_int_status(struct ath10k *ar,
+ u8 *host_int_status,
+ u32 *lookahead)
+{
+ struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+ struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
+ struct ath10k_sdio_irq_proc_regs *irq_proc_reg = irq_data->irq_proc_reg;
+ struct ath10k_sdio_irq_enable_regs *irq_en_reg = irq_data->irq_en_reg;
+ u8 htc_mbox = FIELD_PREP(ATH10K_HTC_MAILBOX_MASK, 1);
+ int ret;
+
+ mutex_lock(&irq_data->mtx);
+
+ *lookahead = 0;
+ *host_int_status = 0;
+
+ /* int_status_en is supposed to be non-zero, otherwise interrupts
+ * shouldn't be enabled. There is however a short time frame during
+ * initialization, between irq registration and int_status_en setup,
+ * where this can happen.
+ * We silently ignore this condition.
+ */
+ if (!irq_en_reg->int_status_en) {
+ ret = 0;
+ goto out;
+ }
+
+ /* Read the first sizeof(struct ath10k_irq_proc_registers)
+ * bytes of the HTC register table. This
+ * will yield us the value of different int status
+ * registers and the lookahead registers.
+ */
+ ret = ath10k_sdio_read(ar, MBOX_HOST_INT_STATUS_ADDRESS,
+ irq_proc_reg, sizeof(*irq_proc_reg));
+ if (ret) {
+ ath10k_core_start_recovery(ar);
+ ath10k_warn(ar, "read int status fail, start recovery\n");
+ goto out;
+ }
+
+ /* Update only those registers that are enabled */
+ *host_int_status = irq_proc_reg->host_int_status &
+ irq_en_reg->int_status_en;
+
+ /* Look at mbox status */
+ if (!(*host_int_status & htc_mbox)) {
+ *lookahead = 0;
+ ret = 0;
+ goto out;
+ }
+
+ /* Mask out the pending mbox value; we use the lookahead as
+ * the real flag for mbox processing.
+ */
+ *host_int_status &= ~htc_mbox;
+ if (irq_proc_reg->rx_lookahead_valid & htc_mbox) {
+ *lookahead = le32_to_cpu(
+ irq_proc_reg->rx_lookahead[ATH10K_HTC_MAILBOX]);
+ if (!*lookahead)
+ ath10k_warn(ar, "sdio mbox lookahead is zero\n");
+ }
+
+out:
+ mutex_unlock(&irq_data->mtx);
+ return ret;
+}
+
+static int ath10k_sdio_mbox_proc_pending_irqs(struct ath10k *ar,
+ bool *done)
+{
+ u8 host_int_status;
+ u32 lookahead;
+ int ret;
+
+ /* NOTE: HIF implementation guarantees that the context of this
+ * call allows us to perform SYNCHRONOUS I/O, that is we can block,
+ * sleep or call any API that can block or switch thread/task
+ * contexts. This is a fully schedulable context.
+ */
+
+ ret = ath10k_sdio_mbox_read_int_status(ar,
+ &host_int_status,
+ &lookahead);
+ if (ret) {
+ *done = true;
+ goto out;
+ }
+
+ if (!host_int_status && !lookahead) {
+ ret = 0;
+ *done = true;
+ goto out;
+ }
+
+ if (lookahead) {
+ ath10k_dbg(ar, ATH10K_DBG_SDIO,
+ "sdio pending mailbox msg lookahead 0x%08x\n",
+ lookahead);
+
+ ret = ath10k_sdio_mbox_rxmsg_pending_handler(ar,
+ lookahead,
+ done);
+ if (ret)
+ goto out;
+ }
+
+ /* now, handle the rest of the interrupts */
+ ath10k_dbg(ar, ATH10K_DBG_SDIO,
+ "sdio host_int_status 0x%x\n", host_int_status);
+
+ if (FIELD_GET(MBOX_HOST_INT_STATUS_CPU_MASK, host_int_status)) {
+ /* CPU Interrupt */
+ ret = ath10k_sdio_mbox_proc_cpu_intr(ar);
+ if (ret)
+ goto out;
+ }
+
+ if (FIELD_GET(MBOX_HOST_INT_STATUS_ERROR_MASK, host_int_status)) {
+ /* Error Interrupt */
+ ret = ath10k_sdio_mbox_proc_err_intr(ar);
+ if (ret)
+ goto out;
+ }
+
+ if (FIELD_GET(MBOX_HOST_INT_STATUS_COUNTER_MASK, host_int_status))
+ /* Counter Interrupt */
+ ret = ath10k_sdio_mbox_proc_counter_intr(ar);
+
+ ret = 0;
+
+out:
+ /* An optimization to avoid reading the IRQ status registers
+ * unnecessarily: if the upper layers determine that we are in a
+ * low-throughput mode, we can rely on taking another interrupt
+ * rather than re-checking the status registers, since each
+ * re-check can re-wake the target.
+ *
+ * NOTE: host interfaces that detect pending mbox messages at the
+ * HIF level cannot use this optimization due to possible side
+ * effects; SPI requires the host to drain all messages from the
+ * mailbox before exiting the ISR routine.
+ */
+
+ ath10k_dbg(ar, ATH10K_DBG_SDIO,
+ "sdio pending irqs done %d status %d",
+ *done, ret);
+
+ return ret;
+}
+
+static void ath10k_sdio_set_mbox_info(struct ath10k *ar)
+{
+ struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+ struct ath10k_mbox_info *mbox_info = &ar_sdio->mbox_info;
+ u16 device = ar_sdio->func->device, dev_id_base, dev_id_chiprev;
+
+ mbox_info->htc_addr = ATH10K_HIF_MBOX_BASE_ADDR;
+ mbox_info->block_size = ATH10K_HIF_MBOX_BLOCK_SIZE;
+ mbox_info->block_mask = ATH10K_HIF_MBOX_BLOCK_SIZE - 1;
+ mbox_info->gmbox_addr = ATH10K_HIF_GMBOX_BASE_ADDR;
+ mbox_info->gmbox_sz = ATH10K_HIF_GMBOX_WIDTH;
+
+ mbox_info->ext_info[0].htc_ext_addr = ATH10K_HIF_MBOX0_EXT_BASE_ADDR;
+
+ dev_id_base = (device & 0x0F00);
+ dev_id_chiprev = (device & 0x00FF);
+ switch (dev_id_base) {
+ case (SDIO_DEVICE_ID_ATHEROS_AR6005 & 0x0F00):
+ if (dev_id_chiprev < 4)
+ mbox_info->ext_info[0].htc_ext_sz =
+ ATH10K_HIF_MBOX0_EXT_WIDTH;
+ else
+ /* from QCA6174 2.0 (0x504), the width has been
+ * extended to 56K
+ */
+ mbox_info->ext_info[0].htc_ext_sz =
+ ATH10K_HIF_MBOX0_EXT_WIDTH_ROME_2_0;
+ break;
+ case (SDIO_DEVICE_ID_ATHEROS_QCA9377 & 0x0F00):
+ mbox_info->ext_info[0].htc_ext_sz =
+ ATH10K_HIF_MBOX0_EXT_WIDTH_ROME_2_0;
+ break;
+ default:
+ mbox_info->ext_info[0].htc_ext_sz =
+ ATH10K_HIF_MBOX0_EXT_WIDTH;
+ }
+
+ mbox_info->ext_info[1].htc_ext_addr =
+ mbox_info->ext_info[0].htc_ext_addr +
+ mbox_info->ext_info[0].htc_ext_sz +
+ ATH10K_HIF_MBOX_DUMMY_SPACE_SIZE;
+ mbox_info->ext_info[1].htc_ext_sz = ATH10K_HIF_MBOX1_EXT_WIDTH;
+}
+
+/* BMI functions */
+
+static int ath10k_sdio_bmi_credits(struct ath10k *ar)
+{
+ u32 addr, cmd_credits;
+ unsigned long timeout;
+ int ret;
+
+ /* Read the counter register to get the command credits */
+ addr = MBOX_COUNT_DEC_ADDRESS + ATH10K_HIF_MBOX_NUM_MAX * 4;
+ timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;
+ cmd_credits = 0;
+
+ while (time_before(jiffies, timeout) && !cmd_credits) {
+ /* Hit the credit counter with a 4-byte access, the first byte
+ * read will hit the counter and cause a decrement, while the
+ * remaining 3 bytes have no effect. The rationale behind this
+ * is to make all HIF accesses 4-byte aligned.
+ */
+ ret = ath10k_sdio_read32(ar, addr, &cmd_credits);
+ if (ret) {
+ ath10k_warn(ar,
+ "unable to decrement the command credit count register: %d\n",
+ ret);
+ return ret;
+ }
+
+ /* The counter is only 8 bits.
+ * Ignore anything in the upper 3 bytes
+ */
+ cmd_credits &= 0xFF;
+ }
+
+ if (!cmd_credits) {
+ ath10k_warn(ar, "bmi communication timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int ath10k_sdio_bmi_get_rx_lookahead(struct ath10k *ar)
+{
+ unsigned long timeout;
+ u32 rx_word;
+ int ret;
+
+ timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;
+ rx_word = 0;
+
+ while ((time_before(jiffies, timeout)) && !rx_word) {
+ ret = ath10k_sdio_read32(ar,
+ MBOX_HOST_INT_STATUS_ADDRESS,
+ &rx_word);
+ if (ret) {
+ ath10k_warn(ar, "unable to read RX_LOOKAHEAD_VALID: %d\n", ret);
+ return ret;
+ }
+
+ /* all we really want is one bit */
+ rx_word &= 1;
+ }
+
+ if (!rx_word) {
+ ath10k_warn(ar, "bmi_recv_buf FIFO empty\n");
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static int ath10k_sdio_bmi_exchange_msg(struct ath10k *ar,
+ void *req, u32 req_len,
+ void *resp, u32 *resp_len)
+{
+ struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+ u32 addr;
+ int ret;
+
+ if (req) {
+ ret = ath10k_sdio_bmi_credits(ar);
+ if (ret)
+ return ret;
+
+ addr = ar_sdio->mbox_info.htc_addr;
+
+ memcpy(ar_sdio->bmi_buf, req, req_len);
+ ret = ath10k_sdio_write(ar, addr, ar_sdio->bmi_buf, req_len);
+ if (ret) {
+ ath10k_warn(ar,
+ "unable to send the bmi data to the device: %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ if (!resp || !resp_len)
+ /* No response expected */
+ return 0;
+
+ /* During normal bootup, small reads may be required.
+ * Rather than issue an HIF Read and then wait as the Target
+ * adds successive bytes to the FIFO, we wait here until
+ * we know that response data is available.
+ *
+ * This allows us to cleanly timeout on an unexpected
+ * Target failure rather than risk problems at the HIF level.
+ * In particular, this avoids SDIO timeouts and possibly garbage
+ * data on some host controllers. And on an interconnect
+ * such as Compact Flash (as well as some SDIO masters) that
+ * provides no indication of a data timeout, it avoids a
+ * potential hang or garbage response.
+ *
+ * Synchronization is more difficult for reads larger than the
+ * size of the MBOX FIFO (128B), because the Target is unable
+ * to push the 129th byte of data until AFTER the Host posts an
+ * HIF Read and removes some FIFO data. So for large reads the
+ * Host proceeds to post an HIF Read BEFORE all the data is
+ * actually available to read. Fortunately, large BMI reads do
+ * not occur in practice -- they're supported for debug/development.
+ *
+ * So Host/Target BMI synchronization is divided into these cases:
+ * CASE 1: length < 4
+ * Should not happen
+ *
+ * CASE 2: 4 <= length <= 128
+ * Wait for first 4 bytes to be in FIFO
+ * If CONSERVATIVE_BMI_READ is enabled, also wait for
+ * a BMI command credit, which indicates that the ENTIRE
+ * response is available in the FIFO
+ *
+ * CASE 3: length > 128
+ * Wait for the first 4 bytes to be in FIFO
+ *
+ * For most uses, a small timeout should be sufficient and we will
+ * usually see a response quickly; but there may be some unusual
+ * (debug) cases of BMI_EXECUTE where we want a larger timeout.
+ * For now, we use an unbounded busy loop while waiting for
+ * BMI_EXECUTE.
+ *
+ * If BMI_EXECUTE ever needs to support longer-latency execution,
+ * especially in production, this code needs to be enhanced to sleep
+ * and yield. Also note that BMI_COMMUNICATION_TIMEOUT is currently
+ * a function of Host processor speed.
+ */
+ ret = ath10k_sdio_bmi_get_rx_lookahead(ar);
+ if (ret)
+ return ret;
+
+ /* We always read from the start of the mbox address */
+ addr = ar_sdio->mbox_info.htc_addr;
+ ret = ath10k_sdio_read(ar, addr, ar_sdio->bmi_buf, *resp_len);
+ if (ret) {
+ ath10k_warn(ar,
+ "unable to read the bmi data from the device: %d\n",
+ ret);
+ return ret;
+ }
+
+ memcpy(resp, ar_sdio->bmi_buf, *resp_len);
+
+ return 0;
+}
+
+/* sdio async handling functions */
+
+static struct ath10k_sdio_bus_request
+*ath10k_sdio_alloc_busreq(struct ath10k *ar)
+{
+ struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+ struct ath10k_sdio_bus_request *bus_req;
+
+ spin_lock_bh(&ar_sdio->lock);
+
+ if (list_empty(&ar_sdio->bus_req_freeq)) {
+ bus_req = NULL;
+ goto out;
+ }
+
+ bus_req = list_first_entry(&ar_sdio->bus_req_freeq,
+ struct ath10k_sdio_bus_request, list);
+ list_del(&bus_req->list);
+
+out:
+ spin_unlock_bh(&ar_sdio->lock);
+ return bus_req;
+}
+
+static void ath10k_sdio_free_bus_req(struct ath10k *ar,
+ struct ath10k_sdio_bus_request *bus_req)
+{
+ struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+
+ memset(bus_req, 0, sizeof(*bus_req));
+
+ spin_lock_bh(&ar_sdio->lock);
+ list_add_tail(&bus_req->list, &ar_sdio->bus_req_freeq);
+ spin_unlock_bh(&ar_sdio->lock);
+}
+
+static void __ath10k_sdio_write_async(struct ath10k *ar,
+ struct ath10k_sdio_bus_request *req)
+{
+ struct ath10k_htc_ep *ep;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = req->skb;
+ ret = ath10k_sdio_write(ar, req->address, skb->data, skb->len);
+ if (ret)
+ ath10k_warn(ar, "failed to write skb to 0x%x asynchronously: %d",
+ req->address, ret);
+
+ if (req->htc_msg) {
+ ep = &ar->htc.endpoint[req->eid];
+ ath10k_htc_notify_tx_completion(ep, skb);
+ } else if (req->comp) {
+ complete(req->comp);
+ }
+
+ ath10k_sdio_free_bus_req(ar, req);
+}
+
+/* To improve throughput, use a workqueue to deliver packets to the HTC
+ * layer; this way the SDIO bus is utilised much better.
+ */
+static void ath10k_rx_indication_async_work(struct work_struct *work)
+{
+ struct ath10k_sdio *ar_sdio = container_of(work, struct ath10k_sdio,
+ async_work_rx);
+ struct ath10k *ar = ar_sdio->ar;
+ struct ath10k_htc_ep *ep;
+ struct ath10k_skb_rxcb *cb;
+ struct sk_buff *skb;
+
+ while (true) {
+ skb = skb_dequeue(&ar_sdio->rx_head);
+ if (!skb)
+ break;
+ cb = ATH10K_SKB_RXCB(skb);
+ ep = &ar->htc.endpoint[cb->eid];
+ ep->ep_ops.ep_rx_complete(ar, skb);
+ }
+
+ if (test_bit(ATH10K_FLAG_CORE_REGISTERED, &ar->dev_flags)) {
+ local_bh_disable();
+ napi_schedule(&ar->napi);
+ local_bh_enable();
+ }
+}
+
+static int ath10k_sdio_read_rtc_state(struct ath10k_sdio *ar_sdio, unsigned char *state)
+{
+ struct ath10k *ar = ar_sdio->ar;
+ unsigned char rtc_state = 0;
+ int ret = 0;
+
+ rtc_state = sdio_f0_readb(ar_sdio->func, ATH10K_CIS_RTC_STATE_ADDR, &ret);
+ if (ret) {
+ ath10k_warn(ar, "failed to read rtc state: %d\n", ret);
+ return ret;
+ }
+
+ *state = rtc_state & 0x3;
+
+ return ret;
+}
+
+static int ath10k_sdio_set_mbox_sleep(struct ath10k *ar, bool enable_sleep)
+{
+ struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+ u32 val;
+ int retry = ATH10K_CIS_READ_RETRY, ret = 0;
+ unsigned char rtc_state = 0;
+
+ sdio_claim_host(ar_sdio->func);
+
+ ret = ath10k_sdio_read32(ar, ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL, &val);
+ if (ret) {
+ ath10k_warn(ar, "failed to read fifo/chip control register: %d\n",
+ ret);
+ goto release;
+ }
+
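+ /* Bit 16 of the FIFO/chip control register is the disable-sleep
+ * bit (cf. the _DISABLE_SLEEP_ON/OFF masks in sdio.h): clearing
+ * it allows the chip to sleep, setting it keeps the chip awake.
+ */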
+ if (enable_sleep) {
+ val &= ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_OFF;
+ ar_sdio->mbox_state = SDIO_MBOX_SLEEP_STATE;
+ } else {
+ val |= ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_ON;
+ ar_sdio->mbox_state = SDIO_MBOX_AWAKE_STATE;
+ }
+
+ ret = ath10k_sdio_write32(ar, ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL, val);
+ if (ret) {
+ ath10k_warn(ar, "failed to write to FIFO_TIMEOUT_AND_CHIP_CONTROL: %d",
+ ret);
+ }
+
+ if (!enable_sleep) {
+ do {
+ udelay(ATH10K_CIS_READ_WAIT_4_RTC_CYCLE_IN_US);
+ ret = ath10k_sdio_read_rtc_state(ar_sdio, &rtc_state);
+
+ if (ret) {
+ ath10k_warn(ar, "failed to disable mbox sleep: %d", ret);
+ break;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio read rtc state: %d\n",
+ rtc_state);
+
+ if (rtc_state == ATH10K_CIS_RTC_STATE_ON)
+ break;
+
+ udelay(ATH10K_CIS_XTAL_SETTLE_DURATION_IN_US);
+ retry--;
+ } while (retry > 0);
+ }
+
+release:
+ sdio_release_host(ar_sdio->func);
+
+ return ret;
+}
+
+static void ath10k_sdio_sleep_timer_handler(struct timer_list *t)
+{
+ struct ath10k_sdio *ar_sdio = timer_container_of(ar_sdio, t,
+ sleep_timer);
+
+ ar_sdio->mbox_state = SDIO_MBOX_REQUEST_TO_SLEEP_STATE;
+ queue_work(ar_sdio->workqueue, &ar_sdio->wr_async_work);
+}
+
+static void ath10k_sdio_write_async_work(struct work_struct *work)
+{
+ struct ath10k_sdio *ar_sdio = container_of(work, struct ath10k_sdio,
+ wr_async_work);
+ struct ath10k *ar = ar_sdio->ar;
+ struct ath10k_sdio_bus_request *req, *tmp_req;
+ struct ath10k_mbox_info *mbox_info = &ar_sdio->mbox_info;
+
+ spin_lock_bh(&ar_sdio->wr_async_lock);
+
+ list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) {
+ list_del(&req->list);
+ spin_unlock_bh(&ar_sdio->wr_async_lock);
+
+ if (req->address >= mbox_info->htc_addr &&
+ ar_sdio->mbox_state == SDIO_MBOX_SLEEP_STATE) {
+ ath10k_sdio_set_mbox_sleep(ar, false);
+ mod_timer(&ar_sdio->sleep_timer, jiffies +
+ msecs_to_jiffies(ATH10K_MIN_SLEEP_INACTIVITY_TIME_MS));
+ }
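+ /* A mbox write that finds the chip asleep wakes it and arms the
+ * sleep timer; once ATH10K_MIN_SLEEP_INACTIVITY_TIME_MS passes
+ * with no further wake-up, the timer handler requests sleep
+ * again, which is acted upon at the bottom of this function.
+ */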
+
+ __ath10k_sdio_write_async(ar, req);
+ spin_lock_bh(&ar_sdio->wr_async_lock);
+ }
+
+ spin_unlock_bh(&ar_sdio->wr_async_lock);
+
+ if (ar_sdio->mbox_state == SDIO_MBOX_REQUEST_TO_SLEEP_STATE)
+ ath10k_sdio_set_mbox_sleep(ar, true);
+}
+
+static int ath10k_sdio_prep_async_req(struct ath10k *ar, u32 addr,
+ struct sk_buff *skb,
+ struct completion *comp,
+ bool htc_msg, enum ath10k_htc_ep_id eid)
+{
+ struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+ struct ath10k_sdio_bus_request *bus_req;
+
+ /* Allocate a bus request for the message and queue it on the
+ * SDIO workqueue.
+ */
+ bus_req = ath10k_sdio_alloc_busreq(ar);
+ if (!bus_req) {
+ ath10k_warn(ar,
+ "unable to allocate bus request for async request\n");
+ return -ENOMEM;
+ }
+
+ bus_req->skb = skb;
+ bus_req->eid = eid;
+ bus_req->address = addr;
+ bus_req->htc_msg = htc_msg;
+ bus_req->comp = comp;
+
+ spin_lock_bh(&ar_sdio->wr_async_lock);
+ list_add_tail(&bus_req->list, &ar_sdio->wr_asyncq);
+ spin_unlock_bh(&ar_sdio->wr_async_lock);
+
+ return 0;
+}
+
+/* IRQ handler */
+
+static void ath10k_sdio_irq_handler(struct sdio_func *func)
+{
+ struct ath10k_sdio *ar_sdio = sdio_get_drvdata(func);
+ struct ath10k *ar = ar_sdio->ar;
+ unsigned long timeout;
+ bool done = false;
+ int ret;
+
+ /* Release the host during interrupts so we can pick it back up when
+ * we process commands.
+ */
+ sdio_release_host(ar_sdio->func);
+
+ timeout = jiffies + ATH10K_SDIO_HIF_COMMUNICATION_TIMEOUT_HZ;
+ do {
+ ret = ath10k_sdio_mbox_proc_pending_irqs(ar, &done);
+ if (ret)
+ break;
+ } while (time_before(jiffies, timeout) && !done);
+
+ ath10k_mac_tx_push_pending(ar);
+
+ sdio_claim_host(ar_sdio->func);
+
+ if (ret && ret != -ECANCELED)
+ ath10k_warn(ar, "failed to process pending SDIO interrupts: %d\n",
+ ret);
+}
+
+/* sdio HIF functions */
+
+static int ath10k_sdio_disable_intrs(struct ath10k *ar)
+{
+ struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+ struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
+ struct ath10k_sdio_irq_enable_regs *regs = irq_data->irq_en_reg;
+ int ret;
+
+ mutex_lock(&irq_data->mtx);
+
+ memset(regs, 0, sizeof(*regs));
+ ret = ath10k_sdio_write(ar, MBOX_INT_STATUS_ENABLE_ADDRESS,
+ &regs->int_status_en, sizeof(*regs));
+ if (ret)
+ ath10k_warn(ar, "unable to disable sdio interrupts: %d\n", ret);
+
+ mutex_unlock(&irq_data->mtx);
+
+ return ret;
+}
+
+static int ath10k_sdio_hif_power_up(struct ath10k *ar,
+ enum ath10k_firmware_mode fw_mode)
+{
+ struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+ struct sdio_func *func = ar_sdio->func;
+ int ret;
+
+ if (!ar_sdio->is_disabled)
+ return 0;
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "sdio power on\n");
+
+ ret = ath10k_sdio_config(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to config sdio: %d\n", ret);
+ return ret;
+ }
+
+ sdio_claim_host(func);
+
+ ret = sdio_enable_func(func);
+ if (ret) {
+ ath10k_warn(ar, "unable to enable sdio function: %d)\n", ret);
+ sdio_release_host(func);
+ return ret;
+ }
+
+ sdio_release_host(func);
+
+ /* Wait for hardware to initialise. It should take a lot less than
+ * 20 ms but let's be conservative here.
+ */
+ msleep(20);
+
+ ar_sdio->is_disabled = false;
+
+ ret = ath10k_sdio_disable_intrs(ar);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void ath10k_sdio_hif_power_down(struct ath10k *ar)
+{
+ struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+ int ret;
+
+ if (ar_sdio->is_disabled)
+ return;
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "sdio power off\n");
+
+ timer_delete_sync(&ar_sdio->sleep_timer);
+ ath10k_sdio_set_mbox_sleep(ar, true);
+
+ /* Disable the card */
+ sdio_claim_host(ar_sdio->func);
+
+ ret = sdio_disable_func(ar_sdio->func);
+ if (ret) {
+ ath10k_warn(ar, "unable to disable sdio function: %d\n", ret);
+ sdio_release_host(ar_sdio->func);
+ return;
+ }
+
+ ret = mmc_hw_reset(ar_sdio->func->card);
+ if (ret)
+ ath10k_warn(ar, "unable to reset sdio: %d\n", ret);
+
+ sdio_release_host(ar_sdio->func);
+
+ ar_sdio->is_disabled = true;
+}
+
+static int ath10k_sdio_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
+ struct ath10k_hif_sg_item *items, int n_items)
+{
+ struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+ enum ath10k_htc_ep_id eid;
+ struct sk_buff *skb;
+ int ret, i;
+
+ eid = pipe_id_to_eid(pipe_id);
+
+ for (i = 0; i < n_items; i++) {
+ size_t padded_len;
+ u32 address;
+
+ skb = items[i].transfer_context;
+ padded_len = ath10k_sdio_calc_txrx_padded_len(ar_sdio,
+ skb->len);
+ skb_trim(skb, padded_len);
+
+ /* Write TX data to the end of the mbox address space */
+ address = ar_sdio->mbox_addr[eid] + ar_sdio->mbox_size[eid] -
+ skb->len;
+ ret = ath10k_sdio_prep_async_req(ar, address, skb,
+ NULL, true, eid);
+ if (ret)
+ return ret;
+ }
+
+ queue_work(ar_sdio->workqueue, &ar_sdio->wr_async_work);
+
+ return 0;
+}
+
+static int ath10k_sdio_enable_intrs(struct ath10k *ar)
+{
+ struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+ struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
+ struct ath10k_sdio_irq_enable_regs *regs = irq_data->irq_en_reg;
+ int ret;
+
+ mutex_lock(&irq_data->mtx);
+
+ /* Enable all but CPU interrupts */
+ regs->int_status_en = FIELD_PREP(MBOX_INT_STATUS_ENABLE_ERROR_MASK, 1) |
+ FIELD_PREP(MBOX_INT_STATUS_ENABLE_CPU_MASK, 1) |
+ FIELD_PREP(MBOX_INT_STATUS_ENABLE_COUNTER_MASK, 1);
+
+ /* NOTE: in some cases HIF can detect pending mbox messages;
+ * that detection is disabled for now.
+ */
+ regs->int_status_en |=
+ FIELD_PREP(MBOX_INT_STATUS_ENABLE_MBOX_DATA_MASK, 1);
+
+ /* Set up the CPU Interrupt Status Register; enable CPU sourced
+ * interrupt #0, which the target uses to report assertions.
+ */
+ regs->cpu_int_status_en = FIELD_PREP(MBOX_CPU_STATUS_ENABLE_ASSERT_MASK, 1);
+
+ /* Set up the Error Interrupt status Register */
+ regs->err_int_status_en =
+ FIELD_PREP(MBOX_ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK, 1) |
+ FIELD_PREP(MBOX_ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK, 1);
+
+ /* Enable Counter interrupt status register to get fatal errors for
+ * debugging.
+ */
+ regs->cntr_int_status_en =
+ FIELD_PREP(MBOX_COUNTER_INT_STATUS_ENABLE_BIT_MASK,
+ ATH10K_SDIO_TARGET_DEBUG_INTR_MASK);
+
+ ret = ath10k_sdio_write(ar, MBOX_INT_STATUS_ENABLE_ADDRESS,
+ &regs->int_status_en, sizeof(*regs));
+ if (ret)
+ ath10k_warn(ar,
+ "failed to update mbox interrupt status register : %d\n",
+ ret);
+
+ mutex_unlock(&irq_data->mtx);
+ return ret;
+}
+
+/* HIF diagnostics */
+
+static int ath10k_sdio_hif_diag_read(struct ath10k *ar, u32 address, void *buf,
+ size_t buf_len)
+{
+ int ret;
+ void *mem;
+
+ mem = kzalloc(buf_len, GFP_KERNEL);
+ if (!mem)
+ return -ENOMEM;
+
+ /* set window register to start read cycle */
+ ret = ath10k_sdio_write32(ar, MBOX_WINDOW_READ_ADDR_ADDRESS, address);
+ if (ret) {
+ ath10k_warn(ar, "failed to set mbox window read address: %d", ret);
+ goto out;
+ }
+
+ /* read the data */
+ ret = ath10k_sdio_read(ar, MBOX_WINDOW_DATA_ADDRESS, mem, buf_len);
+ if (ret) {
+ ath10k_warn(ar, "failed to read from mbox window data address: %d\n",
+ ret);
+ goto out;
+ }
+
+ memcpy(buf, mem, buf_len);
+
+out:
+ kfree(mem);
+
+ return ret;
+}
+
+static int ath10k_sdio_diag_read32(struct ath10k *ar, u32 address,
+ u32 *value)
+{
+ __le32 *val;
+ int ret;
+
+ val = kzalloc(sizeof(*val), GFP_KERNEL);
+ if (!val)
+ return -ENOMEM;
+
+ ret = ath10k_sdio_hif_diag_read(ar, address, val, sizeof(*val));
+ if (ret)
+ goto out;
+
+ *value = __le32_to_cpu(*val);
+
+out:
+ kfree(val);
+
+ return ret;
+}
+
+static int ath10k_sdio_hif_diag_write_mem(struct ath10k *ar, u32 address,
+ const void *data, int nbytes)
+{
+ int ret;
+
+ /* set write data */
+ ret = ath10k_sdio_write(ar, MBOX_WINDOW_DATA_ADDRESS, data, nbytes);
+ if (ret) {
+ ath10k_warn(ar,
+ "failed to write 0x%p to mbox window data address: %d\n",
+ data, ret);
+ return ret;
+ }
+
+ /* set window register, which starts the write cycle */
+ ret = ath10k_sdio_write32(ar, MBOX_WINDOW_WRITE_ADDR_ADDRESS, address);
+ if (ret) {
+ ath10k_warn(ar, "failed to set mbox window write address: %d", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath10k_sdio_hif_start_post(struct ath10k *ar)
+{
+ struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+ u32 addr, val;
+ int ret = 0;
+
+ addr = host_interest_item_address(HI_ITEM(hi_acs_flags));
+
+ ret = ath10k_sdio_diag_read32(ar, addr, &val);
+ if (ret) {
+ ath10k_warn(ar, "unable to read hi_acs_flags : %d\n", ret);
+ return ret;
+ }
+
+ if (val & HI_ACS_FLAGS_SDIO_SWAP_MAILBOX_FW_ACK) {
+ ath10k_dbg(ar, ATH10K_DBG_SDIO,
+ "sdio mailbox swap service enabled\n");
+ ar_sdio->swap_mbox = true;
+ } else {
+ ath10k_dbg(ar, ATH10K_DBG_SDIO,
+ "sdio mailbox swap service disabled\n");
+ ar_sdio->swap_mbox = false;
+ }
+
+ ath10k_sdio_set_mbox_sleep(ar, true);
+
+ return 0;
+}
+
+static int ath10k_sdio_get_htt_tx_complete(struct ath10k *ar)
+{
+ u32 addr, val;
+ int ret;
+
+ addr = host_interest_item_address(HI_ITEM(hi_acs_flags));
+
+ ret = ath10k_sdio_diag_read32(ar, addr, &val);
+ if (ret) {
+ ath10k_warn(ar,
+ "unable to read hi_acs_flags for htt tx complete: %d\n", ret);
+ return ret;
+ }
+
+ ret = (val & HI_ACS_FLAGS_SDIO_REDUCE_TX_COMPL_FW_ACK);
+
+ ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio reduce tx complete fw%sack\n",
+ ret ? " " : " not ");
+
+ return ret;
+}
+
+/* HIF start/stop */
+
+static int ath10k_sdio_hif_start(struct ath10k *ar)
+{
+ struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+ int ret;
+
+ ath10k_core_napi_enable(ar);
+
+ /* Sleep 20 ms before HIF interrupts are disabled.
+ * This will give the target plenty of time to process the BMI done
+ * request before interrupts are disabled.
+ */
+ msleep(20);
+ ret = ath10k_sdio_disable_intrs(ar);
+ if (ret)
+ return ret;
+
+ /* eid 0 always uses the lower part of the extended mailbox address
+ * space (ext_info[0].htc_ext_addr).
+ */
+ ar_sdio->mbox_addr[0] = ar_sdio->mbox_info.ext_info[0].htc_ext_addr;
+ ar_sdio->mbox_size[0] = ar_sdio->mbox_info.ext_info[0].htc_ext_sz;
+
+ sdio_claim_host(ar_sdio->func);
+
+ /* Register the isr */
+ ret = sdio_claim_irq(ar_sdio->func, ath10k_sdio_irq_handler);
+ if (ret) {
+ ath10k_warn(ar, "failed to claim sdio interrupt: %d\n", ret);
+ sdio_release_host(ar_sdio->func);
+ return ret;
+ }
+
+ sdio_release_host(ar_sdio->func);
+
+ ret = ath10k_sdio_enable_intrs(ar);
+ if (ret)
+ ath10k_warn(ar, "failed to enable sdio interrupts: %d\n", ret);
+
+ /* Enable sleep and then disable it again */
+ ret = ath10k_sdio_set_mbox_sleep(ar, true);
+ if (ret)
+ return ret;
+
+ /* Wait for 20ms for the written value to take effect */
+ msleep(20);
+
+ ret = ath10k_sdio_set_mbox_sleep(ar, false);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+#define SDIO_IRQ_DISABLE_TIMEOUT_HZ (3 * HZ)
+
+static void ath10k_sdio_irq_disable(struct ath10k *ar)
+{
+ struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+ struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
+ struct ath10k_sdio_irq_enable_regs *regs = irq_data->irq_en_reg;
+ struct sk_buff *skb;
+ struct completion irqs_disabled_comp;
+ int ret;
+
+ skb = dev_alloc_skb(sizeof(*regs));
+ if (!skb)
+ return;
+
+ mutex_lock(&irq_data->mtx);
+
+ memset(regs, 0, sizeof(*regs)); /* disable all interrupts */
+ memcpy(skb->data, regs, sizeof(*regs));
+ skb_put(skb, sizeof(*regs));
+
+ mutex_unlock(&irq_data->mtx);
+
+ init_completion(&irqs_disabled_comp);
+ ret = ath10k_sdio_prep_async_req(ar, MBOX_INT_STATUS_ENABLE_ADDRESS,
+ skb, &irqs_disabled_comp, false, 0);
+ if (ret)
+ goto out;
+
+ queue_work(ar_sdio->workqueue, &ar_sdio->wr_async_work);
+
+ /* Wait for the completion of the IRQ disable request.
+ * If it times out, we try to disable the IRQs anyway.
+ */
+ ret = wait_for_completion_timeout(&irqs_disabled_comp,
+ SDIO_IRQ_DISABLE_TIMEOUT_HZ);
+ if (!ret)
+ ath10k_warn(ar, "sdio irq disable request timed out\n");
+
+ sdio_claim_host(ar_sdio->func);
+
+ ret = sdio_release_irq(ar_sdio->func);
+ if (ret)
+ ath10k_warn(ar, "failed to release sdio interrupt: %d\n", ret);
+
+ sdio_release_host(ar_sdio->func);
+
+out:
+ kfree_skb(skb);
+}
+
+static void ath10k_sdio_hif_stop(struct ath10k *ar)
+{
+ struct ath10k_sdio_bus_request *req, *tmp_req;
+ struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+ struct sk_buff *skb;
+
+ ath10k_sdio_irq_disable(ar);
+
+ cancel_work_sync(&ar_sdio->async_work_rx);
+
+ while ((skb = skb_dequeue(&ar_sdio->rx_head)))
+ dev_kfree_skb_any(skb);
+
+ cancel_work_sync(&ar_sdio->wr_async_work);
+
+ spin_lock_bh(&ar_sdio->wr_async_lock);
+
+ /* Free all bus requests that have not been handled */
+ list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) {
+ struct ath10k_htc_ep *ep;
+
+ list_del(&req->list);
+
+ if (req->htc_msg) {
+ ep = &ar->htc.endpoint[req->eid];
+ ath10k_htc_notify_tx_completion(ep, req->skb);
+ } else if (req->skb) {
+ kfree_skb(req->skb);
+ }
+ ath10k_sdio_free_bus_req(ar, req);
+ }
+
+ spin_unlock_bh(&ar_sdio->wr_async_lock);
+
+ ath10k_core_napi_sync_disable(ar);
+}
+
+#ifdef CONFIG_PM
+
+static int ath10k_sdio_hif_suspend(struct ath10k *ar)
+{
+ return 0;
+}
+
+static int ath10k_sdio_hif_resume(struct ath10k *ar)
+{
+ switch (ar->state) {
+ case ATH10K_STATE_OFF:
+ ath10k_dbg(ar, ATH10K_DBG_SDIO,
+ "sdio resume configuring sdio\n");
+
+ /* need to set sdio settings after power is cut from sdio */
+ ath10k_sdio_config(ar);
+ break;
+
+ case ATH10K_STATE_ON:
+ default:
+ break;
+ }
+
+ return 0;
+}
+#endif
+
+static int ath10k_sdio_hif_map_service_to_pipe(struct ath10k *ar,
+ u16 service_id,
+ u8 *ul_pipe, u8 *dl_pipe)
+{
+ struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+ struct ath10k_htc *htc = &ar->htc;
+ u32 htt_addr, wmi_addr, htt_mbox_size, wmi_mbox_size;
+ enum ath10k_htc_ep_id eid;
+ bool ep_found = false;
+ int i;
+
+ /* For sdio, we are interested in the mapping between eid
+ * and pipeid rather than service_id to pipe_id.
+ * First we find out which eid has been allocated to the
+ * service...
+ */
+ for (i = 0; i < ATH10K_HTC_EP_COUNT; i++) {
+ if (htc->endpoint[i].service_id == service_id) {
+ eid = htc->endpoint[i].eid;
+ ep_found = true;
+ break;
+ }
+ }
+
+ if (!ep_found)
+ return -EINVAL;
+
+ /* Then we create the simplest mapping possible between pipeid
+ * and eid
+ */
+ *ul_pipe = *dl_pipe = (u8)eid;
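+ /* For example (hypothetical assignment): if the WMI control
+ * service was allocated eid 1, both its UL and DL pipe ids
+ * become 1 as well.
+ */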
+
+ /* Normally, HTT will use the upper part of the extended
+ * mailbox address space (ext_info[1].htc_ext_addr) and WMI ctrl
+ * the lower part (ext_info[0].htc_ext_addr).
+ * If fw wants swapping of mailbox addresses, the opposite is true.
+ */
+ if (ar_sdio->swap_mbox) {
+ htt_addr = ar_sdio->mbox_info.ext_info[0].htc_ext_addr;
+ wmi_addr = ar_sdio->mbox_info.ext_info[1].htc_ext_addr;
+ htt_mbox_size = ar_sdio->mbox_info.ext_info[0].htc_ext_sz;
+ wmi_mbox_size = ar_sdio->mbox_info.ext_info[1].htc_ext_sz;
+ } else {
+ htt_addr = ar_sdio->mbox_info.ext_info[1].htc_ext_addr;
+ wmi_addr = ar_sdio->mbox_info.ext_info[0].htc_ext_addr;
+ htt_mbox_size = ar_sdio->mbox_info.ext_info[1].htc_ext_sz;
+ wmi_mbox_size = ar_sdio->mbox_info.ext_info[0].htc_ext_sz;
+ }
+
+ switch (service_id) {
+ case ATH10K_HTC_SVC_ID_RSVD_CTRL:
+ /* HTC ctrl ep mbox address has already been set up in
+ * ath10k_sdio_hif_start
+ */
+ break;
+ case ATH10K_HTC_SVC_ID_WMI_CONTROL:
+ ar_sdio->mbox_addr[eid] = wmi_addr;
+ ar_sdio->mbox_size[eid] = wmi_mbox_size;
+ ath10k_dbg(ar, ATH10K_DBG_SDIO,
+ "sdio wmi ctrl mbox_addr 0x%x mbox_size %d\n",
+ ar_sdio->mbox_addr[eid], ar_sdio->mbox_size[eid]);
+ break;
+ case ATH10K_HTC_SVC_ID_HTT_DATA_MSG:
+ ar_sdio->mbox_addr[eid] = htt_addr;
+ ar_sdio->mbox_size[eid] = htt_mbox_size;
+ ath10k_dbg(ar, ATH10K_DBG_SDIO,
+ "sdio htt data mbox_addr 0x%x mbox_size %d\n",
+ ar_sdio->mbox_addr[eid], ar_sdio->mbox_size[eid]);
+ break;
+ default:
+ ath10k_warn(ar, "unsupported HTC service id: %d\n",
+ service_id);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void ath10k_sdio_hif_get_default_pipe(struct ath10k *ar,
+ u8 *ul_pipe, u8 *dl_pipe)
+{
+ ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio hif get default pipe\n");
+
+ /* HTC ctrl ep (SVC id 1) always has eid (and pipe_id in our
+ * case) == 0
+ */
+ *ul_pipe = 0;
+ *dl_pipe = 0;
+}
+
+static const struct ath10k_hif_ops ath10k_sdio_hif_ops = {
+ .tx_sg = ath10k_sdio_hif_tx_sg,
+ .diag_read = ath10k_sdio_hif_diag_read,
+ .diag_write = ath10k_sdio_hif_diag_write_mem,
+ .exchange_bmi_msg = ath10k_sdio_bmi_exchange_msg,
+ .start = ath10k_sdio_hif_start,
+ .stop = ath10k_sdio_hif_stop,
+ .start_post = ath10k_sdio_hif_start_post,
+ .get_htt_tx_complete = ath10k_sdio_get_htt_tx_complete,
+ .map_service_to_pipe = ath10k_sdio_hif_map_service_to_pipe,
+ .get_default_pipe = ath10k_sdio_hif_get_default_pipe,
+ .power_up = ath10k_sdio_hif_power_up,
+ .power_down = ath10k_sdio_hif_power_down,
+#ifdef CONFIG_PM
+ .suspend = ath10k_sdio_hif_suspend,
+ .resume = ath10k_sdio_hif_resume,
+#endif
+};
+
+#ifdef CONFIG_PM_SLEEP
+
+/* Empty handlers so that mmc subsystem doesn't remove us entirely during
+ * suspend. We instead follow cfg80211 suspend/resume handlers.
+ */
+static int ath10k_sdio_pm_suspend(struct device *device)
+{
+ struct sdio_func *func = dev_to_sdio_func(device);
+ struct ath10k_sdio *ar_sdio = sdio_get_drvdata(func);
+ struct ath10k *ar = ar_sdio->ar;
+ mmc_pm_flag_t pm_flag, pm_caps;
+ int ret;
+
+ if (!device_may_wakeup(ar->dev))
+ return 0;
+
+ ath10k_sdio_set_mbox_sleep(ar, true);
+
+ pm_flag = MMC_PM_KEEP_POWER;
+
+ ret = sdio_set_host_pm_flags(func, pm_flag);
+ if (ret) {
+ pm_caps = sdio_get_host_pm_caps(func);
+ ath10k_warn(ar, "failed to set sdio host pm flags (0x%x, 0x%x): %d\n",
+ pm_flag, pm_caps, ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+static int ath10k_sdio_pm_resume(struct device *device)
+{
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(ath10k_sdio_pm_ops, ath10k_sdio_pm_suspend,
+ ath10k_sdio_pm_resume);
+
+#define ATH10K_SDIO_PM_OPS (&ath10k_sdio_pm_ops)
+
+#else
+
+#define ATH10K_SDIO_PM_OPS NULL
+
+#endif /* CONFIG_PM_SLEEP */
+
+static int ath10k_sdio_napi_poll(struct napi_struct *ctx, int budget)
+{
+ struct ath10k *ar = container_of(ctx, struct ath10k, napi);
+ int done;
+
+ done = ath10k_htt_rx_hl_indication(ar, budget);
+ ath10k_dbg(ar, ATH10K_DBG_SDIO, "napi poll: done: %d, budget:%d\n", done, budget);
+
+ if (done < budget)
+ napi_complete_done(ctx, done);
+
+ return done;
+}
+
+static int ath10k_sdio_read_host_interest_value(struct ath10k *ar,
+ u32 item_offset,
+ u32 *val)
+{
+ u32 addr;
+ int ret;
+
+ addr = host_interest_item_address(item_offset);
+
+ ret = ath10k_sdio_diag_read32(ar, addr, val);
+
+ if (ret)
+ ath10k_warn(ar, "unable to read host interest offset %d value\n",
+ item_offset);
+
+ return ret;
+}
+
+static int ath10k_sdio_read_mem(struct ath10k *ar, u32 address, void *buf,
+ u32 buf_len)
+{
+ u32 val;
+ int i, ret = 0;
+
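+ /* Reads go through the diag window in 4-byte words, so buf_len
+ * is assumed to be a multiple of 4; a trailing partial word
+ * would overrun buf by up to 3 bytes.
+ */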
+ for (i = 0; i < buf_len; i += 4) {
+ ret = ath10k_sdio_diag_read32(ar, address + i, &val);
+ if (ret) {
+ ath10k_warn(ar, "unable to read mem %d value\n", address + i);
+ break;
+ }
+ memcpy(buf + i, &val, 4);
+ }
+
+ return ret;
+}
+
+static bool ath10k_sdio_is_fast_dump_supported(struct ath10k *ar)
+{
+ u32 param;
+
+ ath10k_sdio_read_host_interest_value(ar, HI_ITEM(hi_option_flag2), &param);
+
+ ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio hi_option_flag2 %x\n", param);
+
+ return !!(param & HI_OPTION_SDIO_CRASH_DUMP_ENHANCEMENT_FW);
+}
+
+static void ath10k_sdio_dump_registers(struct ath10k *ar,
+ struct ath10k_fw_crash_data *crash_data,
+ bool fast_dump)
+{
+ u32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
+ int i, ret;
+ u32 reg_dump_area;
+
+ ret = ath10k_sdio_read_host_interest_value(ar, HI_ITEM(hi_failure_state),
+ &reg_dump_area);
+ if (ret) {
+ ath10k_warn(ar, "failed to read firmware dump area: %d\n", ret);
+ return;
+ }
+
+ if (fast_dump)
+ ret = ath10k_bmi_read_memory(ar, reg_dump_area, reg_dump_values,
+ sizeof(reg_dump_values));
+ else
+ ret = ath10k_sdio_read_mem(ar, reg_dump_area, reg_dump_values,
+ sizeof(reg_dump_values));
+
+ if (ret) {
+ ath10k_warn(ar, "failed to read firmware dump value: %d\n", ret);
+ return;
+ }
+
+ ath10k_err(ar, "firmware register dump:\n");
+ for (i = 0; i < ARRAY_SIZE(reg_dump_values); i += 4)
+ ath10k_err(ar, "[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
+ i,
+ reg_dump_values[i],
+ reg_dump_values[i + 1],
+ reg_dump_values[i + 2],
+ reg_dump_values[i + 3]);
+
+ if (!crash_data)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(reg_dump_values); i++)
+ crash_data->registers[i] = __cpu_to_le32(reg_dump_values[i]);
+}
+
+static int ath10k_sdio_dump_memory_section(struct ath10k *ar,
+ const struct ath10k_mem_region *mem_region,
+ u8 *buf, size_t buf_len)
+{
+ const struct ath10k_mem_section *cur_section, *next_section;
+ unsigned int count, section_size, skip_size;
+ int ret, i, j;
+
+ if (!mem_region || !buf)
+ return 0;
+
+ cur_section = &mem_region->section_table.sections[0];
+
+ if (mem_region->start > cur_section->start) {
+ ath10k_warn(ar, "incorrect memdump region 0x%x with section start address 0x%x.\n",
+ mem_region->start, cur_section->start);
+ return 0;
+ }
+
+ skip_size = cur_section->start - mem_region->start;
+
+ /* fill the gap between the first register section and register
+ * start address
+ */
+ for (i = 0; i < skip_size; i++) {
+ *buf = ATH10K_MAGIC_NOT_COPIED;
+ buf++;
+ }
+
+ count = 0;
+ i = 0;
+ for (; cur_section; cur_section = next_section) {
+ section_size = cur_section->end - cur_section->start;
+
+ if (section_size <= 0) {
+ ath10k_warn(ar, "incorrect ramdump format with start address 0x%x and stop address 0x%x\n",
+ cur_section->start,
+ cur_section->end);
+ break;
+ }
+
+ if (++i == mem_region->section_table.size) {
+ /* last section */
+ next_section = NULL;
+ skip_size = 0;
+ } else {
+ next_section = cur_section + 1;
+
+ if (cur_section->end > next_section->start) {
+ ath10k_warn(ar, "next ramdump section 0x%x is smaller than current end address 0x%x\n",
+ next_section->start,
+ cur_section->end);
+ break;
+ }
+
+ skip_size = next_section->start - cur_section->end;
+ }
+
+ if (buf_len < (skip_size + section_size)) {
+ ath10k_warn(ar, "ramdump buffer is too small: %zu\n", buf_len);
+ break;
+ }
+
+ buf_len -= skip_size + section_size;
+
+ /* read section to dest memory */
+ ret = ath10k_sdio_read_mem(ar, cur_section->start,
+ buf, section_size);
+ if (ret) {
+ ath10k_warn(ar, "failed to read ramdump from section 0x%x: %d\n",
+ cur_section->start, ret);
+ break;
+ }
+
+ buf += section_size;
+ count += section_size;
+
+ /* fill in the gap between this section and the next */
+ for (j = 0; j < skip_size; j++) {
+ *buf = ATH10K_MAGIC_NOT_COPIED;
+ buf++;
+ }
+
+ count += skip_size;
+ }
+
+ return count;
+}
+
+/* Returns < 0 if an error happened, otherwise the length of the region */
+static int ath10k_sdio_dump_memory_generic(struct ath10k *ar,
+ const struct ath10k_mem_region *current_region,
+ u8 *buf,
+ bool fast_dump)
+{
+ int ret;
+
+ if (current_region->section_table.size > 0)
+ /* Copy each section individually. */
+ return ath10k_sdio_dump_memory_section(ar,
+ current_region,
+ buf,
+ current_region->len);
+
+ /* No individual memory sections defined so we can
+ * copy the entire memory region.
+ */
+ if (fast_dump)
+ ret = ath10k_bmi_read_memory(ar,
+ current_region->start,
+ buf,
+ current_region->len);
+ else
+ ret = ath10k_sdio_read_mem(ar,
+ current_region->start,
+ buf,
+ current_region->len);
+
+ if (ret) {
+ ath10k_warn(ar, "failed to copy ramdump region %s: %d\n",
+ current_region->name, ret);
+ return ret;
+ }
+
+ return current_region->len;
+}
+
+static void ath10k_sdio_dump_memory(struct ath10k *ar,
+ struct ath10k_fw_crash_data *crash_data,
+ bool fast_dump)
+{
+ const struct ath10k_hw_mem_layout *mem_layout;
+ const struct ath10k_mem_region *current_region;
+ struct ath10k_dump_ram_data_hdr *hdr;
+ u32 count;
+ size_t buf_len;
+ int ret, i;
+ u8 *buf;
+
+ if (!crash_data)
+ return;
+
+ mem_layout = ath10k_coredump_get_mem_layout(ar);
+ if (!mem_layout)
+ return;
+
+ current_region = &mem_layout->region_table.regions[0];
+
+ buf = crash_data->ramdump_buf;
+ buf_len = crash_data->ramdump_buf_len;
+
+ memset(buf, 0, buf_len);
+
+ for (i = 0; i < mem_layout->region_table.size; i++) {
+ count = 0;
+
+ if (current_region->len > buf_len) {
+ ath10k_warn(ar, "memory region %s size %d is larger that remaining ramdump buffer size %zu\n",
+ current_region->name,
+ current_region->len,
+ buf_len);
+ break;
+ }
+
+ /* Reserve space for the header. */
+ hdr = (void *)buf;
+ buf += sizeof(*hdr);
+ buf_len -= sizeof(*hdr);
+
+ ret = ath10k_sdio_dump_memory_generic(ar, current_region, buf,
+ fast_dump);
+ if (ret >= 0)
+ count = ret;
+
+ hdr->region_type = cpu_to_le32(current_region->type);
+ hdr->start = cpu_to_le32(current_region->start);
+ hdr->length = cpu_to_le32(count);
+
+ if (count == 0)
+ /* Note: the header remains, just with zero length. */
+ break;
+
+ buf += count;
+ buf_len -= count;
+
+ current_region++;
+ }
+}
+
+void ath10k_sdio_fw_crashed_dump(struct ath10k *ar)
+{
+ struct ath10k_fw_crash_data *crash_data;
+ char guid[UUID_STRING_LEN + 1];
+ bool fast_dump;
+
+ fast_dump = ath10k_sdio_is_fast_dump_supported(ar);
+
+ if (fast_dump)
+ ath10k_bmi_start(ar);
+
+ ar->stats.fw_crash_counter++;
+
+ ath10k_sdio_disable_intrs(ar);
+
+ crash_data = ath10k_coredump_new(ar);
+
+ if (crash_data)
+ scnprintf(guid, sizeof(guid), "%pUl", &crash_data->guid);
+ else
+ scnprintf(guid, sizeof(guid), "n/a");
+
+ ath10k_err(ar, "firmware crashed! (guid %s)\n", guid);
+ ath10k_print_driver_info(ar);
+ ath10k_sdio_dump_registers(ar, crash_data, fast_dump);
+ ath10k_sdio_dump_memory(ar, crash_data, fast_dump);
+
+ ath10k_sdio_enable_intrs(ar);
+
+ ath10k_core_start_recovery(ar);
+}
+
+static int ath10k_sdio_probe(struct sdio_func *func,
+ const struct sdio_device_id *id)
+{
+ struct ath10k_sdio *ar_sdio;
+ struct ath10k *ar;
+ enum ath10k_hw_rev hw_rev;
+ u32 dev_id_base;
+ struct ath10k_bus_params bus_params = {};
+ int ret, i;
+
+ /* Assumption: All SDIO based chipsets (so far) are QCA6174 based.
+ * If newer chipsets arrive that do not use the hw reg
+ * setup as defined in qca6174_regs and qca6174_values, this
+ * assumption is no longer valid and hw_rev must be set up
+ * differently depending on the chipset.
+ */
+ hw_rev = ATH10K_HW_QCA6174;
+
+ ar = ath10k_core_create(sizeof(*ar_sdio), &func->dev, ATH10K_BUS_SDIO,
+ hw_rev, &ath10k_sdio_hif_ops);
+ if (!ar) {
+ dev_err(&func->dev, "failed to allocate core\n");
+ return -ENOMEM;
+ }
+
+ netif_napi_add(ar->napi_dev, &ar->napi, ath10k_sdio_napi_poll);
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "sdio new func %d vendor 0x%x device 0x%x block 0x%x/0x%x\n",
+ func->num, func->vendor, func->device,
+ func->max_blksize, func->cur_blksize);
+
+ ar_sdio = ath10k_sdio_priv(ar);
+
+ ar_sdio->irq_data.irq_proc_reg =
+ devm_kzalloc(ar->dev, sizeof(struct ath10k_sdio_irq_proc_regs),
+ GFP_KERNEL);
+ if (!ar_sdio->irq_data.irq_proc_reg) {
+ ret = -ENOMEM;
+ goto err_core_destroy;
+ }
+
+ ar_sdio->vsg_buffer = devm_kmalloc(ar->dev, ATH10K_SDIO_VSG_BUF_SIZE, GFP_KERNEL);
+ if (!ar_sdio->vsg_buffer) {
+ ret = -ENOMEM;
+ goto err_core_destroy;
+ }
+
+ ar_sdio->irq_data.irq_en_reg =
+ devm_kzalloc(ar->dev, sizeof(struct ath10k_sdio_irq_enable_regs),
+ GFP_KERNEL);
+ if (!ar_sdio->irq_data.irq_en_reg) {
+ ret = -ENOMEM;
+ goto err_core_destroy;
+ }
+
+ ar_sdio->bmi_buf = devm_kzalloc(ar->dev, BMI_MAX_LARGE_CMDBUF_SIZE, GFP_KERNEL);
+ if (!ar_sdio->bmi_buf) {
+ ret = -ENOMEM;
+ goto err_core_destroy;
+ }
+
+ ar_sdio->func = func;
+ sdio_set_drvdata(func, ar_sdio);
+
+ ar_sdio->is_disabled = true;
+ ar_sdio->ar = ar;
+
+ spin_lock_init(&ar_sdio->lock);
+ spin_lock_init(&ar_sdio->wr_async_lock);
+ mutex_init(&ar_sdio->irq_data.mtx);
+
+ INIT_LIST_HEAD(&ar_sdio->bus_req_freeq);
+ INIT_LIST_HEAD(&ar_sdio->wr_asyncq);
+
+ INIT_WORK(&ar_sdio->wr_async_work, ath10k_sdio_write_async_work);
+ ar_sdio->workqueue = create_singlethread_workqueue("ath10k_sdio_wq");
+ if (!ar_sdio->workqueue) {
+ ret = -ENOMEM;
+ goto err_core_destroy;
+ }
+
+ for (i = 0; i < ATH10K_SDIO_BUS_REQUEST_MAX_NUM; i++)
+ ath10k_sdio_free_bus_req(ar, &ar_sdio->bus_req[i]);
+
+ skb_queue_head_init(&ar_sdio->rx_head);
+ INIT_WORK(&ar_sdio->async_work_rx, ath10k_rx_indication_async_work);
+
+ dev_id_base = (id->device & 0x0F00);
+ if (dev_id_base != (SDIO_DEVICE_ID_ATHEROS_AR6005 & 0x0F00) &&
+ dev_id_base != (SDIO_DEVICE_ID_ATHEROS_QCA9377 & 0x0F00)) {
+ ret = -ENODEV;
+ ath10k_err(ar, "unsupported device id %u (0x%x)\n",
+ dev_id_base, id->device);
+ goto err_free_wq;
+ }
+
+ ar->dev_id = QCA9377_1_0_DEVICE_ID;
+ ar->id.vendor = id->vendor;
+ ar->id.device = id->device;
+
+ ath10k_sdio_set_mbox_info(ar);
+
+ bus_params.dev_type = ATH10K_DEV_TYPE_HL;
+ /* TODO: don't know yet how to get chip_id with SDIO */
+ bus_params.chip_id = 0;
+ bus_params.hl_msdu_ids = true;
+
+ ar->hw->max_mtu = ETH_DATA_LEN;
+
+ ret = ath10k_core_register(ar, &bus_params);
+ if (ret) {
+ ath10k_err(ar, "failed to register driver core: %d\n", ret);
+ goto err_free_wq;
+ }
+
+ timer_setup(&ar_sdio->sleep_timer, ath10k_sdio_sleep_timer_handler, 0);
+
+ return 0;
+
+err_free_wq:
+ destroy_workqueue(ar_sdio->workqueue);
+err_core_destroy:
+ ath10k_core_destroy(ar);
+
+ return ret;
+}
+
+static void ath10k_sdio_remove(struct sdio_func *func)
+{
+ struct ath10k_sdio *ar_sdio = sdio_get_drvdata(func);
+ struct ath10k *ar = ar_sdio->ar;
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "sdio removed func %d vendor 0x%x device 0x%x\n",
+ func->num, func->vendor, func->device);
+
+ ath10k_core_unregister(ar);
+
+ netif_napi_del(&ar->napi);
+
+ destroy_workqueue(ar_sdio->workqueue);
+
+ ath10k_core_destroy(ar);
+}
+
+static const struct sdio_device_id ath10k_sdio_devices[] = {
+ {SDIO_DEVICE(SDIO_VENDOR_ID_ATHEROS, SDIO_DEVICE_ID_ATHEROS_AR6005)},
+ {SDIO_DEVICE(SDIO_VENDOR_ID_ATHEROS, SDIO_DEVICE_ID_ATHEROS_QCA9377)},
+ {},
+};
+
+MODULE_DEVICE_TABLE(sdio, ath10k_sdio_devices);
+
+static struct sdio_driver ath10k_sdio_driver = {
+ .name = "ath10k_sdio",
+ .id_table = ath10k_sdio_devices,
+ .probe = ath10k_sdio_probe,
+ .remove = ath10k_sdio_remove,
+ .drv = {
+ .pm = ATH10K_SDIO_PM_OPS,
+ },
+};
+module_sdio_driver(ath10k_sdio_driver);
+
+MODULE_AUTHOR("Qualcomm Atheros");
+MODULE_DESCRIPTION("Driver support for Qualcomm Atheros 802.11ac WLAN SDIO devices");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/ath/ath10k/sdio.h b/drivers/net/wireless/ath/ath10k/sdio.h
new file mode 100644
index 000000000000..b6ac927628b1
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/sdio.h
@@ -0,0 +1,236 @@
+/* SPDX-License-Identifier: ISC */
+/*
+ * Copyright (c) 2004-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
+ * Copyright (c) 2016-2017 Erik Stromdahl <erik.stromdahl@gmail.com>
+ */
+
+#ifndef _SDIO_H_
+#define _SDIO_H_
+
+#define ATH10K_HIF_MBOX_BLOCK_SIZE 256
+
+#define ATH10K_SDIO_MAX_BUFFER_SIZE 4096 /* unsure of this constant */
+
+/* Mailbox address in SDIO address space */
+#define ATH10K_HIF_MBOX_BASE_ADDR 0x1000
+#define ATH10K_HIF_MBOX_WIDTH 0x800
+
+#define ATH10K_HIF_MBOX_TOT_WIDTH \
+ (ATH10K_HIF_MBOX_NUM_MAX * ATH10K_HIF_MBOX_WIDTH)
+
+#define ATH10K_HIF_MBOX0_EXT_BASE_ADDR 0x5000
+#define ATH10K_HIF_MBOX0_EXT_WIDTH (36 * 1024)
+#define ATH10K_HIF_MBOX0_EXT_WIDTH_ROME_2_0 (56 * 1024)
+#define ATH10K_HIF_MBOX1_EXT_WIDTH (36 * 1024)
+#define ATH10K_HIF_MBOX_DUMMY_SPACE_SIZE (2 * 1024)
+
+#define ATH10K_HTC_MBOX_MAX_PAYLOAD_LENGTH \
+ (ATH10K_SDIO_MAX_BUFFER_SIZE - sizeof(struct ath10k_htc_hdr))
+
+#define ATH10K_HIF_MBOX_NUM_MAX 4
+#define ATH10K_SDIO_BUS_REQUEST_MAX_NUM 1024
+
+#define ATH10K_SDIO_HIF_COMMUNICATION_TIMEOUT_HZ (100 * HZ)
+
+/* HTC runs over mailbox 0 */
+#define ATH10K_HTC_MAILBOX 0
+#define ATH10K_HTC_MAILBOX_MASK BIT(ATH10K_HTC_MAILBOX)
+
+/* GMBOX addresses */
+#define ATH10K_HIF_GMBOX_BASE_ADDR 0x7000
+#define ATH10K_HIF_GMBOX_WIDTH 0x4000
+
+/* Modified versions of the sdio.h macros.
+ * The macros in sdio.h can't be used easily with the FIELD_{PREP|GET}
+ * macros in bitfield.h, so we define our own macros here.
+ */
+#define ATH10K_SDIO_DRIVE_DTSX_MASK \
+ (SDIO_DRIVE_DTSx_MASK << SDIO_DRIVE_DTSx_SHIFT)
+
+#define ATH10K_SDIO_DRIVE_DTSX_TYPE_B 0
+#define ATH10K_SDIO_DRIVE_DTSX_TYPE_A 1
+#define ATH10K_SDIO_DRIVE_DTSX_TYPE_C 2
+#define ATH10K_SDIO_DRIVE_DTSX_TYPE_D 3
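+
+/* A minimal usage sketch with a hypothetical "val" register value,
+ * mirroring how the FIELD_PREP bitfield helpers are used in the driver:
+ *
+ *   val &= ~ATH10K_SDIO_DRIVE_DTSX_MASK;
+ *   val |= FIELD_PREP(ATH10K_SDIO_DRIVE_DTSX_MASK,
+ *                     ATH10K_SDIO_DRIVE_DTSX_TYPE_D);
+ */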
+
+/* SDIO CCCR register definitions */
+#define CCCR_SDIO_IRQ_MODE_REG 0xF0
+#define CCCR_SDIO_IRQ_MODE_REG_SDIO3 0x16
+
+#define CCCR_SDIO_DRIVER_STRENGTH_ENABLE_ADDR 0xF2
+
+#define CCCR_SDIO_DRIVER_STRENGTH_ENABLE_A 0x02
+#define CCCR_SDIO_DRIVER_STRENGTH_ENABLE_C 0x04
+#define CCCR_SDIO_DRIVER_STRENGTH_ENABLE_D 0x08
+
+#define CCCR_SDIO_ASYNC_INT_DELAY_ADDRESS 0xF0
+#define CCCR_SDIO_ASYNC_INT_DELAY_MASK 0xC0
+
+/* mode to enable special 4-bit interrupt assertion without clock */
+#define SDIO_IRQ_MODE_ASYNC_4BIT_IRQ BIT(0)
+#define SDIO_IRQ_MODE_ASYNC_4BIT_IRQ_SDIO3 BIT(1)
+
+#define ATH10K_SDIO_TARGET_DEBUG_INTR_MASK 0x01
+
+/* The theoretical maximum number of RX messages that can be fetched
+ * from the mbox interrupt handler in one loop is derived in the following
+ * way:
+ *
+ * Let's assume that each packet in a bundle of the maximum bundle size
+ * (HTC_HOST_MAX_MSG_PER_RX_BUNDLE) has the HTC header bundle count set
+ * to the maximum value (HTC_HOST_MAX_MSG_PER_RX_BUNDLE).
+ *
+ * In this case the driver must allocate
+ * (HTC_HOST_MAX_MSG_PER_RX_BUNDLE * 2) skbs.
+ */
+#define ATH10K_SDIO_MAX_RX_MSGS \
+ (HTC_HOST_MAX_MSG_PER_RX_BUNDLE * 2)
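+
+/* For example, assuming HTC_HOST_MAX_MSG_PER_RX_BUNDLE is 32 (its
+ * htc.h value at the time of writing, not verified here), up to 64
+ * RX skbs may be in flight at once.
+ */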
+
+#define ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL 0x00000868u
+#define ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_OFF 0xFFFEFFFF
+#define ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_ON 0x10000
+
+enum sdio_mbox_state {
+ SDIO_MBOX_UNKNOWN_STATE = 0,
+ SDIO_MBOX_REQUEST_TO_SLEEP_STATE = 1,
+ SDIO_MBOX_SLEEP_STATE = 2,
+ SDIO_MBOX_AWAKE_STATE = 3,
+};
+
+#define ATH10K_CIS_READ_WAIT_4_RTC_CYCLE_IN_US 125
+#define ATH10K_CIS_RTC_STATE_ADDR 0x1138
+#define ATH10K_CIS_RTC_STATE_ON 0x01
+#define ATH10K_CIS_XTAL_SETTLE_DURATION_IN_US 1500
+#define ATH10K_CIS_READ_RETRY 10
+#define ATH10K_MIN_SLEEP_INACTIVITY_TIME_MS 50
+
+/* TODO: remove this and use skb->cb instead, much cleaner approach */
+struct ath10k_sdio_bus_request {
+ struct list_head list;
+
+ /* sdio address */
+ u32 address;
+
+ struct sk_buff *skb;
+ enum ath10k_htc_ep_id eid;
+ int status;
+ /* Specifies if the current request is an HTC message.
+ * If not, the eid is not applicable and the TX completion handler
+ * associated with the endpoint will not be invoked.
+ */
+ bool htc_msg;
+ /* Completion that (if set) will be invoked for non-HTC requests
+ * (htc_msg == false) when the request has been processed.
+ */
+ struct completion *comp;
+};
+
+struct ath10k_sdio_rx_data {
+ struct sk_buff *skb;
+ size_t alloc_len;
+ size_t act_len;
+ enum ath10k_htc_ep_id eid;
+ bool part_of_bundle;
+ bool last_in_bundle;
+ bool trailer_only;
+};
+
+struct ath10k_sdio_irq_proc_regs {
+ u8 host_int_status;
+ u8 cpu_int_status;
+ u8 error_int_status;
+ u8 counter_int_status;
+ u8 mbox_frame;
+ u8 rx_lookahead_valid;
+ u8 host_int_status2;
+ u8 gmbox_rx_avail;
+ __le32 rx_lookahead[2 * ATH10K_HIF_MBOX_NUM_MAX];
+ __le32 int_status_enable;
+};
+
+struct ath10k_sdio_irq_enable_regs {
+ u8 int_status_en;
+ u8 cpu_int_status_en;
+ u8 err_int_status_en;
+ u8 cntr_int_status_en;
+};
+
+struct ath10k_sdio_irq_data {
+ /* protects irq_proc_reg and irq_en_reg below.
+ * We use a mutex here and not a spinlock since we will have the
+ * mutex locked while calling the sdio_memcpy_ functions.
+ * These functions require non-atomic context, and hence, spinlocks
+ * cannot be held while calling them.
+ */
+ struct mutex mtx;
+ struct ath10k_sdio_irq_proc_regs *irq_proc_reg;
+ struct ath10k_sdio_irq_enable_regs *irq_en_reg;
+};
+
+struct ath10k_mbox_ext_info {
+ u32 htc_ext_addr;
+ u32 htc_ext_sz;
+};
+
+struct ath10k_mbox_info {
+ u32 htc_addr;
+ struct ath10k_mbox_ext_info ext_info[2];
+ u32 block_size;
+ u32 block_mask;
+ u32 gmbox_addr;
+ u32 gmbox_sz;
+};
+
+struct ath10k_sdio {
+ struct sdio_func *func;
+
+ struct ath10k_mbox_info mbox_info;
+ bool swap_mbox;
+ u32 mbox_addr[ATH10K_HTC_EP_COUNT];
+ u32 mbox_size[ATH10K_HTC_EP_COUNT];
+
+ /* available bus requests */
+ struct ath10k_sdio_bus_request bus_req[ATH10K_SDIO_BUS_REQUEST_MAX_NUM];
+ /* free list of bus requests */
+ struct list_head bus_req_freeq;
+
+ struct sk_buff_head rx_head;
+
+ /* protects access to bus_req_freeq */
+ spinlock_t lock;
+
+ struct ath10k_sdio_rx_data rx_pkts[ATH10K_SDIO_MAX_RX_MSGS];
+ size_t n_rx_pkts;
+
+ struct ath10k *ar;
+ struct ath10k_sdio_irq_data irq_data;
+
+ /* temporary buffer for sdio read.
+ * It is allocated at probe time and used to receive bundled
+ * packets; bundled-packet reads are not done in parallel, so it
+ * needs no locking.
+ */
+ u8 *vsg_buffer;
+
+ /* temporary buffer for BMI requests */
+ u8 *bmi_buf;
+
+ bool is_disabled;
+
+ struct workqueue_struct *workqueue;
+ struct work_struct wr_async_work;
+ struct list_head wr_asyncq;
+ /* protects access to wr_asyncq */
+ spinlock_t wr_async_lock;
+
+ struct work_struct async_work_rx;
+ struct timer_list sleep_timer;
+ enum sdio_mbox_state mbox_state;
+};
+
+static inline struct ath10k_sdio *ath10k_sdio_priv(struct ath10k *ar)
+{
+ return (struct ath10k_sdio *)ar->drv_priv;
+}
+
+#endif
diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c
new file mode 100644
index 000000000000..b3f6424c17d3
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/snoc.c
@@ -0,0 +1,1895 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (c) 2018 The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/regulator/consumer.h>
+#include <linux/remoteproc/qcom_rproc.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/iommu.h>
+
+#include "ce.h"
+#include "coredump.h"
+#include "debug.h"
+#include "hif.h"
+#include "htc.h"
+#include "snoc.h"
+
+#define ATH10K_SNOC_RX_POST_RETRY_MS 50
+#define CE_POLL_PIPE 4
+#define ATH10K_SNOC_WAKE_IRQ 2
+
+static char *const ce_name[] = {
+ "WLAN_CE_0",
+ "WLAN_CE_1",
+ "WLAN_CE_2",
+ "WLAN_CE_3",
+ "WLAN_CE_4",
+ "WLAN_CE_5",
+ "WLAN_CE_6",
+ "WLAN_CE_7",
+ "WLAN_CE_8",
+ "WLAN_CE_9",
+ "WLAN_CE_10",
+ "WLAN_CE_11",
+};
+
+static const char * const ath10k_regulators[] = {
+ "vdd-0.8-cx-mx",
+ "vdd-1.8-xo",
+ "vdd-1.3-rfa",
+ "vdd-3.3-ch0",
+ "vdd-3.3-ch1",
+};
+
+static const char * const ath10k_clocks[] = {
+ "cxo_ref_clk_pin", "qdss",
+};
+
+static void ath10k_snoc_htc_tx_cb(struct ath10k_ce_pipe *ce_state);
+static void ath10k_snoc_htt_tx_cb(struct ath10k_ce_pipe *ce_state);
+static void ath10k_snoc_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
+static void ath10k_snoc_htt_rx_cb(struct ath10k_ce_pipe *ce_state);
+static void ath10k_snoc_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
+static void ath10k_snoc_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state);
+
+static const struct ath10k_snoc_drv_priv drv_priv = {
+ .hw_rev = ATH10K_HW_WCN3990,
+ .dma_mask = DMA_BIT_MASK(35),
+ .msa_size = 0x100000,
+};
+
+#define WCN3990_SRC_WR_IDX_OFFSET 0x3C
+#define WCN3990_DST_WR_IDX_OFFSET 0x40
+
+static struct ath10k_shadow_reg_cfg target_shadow_reg_cfg_map[] = {
+ {
+ .ce_id = __cpu_to_le16(0),
+ .reg_offset = __cpu_to_le16(WCN3990_SRC_WR_IDX_OFFSET),
+ },
+
+ {
+ .ce_id = __cpu_to_le16(3),
+ .reg_offset = __cpu_to_le16(WCN3990_SRC_WR_IDX_OFFSET),
+ },
+
+ {
+ .ce_id = __cpu_to_le16(4),
+ .reg_offset = __cpu_to_le16(WCN3990_SRC_WR_IDX_OFFSET),
+ },
+
+ {
+ .ce_id = __cpu_to_le16(5),
+ .reg_offset = __cpu_to_le16(WCN3990_SRC_WR_IDX_OFFSET),
+ },
+
+ {
+ .ce_id = __cpu_to_le16(7),
+ .reg_offset = __cpu_to_le16(WCN3990_SRC_WR_IDX_OFFSET),
+ },
+
+ {
+ .ce_id = __cpu_to_le16(1),
+ .reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
+ },
+
+ {
+ .ce_id = __cpu_to_le16(2),
+ .reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
+ },
+
+ {
+ .ce_id = __cpu_to_le16(7),
+ .reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
+ },
+
+ {
+ .ce_id = __cpu_to_le16(8),
+ .reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
+ },
+
+ {
+ .ce_id = __cpu_to_le16(9),
+ .reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
+ },
+
+ {
+ .ce_id = __cpu_to_le16(10),
+ .reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
+ },
+
+ {
+ .ce_id = __cpu_to_le16(11),
+ .reg_offset = __cpu_to_le16(WCN3990_DST_WR_IDX_OFFSET),
+ },
+};
+
+static struct ce_attr host_ce_config_wlan[] = {
+ /* CE0: host->target HTC control streams */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 16,
+ .src_sz_max = 2048,
+ .dest_nentries = 0,
+ .send_cb = ath10k_snoc_htc_tx_cb,
+ },
+
+ /* CE1: target->host HTT + HTC control */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 512,
+ .recv_cb = ath10k_snoc_htt_htc_rx_cb,
+ },
+
+ /* CE2: target->host WMI */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 64,
+ .recv_cb = ath10k_snoc_htc_rx_cb,
+ },
+
+ /* CE3: host->target WMI */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 32,
+ .src_sz_max = 2048,
+ .dest_nentries = 0,
+ .send_cb = ath10k_snoc_htc_tx_cb,
+ },
+
+ /* CE4: host->target HTT */
+ {
+ .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+ .src_nentries = 2048,
+ .src_sz_max = 256,
+ .dest_nentries = 0,
+ .send_cb = ath10k_snoc_htt_tx_cb,
+ },
+
+ /* CE5: target->host HTT (ipa_uc->target ) */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 512,
+ .dest_nentries = 512,
+ .recv_cb = ath10k_snoc_htt_rx_cb,
+ },
+
+ /* CE6: target autonomous hif_memcpy */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 0,
+ .dest_nentries = 0,
+ },
+
+ /* CE7: ce_diag, the Diagnostic Window */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 2,
+ .src_sz_max = 2048,
+ .dest_nentries = 2,
+ },
+
+ /* CE8: Target to uMC */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 128,
+ },
+
+ /* CE9 target->host HTT */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 512,
+ .recv_cb = ath10k_snoc_htt_htc_rx_cb,
+ },
+
+ /* CE10: target->host HTT */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 512,
+ .recv_cb = ath10k_snoc_htt_htc_rx_cb,
+ },
+
+ /* CE11: target -> host PKTLOG */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 512,
+ .recv_cb = ath10k_snoc_pktlog_rx_cb,
+ },
+};
+
+static struct ce_pipe_config target_ce_config_wlan[] = {
+ /* CE0: host->target HTC control and raw streams */
+ {
+ .pipenum = __cpu_to_le32(0),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE1: target->host HTT + HTC control */
+ {
+ .pipenum = __cpu_to_le32(1),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE2: target->host WMI */
+ {
+ .pipenum = __cpu_to_le32(2),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(64),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE3: host->target WMI */
+ {
+ .pipenum = __cpu_to_le32(3),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE4: host->target HTT */
+ {
+ .pipenum = __cpu_to_le32(4),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(256),
+ .nbytes_max = __cpu_to_le32(256),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE5: target->host HTT (HIF->HTT) */
+ {
+ .pipenum = __cpu_to_le32(5),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(1024),
+ .nbytes_max = __cpu_to_le32(64),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE6: Reserved for target autonomous hif_memcpy */
+ {
+ .pipenum = __cpu_to_le32(6),
+ .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(16384),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE7 used only by Host */
+ {
+ .pipenum = __cpu_to_le32(7),
+ .pipedir = __cpu_to_le32(4),
+ .nentries = __cpu_to_le32(0),
+ .nbytes_max = __cpu_to_le32(0),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE8 Target to uMC */
+ {
+ .pipenum = __cpu_to_le32(8),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(0),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE9 target->host HTT */
+ {
+ .pipenum = __cpu_to_le32(9),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE10 target->host HTT */
+ {
+ .pipenum = __cpu_to_le32(10),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE11 target autonomous qcache memcpy */
+ {
+ .pipenum = __cpu_to_le32(11),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+};
+
+static struct ce_service_to_pipe target_service_to_ce_map_wlan[] = {
+ {
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(0),
+ },
+ {
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(2),
+ },
+ { /* not used */
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(0),
+ },
+ { /* not used */
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(4),
+ },
+ {
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(1),
+ },
+ { /* not used */
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
+ __cpu_to_le32(PIPEDIR_OUT),
+ __cpu_to_le32(5),
+ },
+ { /* in = DL = target -> host */
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA2_MSG),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(9),
+ },
+ { /* in = DL = target -> host */
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA3_MSG),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(10),
+ },
+ { /* in = DL = target -> host pktlog */
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_LOG_MSG),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(11),
+ },
+ /* (Additions here) */
+
+ { /* must be last */
+ __cpu_to_le32(0),
+ __cpu_to_le32(0),
+ __cpu_to_le32(0),
+ },
+};
+
+static void ath10k_snoc_write32(struct ath10k *ar, u32 offset, u32 value)
+{
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+
+ iowrite32(value, ar_snoc->mem + offset);
+}
+
+static u32 ath10k_snoc_read32(struct ath10k *ar, u32 offset)
+{
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ u32 val;
+
+ val = ioread32(ar_snoc->mem + offset);
+
+ return val;
+}
+
+static int __ath10k_snoc_rx_post_buf(struct ath10k_snoc_pipe *pipe)
+{
+ struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
+ struct ath10k *ar = pipe->hif_ce_state;
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+ struct sk_buff *skb;
+ dma_addr_t paddr;
+ int ret;
+
+ skb = dev_alloc_skb(pipe->buf_sz);
+ if (!skb)
+ return -ENOMEM;
+
+ WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");
+
+ paddr = dma_map_single(ar->dev, skb->data,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(ar->dev, paddr))) {
+ ath10k_warn(ar, "failed to dma map snoc rx buf\n");
+ dev_kfree_skb_any(skb);
+ return -EIO;
+ }
+
+ ATH10K_SKB_RXCB(skb)->paddr = paddr;
+
+ spin_lock_bh(&ce->ce_lock);
+ ret = ce_pipe->ops->ce_rx_post_buf(ce_pipe, skb, paddr);
+ spin_unlock_bh(&ce->ce_lock);
+ if (ret) {
+ dma_unmap_single(ar->dev, paddr, skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void ath10k_snoc_rx_post_pipe(struct ath10k_snoc_pipe *pipe)
+{
+ struct ath10k *ar = pipe->hif_ce_state;
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
+ int ret, num;
+
+ if (pipe->buf_sz == 0)
+ return;
+
+ if (!ce_pipe->dest_ring)
+ return;
+
+ spin_lock_bh(&ce->ce_lock);
+ num = __ath10k_ce_rx_num_free_bufs(ce_pipe);
+ spin_unlock_bh(&ce->ce_lock);
+ while (num--) {
+ ret = __ath10k_snoc_rx_post_buf(pipe);
+ if (ret) {
+ if (ret == -ENOSPC)
+ break;
+ ath10k_warn(ar, "failed to post rx buf: %d\n", ret);
+ mod_timer(&ar_snoc->rx_post_retry, jiffies +
+ ATH10K_SNOC_RX_POST_RETRY_MS);
+ break;
+ }
+ }
+}
+
+static void ath10k_snoc_rx_post(struct ath10k *ar)
+{
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ int i;
+
+ for (i = 0; i < CE_COUNT; i++)
+ ath10k_snoc_rx_post_pipe(&ar_snoc->pipe_info[i]);
+}
+
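+/* Harvest completed rx buffers from the CE, unmap them and collect
+ * them on a local list, then deliver each to @callback and replenish
+ * the pipe.
+ */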
+static void ath10k_snoc_process_rx_cb(struct ath10k_ce_pipe *ce_state,
+ void (*callback)(struct ath10k *ar,
+ struct sk_buff *skb))
+{
+ struct ath10k *ar = ce_state->ar;
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ struct ath10k_snoc_pipe *pipe_info = &ar_snoc->pipe_info[ce_state->id];
+ struct sk_buff *skb;
+ struct sk_buff_head list;
+ void *transfer_context;
+ unsigned int nbytes, max_nbytes;
+
+ __skb_queue_head_init(&list);
+ while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
+ &nbytes) == 0) {
+ skb = transfer_context;
+ max_nbytes = skb->len + skb_tailroom(skb);
+ dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
+ max_nbytes, DMA_FROM_DEVICE);
+
+ if (unlikely(max_nbytes < nbytes)) {
+ ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)\n",
+ nbytes, max_nbytes);
+ dev_kfree_skb_any(skb);
+ continue;
+ }
+
+ skb_put(skb, nbytes);
+ __skb_queue_tail(&list, skb);
+ }
+
+ while ((skb = __skb_dequeue(&list))) {
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc rx ce pipe %d len %d\n",
+ ce_state->id, skb->len);
+
+ callback(ar, skb);
+ }
+
+ ath10k_snoc_rx_post_pipe(pipe_info);
+}
+
+static void ath10k_snoc_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
+{
+ ath10k_snoc_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
+}
+
+static void ath10k_snoc_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
+{
+ /* CE4 polling needs to be done whenever the CE pipe which transports
+ * HTT Rx (target->host) is processed.
+ */
+ ath10k_ce_per_engine_service(ce_state->ar, CE_POLL_PIPE);
+
+ ath10k_snoc_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
+}
+
+/* Called by the lower (CE) layer when data is received from the target.
+ * WCN3990 firmware uses a separate CE (CE11) to transfer pktlog data.
+ */
+static void ath10k_snoc_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state)
+{
+ ath10k_snoc_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
+}
+
+static void ath10k_snoc_htt_rx_deliver(struct ath10k *ar, struct sk_buff *skb)
+{
+ skb_pull(skb, sizeof(struct ath10k_htc_hdr));
+ ath10k_htt_t2h_msg_handler(ar, skb);
+}
+
+static void ath10k_snoc_htt_rx_cb(struct ath10k_ce_pipe *ce_state)
+{
+ ath10k_ce_per_engine_service(ce_state->ar, CE_POLL_PIPE);
+ ath10k_snoc_process_rx_cb(ce_state, ath10k_snoc_htt_rx_deliver);
+}
+
+static void ath10k_snoc_rx_replenish_retry(struct timer_list *t)
+{
+ struct ath10k_snoc *ar_snoc = timer_container_of(ar_snoc, t,
+ rx_post_retry);
+ struct ath10k *ar = ar_snoc->ar;
+
+ ath10k_snoc_rx_post(ar);
+}
+
+static void ath10k_snoc_htc_tx_cb(struct ath10k_ce_pipe *ce_state)
+{
+ struct ath10k *ar = ce_state->ar;
+ struct sk_buff_head list;
+ struct sk_buff *skb;
+
+ __skb_queue_head_init(&list);
+ while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
+ if (!skb)
+ continue;
+
+ __skb_queue_tail(&list, skb);
+ }
+
+ while ((skb = __skb_dequeue(&list)))
+ ath10k_htc_tx_completion_handler(ar, skb);
+}
+
+static void ath10k_snoc_htt_tx_cb(struct ath10k_ce_pipe *ce_state)
+{
+ struct ath10k *ar = ce_state->ar;
+ struct sk_buff *skb;
+
+ while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
+ if (!skb)
+ continue;
+
+ dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
+ skb->len, DMA_TO_DEVICE);
+ ath10k_htt_hif_tx_complete(ar, skb);
+ }
+}
+
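+/* Send a scatter-gather list on one pipe: all but the last item are
+ * queued with CE_SEND_FLAG_GATHER and the last item completes the
+ * transfer; on error every descriptor queued so far is reverted.
+ */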
+static int ath10k_snoc_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
+ struct ath10k_hif_sg_item *items, int n_items)
+{
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+ struct ath10k_snoc_pipe *snoc_pipe;
+ struct ath10k_ce_pipe *ce_pipe;
+ int err, i = 0;
+
+ snoc_pipe = &ar_snoc->pipe_info[pipe_id];
+ ce_pipe = snoc_pipe->ce_hdl;
+ spin_lock_bh(&ce->ce_lock);
+
+ for (i = 0; i < n_items - 1; i++) {
+ ath10k_dbg(ar, ATH10K_DBG_SNOC,
+ "snoc tx item %d paddr %pad len %d n_items %d\n",
+ i, &items[i].paddr, items[i].len, n_items);
+
+ err = ath10k_ce_send_nolock(ce_pipe,
+ items[i].transfer_context,
+ items[i].paddr,
+ items[i].len,
+ items[i].transfer_id,
+ CE_SEND_FLAG_GATHER);
+ if (err)
+ goto err;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_SNOC,
+ "snoc tx item %d paddr %pad len %d n_items %d\n",
+ i, &items[i].paddr, items[i].len, n_items);
+
+ err = ath10k_ce_send_nolock(ce_pipe,
+ items[i].transfer_context,
+ items[i].paddr,
+ items[i].len,
+ items[i].transfer_id,
+ 0);
+ if (err)
+ goto err;
+
+ spin_unlock_bh(&ce->ce_lock);
+
+ return 0;
+
+err:
+ for (; i > 0; i--)
+ __ath10k_ce_send_revert(ce_pipe);
+
+ spin_unlock_bh(&ce->ce_lock);
+ return err;
+}
+
+static int ath10k_snoc_hif_get_target_info(struct ath10k *ar,
+ struct bmi_target_info *target_info)
+{
+ target_info->version = ATH10K_HW_WCN3990;
+ target_info->type = ATH10K_HW_WCN3990;
+
+ return 0;
+}
+
+static u16 ath10k_snoc_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
+{
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "hif get free queue number\n");
+
+ return ath10k_ce_num_free_src_entries(ar_snoc->pipe_info[pipe].ce_hdl);
+}
+
+static void ath10k_snoc_hif_send_complete_check(struct ath10k *ar, u8 pipe,
+ int force)
+{
+ int resources;
+
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc hif send complete check\n");
+
+ if (!force) {
+ resources = ath10k_snoc_hif_get_free_queue_number(ar, pipe);
+
+ if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
+ return;
+ }
+ ath10k_ce_per_engine_service(ar, pipe);
+}
+
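+/* Resolve @service_id to its UL and DL pipe numbers via
+ * target_service_to_ce_map_wlan; both directions must be found.
+ */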
+static int ath10k_snoc_hif_map_service_to_pipe(struct ath10k *ar,
+ u16 service_id,
+ u8 *ul_pipe, u8 *dl_pipe)
+{
+ const struct ce_service_to_pipe *entry;
+ bool ul_set = false, dl_set = false;
+ int i;
+
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc hif map service\n");
+
+ for (i = 0; i < ARRAY_SIZE(target_service_to_ce_map_wlan); i++) {
+ entry = &target_service_to_ce_map_wlan[i];
+
+ if (__le32_to_cpu(entry->service_id) != service_id)
+ continue;
+
+ switch (__le32_to_cpu(entry->pipedir)) {
+ case PIPEDIR_NONE:
+ break;
+ case PIPEDIR_IN:
+ WARN_ON(dl_set);
+ *dl_pipe = __le32_to_cpu(entry->pipenum);
+ dl_set = true;
+ break;
+ case PIPEDIR_OUT:
+ WARN_ON(ul_set);
+ *ul_pipe = __le32_to_cpu(entry->pipenum);
+ ul_set = true;
+ break;
+ case PIPEDIR_INOUT:
+ WARN_ON(dl_set);
+ WARN_ON(ul_set);
+ *dl_pipe = __le32_to_cpu(entry->pipenum);
+ *ul_pipe = __le32_to_cpu(entry->pipenum);
+ dl_set = true;
+ ul_set = true;
+ break;
+ }
+ }
+
+ if (!ul_set || !dl_set)
+ return -ENOENT;
+
+ return 0;
+}
+
+static void ath10k_snoc_hif_get_default_pipe(struct ath10k *ar,
+ u8 *ul_pipe, u8 *dl_pipe)
+{
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc hif get default pipe\n");
+
+ (void)ath10k_snoc_hif_map_service_to_pipe(ar,
+ ATH10K_HTC_SVC_ID_RSVD_CTRL,
+ ul_pipe, dl_pipe);
+}
+
+static inline void ath10k_snoc_irq_disable(struct ath10k *ar)
+{
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ int id;
+
+ for (id = 0; id < CE_COUNT_MAX; id++)
+ disable_irq(ar_snoc->ce_irqs[id].irq_line);
+}
+
+static inline void ath10k_snoc_irq_enable(struct ath10k *ar)
+{
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ int id;
+
+ for (id = 0; id < CE_COUNT_MAX; id++)
+ enable_irq(ar_snoc->ce_irqs[id].irq_line);
+}
+
+static void ath10k_snoc_rx_pipe_cleanup(struct ath10k_snoc_pipe *snoc_pipe)
+{
+ struct ath10k_ce_pipe *ce_pipe;
+ struct ath10k_ce_ring *ce_ring;
+ struct sk_buff *skb;
+ struct ath10k *ar;
+ int i;
+
+ ar = snoc_pipe->hif_ce_state;
+ ce_pipe = snoc_pipe->ce_hdl;
+ ce_ring = ce_pipe->dest_ring;
+
+ if (!ce_ring)
+ return;
+
+ if (!snoc_pipe->buf_sz)
+ return;
+
+ for (i = 0; i < ce_ring->nentries; i++) {
+ skb = ce_ring->per_transfer_context[i];
+ if (!skb)
+ continue;
+
+ ce_ring->per_transfer_context[i] = NULL;
+
+ dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
+ }
+}
+
+static void ath10k_snoc_tx_pipe_cleanup(struct ath10k_snoc_pipe *snoc_pipe)
+{
+ struct ath10k_ce_pipe *ce_pipe;
+ struct ath10k_ce_ring *ce_ring;
+ struct sk_buff *skb;
+ struct ath10k *ar;
+ int i;
+
+ ar = snoc_pipe->hif_ce_state;
+ ce_pipe = snoc_pipe->ce_hdl;
+ ce_ring = ce_pipe->src_ring;
+
+ if (!ce_ring)
+ return;
+
+ if (!snoc_pipe->buf_sz)
+ return;
+
+ for (i = 0; i < ce_ring->nentries; i++) {
+ skb = ce_ring->per_transfer_context[i];
+ if (!skb)
+ continue;
+
+ ce_ring->per_transfer_context[i] = NULL;
+
+ ath10k_htc_tx_completion_handler(ar, skb);
+ }
+}
+
+static void ath10k_snoc_buffer_cleanup(struct ath10k *ar)
+{
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ struct ath10k_snoc_pipe *pipe_info;
+ int pipe_num;
+
+ timer_delete_sync(&ar_snoc->rx_post_retry);
+ for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
+ pipe_info = &ar_snoc->pipe_info[pipe_num];
+ ath10k_snoc_rx_pipe_cleanup(pipe_info);
+ ath10k_snoc_tx_pipe_cleanup(pipe_info);
+ }
+}
+
+static void ath10k_snoc_hif_stop(struct ath10k *ar)
+{
+ if (!test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags))
+ ath10k_snoc_irq_disable(ar);
+
+ ath10k_core_napi_sync_disable(ar);
+ ath10k_snoc_buffer_cleanup(ar);
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif stop\n");
+}
+
+static int ath10k_snoc_hif_start(struct ath10k *ar)
+{
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+
+ bitmap_clear(ar_snoc->pending_ce_irqs, 0, CE_COUNT_MAX);
+
+ netif_threaded_enable(ar->napi_dev);
+ ath10k_core_napi_enable(ar);
+ /* IRQs are left enabled when we restart due to a firmware crash */
+ if (!test_bit(ATH10K_SNOC_FLAG_RECOVERY, &ar_snoc->flags))
+ ath10k_snoc_irq_enable(ar);
+ ath10k_snoc_rx_post(ar);
+
+ clear_bit(ATH10K_SNOC_FLAG_RECOVERY, &ar_snoc->flags);
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n");
+
+ return 0;
+}
+
+static int ath10k_snoc_init_pipes(struct ath10k *ar)
+{
+ int i, ret;
+
+ for (i = 0; i < CE_COUNT; i++) {
+ ret = ath10k_ce_init_pipe(ar, i, &host_ce_config_wlan[i]);
+ if (ret) {
+ ath10k_err(ar, "failed to initialize copy engine pipe %d: %d\n",
+ i, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
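+/* Build the target CE pipe, service-to-pipe and shadow register
+ * configuration and hand it to the firmware over QMI along with the
+ * requested driver mode (mission or FTM).
+ */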
+static int ath10k_snoc_wlan_enable(struct ath10k *ar,
+ enum ath10k_firmware_mode fw_mode)
+{
+ struct ath10k_tgt_pipe_cfg tgt_cfg[CE_COUNT_MAX];
+ struct ath10k_qmi_wlan_enable_cfg cfg;
+ enum wlfw_driver_mode_enum_v01 mode;
+ int pipe_num;
+
+ for (pipe_num = 0; pipe_num < CE_COUNT_MAX; pipe_num++) {
+ tgt_cfg[pipe_num].pipe_num =
+ target_ce_config_wlan[pipe_num].pipenum;
+ tgt_cfg[pipe_num].pipe_dir =
+ target_ce_config_wlan[pipe_num].pipedir;
+ tgt_cfg[pipe_num].nentries =
+ target_ce_config_wlan[pipe_num].nentries;
+ tgt_cfg[pipe_num].nbytes_max =
+ target_ce_config_wlan[pipe_num].nbytes_max;
+ tgt_cfg[pipe_num].flags =
+ target_ce_config_wlan[pipe_num].flags;
+ tgt_cfg[pipe_num].reserved = 0;
+ }
+
+ cfg.num_ce_tgt_cfg = sizeof(target_ce_config_wlan) /
+ sizeof(struct ath10k_tgt_pipe_cfg);
+ cfg.ce_tgt_cfg = (struct ath10k_tgt_pipe_cfg *)
+ &tgt_cfg;
+ cfg.num_ce_svc_pipe_cfg = sizeof(target_service_to_ce_map_wlan) /
+ sizeof(struct ath10k_svc_pipe_cfg);
+ cfg.ce_svc_cfg = (struct ath10k_svc_pipe_cfg *)
+ &target_service_to_ce_map_wlan;
+ cfg.num_shadow_reg_cfg = ARRAY_SIZE(target_shadow_reg_cfg_map);
+ cfg.shadow_reg_cfg = (struct ath10k_shadow_reg_cfg *)
+ &target_shadow_reg_cfg_map;
+
+ switch (fw_mode) {
+ case ATH10K_FIRMWARE_MODE_NORMAL:
+ mode = QMI_WLFW_MISSION_V01;
+ break;
+ case ATH10K_FIRMWARE_MODE_UTF:
+ mode = QMI_WLFW_FTM_V01;
+ break;
+ default:
+ ath10k_err(ar, "invalid firmware mode %d\n", fw_mode);
+ return -EINVAL;
+ }
+
+ return ath10k_qmi_wlan_enable(ar, &cfg, mode,
+ NULL);
+}
+
+static int ath10k_hw_power_on(struct ath10k *ar)
+{
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ int ret;
+
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "soc power on\n");
+
+ ret = regulator_bulk_enable(ar_snoc->num_vregs, ar_snoc->vregs);
+ if (ret)
+ return ret;
+
+ ret = clk_bulk_prepare_enable(ar_snoc->num_clks, ar_snoc->clks);
+ if (ret)
+ goto vreg_off;
+
+ return ret;
+
+vreg_off:
+ regulator_bulk_disable(ar_snoc->num_vregs, ar_snoc->vregs);
+ return ret;
+}
+
+static int ath10k_hw_power_off(struct ath10k *ar)
+{
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "soc power off\n");
+
+ clk_bulk_disable_unprepare(ar_snoc->num_clks, ar_snoc->clks);
+
+ return regulator_bulk_disable(ar_snoc->num_vregs, ar_snoc->vregs);
+}
+
+static void ath10k_snoc_wlan_disable(struct ath10k *ar)
+{
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+
+ /* If ATH10K_FLAG_CRASH_FLUSH and ATH10K_SNOC_FLAG_RECOVERY are
+ * not both set, the driver has restarted due to a crash injected
+ * via debugfs. In this case the driver needs to restart the
+ * firmware, so send the qmi wlan disable during the driver
+ * restart sequence.
+ */
+ if (!test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags) ||
+ !test_bit(ATH10K_SNOC_FLAG_RECOVERY, &ar_snoc->flags))
+ ath10k_qmi_wlan_disable(ar);
+}
+
+static void ath10k_snoc_hif_power_down(struct ath10k *ar)
+{
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power down\n");
+
+ ath10k_snoc_wlan_disable(ar);
+ ath10k_ce_free_rri(ar);
+ ath10k_hw_power_off(ar);
+}
+
+static int ath10k_snoc_hif_power_up(struct ath10k *ar,
+ enum ath10k_firmware_mode fw_mode)
+{
+ int ret;
+
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "%s: WCN3990 driver state = %d\n",
+ __func__, ar->state);
+
+ ret = ath10k_hw_power_on(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to power on device: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath10k_snoc_wlan_enable(ar, fw_mode);
+ if (ret) {
+ ath10k_err(ar, "failed to enable wcn3990: %d\n", ret);
+ goto err_hw_power_off;
+ }
+
+ ath10k_ce_alloc_rri(ar);
+
+ ret = ath10k_snoc_init_pipes(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to initialize CE: %d\n", ret);
+ goto err_free_rri;
+ }
+
+ ath10k_ce_enable_interrupts(ar);
+
+ return 0;
+
+err_free_rri:
+ ath10k_ce_free_rri(ar);
+ ath10k_snoc_wlan_disable(ar);
+
+err_hw_power_off:
+ ath10k_hw_power_off(ar);
+
+ return ret;
+}
+
+static int ath10k_snoc_hif_set_target_log_mode(struct ath10k *ar,
+ u8 fw_log_mode)
+{
+ u8 fw_dbg_mode;
+
+ if (fw_log_mode)
+ fw_dbg_mode = ATH10K_ENABLE_FW_LOG_CE;
+ else
+ fw_dbg_mode = ATH10K_ENABLE_FW_LOG_DIAG;
+
+ return ath10k_qmi_set_fw_log_mode(ar, fw_dbg_mode);
+}
+
+#ifdef CONFIG_PM
+static int ath10k_snoc_hif_suspend(struct ath10k *ar)
+{
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ int ret;
+
+ if (!device_may_wakeup(ar->dev))
+ return -EPERM;
+
+ ret = enable_irq_wake(ar_snoc->ce_irqs[ATH10K_SNOC_WAKE_IRQ].irq_line);
+ if (ret) {
+ ath10k_err(ar, "failed to enable wakeup irq :%d\n", ret);
+ return ret;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc device suspended\n");
+
+ return ret;
+}
+
+static int ath10k_snoc_hif_resume(struct ath10k *ar)
+{
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ int ret;
+
+ if (!device_may_wakeup(ar->dev))
+ return -EPERM;
+
+ ret = disable_irq_wake(ar_snoc->ce_irqs[ATH10K_SNOC_WAKE_IRQ].irq_line);
+ if (ret) {
+ ath10k_err(ar, "failed to disable wakeup irq: %d\n", ret);
+ return ret;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc device resumed\n");
+
+ return ret;
+}
+#endif
+
+static const struct ath10k_hif_ops ath10k_snoc_hif_ops = {
+ .read32 = ath10k_snoc_read32,
+ .write32 = ath10k_snoc_write32,
+ .start = ath10k_snoc_hif_start,
+ .stop = ath10k_snoc_hif_stop,
+ .map_service_to_pipe = ath10k_snoc_hif_map_service_to_pipe,
+ .get_default_pipe = ath10k_snoc_hif_get_default_pipe,
+ .power_up = ath10k_snoc_hif_power_up,
+ .power_down = ath10k_snoc_hif_power_down,
+ .tx_sg = ath10k_snoc_hif_tx_sg,
+ .send_complete_check = ath10k_snoc_hif_send_complete_check,
+ .get_free_queue_number = ath10k_snoc_hif_get_free_queue_number,
+ .get_target_info = ath10k_snoc_hif_get_target_info,
+ .set_target_log_mode = ath10k_snoc_hif_set_target_log_mode,
+
+#ifdef CONFIG_PM
+ .suspend = ath10k_snoc_hif_suspend,
+ .resume = ath10k_snoc_hif_resume,
+#endif
+};
+
+static const struct ath10k_bus_ops ath10k_snoc_bus_ops = {
+ .read32 = ath10k_snoc_read32,
+ .write32 = ath10k_snoc_write32,
+};
+
+static int ath10k_snoc_get_ce_id_from_irq(struct ath10k *ar, int irq)
+{
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ int i;
+
+ for (i = 0; i < CE_COUNT_MAX; i++) {
+ if (ar_snoc->ce_irqs[i].irq_line == irq)
+ return i;
+ }
+ ath10k_err(ar, "No matching CE id for irq %d\n", irq);
+
+ return -EINVAL;
+}
+
+static irqreturn_t ath10k_snoc_per_engine_handler(int irq, void *arg)
+{
+ struct ath10k *ar = arg;
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ int ce_id = ath10k_snoc_get_ce_id_from_irq(ar, irq);
+
+ if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_snoc->pipe_info)) {
+ ath10k_warn(ar, "unexpected/invalid irq %d ce_id %d\n", irq,
+ ce_id);
+ return IRQ_HANDLED;
+ }
+
+ ath10k_ce_disable_interrupt(ar, ce_id);
+ set_bit(ce_id, ar_snoc->pending_ce_irqs);
+
+ napi_schedule(&ar->napi);
+
+ return IRQ_HANDLED;
+}
+
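+/* NAPI poll: service the copy engines whose interrupts were deferred
+ * by the per-engine handler, re-enable those interrupts and complete
+ * pending HTT tx/rx work within @budget.
+ */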
+static int ath10k_snoc_napi_poll(struct napi_struct *ctx, int budget)
+{
+ struct ath10k *ar = container_of(ctx, struct ath10k, napi);
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ int done = 0;
+ int ce_id;
+
+ if (test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags)) {
+ napi_complete(ctx);
+ return done;
+ }
+
+ for (ce_id = 0; ce_id < CE_COUNT; ce_id++)
+ if (test_and_clear_bit(ce_id, ar_snoc->pending_ce_irqs)) {
+ ath10k_ce_per_engine_service(ar, ce_id);
+ ath10k_ce_enable_interrupt(ar, ce_id);
+ }
+
+ done = ath10k_htt_txrx_compl_task(ar, budget);
+
+ if (done < budget)
+ napi_complete(ctx);
+
+ return done;
+}
+
+static void ath10k_snoc_init_napi(struct ath10k *ar)
+{
+ netif_napi_add(ar->napi_dev, &ar->napi, ath10k_snoc_napi_poll);
+}
+
+static int ath10k_snoc_request_irq(struct ath10k *ar)
+{
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ int ret, id;
+
+ for (id = 0; id < CE_COUNT_MAX; id++) {
+ ret = request_irq(ar_snoc->ce_irqs[id].irq_line,
+ ath10k_snoc_per_engine_handler,
+ IRQF_NO_AUTOEN, ce_name[id], ar);
+ if (ret) {
+ ath10k_err(ar,
+ "failed to register IRQ handler for CE %d: %d\n",
+ id, ret);
+ goto err_irq;
+ }
+ }
+
+ return 0;
+
+err_irq:
+ for (id -= 1; id >= 0; id--)
+ free_irq(ar_snoc->ce_irqs[id].irq_line, ar);
+
+ return ret;
+}
+
+static void ath10k_snoc_free_irq(struct ath10k *ar)
+{
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ int id;
+
+ for (id = 0; id < CE_COUNT_MAX; id++)
+ free_irq(ar_snoc->ce_irqs[id].irq_line, ar);
+}
+
+static int ath10k_snoc_resource_init(struct ath10k *ar)
+{
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ struct platform_device *pdev;
+ struct resource *res;
+ int i, ret = 0;
+
+ pdev = ar_snoc->dev;
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "membase");
+ if (!res) {
+ ath10k_err(ar, "Memory base not found in DT\n");
+ return -EINVAL;
+ }
+
+ ar_snoc->mem_pa = res->start;
+ ar_snoc->mem = devm_ioremap(&pdev->dev, ar_snoc->mem_pa,
+ resource_size(res));
+ if (!ar_snoc->mem) {
+ ath10k_err(ar, "Memory base ioremap failed with physical address %pa\n",
+ &ar_snoc->mem_pa);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < CE_COUNT; i++) {
+ ret = platform_get_irq(ar_snoc->dev, i);
+ if (ret < 0)
+ return ret;
+ ar_snoc->ce_irqs[i].irq_line = ret;
+ }
+
+ ret = device_property_read_u32(&pdev->dev, "qcom,xo-cal-data",
+ &ar_snoc->xo_cal_data);
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc xo-cal-data return %d\n", ret);
+ if (ret == 0) {
+ ar_snoc->xo_cal_supported = true;
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "xo cal data %x\n",
+ ar_snoc->xo_cal_data);
+ }
+
+ return 0;
+}
+
+static void ath10k_snoc_quirks_init(struct ath10k *ar)
+{
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ struct device *dev = &ar_snoc->dev->dev;
+
+ /* ignore errors, keep NULL if there is no property */
+ of_property_read_string(dev->of_node, "firmware-name", &ar->board_name);
+
+ if (of_property_read_bool(dev->of_node, "qcom,snoc-host-cap-8bit-quirk"))
+ set_bit(ATH10K_SNOC_FLAG_8BIT_HOST_CAP_QUIRK, &ar_snoc->flags);
+}
+
+int ath10k_snoc_fw_indication(struct ath10k *ar, u64 type)
+{
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ struct ath10k_bus_params bus_params = {};
+ int ret;
+
+ if (test_bit(ATH10K_SNOC_FLAG_UNREGISTERING, &ar_snoc->flags))
+ return 0;
+
+ switch (type) {
+ case ATH10K_QMI_EVENT_FW_READY_IND:
+ if (test_bit(ATH10K_SNOC_FLAG_REGISTERED, &ar_snoc->flags)) {
+ ath10k_core_start_recovery(ar);
+ break;
+ }
+
+ bus_params.dev_type = ATH10K_DEV_TYPE_LL;
+ bus_params.chip_id = ar_snoc->target_info.soc_version;
+ ret = ath10k_core_register(ar, &bus_params);
+ if (ret) {
+ ath10k_err(ar, "Failed to register driver core: %d\n",
+ ret);
+ return ret;
+ }
+ set_bit(ATH10K_SNOC_FLAG_REGISTERED, &ar_snoc->flags);
+ break;
+ case ATH10K_QMI_EVENT_FW_DOWN_IND:
+ set_bit(ATH10K_SNOC_FLAG_RECOVERY, &ar_snoc->flags);
+ set_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags);
+ break;
+ default:
+ ath10k_err(ar, "invalid fw indication: %llx\n", type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ath10k_snoc_setup_resource(struct ath10k *ar)
+{
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+ struct ath10k_snoc_pipe *pipe;
+ int i, ret;
+
+ timer_setup(&ar_snoc->rx_post_retry, ath10k_snoc_rx_replenish_retry, 0);
+ spin_lock_init(&ce->ce_lock);
+ for (i = 0; i < CE_COUNT; i++) {
+ pipe = &ar_snoc->pipe_info[i];
+ pipe->ce_hdl = &ce->ce_states[i];
+ pipe->pipe_num = i;
+ pipe->hif_ce_state = ar;
+
+ ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i]);
+ if (ret) {
+ ath10k_err(ar, "failed to allocate copy engine pipe %d: %d\n",
+ i, ret);
+ return ret;
+ }
+
+ pipe->buf_sz = host_ce_config_wlan[i].src_sz_max;
+ }
+ ath10k_snoc_init_napi(ar);
+
+ return 0;
+}
+
+static void ath10k_snoc_release_resource(struct ath10k *ar)
+{
+ int i;
+
+ netif_napi_del(&ar->napi);
+ for (i = 0; i < CE_COUNT; i++)
+ ath10k_ce_free_pipe(ar, i);
+}
+
+static void ath10k_msa_dump_memory(struct ath10k *ar,
+ struct ath10k_fw_crash_data *crash_data)
+{
+ const struct ath10k_hw_mem_layout *mem_layout;
+ const struct ath10k_mem_region *current_region;
+ struct ath10k_dump_ram_data_hdr *hdr;
+ size_t buf_len;
+ u8 *buf;
+
+ if (!crash_data || !crash_data->ramdump_buf)
+ return;
+
+ mem_layout = ath10k_coredump_get_mem_layout(ar);
+ if (!mem_layout)
+ return;
+
+ current_region = &mem_layout->region_table.regions[0];
+
+ buf = crash_data->ramdump_buf;
+ buf_len = crash_data->ramdump_buf_len;
+ memset(buf, 0, buf_len);
+
+ /* Reserve space for the header. */
+ hdr = (void *)buf;
+ buf += sizeof(*hdr);
+ buf_len -= sizeof(*hdr);
+
+ hdr->region_type = cpu_to_le32(current_region->type);
+ hdr->start = cpu_to_le32((unsigned long)ar->msa.vaddr);
+ hdr->length = cpu_to_le32(ar->msa.mem_size);
+
+ if (current_region->len < ar->msa.mem_size) {
+ memcpy(buf, ar->msa.vaddr, current_region->len);
+ ath10k_warn(ar, "msa dump length is less than msa size %x, %x\n",
+ current_region->len, ar->msa.mem_size);
+ } else {
+ memcpy(buf, ar->msa.vaddr, ar->msa.mem_size);
+ }
+}
+
+void ath10k_snoc_fw_crashed_dump(struct ath10k *ar)
+{
+ struct ath10k_fw_crash_data *crash_data;
+ char guid[UUID_STRING_LEN + 1];
+
+ mutex_lock(&ar->dump_mutex);
+
+ spin_lock_bh(&ar->data_lock);
+ ar->stats.fw_crash_counter++;
+ spin_unlock_bh(&ar->data_lock);
+
+ crash_data = ath10k_coredump_new(ar);
+
+ if (crash_data)
+ scnprintf(guid, sizeof(guid), "%pUl", &crash_data->guid);
+ else
+ scnprintf(guid, sizeof(guid), "n/a");
+
+ ath10k_err(ar, "firmware crashed! (guid %s)\n", guid);
+ ath10k_print_driver_info(ar);
+ ath10k_msa_dump_memory(ar, crash_data);
+ mutex_unlock(&ar->dump_mutex);
+}
+
+static int ath10k_snoc_modem_notify(struct notifier_block *nb, unsigned long action,
+ void *data)
+{
+ struct ath10k_snoc *ar_snoc = container_of(nb, struct ath10k_snoc, nb);
+ struct ath10k *ar = ar_snoc->ar;
+ struct qcom_ssr_notify_data *notify_data = data;
+
+ switch (action) {
+ case QCOM_SSR_BEFORE_POWERUP:
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "received modem starting event\n");
+ clear_bit(ATH10K_SNOC_FLAG_MODEM_STOPPED, &ar_snoc->flags);
+ break;
+
+ case QCOM_SSR_AFTER_POWERUP:
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "received modem running event\n");
+ break;
+
+ case QCOM_SSR_BEFORE_SHUTDOWN:
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "received modem %s event\n",
+ notify_data->crashed ? "crashed" : "stopping");
+ if (!notify_data->crashed)
+ set_bit(ATH10K_SNOC_FLAG_MODEM_STOPPED, &ar_snoc->flags);
+ else
+ clear_bit(ATH10K_SNOC_FLAG_MODEM_STOPPED, &ar_snoc->flags);
+ break;
+
+ case QCOM_SSR_AFTER_SHUTDOWN:
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "received modem offline event\n");
+ break;
+
+ default:
+ ath10k_err(ar, "received unrecognized event %lu\n", action);
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static int ath10k_modem_init(struct ath10k *ar)
+{
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ void *notifier;
+ int ret;
+
+ ar_snoc->nb.notifier_call = ath10k_snoc_modem_notify;
+
+ notifier = qcom_register_ssr_notifier("mpss", &ar_snoc->nb);
+ if (IS_ERR(notifier)) {
+ ret = PTR_ERR(notifier);
+ ath10k_err(ar, "failed to initialize modem notifier: %d\n", ret);
+ return ret;
+ }
+
+ ar_snoc->notifier = notifier;
+
+ return 0;
+}
+
+static void ath10k_modem_deinit(struct ath10k *ar)
+{
+ int ret;
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+
+ ret = qcom_unregister_ssr_notifier(ar_snoc->notifier, &ar_snoc->nb);
+ if (ret)
+ ath10k_err(ar, "error %d unregistering notifier\n", ret);
+}
+
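+/* Set up the MSA firmware memory region: prefer a reserved-memory
+ * region described in DT, falling back to a coherent DMA allocation
+ * of @msa_size bytes.
+ */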
+static int ath10k_setup_msa_resources(struct ath10k *ar, u32 msa_size)
+{
+ struct device *dev = ar->dev;
+ struct resource r;
+ int ret;
+
+ ret = of_reserved_mem_region_to_resource(dev->of_node, 0, &r);
+ if (!ret) {
+ ar->msa.paddr = r.start;
+ ar->msa.mem_size = resource_size(&r);
+ ar->msa.vaddr = devm_memremap(dev, ar->msa.paddr,
+ ar->msa.mem_size,
+ MEMREMAP_WT);
+ if (IS_ERR(ar->msa.vaddr)) {
+ dev_err(dev, "failed to map memory region: %pa\n",
+ &r.start);
+ return PTR_ERR(ar->msa.vaddr);
+ }
+ } else {
+ ar->msa.vaddr = dmam_alloc_coherent(dev, msa_size,
+ &ar->msa.paddr,
+ GFP_KERNEL);
+ if (!ar->msa.vaddr) {
+ ath10k_err(ar, "failed to allocate dma memory for msa region\n");
+ return -ENOMEM;
+ }
+ ar->msa.mem_size = msa_size;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi msa.paddr: %pad , msa.vaddr: 0x%p\n",
+ &ar->msa.paddr,
+ ar->msa.vaddr);
+
+ return 0;
+}
+
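+/* If DT provides a "wifi-firmware" child node, register a dedicated
+ * platform device for the firmware, allocate it an IOMMU paging
+ * domain and map the MSA region into it; otherwise the firmware is
+ * assumed to be managed by TrustZone (use_tz).
+ */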
+static int ath10k_fw_init(struct ath10k *ar)
+{
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ struct device *host_dev = &ar_snoc->dev->dev;
+ struct platform_device_info info;
+ struct iommu_domain *iommu_dom;
+ struct platform_device *pdev;
+ struct device_node *node;
+ int ret;
+
+ node = of_get_child_by_name(host_dev->of_node, "wifi-firmware");
+ if (!node) {
+ ar_snoc->use_tz = true;
+ return 0;
+ }
+
+ memset(&info, 0, sizeof(info));
+ info.fwnode = &node->fwnode;
+ info.parent = host_dev;
+ info.name = node->name;
+ info.dma_mask = DMA_BIT_MASK(32);
+
+ pdev = platform_device_register_full(&info);
+ if (IS_ERR(pdev)) {
+ of_node_put(node);
+ return PTR_ERR(pdev);
+ }
+
+ pdev->dev.of_node = node;
+
+ ret = of_dma_configure(&pdev->dev, node, true);
+ if (ret) {
+ ath10k_err(ar, "dma configure fail: %d\n", ret);
+ goto err_unregister;
+ }
+
+ ar_snoc->fw.dev = &pdev->dev;
+
+ iommu_dom = iommu_paging_domain_alloc(ar_snoc->fw.dev);
+ if (IS_ERR(iommu_dom)) {
+ ath10k_err(ar, "failed to allocate iommu domain\n");
+ ret = PTR_ERR(iommu_dom);
+ goto err_unregister;
+ }
+
+ ret = iommu_attach_device(iommu_dom, ar_snoc->fw.dev);
+ if (ret) {
+ ath10k_err(ar, "could not attach device: %d\n", ret);
+ goto err_iommu_free;
+ }
+
+ ar_snoc->fw.iommu_domain = iommu_dom;
+ ar_snoc->fw.fw_start_addr = ar->msa.paddr;
+
+ ret = iommu_map(iommu_dom, ar_snoc->fw.fw_start_addr,
+ ar->msa.paddr, ar->msa.mem_size,
+ IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);
+ if (ret) {
+ ath10k_err(ar, "failed to map firmware region: %d\n", ret);
+ goto err_iommu_detach;
+ }
+
+ of_node_put(node);
+
+ return 0;
+
+err_iommu_detach:
+ iommu_detach_device(iommu_dom, ar_snoc->fw.dev);
+
+err_iommu_free:
+ iommu_domain_free(iommu_dom);
+
+err_unregister:
+ platform_device_unregister(pdev);
+ of_node_put(node);
+
+ return ret;
+}
+
+static int ath10k_fw_deinit(struct ath10k *ar)
+{
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ const size_t mapped_size = ar_snoc->fw.mapped_mem_size;
+ struct iommu_domain *iommu;
+ size_t unmapped_size;
+
+ if (ar_snoc->use_tz)
+ return 0;
+
+ iommu = ar_snoc->fw.iommu_domain;
+
+ unmapped_size = iommu_unmap(iommu, ar_snoc->fw.fw_start_addr,
+ mapped_size);
+ if (unmapped_size != mapped_size)
+ ath10k_err(ar, "failed to unmap firmware: %zu\n",
+ unmapped_size);
+
+ iommu_detach_device(iommu, ar_snoc->fw.dev);
+ iommu_domain_free(iommu);
+
+ platform_device_unregister(to_platform_device(ar_snoc->fw.dev));
+
+ return 0;
+}
+
+static const struct of_device_id ath10k_snoc_dt_match[] = {
+ { .compatible = "qcom,wcn3990-wifi",
+ .data = &drv_priv,
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ath10k_snoc_dt_match);
+
+static int ath10k_snoc_probe(struct platform_device *pdev)
+{
+ const struct ath10k_snoc_drv_priv *drv_data;
+ struct ath10k_snoc *ar_snoc;
+ struct device *dev;
+ struct ath10k *ar;
+ u32 msa_size;
+ int ret;
+ u32 i;
+
+ dev = &pdev->dev;
+ drv_data = device_get_match_data(dev);
+ if (!drv_data) {
+ dev_err(dev, "failed to find matching device tree id\n");
+ return -EINVAL;
+ }
+
+ ret = dma_set_mask_and_coherent(dev, drv_data->dma_mask);
+ if (ret) {
+ dev_err(dev, "failed to set dma mask: %d\n", ret);
+ return ret;
+ }
+
+ ar = ath10k_core_create(sizeof(*ar_snoc), dev, ATH10K_BUS_SNOC,
+ drv_data->hw_rev, &ath10k_snoc_hif_ops);
+ if (!ar) {
+ dev_err(dev, "failed to allocate core\n");
+ return -ENOMEM;
+ }
+
+ ar_snoc = ath10k_snoc_priv(ar);
+ ar_snoc->dev = pdev;
+ platform_set_drvdata(pdev, ar);
+ ar_snoc->ar = ar;
+ ar_snoc->ce.bus_ops = &ath10k_snoc_bus_ops;
+ ar->ce_priv = &ar_snoc->ce;
+ msa_size = drv_data->msa_size;
+
+ ath10k_snoc_quirks_init(ar);
+
+ ret = ath10k_snoc_resource_init(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to initialize resource: %d\n", ret);
+ goto err_core_destroy;
+ }
+
+ ret = ath10k_snoc_setup_resource(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to setup resource: %d\n", ret);
+ goto err_core_destroy;
+ }
+ ret = ath10k_snoc_request_irq(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to request irqs: %d\n", ret);
+ goto err_release_resource;
+ }
+
+ ar_snoc->num_vregs = ARRAY_SIZE(ath10k_regulators);
+ ar_snoc->vregs = devm_kcalloc(&pdev->dev, ar_snoc->num_vregs,
+ sizeof(*ar_snoc->vregs), GFP_KERNEL);
+ if (!ar_snoc->vregs) {
+ ret = -ENOMEM;
+ goto err_free_irq;
+ }
+ for (i = 0; i < ar_snoc->num_vregs; i++)
+ ar_snoc->vregs[i].supply = ath10k_regulators[i];
+
+ ret = devm_regulator_bulk_get(&pdev->dev, ar_snoc->num_vregs,
+ ar_snoc->vregs);
+ if (ret < 0)
+ goto err_free_irq;
+
+ ar_snoc->num_clks = ARRAY_SIZE(ath10k_clocks);
+ ar_snoc->clks = devm_kcalloc(&pdev->dev, ar_snoc->num_clks,
+ sizeof(*ar_snoc->clks), GFP_KERNEL);
+ if (!ar_snoc->clks) {
+ ret = -ENOMEM;
+ goto err_free_irq;
+ }
+
+ for (i = 0; i < ar_snoc->num_clks; i++)
+ ar_snoc->clks[i].id = ath10k_clocks[i];
+
+ ret = devm_clk_bulk_get_optional(&pdev->dev, ar_snoc->num_clks,
+ ar_snoc->clks);
+ if (ret)
+ goto err_free_irq;
+
+ ret = ath10k_setup_msa_resources(ar, msa_size);
+ if (ret) {
+ ath10k_warn(ar, "failed to setup msa resources: %d\n", ret);
+ goto err_free_irq;
+ }
+
+ ret = ath10k_fw_init(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to initialize firmware: %d\n", ret);
+ goto err_free_irq;
+ }
+
+ ret = ath10k_qmi_init(ar, msa_size);
+ if (ret) {
+ ath10k_warn(ar, "failed to register wlfw qmi client: %d\n", ret);
+ goto err_fw_deinit;
+ }
+
+ ret = ath10k_modem_init(ar);
+ if (ret)
+ goto err_qmi_deinit;
+
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc probe\n");
+
+ return 0;
+
+err_qmi_deinit:
+ ath10k_qmi_deinit(ar);
+
+err_fw_deinit:
+ ath10k_fw_deinit(ar);
+
+err_free_irq:
+ ath10k_snoc_free_irq(ar);
+
+err_release_resource:
+ ath10k_snoc_release_resource(ar);
+
+err_core_destroy:
+ ath10k_core_destroy(ar);
+
+ return ret;
+}
+
+static int ath10k_snoc_free_resources(struct ath10k *ar)
+{
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc free resources\n");
+
+ set_bit(ATH10K_SNOC_FLAG_UNREGISTERING, &ar_snoc->flags);
+
+ ath10k_core_unregister(ar);
+ ath10k_fw_deinit(ar);
+ ath10k_snoc_free_irq(ar);
+ ath10k_snoc_release_resource(ar);
+ ath10k_modem_deinit(ar);
+ ath10k_qmi_deinit(ar);
+ ath10k_core_destroy(ar);
+
+ return 0;
+}
+
+static void ath10k_snoc_remove(struct platform_device *pdev)
+{
+ struct ath10k *ar = platform_get_drvdata(pdev);
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc remove\n");
+
+ reinit_completion(&ar->driver_recovery);
+
+ if (test_bit(ATH10K_SNOC_FLAG_RECOVERY, &ar_snoc->flags))
+ wait_for_completion_timeout(&ar->driver_recovery, 3 * HZ);
+
+ ath10k_snoc_free_resources(ar);
+}
+
+static void ath10k_snoc_shutdown(struct platform_device *pdev)
+{
+ struct ath10k *ar = platform_get_drvdata(pdev);
+
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc shutdown\n");
+ ath10k_snoc_free_resources(ar);
+}
+
+static struct platform_driver ath10k_snoc_driver = {
+ .probe = ath10k_snoc_probe,
+ .remove = ath10k_snoc_remove,
+ .shutdown = ath10k_snoc_shutdown,
+ .driver = {
+ .name = "ath10k_snoc",
+ .of_match_table = ath10k_snoc_dt_match,
+ },
+};
+module_platform_driver(ath10k_snoc_driver);
+
+MODULE_AUTHOR("Qualcomm");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("Driver support for Atheros WCN3990 SNOC devices");
diff --git a/drivers/net/wireless/ath/ath10k/snoc.h b/drivers/net/wireless/ath/ath10k/snoc.h
new file mode 100644
index 000000000000..d4bce1707696
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/snoc.h
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: ISC */
+/*
+ * Copyright (c) 2018 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _SNOC_H_
+#define _SNOC_H_
+
+#include <linux/notifier.h>
+
+#include "hw.h"
+#include "ce.h"
+#include "qmi.h"
+
+struct ath10k_snoc_drv_priv {
+ enum ath10k_hw_rev hw_rev;
+ u64 dma_mask;
+ u32 msa_size;
+};
+
+struct snoc_state {
+ u32 pipe_cfg_addr;
+ u32 svc_to_pipe_map;
+};
+
+struct ath10k_snoc_pipe {
+ struct ath10k_ce_pipe *ce_hdl;
+ u8 pipe_num;
+ struct ath10k *hif_ce_state;
+ size_t buf_sz;
+ /* protect ce info */
+ spinlock_t pipe_lock;
+ struct ath10k_snoc *ar_snoc;
+};
+
+struct ath10k_snoc_target_info {
+ u32 target_version;
+ u32 target_type;
+ u32 target_revision;
+ u32 soc_version;
+};
+
+struct ath10k_snoc_ce_irq {
+ u32 irq_line;
+};
+
+enum ath10k_snoc_flags {
+ ATH10K_SNOC_FLAG_REGISTERED,
+ ATH10K_SNOC_FLAG_UNREGISTERING,
+ ATH10K_SNOC_FLAG_MODEM_STOPPED,
+ ATH10K_SNOC_FLAG_RECOVERY,
+ ATH10K_SNOC_FLAG_8BIT_HOST_CAP_QUIRK,
+};
+
+struct clk_bulk_data;
+struct regulator_bulk_data;
+
+struct ath10k_snoc {
+ struct platform_device *dev;
+ struct ath10k *ar;
+ unsigned int use_tz;
+ struct ath10k_firmware {
+ struct device *dev;
+ dma_addr_t fw_start_addr;
+ struct iommu_domain *iommu_domain;
+ size_t mapped_mem_size;
+ } fw;
+ void __iomem *mem;
+ dma_addr_t mem_pa;
+ struct ath10k_snoc_target_info target_info;
+ size_t mem_len;
+ struct ath10k_snoc_pipe pipe_info[CE_COUNT_MAX];
+ struct ath10k_snoc_ce_irq ce_irqs[CE_COUNT_MAX];
+ struct ath10k_ce ce;
+ struct timer_list rx_post_retry;
+ struct regulator_bulk_data *vregs;
+ size_t num_vregs;
+ struct clk_bulk_data *clks;
+ size_t num_clks;
+ struct ath10k_qmi *qmi;
+ struct notifier_block nb;
+ void *notifier;
+ unsigned long flags;
+ bool xo_cal_supported;
+ u32 xo_cal_data;
+ DECLARE_BITMAP(pending_ce_irqs, CE_COUNT_MAX);
+};
+
+static inline struct ath10k_snoc *ath10k_snoc_priv(struct ath10k *ar)
+{
+ return (struct ath10k_snoc *)ar->drv_priv;
+}
+
+int ath10k_snoc_fw_indication(struct ath10k *ar, u64 type);
+void ath10k_snoc_fw_crashed_dump(struct ath10k *ar);
+
+#endif /* _SNOC_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/spectral.c b/drivers/net/wireless/ath/ath10k/spectral.c
new file mode 100644
index 000000000000..2240994390ed
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/spectral.c
@@ -0,0 +1,550 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (c) 2013-2017 Qualcomm Atheros, Inc.
+ */
+
+#include <linux/relay.h>
+#include "core.h"
+#include "debug.h"
+#include "wmi-ops.h"
+
+static void send_fft_sample(struct ath10k *ar,
+ const struct fft_sample_tlv *fft_sample_tlv)
+{
+ int length;
+
+ if (!ar->spectral.rfs_chan_spec_scan)
+ return;
+
+ length = __be16_to_cpu(fft_sample_tlv->length) +
+ sizeof(*fft_sample_tlv);
+ relay_write(ar->spectral.rfs_chan_spec_scan, fft_sample_tlv, length);
+}
+
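+/* Recover the max_exp scaling factor by finding the exponent for
+ * which max_magnitude >> max_exp matches the stored bin value at the
+ * peak index; newer hardware does not report it directly.
+ */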
+static uint8_t get_max_exp(s8 max_index, u16 max_magnitude, size_t bin_len,
+ u8 *data)
+{
+ int dc_pos;
+ u8 max_exp;
+
+ dc_pos = bin_len / 2;
+
+ /* peak index outside of bins */
+ if (dc_pos < max_index || -dc_pos >= max_index)
+ return 0;
+
+ for (max_exp = 0; max_exp < 8; max_exp++) {
+ if (data[dc_pos + max_index] == (max_magnitude >> max_exp))
+ break;
+ }
+
+ /* max_exp not found */
+ if (data[dc_pos + max_index] != (max_magnitude >> max_exp))
+ return 0;
+
+ return max_exp;
+}
+
+static inline size_t ath10k_spectral_fix_bin_size(struct ath10k *ar,
+ size_t bin_len)
+{
+ /* Some chipsets report the bin size as 2^n bytes + 'm' bytes in
+ * report mode 2. The first 2^n bytes carry inband tones and the
+ * last 'm' bytes carry band edge detection data mainly used for
+ * radar detection. Strip the last 'm' bytes to make the bin size
+ * valid. 'm' can take the possible values of 4 or 12.
+ */
+ if (!is_power_of_2(bin_len))
+ bin_len -= ar->hw_params.spectral_bin_discard;
+
+ return bin_len;
+}
+
+int ath10k_spectral_process_fft(struct ath10k *ar,
+ struct wmi_phyerr_ev_arg *phyerr,
+ const struct phyerr_fft_report *fftr,
+ size_t bin_len, u64 tsf)
+{
+ struct fft_sample_ath10k *fft_sample;
+ u8 buf[sizeof(*fft_sample) + SPECTRAL_ATH10K_MAX_NUM_BINS];
+ u16 freq1, freq2, total_gain_db, base_pwr_db, length, peak_mag;
+ u32 reg0, reg1;
+ u8 chain_idx, *bins;
+ int dc_pos;
+
+ fft_sample = (struct fft_sample_ath10k *)&buf;
+
+ bin_len = ath10k_spectral_fix_bin_size(ar, bin_len);
+
+ if (bin_len < 64 || bin_len > SPECTRAL_ATH10K_MAX_NUM_BINS)
+ return -EINVAL;
+
+ reg0 = __le32_to_cpu(fftr->reg0);
+ reg1 = __le32_to_cpu(fftr->reg1);
+
+ length = sizeof(*fft_sample) - sizeof(struct fft_sample_tlv) + bin_len;
+ fft_sample->tlv.type = ATH_FFT_SAMPLE_ATH10K;
+ fft_sample->tlv.length = __cpu_to_be16(length);
+
+ /* TODO: there might be a reason why the hardware reports 20/40/80 MHz,
+ * but the results/plots suggest that it's actually 22/44/88 MHz.
+ */
+ switch (phyerr->chan_width_mhz) {
+ case 20:
+ fft_sample->chan_width_mhz = 22;
+ break;
+ case 40:
+ fft_sample->chan_width_mhz = 44;
+ break;
+ case 80:
+ /* TODO: As experiments with an analogue sender and various
+ * configurations (fft-sizes of 64/128/256 and 20/40/80 MHz)
+ * show, the particular configuration of 80 MHz/64 bins does
+ * not match the other samples at all. Until the reason for
+ * that is found, don't report these samples.
+ */
+ if (bin_len == 64)
+ return -EINVAL;
+ fft_sample->chan_width_mhz = 88;
+ break;
+ default:
+ fft_sample->chan_width_mhz = phyerr->chan_width_mhz;
+ }
+
+ fft_sample->relpwr_db = MS(reg1, SEARCH_FFT_REPORT_REG1_RELPWR_DB);
+ fft_sample->avgpwr_db = MS(reg1, SEARCH_FFT_REPORT_REG1_AVGPWR_DB);
+
+ peak_mag = MS(reg1, SEARCH_FFT_REPORT_REG1_PEAK_MAG);
+ fft_sample->max_magnitude = __cpu_to_be16(peak_mag);
+ fft_sample->max_index = MS(reg0, SEARCH_FFT_REPORT_REG0_PEAK_SIDX);
+ fft_sample->rssi = phyerr->rssi_combined;
+
+ total_gain_db = MS(reg0, SEARCH_FFT_REPORT_REG0_TOTAL_GAIN_DB);
+ base_pwr_db = MS(reg0, SEARCH_FFT_REPORT_REG0_BASE_PWR_DB);
+ fft_sample->total_gain_db = __cpu_to_be16(total_gain_db);
+ fft_sample->base_pwr_db = __cpu_to_be16(base_pwr_db);
+
+ freq1 = phyerr->freq1;
+ freq2 = phyerr->freq2;
+ fft_sample->freq1 = __cpu_to_be16(freq1);
+ fft_sample->freq2 = __cpu_to_be16(freq2);
+
+ chain_idx = MS(reg0, SEARCH_FFT_REPORT_REG0_FFT_CHN_IDX);
+
+ fft_sample->noise = __cpu_to_be16(phyerr->nf_chains[chain_idx]);
+
+ bins = (u8 *)fftr;
+ bins += sizeof(*fftr) + ar->hw_params.spectral_bin_offset;
+
+ fft_sample->tsf = __cpu_to_be64(tsf);
+
+ /* max_exp was reported directly by previous hardware (ath9k);
+ * maybe it's possible to get it by other means?
+ */
+ fft_sample->max_exp = get_max_exp(fft_sample->max_index, peak_mag,
+ bin_len, bins);
+
+ memcpy(fft_sample->data, bins, bin_len);
+
+ /* DC value (value in the middle) is the blind spot of the spectral
+ * sample and invalid, interpolate it.
+ */
+ dc_pos = bin_len / 2;
+ fft_sample->data[dc_pos] = (fft_sample->data[dc_pos + 1] +
+ fft_sample->data[dc_pos - 1]) / 2;
+
+ send_fft_sample(ar, &fft_sample->tlv);
+
+ return 0;
+}
+
+static struct ath10k_vif *ath10k_get_spectral_vdev(struct ath10k *ar)
+{
+ struct ath10k_vif *arvif;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (list_empty(&ar->arvifs))
+ return NULL;
+
+ /* if there already is a vif doing spectral, return that. */
+ list_for_each_entry(arvif, &ar->arvifs, list)
+ if (arvif->spectral_enabled)
+ return arvif;
+
+ /* otherwise, return the first vif. */
+ return list_first_entry(&ar->arvifs, typeof(*arvif), list);
+}
+
+static int ath10k_spectral_scan_trigger(struct ath10k *ar)
+{
+ struct ath10k_vif *arvif;
+ int res;
+ int vdev_id;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ arvif = ath10k_get_spectral_vdev(ar);
+ if (!arvif)
+ return -ENODEV;
+ vdev_id = arvif->vdev_id;
+
+ if (ar->spectral.mode == SPECTRAL_DISABLED)
+ return 0;
+
+ res = ath10k_wmi_vdev_spectral_enable(ar, vdev_id,
+ WMI_SPECTRAL_TRIGGER_CMD_CLEAR,
+ WMI_SPECTRAL_ENABLE_CMD_ENABLE);
+ if (res < 0)
+ return res;
+
+ res = ath10k_wmi_vdev_spectral_enable(ar, vdev_id,
+ WMI_SPECTRAL_TRIGGER_CMD_TRIGGER,
+ WMI_SPECTRAL_ENABLE_CMD_ENABLE);
+ if (res < 0)
+ return res;
+
+ return 0;
+}
+
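+/* Apply @mode: the scan is always disabled first and then, unless
+ * disabling was requested, reconfigured with the current debugfs
+ * parameters.
+ */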
+static int ath10k_spectral_scan_config(struct ath10k *ar,
+ enum ath10k_spectral_mode mode)
+{
+ struct wmi_vdev_spectral_conf_arg arg;
+ struct ath10k_vif *arvif;
+ int vdev_id, count, res = 0;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ arvif = ath10k_get_spectral_vdev(ar);
+ if (!arvif)
+ return -ENODEV;
+
+ vdev_id = arvif->vdev_id;
+
+ arvif->spectral_enabled = (mode != SPECTRAL_DISABLED);
+ ar->spectral.mode = mode;
+
+ res = ath10k_wmi_vdev_spectral_enable(ar, vdev_id,
+ WMI_SPECTRAL_TRIGGER_CMD_CLEAR,
+ WMI_SPECTRAL_ENABLE_CMD_DISABLE);
+ if (res < 0) {
+ ath10k_warn(ar, "failed to enable spectral scan: %d\n", res);
+ return res;
+ }
+
+ if (mode == SPECTRAL_DISABLED)
+ return 0;
+
+ if (mode == SPECTRAL_BACKGROUND)
+ count = WMI_SPECTRAL_COUNT_DEFAULT;
+ else
+ count = max_t(u8, 1, ar->spectral.config.count);
+
+ arg.vdev_id = vdev_id;
+ arg.scan_count = count;
+ arg.scan_period = WMI_SPECTRAL_PERIOD_DEFAULT;
+ arg.scan_priority = WMI_SPECTRAL_PRIORITY_DEFAULT;
+ arg.scan_fft_size = ar->spectral.config.fft_size;
+ arg.scan_gc_ena = WMI_SPECTRAL_GC_ENA_DEFAULT;
+ arg.scan_restart_ena = WMI_SPECTRAL_RESTART_ENA_DEFAULT;
+ arg.scan_noise_floor_ref = WMI_SPECTRAL_NOISE_FLOOR_REF_DEFAULT;
+ arg.scan_init_delay = WMI_SPECTRAL_INIT_DELAY_DEFAULT;
+ arg.scan_nb_tone_thr = WMI_SPECTRAL_NB_TONE_THR_DEFAULT;
+ arg.scan_str_bin_thr = WMI_SPECTRAL_STR_BIN_THR_DEFAULT;
+ arg.scan_wb_rpt_mode = WMI_SPECTRAL_WB_RPT_MODE_DEFAULT;
+ arg.scan_rssi_rpt_mode = WMI_SPECTRAL_RSSI_RPT_MODE_DEFAULT;
+ arg.scan_rssi_thr = WMI_SPECTRAL_RSSI_THR_DEFAULT;
+ arg.scan_pwr_format = WMI_SPECTRAL_PWR_FORMAT_DEFAULT;
+ arg.scan_rpt_mode = WMI_SPECTRAL_RPT_MODE_DEFAULT;
+ arg.scan_bin_scale = WMI_SPECTRAL_BIN_SCALE_DEFAULT;
+ arg.scan_dbm_adj = WMI_SPECTRAL_DBM_ADJ_DEFAULT;
+ arg.scan_chn_mask = WMI_SPECTRAL_CHN_MASK_DEFAULT;
+
+ res = ath10k_wmi_vdev_spectral_conf(ar, &arg);
+ if (res < 0) {
+ ath10k_warn(ar, "failed to configure spectral scan: %d\n", res);
+ return res;
+ }
+
+ return 0;
+}
+
+static ssize_t read_file_spec_scan_ctl(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ char *mode = "";
+ size_t len;
+ enum ath10k_spectral_mode spectral_mode;
+
+ mutex_lock(&ar->conf_mutex);
+ spectral_mode = ar->spectral.mode;
+ mutex_unlock(&ar->conf_mutex);
+
+ switch (spectral_mode) {
+ case SPECTRAL_DISABLED:
+ mode = "disable";
+ break;
+ case SPECTRAL_BACKGROUND:
+ mode = "background";
+ break;
+ case SPECTRAL_MANUAL:
+ mode = "manual";
+ break;
+ }
+
+ len = strlen(mode);
+ return simple_read_from_buffer(user_buf, count, ppos, mode, len);
+}
+
+static ssize_t write_file_spec_scan_ctl(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ char buf[32];
+ ssize_t len;
+ int res;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+
+ buf[len] = '\0';
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (strncmp("trigger", buf, 7) == 0) {
+ if (ar->spectral.mode == SPECTRAL_MANUAL ||
+ ar->spectral.mode == SPECTRAL_BACKGROUND) {
+ /* reset the configuration to adopt possibly changed
+ * debugfs parameters
+ */
+ res = ath10k_spectral_scan_config(ar,
+ ar->spectral.mode);
+ if (res < 0) {
+ ath10k_warn(ar, "failed to reconfigure spectral scan: %d\n",
+ res);
+ }
+ res = ath10k_spectral_scan_trigger(ar);
+ if (res < 0) {
+ ath10k_warn(ar, "failed to trigger spectral scan: %d\n",
+ res);
+ }
+ } else {
+ res = -EINVAL;
+ }
+ } else if (strncmp("background", buf, 10) == 0) {
+ res = ath10k_spectral_scan_config(ar, SPECTRAL_BACKGROUND);
+ } else if (strncmp("manual", buf, 6) == 0) {
+ res = ath10k_spectral_scan_config(ar, SPECTRAL_MANUAL);
+ } else if (strncmp("disable", buf, 7) == 0) {
+ res = ath10k_spectral_scan_config(ar, SPECTRAL_DISABLED);
+ } else {
+ res = -EINVAL;
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+
+ if (res < 0)
+ return res;
+
+ return count;
+}
+
+static const struct file_operations fops_spec_scan_ctl = {
+ .read = read_file_spec_scan_ctl,
+ .write = write_file_spec_scan_ctl,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t read_file_spectral_count(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ char buf[32];
+ size_t len;
+ u8 spectral_count;
+
+ mutex_lock(&ar->conf_mutex);
+ spectral_count = ar->spectral.config.count;
+ mutex_unlock(&ar->conf_mutex);
+
+ len = sprintf(buf, "%d\n", spectral_count);
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_spectral_count(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ unsigned long val;
+ ssize_t ret;
+
+ ret = kstrtoul_from_user(user_buf, count, 0, &val);
+ if (ret)
+ return ret;
+
+ if (val > 255)
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+ ar->spectral.config.count = val;
+ mutex_unlock(&ar->conf_mutex);
+
+ return count;
+}
+
+static const struct file_operations fops_spectral_count = {
+ .read = read_file_spectral_count,
+ .write = write_file_spectral_count,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t read_file_spectral_bins(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ char buf[32];
+ unsigned int bins, fft_size, bin_scale;
+ size_t len;
+
+ mutex_lock(&ar->conf_mutex);
+
+ fft_size = ar->spectral.config.fft_size;
+ bin_scale = WMI_SPECTRAL_BIN_SCALE_DEFAULT;
+ bins = 1 << (fft_size - bin_scale);
+
+ mutex_unlock(&ar->conf_mutex);
+
+ len = sprintf(buf, "%d\n", bins);
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_spectral_bins(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ unsigned long val;
+ ssize_t ret;
+
+ ret = kstrtoul_from_user(user_buf, count, 0, &val);
+ if (ret)
+ return ret;
+
+ if (val < 64 || val > SPECTRAL_ATH10K_MAX_NUM_BINS)
+ return -EINVAL;
+
+ if (!is_power_of_2(val))
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+ ar->spectral.config.fft_size = ilog2(val);
+ ar->spectral.config.fft_size += WMI_SPECTRAL_BIN_SCALE_DEFAULT;
+ mutex_unlock(&ar->conf_mutex);
+
+ return count;
+}
+
+static const struct file_operations fops_spectral_bins = {
+ .read = read_file_spectral_bins,
+ .write = write_file_spectral_bins,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static struct dentry *create_buf_file_handler(const char *filename,
+ struct dentry *parent,
+ umode_t mode,
+ struct rchan_buf *buf,
+ int *is_global)
+{
+ struct dentry *buf_file;
+
+ buf_file = debugfs_create_file(filename, mode, parent, buf,
+ &relay_file_operations);
+ if (IS_ERR(buf_file))
+ return NULL;
+
+ *is_global = 1;
+ return buf_file;
+}
+
+static int remove_buf_file_handler(struct dentry *dentry)
+{
+ debugfs_remove(dentry);
+
+ return 0;
+}
+
+static const struct rchan_callbacks rfs_spec_scan_cb = {
+ .create_buf_file = create_buf_file_handler,
+ .remove_buf_file = remove_buf_file_handler,
+};
+
+int ath10k_spectral_start(struct ath10k *ar)
+{
+ struct ath10k_vif *arvif;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ list_for_each_entry(arvif, &ar->arvifs, list)
+ arvif->spectral_enabled = 0;
+
+ ar->spectral.mode = SPECTRAL_DISABLED;
+ ar->spectral.config.count = WMI_SPECTRAL_COUNT_DEFAULT;
+ ar->spectral.config.fft_size = WMI_SPECTRAL_FFT_SIZE_DEFAULT;
+
+ return 0;
+}
+
+int ath10k_spectral_vif_stop(struct ath10k_vif *arvif)
+{
+ if (!arvif->spectral_enabled)
+ return 0;
+
+ return ath10k_spectral_scan_config(arvif->ar, SPECTRAL_DISABLED);
+}
+
+int ath10k_spectral_create(struct ath10k *ar)
+{
+ /* The buffer size covers whole channels in dual bands up to 128 bins.
+ * Scans with more than 128 bins need to be run on a single band at a
+ * time.
+ */
+ ar->spectral.rfs_chan_spec_scan = relay_open("spectral_scan",
+ ar->debug.debugfs_phy,
+ 1140, 2500,
+ &rfs_spec_scan_cb, NULL);
+ debugfs_create_file("spectral_scan_ctl",
+ 0600,
+ ar->debug.debugfs_phy, ar,
+ &fops_spec_scan_ctl);
+ debugfs_create_file("spectral_count",
+ 0600,
+ ar->debug.debugfs_phy, ar,
+ &fops_spectral_count);
+ debugfs_create_file("spectral_bins",
+ 0600,
+ ar->debug.debugfs_phy, ar,
+ &fops_spectral_bins);
+
+ return 0;
+}
+
+void ath10k_spectral_destroy(struct ath10k *ar)
+{
+ if (ar->spectral.rfs_chan_spec_scan) {
+ relay_close(ar->spectral.rfs_chan_spec_scan);
+ ar->spectral.rfs_chan_spec_scan = NULL;
+ }
+}
diff --git a/drivers/net/wireless/ath/ath10k/spectral.h b/drivers/net/wireless/ath/ath10k/spectral.h
new file mode 100644
index 000000000000..5f481f11c6e5
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/spectral.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: ISC */
+/*
+ * Copyright (c) 2013-2015 Qualcomm Atheros, Inc.
+ */
+
+#ifndef SPECTRAL_H
+#define SPECTRAL_H
+
+#include "../spectral_common.h"
+
+/**
+ * struct ath10k_spec_scan - parameters for Atheros spectral scan
+ *
+ * @count: number of scan results requested for manual mode
+ * @fft_size: number of bins to be requested = 2^(fft_size - bin_scale)
+ */
+struct ath10k_spec_scan {
+ u8 count;
+ u8 fft_size;
+};
+
+/* enum ath10k_spectral_mode:
+ *
+ * @SPECTRAL_DISABLED: spectral mode is disabled
+ * @SPECTRAL_BACKGROUND: hardware sends samples when it is not busy with
+ * something else.
+ * @SPECTRAL_MANUAL: spectral scan is enabled, triggering for samples
+ * is performed manually.
+ */
+enum ath10k_spectral_mode {
+ SPECTRAL_DISABLED = 0,
+ SPECTRAL_BACKGROUND,
+ SPECTRAL_MANUAL,
+};
+
+#ifdef CONFIG_ATH10K_SPECTRAL
+
+int ath10k_spectral_process_fft(struct ath10k *ar,
+ struct wmi_phyerr_ev_arg *phyerr,
+ const struct phyerr_fft_report *fftr,
+ size_t bin_len, u64 tsf);
+int ath10k_spectral_start(struct ath10k *ar);
+int ath10k_spectral_vif_stop(struct ath10k_vif *arvif);
+int ath10k_spectral_create(struct ath10k *ar);
+void ath10k_spectral_destroy(struct ath10k *ar);
+
+#else
+
+static inline int
+ath10k_spectral_process_fft(struct ath10k *ar,
+ struct wmi_phyerr_ev_arg *phyerr,
+ const struct phyerr_fft_report *fftr,
+ size_t bin_len, u64 tsf)
+{
+ return 0;
+}
+
+static inline int ath10k_spectral_start(struct ath10k *ar)
+{
+ return 0;
+}
+
+static inline int ath10k_spectral_vif_stop(struct ath10k_vif *arvif)
+{
+ return 0;
+}
+
+static inline int ath10k_spectral_create(struct ath10k *ar)
+{
+ return 0;
+}
+
+static inline void ath10k_spectral_destroy(struct ath10k *ar)
+{
+}
+
+#endif /* CONFIG_ATH10K_SPECTRAL */
+
+#endif /* SPECTRAL_H */
diff --git a/drivers/net/wireless/ath/ath10k/swap.c b/drivers/net/wireless/ath/ath10k/swap.c
new file mode 100644
index 000000000000..7198a386f2fb
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/swap.c
@@ -0,0 +1,195 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (c) 2015-2016 Qualcomm Atheros, Inc.
+ */
+
+/* This file implements the code swap logic. With the code swap feature,
+ * the target can run the firmware binary with an even smaller IRAM by
+ * using host memory to store some of the code segments.
+ */
+
+#include "core.h"
+#include "bmi.h"
+#include "debug.h"
+
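+/* Copy the swap binary's TLV payloads into the host segment buffer
+ * and record the target BMI write address from the zero-length tail
+ * item.
+ */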
+static int ath10k_swap_code_seg_fill(struct ath10k *ar,
+ struct ath10k_swap_code_seg_info *seg_info,
+ const void *data, size_t data_len)
+{
+ u8 *virt_addr = seg_info->virt_address[0];
+ u8 swap_magic[ATH10K_SWAP_CODE_SEG_MAGIC_BYTES_SZ] = {};
+ const u8 *fw_data = data;
+ union ath10k_swap_code_seg_item *swap_item;
+ u32 length = 0;
+ u32 payload_len;
+ u32 total_payload_len = 0;
+ u32 size_left = data_len;
+
+ /* Parse the swap bin and copy the content to host-allocated memory.
+ * The format is address, length and value. The last 4 bytes are the
+ * target write address. Currently the address field is not used.
+ */
+ seg_info->target_addr = -1;
+ while (size_left >= sizeof(*swap_item)) {
+ swap_item = (union ath10k_swap_code_seg_item *)fw_data;
+ payload_len = __le32_to_cpu(swap_item->tlv.length);
+ if ((payload_len > size_left) ||
+ (payload_len == 0 &&
+ size_left != sizeof(struct ath10k_swap_code_seg_tail))) {
+ ath10k_err(ar, "refusing to parse invalid tlv length %d\n",
+ payload_len);
+ return -EINVAL;
+ }
+
+ if (payload_len == 0) {
+ if (memcmp(swap_item->tail.magic_signature, swap_magic,
+ ATH10K_SWAP_CODE_SEG_MAGIC_BYTES_SZ)) {
+ ath10k_err(ar, "refusing an invalid swap file\n");
+ return -EINVAL;
+ }
+ seg_info->target_addr =
+ __le32_to_cpu(swap_item->tail.bmi_write_addr);
+ break;
+ }
+
+ memcpy(virt_addr, swap_item->tlv.data, payload_len);
+ virt_addr += payload_len;
+ length = payload_len + sizeof(struct ath10k_swap_code_seg_tlv);
+ size_left -= length;
+ fw_data += length;
+ total_payload_len += payload_len;
+ }
+
+ if (seg_info->target_addr == -1) {
+ ath10k_err(ar, "failed to parse invalid swap file\n");
+ return -EINVAL;
+ }
+ seg_info->seg_hw_info.swap_size = __cpu_to_le32(total_payload_len);
+
+ return 0;
+}
+
+static void
+ath10k_swap_code_seg_free(struct ath10k *ar,
+ struct ath10k_swap_code_seg_info *seg_info)
+{
+ u32 seg_size;
+
+ if (!seg_info)
+ return;
+
+ if (!seg_info->virt_address[0])
+ return;
+
+ seg_size = __le32_to_cpu(seg_info->seg_hw_info.size);
+ dma_free_coherent(ar->dev, seg_size, seg_info->virt_address[0],
+ seg_info->paddr[0]);
+}
+
+static struct ath10k_swap_code_seg_info *
+ath10k_swap_code_seg_alloc(struct ath10k *ar, size_t swap_bin_len)
+{
+ struct ath10k_swap_code_seg_info *seg_info;
+ void *virt_addr;
+ dma_addr_t paddr;
+
+ swap_bin_len = roundup(swap_bin_len, 2);
+ if (swap_bin_len > ATH10K_SWAP_CODE_SEG_BIN_LEN_MAX) {
+ ath10k_err(ar, "refusing code swap bin because it is too big %zu > %d\n",
+ swap_bin_len, ATH10K_SWAP_CODE_SEG_BIN_LEN_MAX);
+ return NULL;
+ }
+
+ seg_info = devm_kzalloc(ar->dev, sizeof(*seg_info), GFP_KERNEL);
+ if (!seg_info)
+ return NULL;
+
+ virt_addr = dma_alloc_coherent(ar->dev, swap_bin_len, &paddr,
+ GFP_KERNEL);
+ if (!virt_addr)
+ return NULL;
+
+ seg_info->seg_hw_info.bus_addr[0] = __cpu_to_le32(paddr);
+ seg_info->seg_hw_info.size = __cpu_to_le32(swap_bin_len);
+ seg_info->seg_hw_info.swap_size = __cpu_to_le32(swap_bin_len);
+ seg_info->seg_hw_info.num_segs =
+ __cpu_to_le32(ATH10K_SWAP_CODE_SEG_NUM_SUPPORTED);
+ seg_info->seg_hw_info.size_log2 = __cpu_to_le32(ilog2(swap_bin_len));
+ seg_info->virt_address[0] = virt_addr;
+ seg_info->paddr[0] = paddr;
+
+ return seg_info;
+}
+
+int ath10k_swap_code_seg_configure(struct ath10k *ar,
+ const struct ath10k_fw_file *fw_file)
+{
+ int ret;
+ struct ath10k_swap_code_seg_info *seg_info = NULL;
+
+ if (!fw_file->firmware_swap_code_seg_info)
+ return 0;
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot found firmware code swap binary\n");
+
+ seg_info = fw_file->firmware_swap_code_seg_info;
+
+ ret = ath10k_bmi_write_memory(ar, seg_info->target_addr,
+ &seg_info->seg_hw_info,
+ sizeof(seg_info->seg_hw_info));
+ if (ret) {
+ ath10k_err(ar, "failed to write Code swap segment information (%d)\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+void ath10k_swap_code_seg_release(struct ath10k *ar,
+ struct ath10k_fw_file *fw_file)
+{
+ ath10k_swap_code_seg_free(ar, fw_file->firmware_swap_code_seg_info);
+
+ /* FIXME: these two assignments look to be in the wrong place! Shouldn't
+ * they be in ath10k_core_free_firmware_files() like the rest?
+ */
+ fw_file->codeswap_data = NULL;
+ fw_file->codeswap_len = 0;
+
+ fw_file->firmware_swap_code_seg_info = NULL;
+}
+
+int ath10k_swap_code_seg_init(struct ath10k *ar, struct ath10k_fw_file *fw_file)
+{
+ int ret;
+ struct ath10k_swap_code_seg_info *seg_info;
+ const void *codeswap_data;
+ size_t codeswap_len;
+
+ codeswap_data = fw_file->codeswap_data;
+ codeswap_len = fw_file->codeswap_len;
+
+ if (!codeswap_len || !codeswap_data)
+ return 0;
+
+ seg_info = ath10k_swap_code_seg_alloc(ar, codeswap_len);
+ if (!seg_info) {
+ ath10k_err(ar, "failed to allocate fw code swap segment\n");
+ return -ENOMEM;
+ }
+
+ ret = ath10k_swap_code_seg_fill(ar, seg_info,
+ codeswap_data, codeswap_len);
+
+ if (ret) {
+ ath10k_warn(ar, "failed to initialize fw code swap segment: %d\n",
+ ret);
+ ath10k_swap_code_seg_free(ar, seg_info);
+ return ret;
+ }
+
+ fw_file->firmware_swap_code_seg_info = seg_info;
+
+ return 0;
+}
diff --git a/drivers/net/wireless/ath/ath10k/swap.h b/drivers/net/wireless/ath/ath10k/swap.h
new file mode 100644
index 000000000000..b4733b5ded34
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/swap.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: ISC */
+/*
+ * Copyright (c) 2015-2016 Qualcomm Atheros, Inc.
+ */
+
+#ifndef _SWAP_H_
+#define _SWAP_H_
+
+#define ATH10K_SWAP_CODE_SEG_BIN_LEN_MAX (512 * 1024)
+#define ATH10K_SWAP_CODE_SEG_MAGIC_BYTES_SZ 12
+#define ATH10K_SWAP_CODE_SEG_NUM_MAX 16
+/* Currently only one swap segment is supported */
+#define ATH10K_SWAP_CODE_SEG_NUM_SUPPORTED 1
+
+struct ath10k_fw_file;
+
+struct ath10k_swap_code_seg_tlv {
+ __le32 address;
+ __le32 length;
+ u8 data[];
+} __packed;
+
+struct ath10k_swap_code_seg_tail {
+ u8 magic_signature[ATH10K_SWAP_CODE_SEG_MAGIC_BYTES_SZ];
+ __le32 bmi_write_addr;
+} __packed;
+
+union ath10k_swap_code_seg_item {
+ struct ath10k_swap_code_seg_tlv tlv;
+ struct ath10k_swap_code_seg_tail tail;
+} __packed;
+
+struct ath10k_swap_code_seg_hw_info {
+ /* Swap binary image size */
+ __le32 swap_size;
+ __le32 num_segs;
+
+ /* Swap data size */
+ __le32 size;
+ __le32 size_log2;
+ __le32 bus_addr[ATH10K_SWAP_CODE_SEG_NUM_MAX];
+ __le64 reserved[ATH10K_SWAP_CODE_SEG_NUM_MAX];
+} __packed;
+
+struct ath10k_swap_code_seg_info {
+ struct ath10k_swap_code_seg_hw_info seg_hw_info;
+ void *virt_address[ATH10K_SWAP_CODE_SEG_NUM_SUPPORTED];
+ u32 target_addr;
+ dma_addr_t paddr[ATH10K_SWAP_CODE_SEG_NUM_SUPPORTED];
+};
+
+int ath10k_swap_code_seg_configure(struct ath10k *ar,
+ const struct ath10k_fw_file *fw_file);
+void ath10k_swap_code_seg_release(struct ath10k *ar,
+ struct ath10k_fw_file *fw_file);
+int ath10k_swap_code_seg_init(struct ath10k *ar,
+ struct ath10k_fw_file *fw_file);
+
+#endif
diff --git a/drivers/net/wireless/ath/ath10k/targaddrs.h b/drivers/net/wireless/ath/ath10k/targaddrs.h
new file mode 100644
index 000000000000..ba37e6c7ced0
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/targaddrs.h
@@ -0,0 +1,497 @@
+/* SPDX-License-Identifier: ISC */
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2016 Qualcomm Atheros, Inc.
+ */
+
+#ifndef __TARGADDRS_H__
+#define __TARGADDRS_H__
+
+#include "hw.h"
+
+/*
+ * xxx_HOST_INTEREST_ADDRESS is the address in Target RAM of the
+ * host_interest structure. It must match the address of the _host_interest
+ * symbol (see linker script).
+ *
+ * Host Interest is shared between Host and Target in order to coordinate
+ * between the two, and is intended to remain constant (with additions only
+ * at the end) across software releases.
+ *
+ * All addresses are available here so that it's possible to
+ * write a single binary that works with all Target Types.
+ * May be used in assembler code as well as C.
+ */
+#define QCA988X_HOST_INTEREST_ADDRESS 0x00400800
+#define HOST_INTEREST_MAX_SIZE 0x200
+
+/*
+ * These are items that the Host may need to access via BMI or via the
+ * Diagnostic Window. The position of items in this structure must remain
+ * constant across firmware revisions! Types for each item must be fixed
+ * size across target and host platforms. More items may be added at the end.
+ */
+struct host_interest {
+ /*
+ * Pointer to application-defined area, if any.
+ * Set by Target application during startup.
+ */
+ u32 hi_app_host_interest; /* 0x00 */
+
+ /* Pointer to register dump area, valid after Target crash. */
+ u32 hi_failure_state; /* 0x04 */
+
+ /* Pointer to debug logging header */
+ u32 hi_dbglog_hdr; /* 0x08 */
+
+ u32 hi_unused0c; /* 0x0c */
+
+ /*
+ * General-purpose flag bits, similar to SOC_OPTION_* flags.
+ * Can be used by application rather than by OS.
+ */
+ u32 hi_option_flag; /* 0x10 */
+
+ /*
+ * Boolean that determines whether or not to
+ * display messages on the serial port.
+ */
+ u32 hi_serial_enable; /* 0x14 */
+
+ /* Start address of DataSet index, if any */
+ u32 hi_dset_list_head; /* 0x18 */
+
+ /* Override Target application start address */
+ u32 hi_app_start; /* 0x1c */
+
+ /* Clock and voltage tuning */
+ u32 hi_skip_clock_init; /* 0x20 */
+ u32 hi_core_clock_setting; /* 0x24 */
+ u32 hi_cpu_clock_setting; /* 0x28 */
+ u32 hi_system_sleep_setting; /* 0x2c */
+ u32 hi_xtal_control_setting; /* 0x30 */
+ u32 hi_pll_ctrl_setting_24ghz; /* 0x34 */
+ u32 hi_pll_ctrl_setting_5ghz; /* 0x38 */
+ u32 hi_ref_voltage_trim_setting; /* 0x3c */
+ u32 hi_clock_info; /* 0x40 */
+
+ /* Host uses BE CPU or not */
+ u32 hi_be; /* 0x44 */
+
+ u32 hi_stack; /* normal stack */ /* 0x48 */
+ u32 hi_err_stack; /* error stack */ /* 0x4c */
+ u32 hi_desired_cpu_speed_hz; /* 0x50 */
+
+ /* Pointer to Board Data */
+ u32 hi_board_data; /* 0x54 */
+
+ /*
+ * Indication of Board Data state:
+ * 0: board data is not yet initialized.
+ * 1: board data is initialized; unknown size
+ * >1: number of bytes of initialized board data
+ */
+ u32 hi_board_data_initialized; /* 0x58 */
+
+ u32 hi_dset_ram_index_table; /* 0x5c */
+
+ u32 hi_desired_baud_rate; /* 0x60 */
+ u32 hi_dbglog_config; /* 0x64 */
+ u32 hi_end_ram_reserve_sz; /* 0x68 */
+ u32 hi_mbox_io_block_sz; /* 0x6c */
+
+ u32 hi_num_bpatch_streams; /* 0x70 -- unused */
+ u32 hi_mbox_isr_yield_limit; /* 0x74 */
+
+ u32 hi_refclk_hz; /* 0x78 */
+ u32 hi_ext_clk_detected; /* 0x7c */
+ u32 hi_dbg_uart_txpin; /* 0x80 */
+ u32 hi_dbg_uart_rxpin; /* 0x84 */
+ u32 hi_hci_uart_baud; /* 0x88 */
+ u32 hi_hci_uart_pin_assignments; /* 0x8C */
+
+ u32 hi_hci_uart_baud_scale_val; /* 0x90 */
+ u32 hi_hci_uart_baud_step_val; /* 0x94 */
+
+ u32 hi_allocram_start; /* 0x98 */
+ u32 hi_allocram_sz; /* 0x9c */
+ u32 hi_hci_bridge_flags; /* 0xa0 */
+ u32 hi_hci_uart_support_pins; /* 0xa4 */
+
+ u32 hi_hci_uart_pwr_mgmt_params; /* 0xa8 */
+
+ /*
+ * 0xa8 - [1]: 0 = UART FC active low, 1 = UART FC active high
+ * [31:16]: wakeup timeout in ms
+ */
+ /* Pointer to extended board Data */
+ u32 hi_board_ext_data; /* 0xac */
+ u32 hi_board_ext_data_config; /* 0xb0 */
+ /*
+ * Bit [0]: valid
+ * Bit [31:16]: size
+ */
+ /*
+ * hi_reset_flag controls what is done on target reset, such as
+ * restoring app_start after a warm reset, or preserving the host
+ * interest area, ROM data, literals, etc.
+ */
+ u32 hi_reset_flag; /* 0xb4 */
+ /* indicate hi_reset_flag is valid */
+ u32 hi_reset_flag_valid; /* 0xb8 */
+ u32 hi_hci_uart_pwr_mgmt_params_ext; /* 0xbc */
+ /* 0xbc - [31:0]: idle timeout in ms */
+ /* ACS flags */
+ u32 hi_acs_flags; /* 0xc0 */
+ u32 hi_console_flags; /* 0xc4 */
+ u32 hi_nvram_state; /* 0xc8 */
+ u32 hi_option_flag2; /* 0xcc */
+
+ /* If non-zero, override values sent to Host in WMI_READY event. */
+ u32 hi_sw_version_override; /* 0xd0 */
+ u32 hi_abi_version_override; /* 0xd4 */
+
+ /*
+ * Percentage of high priority RX traffic to total expected RX traffic
+ * applicable only to ar6004
+ */
+ u32 hi_hp_rx_traffic_ratio; /* 0xd8 */
+
+ /* test applications flags */
+ u32 hi_test_apps_related; /* 0xdc */
+ /* location of test script */
+ u32 hi_ota_testscript; /* 0xe0 */
+ /* location of CAL data */
+ u32 hi_cal_data; /* 0xe4 */
+
+ /* Number of packet log buffers */
+ u32 hi_pktlog_num_buffers; /* 0xe8 */
+
+ /* wow extension configuration */
+ u32 hi_wow_ext_config; /* 0xec */
+ u32 hi_pwr_save_flags; /* 0xf0 */
+
+ /* Spatial Multiplexing Power Save (SMPS) options */
+ u32 hi_smps_options; /* 0xf4 */
+
+ /* Interconnect-specific state */
+ u32 hi_interconnect_state; /* 0xf8 */
+
+ /* Coex configuration flags */
+ u32 hi_coex_config; /* 0xfc */
+
+ /* Early allocation support */
+ u32 hi_early_alloc; /* 0x100 */
+ /* FW swap field */
+ /*
+ * Bits of this 32bit word will be used to pass specific swap
+ * instruction to FW
+ */
+ /*
+ * Bit 0 -- AP Nart descriptor no swap. When this bit is set,
+ * FW will not swap the TX descriptor, meaning packets are formed
+ * on the target processor.
+ */
+ /* Bit 1 - unused */
+ u32 hi_fw_swap; /* 0x104 */
+
+ /* global arenas pointer address, used by host driver debug */
+ u32 hi_dynamic_mem_arenas_addr; /* 0x108 */
+
+ /* bytes of DRAM in use by the allocator */
+ u32 hi_dynamic_mem_allocated; /* 0x10C */
+
+ /* remaining bytes of DRAM */
+ u32 hi_dynamic_mem_remaining; /* 0x110 */
+
+ /* memory track count, configured by host */
+ u32 hi_dynamic_mem_track_max; /* 0x114 */
+
+ /* minidump buffer */
+ u32 hi_minidump; /* 0x118 */
+
+ /* bdata's sig and key addr */
+ u32 hi_bd_sig_key; /* 0x11c */
+} __packed;
+
+#define HI_ITEM(item) offsetof(struct host_interest, item)
+
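+/* HI_ITEM() yields the byte offset of an item within struct host_interest;
+ * the host adds it to the target's host-interest base address when reading
+ * or writing over BMI. A minimal sketch, assuming the ath10k_bmi_read32()
+ * helper from bmi.h (the item choice is hypothetical):
+ *
+ * u32 addr = QCA988X_HOST_INTEREST_ADDRESS + HI_ITEM(hi_board_data);
+ *
+ * or, via the helper macro that does the same address math internally:
+ *
+ * u32 board_data_addr;
+ * int ret = ath10k_bmi_read32(ar, hi_board_data, &board_data_addr);
+ */
+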
+/* Bits defined in hi_option_flag */
+
+/* Enable timer workaround */
+#define HI_OPTION_TIMER_WAR 0x01
+/* Limit BMI command credits */
+#define HI_OPTION_BMI_CRED_LIMIT 0x02
+/* Relay Dot11 hdr to/from host */
+#define HI_OPTION_RELAY_DOT11_HDR 0x04
+/* MAC addr method 0-locally administered 1-globally unique addrs */
+#define HI_OPTION_MAC_ADDR_METHOD 0x08
+/* Firmware Bridging */
+#define HI_OPTION_FW_BRIDGE 0x10
+/* Enable CPU profiling */
+#define HI_OPTION_ENABLE_PROFILE 0x20
+/* Disable debug logging */
+#define HI_OPTION_DISABLE_DBGLOG 0x40
+/* Skip Era Tracking */
+#define HI_OPTION_SKIP_ERA_TRACKING 0x80
+/* Disable PAPRD (debug) */
+#define HI_OPTION_PAPRD_DISABLE 0x100
+#define HI_OPTION_NUM_DEV_LSB 0x200
+#define HI_OPTION_NUM_DEV_MSB 0x800
+#define HI_OPTION_DEV_MODE_LSB 0x1000
+#define HI_OPTION_DEV_MODE_MSB 0x8000000
+/* Disable LowFreq Timer Stabilization */
+#define HI_OPTION_NO_LFT_STBL 0x10000000
+/* Skip regulatory scan */
+#define HI_OPTION_SKIP_REG_SCAN 0x20000000
+/*
+ * Do regulatory scan during init before
+ * sending WMI ready event to host
+ */
+#define HI_OPTION_INIT_REG_SCAN 0x40000000
+
+/* REV6: Do not adjust memory map */
+#define HI_OPTION_SKIP_MEMMAP 0x80000000
+
+#define HI_OPTION_MAC_ADDR_METHOD_SHIFT 3
+
+/* 2 bits of hi_option_flag are used to represent 4 modes */
+#define HI_OPTION_FW_MODE_IBSS 0x0 /* IBSS Mode */
+#define HI_OPTION_FW_MODE_BSS_STA 0x1 /* STA Mode */
+#define HI_OPTION_FW_MODE_AP 0x2 /* AP Mode */
+#define HI_OPTION_FW_MODE_BT30AMP 0x3 /* BT30 AMP Mode */
+
+/* 2 bits of hi_option_flag are used to represent 4 submodes */
+#define HI_OPTION_FW_SUBMODE_NONE 0x0 /* Normal mode */
+#define HI_OPTION_FW_SUBMODE_P2PDEV 0x1 /* p2p device mode */
+#define HI_OPTION_FW_SUBMODE_P2PCLIENT 0x2 /* p2p client mode */
+#define HI_OPTION_FW_SUBMODE_P2PGO 0x3 /* p2p go mode */
+
+/* Num dev Mask */
+#define HI_OPTION_NUM_DEV_MASK 0x7
+#define HI_OPTION_NUM_DEV_SHIFT 0x9
+
+/* firmware bridging */
+#define HI_OPTION_FW_BRIDGE_SHIFT 0x04
+
+/*
+ * Fw Mode/SubMode Mask
+ *-----------------------------------------------------------------------------
+ * SUB | SUB | SUB | SUB | | | |
+ *MODE[3] | MODE[2] | MODE[1] | MODE[0] | MODE[3] | MODE[2] | MODE[1] | MODE[0]
+ * (2) | (2) | (2) | (2) | (2) | (2) | (2) | (2)
+ *-----------------------------------------------------------------------------
+ */
+#define HI_OPTION_FW_MODE_BITS 0x2
+#define HI_OPTION_FW_MODE_MASK 0x3
+#define HI_OPTION_FW_MODE_SHIFT 0xC
+#define HI_OPTION_ALL_FW_MODE_MASK 0xFF
+
+#define HI_OPTION_FW_SUBMODE_BITS 0x2
+#define HI_OPTION_FW_SUBMODE_MASK 0x3
+#define HI_OPTION_FW_SUBMODE_SHIFT 0x14
+#define HI_OPTION_ALL_FW_SUBMODE_MASK 0xFF00
+#define HI_OPTION_ALL_FW_SUBMODE_SHIFT 0x8
+
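+/* A worked example with hypothetical values: one STA-mode device running
+ * the P2P-device submode packs into hi_option_flag as
+ *
+ * flags |= (1 & HI_OPTION_NUM_DEV_MASK) << HI_OPTION_NUM_DEV_SHIFT;
+ * flags |= (HI_OPTION_FW_MODE_BSS_STA & HI_OPTION_FW_MODE_MASK)
+ *          << HI_OPTION_FW_MODE_SHIFT;
+ * flags |= (HI_OPTION_FW_SUBMODE_P2PDEV & HI_OPTION_FW_SUBMODE_MASK)
+ *          << HI_OPTION_FW_SUBMODE_SHIFT;
+ *
+ * i.e. flags == (1 << 9) | (0x1 << 12) | (0x1 << 20) == 0x101200.
+ */
+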
+/* hi_option_flag2 options */
+#define HI_OPTION_OFFLOAD_AMSDU 0x01
+#define HI_OPTION_DFS_SUPPORT 0x02 /* Enable DFS support */
+#define HI_OPTION_ENABLE_RFKILL 0x04 /* RFKill Enable Feature*/
+#define HI_OPTION_RADIO_RETENTION_DISABLE 0x08 /* Disable radio retention */
+#define HI_OPTION_EARLY_CFG_DONE 0x10 /* Early configuration is complete */
+
+#define HI_OPTION_RF_KILL_SHIFT 0x2
+#define HI_OPTION_RF_KILL_MASK 0x1
+
+/* hi_reset_flag */
+/* preserve App Start address */
+#define HI_RESET_FLAG_PRESERVE_APP_START 0x01
+/* preserve host interest */
+#define HI_RESET_FLAG_PRESERVE_HOST_INTEREST 0x02
+/* preserve ROM data */
+#define HI_RESET_FLAG_PRESERVE_ROMDATA 0x04
+#define HI_RESET_FLAG_PRESERVE_NVRAM_STATE 0x08
+#define HI_RESET_FLAG_PRESERVE_BOOT_INFO 0x10
+#define HI_RESET_FLAG_WARM_RESET 0x20
+
+/* define hi_fw_swap bits */
+#define HI_DESC_IN_FW_BIT 0x01
+
+/* indicate the reset flag is valid */
+#define HI_RESET_FLAG_IS_VALID 0x12345678
+
+/* ACS is enabled */
+#define HI_ACS_FLAGS_ENABLED (1 << 0)
+/* Use physical WWAN device */
+#define HI_ACS_FLAGS_USE_WWAN (1 << 1)
+/* Use test VAP */
+#define HI_ACS_FLAGS_TEST_VAP (1 << 2)
+/* SDIO/mailbox ACS flag definitions */
+#define HI_ACS_FLAGS_SDIO_SWAP_MAILBOX_SET (1 << 0)
+#define HI_ACS_FLAGS_SDIO_REDUCE_TX_COMPL_SET (1 << 1)
+#define HI_ACS_FLAGS_ALT_DATA_CREDIT_SIZE (1 << 2)
+#define HI_ACS_FLAGS_SDIO_SWAP_MAILBOX_FW_ACK (1 << 16)
+#define HI_ACS_FLAGS_SDIO_REDUCE_TX_COMPL_FW_ACK (1 << 17)
+
+/*
+ * If both SDIO_CRASH_DUMP_ENHANCEMENT_HOST and SDIO_CRASH_DUMP_ENHANCEMENT_FW
+ * flags are set, then crashdump upload will be done using the BMI host/target
+ * communication channel.
+ */
+/* HOST to support using BMI dump FW memory when hit assert */
+#define HI_OPTION_SDIO_CRASH_DUMP_ENHANCEMENT_HOST 0x400
+
+/* FW to support using BMI dump FW memory when hit assert */
+#define HI_OPTION_SDIO_CRASH_DUMP_ENHANCEMENT_FW 0x800
+
+/*
+ * CONSOLE FLAGS
+ *
+ * Bit Range Meaning
+ * --------- --------------------------------
+ * 2..0 UART ID (0 = Default)
+ * 3 Baud Select (0 = 9600, 1 = 115200)
+ * 30..4 Reserved
+ * 31 Enable Console
+ *
+ */
+
+#define HI_CONSOLE_FLAGS_ENABLE (1 << 31)
+#define HI_CONSOLE_FLAGS_UART_MASK (0x7)
+#define HI_CONSOLE_FLAGS_UART_SHIFT 0
+#define HI_CONSOLE_FLAGS_BAUD_SELECT (1 << 3)
+
+/* SM power save options */
+#define HI_SMPS_ALLOW_MASK (0x00000001)
+#define HI_SMPS_MODE_MASK (0x00000002)
+#define HI_SMPS_MODE_STATIC (0x00000000)
+#define HI_SMPS_MODE_DYNAMIC (0x00000002)
+#define HI_SMPS_DISABLE_AUTO_MODE (0x00000004)
+#define HI_SMPS_DATA_THRESH_MASK (0x000007f8)
+#define HI_SMPS_DATA_THRESH_SHIFT (3)
+#define HI_SMPS_RSSI_THRESH_MASK (0x0007f800)
+#define HI_SMPS_RSSI_THRESH_SHIFT (11)
+#define HI_SMPS_LOWPWR_CM_MASK (0x00380000)
+#define HI_SMPS_LOWPWR_CM_SHIFT (15)
+#define HI_SMPS_HIPWR_CM_MASK (0x03c00000)
+#define HI_SMPS_HIPWR_CM_SHIFT (19)
+
+/*
+ * WOW Extension configuration
+ *
+ * Bit Range Meaning
+ * --------- --------------------------------
+ * 8..0 Size of each WOW pattern (max 511)
+ * 15..9 Number of patterns per list (max 127)
+ * 17..16 Number of lists (max 4)
+ * 30..18 Reserved
+ * 31 Enabled
+ *
+ * set values (except enable) to zeros for default settings
+ */
+
+#define HI_WOW_EXT_ENABLED_MASK (1 << 31)
+#define HI_WOW_EXT_NUM_LIST_SHIFT 16
+#define HI_WOW_EXT_NUM_LIST_MASK (0x3 << HI_WOW_EXT_NUM_LIST_SHIFT)
+#define HI_WOW_EXT_NUM_PATTERNS_SHIFT 9
+#define HI_WOW_EXT_NUM_PATTERNS_MASK (0x7F << HI_WOW_EXT_NUM_PATTERNS_SHIFT)
+#define HI_WOW_EXT_PATTERN_SIZE_SHIFT 0
+#define HI_WOW_EXT_PATTERN_SIZE_MASK (0x1FF << HI_WOW_EXT_PATTERN_SIZE_SHIFT)
+
+#define HI_WOW_EXT_MAKE_CONFIG(num_lists, count, size) \
+ ((((num_lists) << HI_WOW_EXT_NUM_LIST_SHIFT) & \
+ HI_WOW_EXT_NUM_LIST_MASK) | \
+ (((count) << HI_WOW_EXT_NUM_PATTERNS_SHIFT) & \
+ HI_WOW_EXT_NUM_PATTERNS_MASK) | \
+ (((size) << HI_WOW_EXT_PATTERN_SIZE_SHIFT) & \
+ HI_WOW_EXT_PATTERN_SIZE_MASK))
+
+#define HI_WOW_EXT_GET_NUM_LISTS(config) \
+ (((config) & HI_WOW_EXT_NUM_LIST_MASK) >> HI_WOW_EXT_NUM_LIST_SHIFT)
+#define HI_WOW_EXT_GET_NUM_PATTERNS(config) \
+ (((config) & HI_WOW_EXT_NUM_PATTERNS_MASK) >> \
+ HI_WOW_EXT_NUM_PATTERNS_SHIFT)
+#define HI_WOW_EXT_GET_PATTERN_SIZE(config) \
+ (((config) & HI_WOW_EXT_PATTERN_SIZE_MASK) >> \
+ HI_WOW_EXT_PATTERN_SIZE_SHIFT)
+
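+/* A worked example with hypothetical values: two lists of 16 patterns of
+ * up to 256 bytes each packs as
+ *
+ * HI_WOW_EXT_MAKE_CONFIG(2, 16, 256)
+ *     == (2 << 16) | (16 << 9) | 256
+ *     == 0x20000 | 0x2000 | 0x100 == 0x22100
+ *
+ * and HI_WOW_EXT_GET_NUM_PATTERNS(0x22100) recovers 16. Note the enable
+ * bit is separate: HI_WOW_EXT_ENABLED_MASK must be ORed in explicitly.
+ */
+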
+/*
+ * Early allocation configuration
+ * Supports RAM bank configuration before BMI is done, which eases memory
+ * allocation at a very early stage.
+ * Bit Range Meaning
+ * --------- ----------------------------------
+ * [0:3] number of banks assigned to be IRAM
+ * [4:15] reserved
+ * [16:31] magic number
+ *
+ * Note:
+ * 1. The target firmware checks the magic number; on a match it considers
+ * bits [0:15] valid and uses them to calculate the end of DRAM. Early
+ * allocation is then located in that area and may be reclaimed when
+ * necessary.
+ * 2. If no magic number is found, early allocation happens at the "_end"
+ * symbol of ROM, which is located before the app-data and might NOT be
+ * reclaimable. If this is adopted, the linker script should keep this
+ * in mind to avoid data corruption.
+ */
+#define HI_EARLY_ALLOC_MAGIC 0x6d8a
+#define HI_EARLY_ALLOC_MAGIC_MASK 0xffff0000
+#define HI_EARLY_ALLOC_MAGIC_SHIFT 16
+#define HI_EARLY_ALLOC_IRAM_BANKS_MASK 0x0000000f
+#define HI_EARLY_ALLOC_IRAM_BANKS_SHIFT 0
+
+#define HI_EARLY_ALLOC_VALID() \
+ ((((HOST_INTEREST->hi_early_alloc) & HI_EARLY_ALLOC_MAGIC_MASK) >> \
+ HI_EARLY_ALLOC_MAGIC_SHIFT) == (HI_EARLY_ALLOC_MAGIC))
+#define HI_EARLY_ALLOC_GET_IRAM_BANKS() \
+ (((HOST_INTEREST->hi_early_alloc) & HI_EARLY_ALLOC_IRAM_BANKS_MASK) \
+ >> HI_EARLY_ALLOC_IRAM_BANKS_SHIFT)
+
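+/* For example (hypothetical value): a target requesting 2 IRAM banks
+ * publishes
+ *
+ * hi_early_alloc = (HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) | 2
+ *                = 0x6d8a0002
+ *
+ * which makes HI_EARLY_ALLOC_VALID() true and
+ * HI_EARLY_ALLOC_GET_IRAM_BANKS() return 2.
+ */
+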
+/* power save flag bit definitions */
+#define HI_PWR_SAVE_LPL_ENABLED 0x1
+/* b1-b3 reserved */
+/* b4-b5: dev0 LPL type: 0 - none
+ * 1 - Reduce Pwr Search
+ * 2 - Reduce Pwr Listen
+ */
+/* b6-b7: dev1 LPL type, and so on for a maximum of 8 devices */
+#define HI_PWR_SAVE_LPL_DEV0_LSB 4
+#define HI_PWR_SAVE_LPL_DEV_MASK 0x3
+/* power save related utility macros */
+#define HI_LPL_ENABLED() \
+ ((HOST_INTEREST->hi_pwr_save_flags & HI_PWR_SAVE_LPL_ENABLED))
+#define HI_DEV_LPL_TYPE_GET(_devix) \
+ (HOST_INTEREST->hi_pwr_save_flags & ((HI_PWR_SAVE_LPL_DEV_MASK) << \
+ (HI_PWR_SAVE_LPL_DEV0_LSB + (_devix) * 2)))
+
+#define HOST_INTEREST_SMPS_IS_ALLOWED() \
+ ((HOST_INTEREST->hi_smps_options & HI_SMPS_ALLOW_MASK))
+
+/* Reserve 1024 bytes for extended board data */
+#define QCA988X_BOARD_DATA_SZ 7168
+#define QCA988X_BOARD_EXT_DATA_SZ 0
+
+#define QCA9887_BOARD_DATA_SZ 7168
+#define QCA9887_BOARD_EXT_DATA_SZ 0
+
+#define QCA6174_BOARD_DATA_SZ 8192
+#define QCA6174_BOARD_EXT_DATA_SZ 0
+
+#define QCA9377_BOARD_DATA_SZ QCA6174_BOARD_DATA_SZ
+#define QCA9377_BOARD_EXT_DATA_SZ 0
+
+#define QCA99X0_BOARD_DATA_SZ 12288
+#define QCA99X0_BOARD_EXT_DATA_SZ 0
+
+/* Dual band extended board data */
+#define QCA99X0_EXT_BOARD_DATA_SZ 2048
+#define EXT_BOARD_ADDRESS_OFFSET 0x3000
+
+#define QCA4019_BOARD_DATA_SZ 12064
+#define QCA4019_BOARD_EXT_DATA_SZ 0
+
+#define WCN3990_BOARD_DATA_SZ 26328
+#define WCN3990_BOARD_EXT_DATA_SZ 0
+
+#endif /* __TARGADDRS_H__ */
diff --git a/drivers/net/wireless/ath/ath10k/testmode.c b/drivers/net/wireless/ath/ath10k/testmode.c
new file mode 100644
index 000000000000..d3bd385694d6
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/testmode.c
@@ -0,0 +1,662 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (c) 2014-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#include "testmode.h"
+
+#include <net/netlink.h>
+#include <linux/firmware.h>
+
+#include "debug.h"
+#include "wmi.h"
+#include "wmi-tlv.h"
+#include "hif.h"
+#include "hw.h"
+#include "core.h"
+
+#include "testmode_i.h"
+
+#define ATH10K_FTM_SEG_NONE ((u32)-1)
+#define ATH10K_FTM_SEGHDR_CURRENT_SEQ GENMASK(3, 0)
+#define ATH10K_FTM_SEGHDR_TOTAL_SEGMENTS GENMASK(7, 4)
+
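+/* The segment header packs the current sequence number and the total
+ * segment count into one field. A small sketch with hypothetical values,
+ * using the same bitfield helpers as the code below: segment 2 of 5 is
+ *
+ * u32 seginfo = FIELD_PREP(ATH10K_FTM_SEGHDR_TOTAL_SEGMENTS, 5) |
+ *               FIELD_PREP(ATH10K_FTM_SEGHDR_CURRENT_SEQ, 2);
+ * // seginfo == 0x52
+ * u8 seq = FIELD_GET(ATH10K_FTM_SEGHDR_CURRENT_SEQ, seginfo);      // 2
+ * u8 total = FIELD_GET(ATH10K_FTM_SEGHDR_TOTAL_SEGMENTS, seginfo); // 5
+ */
+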
+static const struct nla_policy ath10k_tm_policy[ATH10K_TM_ATTR_MAX + 1] = {
+ [ATH10K_TM_ATTR_CMD] = { .type = NLA_U32 },
+ [ATH10K_TM_ATTR_DATA] = { .type = NLA_BINARY,
+ .len = ATH10K_TM_DATA_MAX_LEN },
+ [ATH10K_TM_ATTR_WMI_CMDID] = { .type = NLA_U32 },
+ [ATH10K_TM_ATTR_VERSION_MAJOR] = { .type = NLA_U32 },
+ [ATH10K_TM_ATTR_VERSION_MINOR] = { .type = NLA_U32 },
+};
+
+static void ath10k_tm_event_unsegmented(struct ath10k *ar, u32 cmd_id,
+ struct sk_buff *skb)
+{
+ struct sk_buff *nl_skb;
+ int ret;
+
+ nl_skb = cfg80211_testmode_alloc_event_skb(ar->hw->wiphy,
+ 2 * sizeof(u32) + skb->len,
+ GFP_ATOMIC);
+ if (!nl_skb) {
+ ath10k_warn(ar,
+ "failed to allocate skb for testmode wmi event\n");
+ return;
+ }
+
+ ret = nla_put_u32(nl_skb, ATH10K_TM_ATTR_CMD, ATH10K_TM_CMD_WMI);
+ if (ret) {
+ ath10k_warn(ar,
+ "failed to put testmode wmi event cmd attribute: %d\n",
+ ret);
+ kfree_skb(nl_skb);
+ return;
+ }
+
+ ret = nla_put_u32(nl_skb, ATH10K_TM_ATTR_WMI_CMDID, cmd_id);
+ if (ret) {
+ ath10k_warn(ar,
+ "failed to put testmode wmi event cmd_id: %d\n",
+ ret);
+ kfree_skb(nl_skb);
+ return;
+ }
+
+ ret = nla_put(nl_skb, ATH10K_TM_ATTR_DATA, skb->len, skb->data);
+ if (ret) {
+ ath10k_warn(ar,
+ "failed to copy skb to testmode wmi event: %d\n",
+ ret);
+ kfree_skb(nl_skb);
+ return;
+ }
+
+ cfg80211_testmode_event(nl_skb, GFP_ATOMIC);
+}
+
+static void ath10k_tm_event_segmented(struct ath10k *ar, u32 cmd_id, struct sk_buff *skb)
+{
+ struct wmi_ftm_cmd *ftm = (struct wmi_ftm_cmd *)skb->data;
+ u8 total_segments, current_seq;
+ struct sk_buff *nl_skb;
+ const u8 *buf_pos;
+ u16 datalen;
+ u32 data_pos;
+ int ret;
+
+ if (skb->len < sizeof(*ftm)) {
+ ath10k_warn(ar, "Invalid ftm event length: %d\n", skb->len);
+ return;
+ }
+
+ current_seq = FIELD_GET(ATH10K_FTM_SEGHDR_CURRENT_SEQ,
+ __le32_to_cpu(ftm->seg_hdr.segmentinfo));
+ total_segments = FIELD_GET(ATH10K_FTM_SEGHDR_TOTAL_SEGMENTS,
+ __le32_to_cpu(ftm->seg_hdr.segmentinfo));
+ datalen = skb->len - sizeof(*ftm);
+ buf_pos = ftm->data;
+
+ if (current_seq == 0) {
+ ar->testmode.expected_seq = 0;
+ ar->testmode.data_pos = 0;
+ }
+
+ data_pos = ar->testmode.data_pos;
+
+ if ((data_pos + datalen) > ATH_FTM_EVENT_MAX_BUF_LENGTH) {
+ ath10k_warn(ar, "Invalid ftm event length at %u: %u\n",
+ data_pos, datalen);
+ return;
+ }
+
+ memcpy(&ar->testmode.eventdata[data_pos], buf_pos, datalen);
+ data_pos += datalen;
+
+ if (++ar->testmode.expected_seq != total_segments) {
+ ar->testmode.data_pos = data_pos;
+ ath10k_dbg(ar, ATH10K_DBG_TESTMODE, "partial data received %u/%u\n",
+ current_seq + 1, total_segments);
+ return;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_TESTMODE, "total data length %u\n", data_pos);
+
+ nl_skb = cfg80211_testmode_alloc_event_skb(ar->hw->wiphy,
+ 2 * sizeof(u32) + data_pos,
+ GFP_ATOMIC);
+ if (!nl_skb) {
+ ath10k_warn(ar, "failed to allocate skb for testmode wmi event\n");
+ return;
+ }
+
+ ret = nla_put_u32(nl_skb, ATH10K_TM_ATTR_CMD, ATH10K_TM_CMD_TLV);
+ if (ret) {
+ ath10k_warn(ar, "failed to put testmode wmi event attribute: %d\n", ret);
+ kfree_skb(nl_skb);
+ return;
+ }
+
+ ret = nla_put_u32(nl_skb, ATH10K_TM_ATTR_WMI_CMDID, cmd_id);
+ if (ret) {
+ ath10k_warn(ar, "failed to put testmode wmi event cmd_id: %d\n", ret);
+ kfree_skb(nl_skb);
+ return;
+ }
+
+ ret = nla_put(nl_skb, ATH10K_TM_ATTR_DATA, data_pos, &ar->testmode.eventdata[0]);
+ if (ret) {
+ ath10k_warn(ar, "failed to copy skb to testmode wmi event: %d\n", ret);
+ kfree_skb(nl_skb);
+ return;
+ }
+
+ cfg80211_testmode_event(nl_skb, GFP_ATOMIC);
+}
+
+/* Returns true if callee consumes the skb and the skb should be discarded.
+ * Returns false if skb is not used. Does not sleep.
+ */
+bool ath10k_tm_event_wmi(struct ath10k *ar, u32 cmd_id, struct sk_buff *skb)
+{
+ bool consumed;
+
+ ath10k_dbg(ar, ATH10K_DBG_TESTMODE,
+ "testmode event wmi cmd_id %d skb %p skb->len %d\n",
+ cmd_id, skb, skb->len);
+
+ ath10k_dbg_dump(ar, ATH10K_DBG_TESTMODE, NULL, "", skb->data, skb->len);
+
+ spin_lock_bh(&ar->data_lock);
+
+ if (!ar->testmode.utf_monitor) {
+ consumed = false;
+ goto out;
+ }
+
+ /* Only testmode.c should be handling events from utf firmware,
+ * otherwise all sorts of problems will arise as mac80211 operations
+ * are not initialised.
+ */
+ consumed = true;
+
+ if (ar->testmode.expected_seq != ATH10K_FTM_SEG_NONE)
+ ath10k_tm_event_segmented(ar, cmd_id, skb);
+ else
+ ath10k_tm_event_unsegmented(ar, cmd_id, skb);
+
+out:
+ spin_unlock_bh(&ar->data_lock);
+
+ return consumed;
+}
+
+static int ath10k_tm_cmd_get_version(struct ath10k *ar, struct nlattr *tb[])
+{
+ struct sk_buff *skb;
+ int ret;
+
+ ath10k_dbg(ar, ATH10K_DBG_TESTMODE,
+ "testmode cmd get version_major %d version_minor %d\n",
+ ATH10K_TESTMODE_VERSION_MAJOR,
+ ATH10K_TESTMODE_VERSION_MINOR);
+
+ skb = cfg80211_testmode_alloc_reply_skb(ar->hw->wiphy,
+ nla_total_size(sizeof(u32)));
+ if (!skb)
+ return -ENOMEM;
+
+ ret = nla_put_u32(skb, ATH10K_TM_ATTR_VERSION_MAJOR,
+ ATH10K_TESTMODE_VERSION_MAJOR);
+ if (ret) {
+ kfree_skb(skb);
+ return ret;
+ }
+
+ ret = nla_put_u32(skb, ATH10K_TM_ATTR_VERSION_MINOR,
+ ATH10K_TESTMODE_VERSION_MINOR);
+ if (ret) {
+ kfree_skb(skb);
+ return ret;
+ }
+
+ ret = nla_put_u32(skb, ATH10K_TM_ATTR_WMI_OP_VERSION,
+ ar->normal_mode_fw.fw_file.wmi_op_version);
+ if (ret) {
+ kfree_skb(skb);
+ return ret;
+ }
+
+ return cfg80211_testmode_reply(skb);
+}
+
+static int ath10k_tm_fetch_utf_firmware_api_1(struct ath10k *ar,
+ struct ath10k_fw_file *fw_file)
+{
+ char filename[100];
+ int ret;
+
+ snprintf(filename, sizeof(filename), "%s/%s",
+ ar->hw_params.fw.dir, ATH10K_FW_UTF_FILE);
+
+ /* load utf firmware image */
+ ret = firmware_request_nowarn(&fw_file->firmware, filename, ar->dev);
+ ath10k_dbg(ar, ATH10K_DBG_TESTMODE, "testmode fw request '%s': %d\n",
+ filename, ret);
+
+ if (ret) {
+ ath10k_warn(ar, "failed to retrieve utf firmware '%s': %d\n",
+ filename, ret);
+ return ret;
+ }
+
+ /* UTF firmware API 1 ("utf.bin") does not advertise firmware
+ * features. Do an ugly hack where we force the firmware features
+ * to match the 10.1 branch so that wmi.c will use the correct WMI
+ * interface.
+ */
+
+ fw_file->wmi_op_version = ATH10K_FW_WMI_OP_VERSION_10_1;
+ fw_file->htt_op_version = ATH10K_FW_HTT_OP_VERSION_10_1;
+ fw_file->firmware_data = fw_file->firmware->data;
+ fw_file->firmware_len = fw_file->firmware->size;
+
+ return 0;
+}
+
+static int ath10k_tm_fetch_firmware(struct ath10k *ar)
+{
+ struct ath10k_fw_components *utf_mode_fw;
+ int ret;
+ char fw_name[100];
+ int fw_api2 = 2;
+
+ switch (ar->hif.bus) {
+ case ATH10K_BUS_SDIO:
+ case ATH10K_BUS_USB:
+ scnprintf(fw_name, sizeof(fw_name), "%s-%s-%d.bin",
+ ATH10K_FW_UTF_FILE_BASE, ath10k_bus_str(ar->hif.bus),
+ fw_api2);
+ break;
+ default:
+ scnprintf(fw_name, sizeof(fw_name), "%s-%d.bin",
+ ATH10K_FW_UTF_FILE_BASE, fw_api2);
+ break;
+ }
+
+ ret = ath10k_core_fetch_firmware_api_n(ar, fw_name,
+ &ar->testmode.utf_mode_fw.fw_file);
+ if (ret == 0) {
+ ath10k_dbg(ar, ATH10K_DBG_TESTMODE, "testmode using fw utf api 2");
+ goto out;
+ }
+
+ ret = ath10k_tm_fetch_utf_firmware_api_1(ar, &ar->testmode.utf_mode_fw.fw_file);
+ if (ret) {
+ ath10k_err(ar, "failed to fetch utf firmware binary: %d", ret);
+ return ret;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_TESTMODE, "testmode using utf api 1");
+
+out:
+ utf_mode_fw = &ar->testmode.utf_mode_fw;
+
+ /* Use the same board data file as the normal firmware uses (but
+ * it's still "owned" by normal_mode_fw so we shouldn't free it.
+ */
+ utf_mode_fw->board_data = ar->normal_mode_fw.board_data;
+ utf_mode_fw->board_len = ar->normal_mode_fw.board_len;
+
+ if (!utf_mode_fw->fw_file.otp_data) {
+ ath10k_info(ar, "utf.bin didn't contain otp binary, taking it from the normal mode firmware");
+ utf_mode_fw->fw_file.otp_data = ar->normal_mode_fw.fw_file.otp_data;
+ utf_mode_fw->fw_file.otp_len = ar->normal_mode_fw.fw_file.otp_len;
+ }
+
+ return 0;
+}
+
+static int ath10k_tm_cmd_utf_start(struct ath10k *ar, struct nlattr *tb[])
+{
+ const char *ver;
+ int ret;
+
+ ath10k_dbg(ar, ATH10K_DBG_TESTMODE, "testmode cmd utf start\n");
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state == ATH10K_STATE_UTF) {
+ ret = -EALREADY;
+ goto err;
+ }
+
+ /* start utf only when the driver is not in use */
+ if (ar->state != ATH10K_STATE_OFF) {
+ ret = -EBUSY;
+ goto err;
+ }
+
+ if (WARN_ON(ar->testmode.utf_mode_fw.fw_file.firmware != NULL)) {
+ /* utf image is already downloaded, it shouldn't be */
+ ret = -EEXIST;
+ goto err;
+ }
+
+ ret = ath10k_tm_fetch_firmware(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to fetch UTF firmware: %d", ret);
+ goto err;
+ }
+
+ if (ar->testmode.utf_mode_fw.fw_file.codeswap_data &&
+ ar->testmode.utf_mode_fw.fw_file.codeswap_len) {
+ ret = ath10k_swap_code_seg_init(ar,
+ &ar->testmode.utf_mode_fw.fw_file);
+ if (ret) {
+ ath10k_warn(ar,
+ "failed to init utf code swap segment: %d\n",
+ ret);
+ goto err_release_utf_mode_fw;
+ }
+ }
+
+ spin_lock_bh(&ar->data_lock);
+ ar->testmode.utf_monitor = true;
+ spin_unlock_bh(&ar->data_lock);
+
+ ath10k_dbg(ar, ATH10K_DBG_TESTMODE, "testmode wmi version %d\n",
+ ar->testmode.utf_mode_fw.fw_file.wmi_op_version);
+
+ ret = ath10k_hif_power_up(ar, ATH10K_FIRMWARE_MODE_UTF);
+ if (ret) {
+ ath10k_err(ar, "failed to power up hif (testmode): %d\n", ret);
+ ar->state = ATH10K_STATE_OFF;
+ goto err_release_utf_mode_fw;
+ }
+
+ ar->testmode.eventdata = kzalloc(ATH_FTM_EVENT_MAX_BUF_LENGTH, GFP_KERNEL);
+ if (!ar->testmode.eventdata) {
+ ret = -ENOMEM;
+ goto err_power_down;
+ }
+
+ ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_UTF,
+ &ar->testmode.utf_mode_fw);
+ if (ret) {
+ ath10k_err(ar, "failed to start core (testmode): %d\n", ret);
+ ar->state = ATH10K_STATE_OFF;
+ goto err_release_eventdata;
+ }
+
+ ar->state = ATH10K_STATE_UTF;
+
+ if (strlen(ar->testmode.utf_mode_fw.fw_file.fw_version) > 0)
+ ver = ar->testmode.utf_mode_fw.fw_file.fw_version;
+ else
+ ver = "API 1";
+
+ ath10k_info(ar, "UTF firmware %s started\n", ver);
+
+ mutex_unlock(&ar->conf_mutex);
+
+ return 0;
+
+err_release_eventdata:
+ kfree(ar->testmode.eventdata);
+ ar->testmode.eventdata = NULL;
+
+err_power_down:
+ ath10k_hif_power_down(ar);
+
+err_release_utf_mode_fw:
+ if (ar->testmode.utf_mode_fw.fw_file.codeswap_data &&
+ ar->testmode.utf_mode_fw.fw_file.codeswap_len)
+ ath10k_swap_code_seg_release(ar,
+ &ar->testmode.utf_mode_fw.fw_file);
+
+ release_firmware(ar->testmode.utf_mode_fw.fw_file.firmware);
+ ar->testmode.utf_mode_fw.fw_file.firmware = NULL;
+
+err:
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static void __ath10k_tm_cmd_utf_stop(struct ath10k *ar)
+{
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ath10k_core_stop(ar);
+ ath10k_hif_power_down(ar);
+
+ spin_lock_bh(&ar->data_lock);
+
+ ar->testmode.utf_monitor = false;
+
+ spin_unlock_bh(&ar->data_lock);
+
+ if (ar->testmode.utf_mode_fw.fw_file.codeswap_data &&
+ ar->testmode.utf_mode_fw.fw_file.codeswap_len)
+ ath10k_swap_code_seg_release(ar,
+ &ar->testmode.utf_mode_fw.fw_file);
+
+ release_firmware(ar->testmode.utf_mode_fw.fw_file.firmware);
+ ar->testmode.utf_mode_fw.fw_file.firmware = NULL;
+
+ kfree(ar->testmode.eventdata);
+ ar->testmode.eventdata = NULL;
+
+ ar->state = ATH10K_STATE_OFF;
+}
+
+static int ath10k_tm_cmd_utf_stop(struct ath10k *ar, struct nlattr *tb[])
+{
+ int ret;
+
+ ath10k_dbg(ar, ATH10K_DBG_TESTMODE, "testmode cmd utf stop\n");
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH10K_STATE_UTF) {
+ ret = -ENETDOWN;
+ goto out;
+ }
+
+ __ath10k_tm_cmd_utf_stop(ar);
+
+ ret = 0;
+
+ ath10k_info(ar, "UTF firmware stopped\n");
+
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static int ath10k_tm_cmd_wmi(struct ath10k *ar, struct nlattr *tb[])
+{
+ struct sk_buff *skb;
+ int ret, buf_len;
+ u32 cmd_id;
+ void *buf;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH10K_STATE_UTF) {
+ ret = -ENETDOWN;
+ goto out;
+ }
+
+ if (!tb[ATH10K_TM_ATTR_DATA]) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (!tb[ATH10K_TM_ATTR_WMI_CMDID]) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ buf = nla_data(tb[ATH10K_TM_ATTR_DATA]);
+ buf_len = nla_len(tb[ATH10K_TM_ATTR_DATA]);
+ cmd_id = nla_get_u32(tb[ATH10K_TM_ATTR_WMI_CMDID]);
+
+ ath10k_dbg(ar, ATH10K_DBG_TESTMODE,
+ "testmode cmd wmi cmd_id %d buf %p buf_len %d\n",
+ cmd_id, buf, buf_len);
+
+ ath10k_dbg_dump(ar, ATH10K_DBG_TESTMODE, NULL, "", buf, buf_len);
+
+ skb = ath10k_wmi_alloc_skb(ar, buf_len);
+ if (!skb) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ memcpy(skb->data, buf, buf_len);
+
+ ret = ath10k_wmi_cmd_send(ar, skb, cmd_id);
+ if (ret) {
+ ath10k_warn(ar, "failed to transmit wmi command (testmode): %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = 0;
+
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static int ath10k_tm_cmd_tlv(struct ath10k *ar, struct nlattr *tb[])
+{
+ u16 total_bytes, num_segments;
+ u32 cmd_id, buf_len;
+ u8 segnumber = 0;
+ u8 *bufpos;
+ void *buf;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH10K_STATE_UTF) {
+ ret = -ENETDOWN;
+ goto out;
+ }
+
+ buf = nla_data(tb[ATH10K_TM_ATTR_DATA]);
+ buf_len = nla_len(tb[ATH10K_TM_ATTR_DATA]);
+ cmd_id = WMI_PDEV_UTF_CMDID;
+
+ ath10k_dbg(ar, ATH10K_DBG_TESTMODE,
+ "cmd wmi ftm cmd_id %d buffer length %d\n",
+ cmd_id, buf_len);
+ ath10k_dbg_dump(ar, ATH10K_DBG_TESTMODE, NULL, "", buf, buf_len);
+
+ bufpos = buf;
+ total_bytes = buf_len;
+ num_segments = total_bytes / MAX_WMI_UTF_LEN;
+ ar->testmode.expected_seq = 0;
+
+ if (buf_len - (num_segments * MAX_WMI_UTF_LEN))
+ num_segments++;
+
+ while (buf_len) {
+ u16 chunk_len = min_t(u16, buf_len, MAX_WMI_UTF_LEN);
+ struct wmi_ftm_cmd *ftm_cmd;
+ struct sk_buff *skb;
+ u32 hdr_info;
+ u8 seginfo;
+
+ skb = ath10k_wmi_alloc_skb(ar, (chunk_len +
+ sizeof(struct wmi_ftm_cmd)));
+ if (!skb) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ftm_cmd = (struct wmi_ftm_cmd *)skb->data;
+ hdr_info = FIELD_PREP(WMI_TLV_TAG, WMI_TLV_TAG_ARRAY_BYTE) |
+ FIELD_PREP(WMI_TLV_LEN, (chunk_len +
+ sizeof(struct wmi_ftm_seg_hdr)));
+ ftm_cmd->tlv_header = __cpu_to_le32(hdr_info);
+ ftm_cmd->seg_hdr.len = __cpu_to_le32(total_bytes);
+ ftm_cmd->seg_hdr.msgref = __cpu_to_le32(ar->testmode.ftm_msgref);
+ seginfo = FIELD_PREP(ATH10K_FTM_SEGHDR_TOTAL_SEGMENTS, num_segments) |
+ FIELD_PREP(ATH10K_FTM_SEGHDR_CURRENT_SEQ, segnumber);
+ ftm_cmd->seg_hdr.segmentinfo = __cpu_to_le32(seginfo);
+ segnumber++;
+
+ memcpy(&ftm_cmd->data, bufpos, chunk_len);
+
+ ret = ath10k_wmi_cmd_send(ar, skb, cmd_id);
+ if (ret) {
+ ath10k_warn(ar, "failed to send wmi ftm command: %d\n", ret);
+ goto out;
+ }
+
+ buf_len -= chunk_len;
+ bufpos += chunk_len;
+ }
+
+ ar->testmode.ftm_msgref++;
+ ret = 0;
+
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+int ath10k_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ void *data, int len)
+{
+ struct ath10k *ar = hw->priv;
+ struct nlattr *tb[ATH10K_TM_ATTR_MAX + 1];
+ int ret;
+
+ ret = nla_parse_deprecated(tb, ATH10K_TM_ATTR_MAX, data, len,
+ ath10k_tm_policy, NULL);
+ if (ret)
+ return ret;
+
+ if (!tb[ATH10K_TM_ATTR_CMD])
+ return -EINVAL;
+
+ ar->testmode.expected_seq = ATH10K_FTM_SEG_NONE;
+
+ switch (nla_get_u32(tb[ATH10K_TM_ATTR_CMD])) {
+ case ATH10K_TM_CMD_GET_VERSION:
+ if (!tb[ATH10K_TM_ATTR_DATA])
+ return ath10k_tm_cmd_get_version(ar, tb);
+ else /* ATH10K_TM_CMD_TLV */
+ return ath10k_tm_cmd_tlv(ar, tb);
+ case ATH10K_TM_CMD_UTF_START:
+ return ath10k_tm_cmd_utf_start(ar, tb);
+ case ATH10K_TM_CMD_UTF_STOP:
+ return ath10k_tm_cmd_utf_stop(ar, tb);
+ case ATH10K_TM_CMD_WMI:
+ return ath10k_tm_cmd_wmi(ar, tb);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+void ath10k_testmode_destroy(struct ath10k *ar)
+{
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH10K_STATE_UTF) {
+ /* utf firmware is not running, nothing to do */
+ goto out;
+ }
+
+ __ath10k_tm_cmd_utf_stop(ar);
+
+out:
+ mutex_unlock(&ar->conf_mutex);
+}
diff --git a/drivers/net/wireless/ath/ath10k/testmode.h b/drivers/net/wireless/ath/ath10k/testmode.h
new file mode 100644
index 000000000000..6488fd514ae3
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/testmode.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: ISC */
+/*
+ * Copyright (c) 2014 Qualcomm Atheros, Inc.
+ */
+
+#include "core.h"
+
+#ifdef CONFIG_NL80211_TESTMODE
+
+void ath10k_testmode_destroy(struct ath10k *ar);
+
+bool ath10k_tm_event_wmi(struct ath10k *ar, u32 cmd_id, struct sk_buff *skb);
+int ath10k_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ void *data, int len);
+
+#else
+
+static inline void ath10k_testmode_destroy(struct ath10k *ar)
+{
+}
+
+static inline bool ath10k_tm_event_wmi(struct ath10k *ar, u32 cmd_id,
+ struct sk_buff *skb)
+{
+ return false;
+}
+
+static inline int ath10k_tm_cmd(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ void *data, int len)
+{
+ return 0;
+}
+
+#endif
diff --git a/drivers/net/wireless/ath/ath10k/testmode_i.h b/drivers/net/wireless/ath/ath10k/testmode_i.h
new file mode 100644
index 000000000000..1603f5276682
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/testmode_i.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: ISC */
+/*
+ * Copyright (c) 2014,2017 Qualcomm Atheros, Inc.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+/* "API" level of the ath10k testmode interface. Bump it after every
+ * incompatible interface change.
+ */
+#define ATH10K_TESTMODE_VERSION_MAJOR 1
+
+/* Bump this after every _compatible_ interface change, for example
+ * addition of a new command or an attribute.
+ */
+#define ATH10K_TESTMODE_VERSION_MINOR 0
+
+#define ATH10K_TM_DATA_MAX_LEN 5000
+#define ATH_FTM_EVENT_MAX_BUF_LENGTH 2048
+
+enum ath10k_tm_attr {
+ __ATH10K_TM_ATTR_INVALID = 0,
+ ATH10K_TM_ATTR_CMD = 1,
+ ATH10K_TM_ATTR_DATA = 2,
+ ATH10K_TM_ATTR_WMI_CMDID = 3,
+ ATH10K_TM_ATTR_VERSION_MAJOR = 4,
+ ATH10K_TM_ATTR_VERSION_MINOR = 5,
+ ATH10K_TM_ATTR_WMI_OP_VERSION = 6,
+
+ /* keep last */
+ __ATH10K_TM_ATTR_AFTER_LAST,
+ ATH10K_TM_ATTR_MAX = __ATH10K_TM_ATTR_AFTER_LAST - 1,
+};
+
+/* All ath10k testmode interface commands specified in
+ * ATH10K_TM_ATTR_CMD
+ */
+enum ath10k_tm_cmd {
+ /* Returns the supported ath10k testmode interface version in
+ * ATH10K_TM_ATTR_VERSION. Always guaranteed to work. User space
+ * uses this to verify it's using the correct version of the
+ * testmode interface
+ */
+ ATH10K_TM_CMD_GET_VERSION = 0,
+
+ /* Boots the UTF firmware, the netdev interface must be down at the
+ * time.
+ */
+ ATH10K_TM_CMD_UTF_START = 1,
+
+ /* Shuts down the UTF firmware and puts the driver back into OFF
+ * state.
+ */
+ ATH10K_TM_CMD_UTF_STOP = 2,
+
+ /* The command used to transmit a WMI command to the firmware and
+ * the event to receive WMI events from the firmware. Without
+ * struct wmi_cmd_hdr header, only the WMI payload. Command id is
+ * provided with ATH10K_TM_ATTR_WMI_CMDID and payload in
+ * ATH10K_TM_ATTR_DATA.
+ */
+ ATH10K_TM_CMD_WMI = 3,
+
+ /* The command used to transmit a test command to the firmware
+ * and the event to receive test events from the firmware. The received
+ * data contains only the TLV payload; the driver needs to add the TLV
+ * header and send the command to the firmware with command id
+ * WMI_PDEV_UTF_CMDID. The data payload can be large, so the driver
+ * sends it to the firmware in segments.
+ *
+ * This legacy testmode command shares the same value as the get-version
+ * command. To distinguish between them, we check whether the data attribute
+ * is present.
+ */
+ ATH10K_TM_CMD_TLV = ATH10K_TM_CMD_GET_VERSION,
+};
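+
+/* A rough user-space sketch (hypothetical, libnl-genl API): the commands
+ * above travel inside NL80211_CMD_TESTMODE, nested under
+ * NL80211_ATTR_TESTDATA:
+ *
+ * struct nl_msg *msg = nlmsg_alloc();
+ * struct nlattr *tmdata;
+ *
+ * genlmsg_put(msg, 0, 0, nl80211_id, 0, 0, NL80211_CMD_TESTMODE, 0);
+ * nla_put_u32(msg, NL80211_ATTR_IFINDEX, ifindex);
+ * tmdata = nla_nest_start(msg, NL80211_ATTR_TESTDATA);
+ * nla_put_u32(msg, ATH10K_TM_ATTR_CMD, ATH10K_TM_CMD_GET_VERSION);
+ * nla_nest_end(msg, tmdata);
+ * // send with nl_send_auto() and parse the ATH10K_TM_ATTR_VERSION_* reply
+ */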
diff --git a/drivers/net/wireless/ath/ath10k/thermal.c b/drivers/net/wireless/ath/ath10k/thermal.c
new file mode 100644
index 000000000000..8b15ec07b107
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/thermal.c
@@ -0,0 +1,220 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (c) 2014-2015 Qualcomm Atheros, Inc.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/device.h>
+#include <linux/sysfs.h>
+#include <linux/thermal.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include "core.h"
+#include "debug.h"
+#include "wmi-ops.h"
+
+static int
+ath10k_thermal_get_max_throttle_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ *state = ATH10K_THERMAL_THROTTLE_MAX;
+
+ return 0;
+}
+
+static int
+ath10k_thermal_get_cur_throttle_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ struct ath10k *ar = cdev->devdata;
+
+ mutex_lock(&ar->conf_mutex);
+ *state = ar->thermal.throttle_state;
+ mutex_unlock(&ar->conf_mutex);
+
+ return 0;
+}
+
+static int
+ath10k_thermal_set_cur_throttle_state(struct thermal_cooling_device *cdev,
+ unsigned long throttle_state)
+{
+ struct ath10k *ar = cdev->devdata;
+
+ if (throttle_state > ATH10K_THERMAL_THROTTLE_MAX) {
+ ath10k_warn(ar, "throttle state %ld is exceeding the limit %d\n",
+ throttle_state, ATH10K_THERMAL_THROTTLE_MAX);
+ return -EINVAL;
+ }
+ mutex_lock(&ar->conf_mutex);
+ ar->thermal.throttle_state = throttle_state;
+ ath10k_thermal_set_throttling(ar);
+ mutex_unlock(&ar->conf_mutex);
+ return 0;
+}
+
+static const struct thermal_cooling_device_ops ath10k_thermal_ops = {
+ .get_max_state = ath10k_thermal_get_max_throttle_state,
+ .get_cur_state = ath10k_thermal_get_cur_throttle_state,
+ .set_cur_state = ath10k_thermal_set_cur_throttle_state,
+};
+
+static ssize_t ath10k_thermal_show_temp(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ath10k *ar = dev_get_drvdata(dev);
+ int ret, temperature;
+ unsigned long time_left;
+
+ mutex_lock(&ar->conf_mutex);
+
+ /* Can't get temperature when the card is off */
+ if (ar->state != ATH10K_STATE_ON) {
+ ret = -ENETDOWN;
+ goto out;
+ }
+
+ reinit_completion(&ar->thermal.wmi_sync);
+ ret = ath10k_wmi_pdev_get_temperature(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to read temperature %d\n", ret);
+ goto out;
+ }
+
+ if (test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags)) {
+ ret = -ESHUTDOWN;
+ goto out;
+ }
+
+ time_left = wait_for_completion_timeout(&ar->thermal.wmi_sync,
+ ATH10K_THERMAL_SYNC_TIMEOUT_HZ);
+ if (!time_left) {
+ ath10k_warn(ar, "failed to synchronize thermal read\n");
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ spin_lock_bh(&ar->data_lock);
+ temperature = ar->thermal.temperature;
+ spin_unlock_bh(&ar->data_lock);
+
+ /* display in millidegree celsius */
+ ret = sysfs_emit(buf, "%d\n", temperature * 1000);
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+void ath10k_thermal_event_temperature(struct ath10k *ar, int temperature)
+{
+ spin_lock_bh(&ar->data_lock);
+ ar->thermal.temperature = temperature;
+ spin_unlock_bh(&ar->data_lock);
+ complete(&ar->thermal.wmi_sync);
+}
+
+static SENSOR_DEVICE_ATTR(temp1_input, 0444, ath10k_thermal_show_temp,
+ NULL, 0);
+
+static struct attribute *ath10k_hwmon_attrs[] = {
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(ath10k_hwmon);
+
+void ath10k_thermal_set_throttling(struct ath10k *ar)
+{
+ u32 period, duration, enabled;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (!test_bit(WMI_SERVICE_THERM_THROT, ar->wmi.svc_map))
+ return;
+
+ if (!ar->wmi.ops->gen_pdev_set_quiet_mode)
+ return;
+
+ if (ar->state != ATH10K_STATE_ON)
+ return;
+
+ period = ar->thermal.quiet_period;
+ duration = (period * ar->thermal.throttle_state) / 100;
+ enabled = duration ? 1 : 0;
+
+ ret = ath10k_wmi_pdev_set_quiet_mode(ar, period, duration,
+ ATH10K_QUIET_START_OFFSET,
+ enabled);
+ if (ret) {
+ ath10k_warn(ar, "failed to set quiet mode period %u duarion %u enabled %u ret %d\n",
+ period, duration, enabled, ret);
+ }
+}
+
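+/* Throttling is realized as a quiet-mode duty cycle. With the default
+ * quiet period of 100 (ms) and a hypothetical throttle_state of 40:
+ *
+ * duration = (100 * 40) / 100 = 40
+ *
+ * i.e. the radio stays quiet for 40 ms of every 100 ms period, starting
+ * ATH10K_QUIET_START_OFFSET ms into it.
+ */
+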
+int ath10k_thermal_register(struct ath10k *ar)
+{
+ struct thermal_cooling_device *cdev;
+ struct device *hwmon_dev;
+ int ret;
+
+ if (!test_bit(WMI_SERVICE_THERM_THROT, ar->wmi.svc_map))
+ return 0;
+
+ cdev = thermal_cooling_device_register("ath10k_thermal", ar,
+ &ath10k_thermal_ops);
+
+ if (IS_ERR(cdev)) {
+ ath10k_err(ar, "failed to setup thermal device result: %ld\n",
+ PTR_ERR(cdev));
+ return -EINVAL;
+ }
+
+ ret = sysfs_create_link(&ar->dev->kobj, &cdev->device.kobj,
+ "cooling_device");
+ if (ret) {
+ ath10k_err(ar, "failed to create cooling device symlink\n");
+ goto err_cooling_destroy;
+ }
+
+ ar->thermal.cdev = cdev;
+ ar->thermal.quiet_period = ATH10K_QUIET_PERIOD_DEFAULT;
+
+ /* Do not register hwmon device when temperature reading is not
+ * supported by firmware
+ */
+ if (!(ar->wmi.ops->gen_pdev_get_temperature))
+ return 0;
+
+ /* Avoid linking error on devm_hwmon_device_register_with_groups, I
+ * guess linux/hwmon.h is missing proper stubs.
+ */
+ if (!IS_REACHABLE(CONFIG_HWMON))
+ return 0;
+
+ hwmon_dev = devm_hwmon_device_register_with_groups(ar->dev,
+ "ath10k_hwmon", ar,
+ ath10k_hwmon_groups);
+ if (IS_ERR(hwmon_dev)) {
+ ath10k_err(ar, "failed to register hwmon device: %ld\n",
+ PTR_ERR(hwmon_dev));
+ ret = -EINVAL;
+ goto err_remove_link;
+ }
+ return 0;
+
+err_remove_link:
+ sysfs_remove_link(&ar->dev->kobj, "cooling_device");
+err_cooling_destroy:
+ thermal_cooling_device_unregister(cdev);
+ return ret;
+}
+
+void ath10k_thermal_unregister(struct ath10k *ar)
+{
+ if (!test_bit(WMI_SERVICE_THERM_THROT, ar->wmi.svc_map))
+ return;
+
+ sysfs_remove_link(&ar->dev->kobj, "cooling_device");
+ thermal_cooling_device_unregister(ar->thermal.cdev);
+}
diff --git a/drivers/net/wireless/ath/ath10k/thermal.h b/drivers/net/wireless/ath/ath10k/thermal.h
new file mode 100644
index 000000000000..1f4de9fbf2b3
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/thermal.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: ISC */
+/*
+ * Copyright (c) 2014-2016 Qualcomm Atheros, Inc.
+ */
+#ifndef _THERMAL_
+#define _THERMAL_
+
+#define ATH10K_QUIET_PERIOD_DEFAULT 100
+#define ATH10K_QUIET_PERIOD_MIN 25
+#define ATH10K_QUIET_START_OFFSET 10
+#define ATH10K_HWMON_NAME_LEN 15
+#define ATH10K_THERMAL_SYNC_TIMEOUT_HZ (5 * HZ)
+#define ATH10K_THERMAL_THROTTLE_MAX 100
+
+struct ath10k_thermal {
+ struct thermal_cooling_device *cdev;
+ struct completion wmi_sync;
+
+ /* protected by conf_mutex */
+ u32 throttle_state;
+ u32 quiet_period;
+ /* temperature value in Celsius degree
+ * protected by data_lock
+ */
+ int temperature;
+};
+
+#if IS_REACHABLE(CONFIG_THERMAL)
+int ath10k_thermal_register(struct ath10k *ar);
+void ath10k_thermal_unregister(struct ath10k *ar);
+void ath10k_thermal_event_temperature(struct ath10k *ar, int temperature);
+void ath10k_thermal_set_throttling(struct ath10k *ar);
+#else
+static inline int ath10k_thermal_register(struct ath10k *ar)
+{
+ return 0;
+}
+
+static inline void ath10k_thermal_unregister(struct ath10k *ar)
+{
+}
+
+static inline void ath10k_thermal_event_temperature(struct ath10k *ar,
+ int temperature)
+{
+}
+
+static inline void ath10k_thermal_set_throttling(struct ath10k *ar)
+{
+}
+
+#endif
+#endif /* _THERMAL_ */
diff --git a/drivers/net/wireless/ath/ath10k/trace.c b/drivers/net/wireless/ath/ath10k/trace.c
new file mode 100644
index 000000000000..421ec47c59bd
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/trace.c
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#include <linux/export.h>
+#include <linux/module.h>
+
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+EXPORT_SYMBOL(__tracepoint_ath10k_log_dbg);
diff --git a/drivers/net/wireless/ath/ath10k/trace.h b/drivers/net/wireless/ath/ath10k/trace.h
new file mode 100644
index 000000000000..68b78ca17eaa
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/trace.h
@@ -0,0 +1,531 @@
+/* SPDX-License-Identifier: ISC */
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2016 Qualcomm Atheros, Inc.
+ */
+
+#if !defined(_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+
+#include <linux/tracepoint.h>
+#include "core.h"
+
+#if !defined(_TRACE_H_)
+static inline u32 ath10k_frm_hdr_len(const void *buf, size_t len)
+{
+ const struct ieee80211_hdr *hdr = buf;
+
+ /* In some rare cases (e.g. fcs error) the device reports a frame
+ * buffer shorter than what the frame header implies (e.g. len = 0).
+ * The buffer can still be accessed, so do a simple min() to guarantee
+ * the caller doesn't get a value greater than len.
+ */
+ return min_t(u32, len, ieee80211_hdrlen(hdr->frame_control));
+}
+#endif
+
+#define _TRACE_H_
+
+/* create empty functions when tracing is disabled */
+#if !defined(CONFIG_ATH10K_TRACING)
+#undef TRACE_EVENT
+#define TRACE_EVENT(name, proto, ...) \
+static inline void trace_ ## name(proto) {} \
+static inline bool trace_##name##_enabled(void) \
+{ \
+ return false; \
+}
+#undef DECLARE_EVENT_CLASS
+#define DECLARE_EVENT_CLASS(...)
+#undef DEFINE_EVENT
+#define DEFINE_EVENT(evt_class, name, proto, ...) \
+static inline void trace_ ## name(proto) {}
+#endif /* !CONFIG_ATH10K_TRACING */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM ath10k
+
+#define ATH10K_MSG_MAX 400
+
+DECLARE_EVENT_CLASS(ath10k_log_event,
+ TP_PROTO(struct ath10k *ar, struct va_format *vaf),
+ TP_ARGS(ar, vaf),
+ TP_STRUCT__entry(
+ __string(device, dev_name(ar->dev))
+ __string(driver, dev_driver_string(ar->dev))
+ __vstring(msg, vaf->fmt, vaf->va)
+ ),
+ TP_fast_assign(
+ __assign_str(device);
+ __assign_str(driver);
+ __assign_vstr(msg, vaf->fmt, vaf->va);
+ ),
+ TP_printk(
+ "%s %s %s",
+ __get_str(driver),
+ __get_str(device),
+ __get_str(msg)
+ )
+);
+
+DEFINE_EVENT(ath10k_log_event, ath10k_log_err,
+ TP_PROTO(struct ath10k *ar, struct va_format *vaf),
+ TP_ARGS(ar, vaf)
+);
+
+DEFINE_EVENT(ath10k_log_event, ath10k_log_warn,
+ TP_PROTO(struct ath10k *ar, struct va_format *vaf),
+ TP_ARGS(ar, vaf)
+);
+
+DEFINE_EVENT(ath10k_log_event, ath10k_log_info,
+ TP_PROTO(struct ath10k *ar, struct va_format *vaf),
+ TP_ARGS(ar, vaf)
+);
+
+TRACE_EVENT(ath10k_log_dbg,
+ TP_PROTO(struct ath10k *ar, unsigned int level, struct va_format *vaf),
+ TP_ARGS(ar, level, vaf),
+ TP_STRUCT__entry(
+ __string(device, dev_name(ar->dev))
+ __string(driver, dev_driver_string(ar->dev))
+ __field(unsigned int, level)
+ __vstring(msg, vaf->fmt, vaf->va)
+ ),
+ TP_fast_assign(
+ __assign_str(device);
+ __assign_str(driver);
+ __entry->level = level;
+ __assign_vstr(msg, vaf->fmt, vaf->va);
+ ),
+ TP_printk(
+ "%s %s %s",
+ __get_str(driver),
+ __get_str(device),
+ __get_str(msg)
+ )
+);
+
+TRACE_EVENT(ath10k_log_dbg_dump,
+ TP_PROTO(struct ath10k *ar, const char *msg, const char *prefix,
+ const void *buf, size_t buf_len),
+
+ TP_ARGS(ar, msg, prefix, buf, buf_len),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(ar->dev))
+ __string(driver, dev_driver_string(ar->dev))
+ __string(msg, msg)
+ __string(prefix, prefix)
+ __field(size_t, buf_len)
+ __dynamic_array(u8, buf, buf_len)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device);
+ __assign_str(driver);
+ __assign_str(msg);
+ __assign_str(prefix);
+ __entry->buf_len = buf_len;
+ memcpy(__get_dynamic_array(buf), buf, buf_len);
+ ),
+
+ TP_printk(
+ "%s %s %s/%s\n",
+ __get_str(driver),
+ __get_str(device),
+ __get_str(prefix),
+ __get_str(msg)
+ )
+);
+
+TRACE_EVENT(ath10k_wmi_cmd,
+ TP_PROTO(struct ath10k *ar, int id, const void *buf, size_t buf_len),
+
+ TP_ARGS(ar, id, buf, buf_len),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(ar->dev))
+ __string(driver, dev_driver_string(ar->dev))
+ __field(unsigned int, id)
+ __field(size_t, buf_len)
+ __dynamic_array(u8, buf, buf_len)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device);
+ __assign_str(driver);
+ __entry->id = id;
+ __entry->buf_len = buf_len;
+ memcpy(__get_dynamic_array(buf), buf, buf_len);
+ ),
+
+ TP_printk(
+ "%s %s id %d len %zu",
+ __get_str(driver),
+ __get_str(device),
+ __entry->id,
+ __entry->buf_len
+ )
+);
+
+TRACE_EVENT(ath10k_wmi_event,
+ TP_PROTO(struct ath10k *ar, int id, const void *buf, size_t buf_len),
+
+ TP_ARGS(ar, id, buf, buf_len),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(ar->dev))
+ __string(driver, dev_driver_string(ar->dev))
+ __field(unsigned int, id)
+ __field(size_t, buf_len)
+ __dynamic_array(u8, buf, buf_len)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device);
+ __assign_str(driver);
+ __entry->id = id;
+ __entry->buf_len = buf_len;
+ memcpy(__get_dynamic_array(buf), buf, buf_len);
+ ),
+
+ TP_printk(
+ "%s %s id %d len %zu",
+ __get_str(driver),
+ __get_str(device),
+ __entry->id,
+ __entry->buf_len
+ )
+);
+
+TRACE_EVENT(ath10k_htt_stats,
+ TP_PROTO(struct ath10k *ar, const void *buf, size_t buf_len),
+
+ TP_ARGS(ar, buf, buf_len),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(ar->dev))
+ __string(driver, dev_driver_string(ar->dev))
+ __field(size_t, buf_len)
+ __dynamic_array(u8, buf, buf_len)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device);
+ __assign_str(driver);
+ __entry->buf_len = buf_len;
+ memcpy(__get_dynamic_array(buf), buf, buf_len);
+ ),
+
+ TP_printk(
+ "%s %s len %zu",
+ __get_str(driver),
+ __get_str(device),
+ __entry->buf_len
+ )
+);
+
+TRACE_EVENT(ath10k_wmi_dbglog,
+ TP_PROTO(struct ath10k *ar, const void *buf, size_t buf_len),
+
+ TP_ARGS(ar, buf, buf_len),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(ar->dev))
+ __string(driver, dev_driver_string(ar->dev))
+ __field(u8, hw_type)
+ __field(size_t, buf_len)
+ __dynamic_array(u8, buf, buf_len)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device);
+ __assign_str(driver);
+ __entry->hw_type = ar->hw_rev;
+ __entry->buf_len = buf_len;
+ memcpy(__get_dynamic_array(buf), buf, buf_len);
+ ),
+
+ TP_printk(
+ "%s %s %d len %zu",
+ __get_str(driver),
+ __get_str(device),
+ __entry->hw_type,
+ __entry->buf_len
+ )
+);
+
+TRACE_EVENT(ath10k_htt_pktlog,
+ TP_PROTO(struct ath10k *ar, const void *buf, u16 buf_len),
+
+ TP_ARGS(ar, buf, buf_len),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(ar->dev))
+ __string(driver, dev_driver_string(ar->dev))
+ __field(u8, hw_type)
+ __field(u16, buf_len)
+ __dynamic_array(u8, pktlog, buf_len)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device);
+ __assign_str(driver);
+ __entry->hw_type = ar->hw_rev;
+ __entry->buf_len = buf_len;
+ memcpy(__get_dynamic_array(pktlog), buf, buf_len);
+ ),
+
+ TP_printk(
+ "%s %s %d size %u",
+ __get_str(driver),
+ __get_str(device),
+ __entry->hw_type,
+ __entry->buf_len
+ )
+);
+
+TRACE_EVENT(ath10k_htt_tx,
+ TP_PROTO(struct ath10k *ar, u16 msdu_id, u16 msdu_len,
+ u8 vdev_id, u8 tid),
+
+ TP_ARGS(ar, msdu_id, msdu_len, vdev_id, tid),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(ar->dev))
+ __string(driver, dev_driver_string(ar->dev))
+ __field(u16, msdu_id)
+ __field(u16, msdu_len)
+ __field(u8, vdev_id)
+ __field(u8, tid)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device);
+ __assign_str(driver);
+ __entry->msdu_id = msdu_id;
+ __entry->msdu_len = msdu_len;
+ __entry->vdev_id = vdev_id;
+ __entry->tid = tid;
+ ),
+
+ TP_printk(
+ "%s %s msdu_id %d msdu_len %d vdev_id %d tid %d",
+ __get_str(driver),
+ __get_str(device),
+ __entry->msdu_id,
+ __entry->msdu_len,
+ __entry->vdev_id,
+ __entry->tid
+ )
+);
+
+TRACE_EVENT(ath10k_txrx_tx_unref,
+ TP_PROTO(struct ath10k *ar, u16 msdu_id),
+
+ TP_ARGS(ar, msdu_id),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(ar->dev))
+ __string(driver, dev_driver_string(ar->dev))
+ __field(u16, msdu_id)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device);
+ __assign_str(driver);
+ __entry->msdu_id = msdu_id;
+ ),
+
+ TP_printk(
+ "%s %s msdu_id %d",
+ __get_str(driver),
+ __get_str(device),
+ __entry->msdu_id
+ )
+);
+
+DECLARE_EVENT_CLASS(ath10k_hdr_event,
+ TP_PROTO(struct ath10k *ar, const void *data, size_t len),
+
+ TP_ARGS(ar, data, len),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(ar->dev))
+ __string(driver, dev_driver_string(ar->dev))
+ __field(size_t, len)
+ __dynamic_array(u8, data, ath10k_frm_hdr_len(data, len))
+ ),
+
+ TP_fast_assign(
+ __assign_str(device);
+ __assign_str(driver);
+ __entry->len = ath10k_frm_hdr_len(data, len);
+ memcpy(__get_dynamic_array(data), data, __entry->len);
+ ),
+
+ TP_printk(
+ "%s %s len %zu\n",
+ __get_str(driver),
+ __get_str(device),
+ __entry->len
+ )
+);
+
+DECLARE_EVENT_CLASS(ath10k_payload_event,
+ TP_PROTO(struct ath10k *ar, const void *data, size_t len),
+
+ TP_ARGS(ar, data, len),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(ar->dev))
+ __string(driver, dev_driver_string(ar->dev))
+ __field(size_t, len)
+ __dynamic_array(u8, payload, (len -
+ ath10k_frm_hdr_len(data, len)))
+ ),
+
+ TP_fast_assign(
+ __assign_str(device);
+ __assign_str(driver);
+ __entry->len = len - ath10k_frm_hdr_len(data, len);
+ memcpy(__get_dynamic_array(payload),
+ data + ath10k_frm_hdr_len(data, len), __entry->len);
+ ),
+
+ TP_printk(
+ "%s %s len %zu\n",
+ __get_str(driver),
+ __get_str(device),
+ __entry->len
+ )
+);
+
+DEFINE_EVENT(ath10k_hdr_event, ath10k_tx_hdr,
+ TP_PROTO(struct ath10k *ar, const void *data, size_t len),
+ TP_ARGS(ar, data, len)
+);
+
+DEFINE_EVENT(ath10k_payload_event, ath10k_tx_payload,
+ TP_PROTO(struct ath10k *ar, const void *data, size_t len),
+ TP_ARGS(ar, data, len)
+);
+
+DEFINE_EVENT(ath10k_hdr_event, ath10k_rx_hdr,
+ TP_PROTO(struct ath10k *ar, const void *data, size_t len),
+ TP_ARGS(ar, data, len)
+);
+
+DEFINE_EVENT(ath10k_payload_event, ath10k_rx_payload,
+ TP_PROTO(struct ath10k *ar, const void *data, size_t len),
+ TP_ARGS(ar, data, len)
+);
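+
+/*
+ * The two event classes above let several tracepoints share one entry
+ * layout and assign logic; each DEFINE_EVENT() only contributes a name.
+ * Adding another frame-header event would be a one-liner sketch like this
+ * (the event name here is hypothetical):
+ *
+ *	DEFINE_EVENT(ath10k_hdr_event, ath10k_beacon_hdr,
+ *		     TP_PROTO(struct ath10k *ar, const void *data, size_t len),
+ *		     TP_ARGS(ar, data, len)
+ *	);
+ */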
+
+TRACE_EVENT(ath10k_htt_rx_desc,
+ TP_PROTO(struct ath10k *ar, const void *data, size_t len),
+
+ TP_ARGS(ar, data, len),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(ar->dev))
+ __string(driver, dev_driver_string(ar->dev))
+ __field(u8, hw_type)
+ __field(u16, len)
+ __dynamic_array(u8, rxdesc, len)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device);
+ __assign_str(driver);
+ __entry->hw_type = ar->hw_rev;
+ __entry->len = len;
+ memcpy(__get_dynamic_array(rxdesc), data, len);
+ ),
+
+ TP_printk(
+ "%s %s %d rxdesc len %d",
+ __get_str(driver),
+ __get_str(device),
+ __entry->hw_type,
+ __entry->len
+ )
+);
+
+TRACE_EVENT(ath10k_wmi_diag_container,
+ TP_PROTO(struct ath10k *ar,
+ u8 type,
+ u32 timestamp,
+ u32 code,
+ u16 len,
+ const void *data),
+
+ TP_ARGS(ar, type, timestamp, code, len, data),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(ar->dev))
+ __string(driver, dev_driver_string(ar->dev))
+ __field(u8, type)
+ __field(u32, timestamp)
+ __field(u32, code)
+ __field(u16, len)
+ __dynamic_array(u8, data, len)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device);
+ __assign_str(driver);
+ __entry->type = type;
+ __entry->timestamp = timestamp;
+ __entry->code = code;
+ __entry->len = len;
+ memcpy(__get_dynamic_array(data), data, len);
+ ),
+
+ TP_printk(
+ "%s %s diag container type %u timestamp %u code %u len %d",
+ __get_str(driver),
+ __get_str(device),
+ __entry->type,
+ __entry->timestamp,
+ __entry->code,
+ __entry->len
+ )
+);
+
+TRACE_EVENT(ath10k_wmi_diag,
+ TP_PROTO(struct ath10k *ar, const void *data, size_t len),
+
+ TP_ARGS(ar, data, len),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(ar->dev))
+ __string(driver, dev_driver_string(ar->dev))
+ __field(u16, len)
+ __dynamic_array(u8, data, len)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device);
+ __assign_str(driver);
+ __entry->len = len;
+ memcpy(__get_dynamic_array(data), data, len);
+ ),
+
+ TP_printk(
+ "%s %s tlv diag len %d",
+ __get_str(driver),
+ __get_str(device),
+ __entry->len
+ )
+);
+
+#endif /* _TRACE_H_ || TRACE_HEADER_MULTI_READ */
+
+/* we don't want to use include/trace/events */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
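+
+/*
+ * Exactly one translation unit must expand the tracepoint bodies by
+ * defining CREATE_TRACE_POINTS before including this header; this is the
+ * standard <trace/define_trace.h> contract, and in ath10k that unit is
+ * trace.c:
+ *
+ *	#define CREATE_TRACE_POINTS
+ *	#include "trace.h"
+ */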
diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c
new file mode 100644
index 000000000000..493bfb410aff
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/txrx.c
@@ -0,0 +1,288 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2016 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#include "core.h"
+#include "txrx.h"
+#include "htt.h"
+#include "mac.h"
+#include "debug.h"
+
+static void ath10k_report_offchan_tx(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+ if (likely(!(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)))
+ return;
+
+ if (ath10k_mac_tx_frm_has_freq(ar))
+ return;
+
+ /* If the original wait_for_completion() timed out before
+ * {data,mgmt}_tx_completed() was called then we could complete
+ * offchan_tx_completed for a different skb. Prevent this by using
+ * offchan_tx_skb.
+ */
+ spin_lock_bh(&ar->data_lock);
+ if (ar->offchan_tx_skb != skb) {
+ ath10k_warn(ar, "completed old offchannel frame\n");
+ goto out;
+ }
+
+ complete(&ar->offchan_tx_completed);
+ ar->offchan_tx_skb = NULL; /* just for sanity */
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "completed offchannel skb %p\n", skb);
+out:
+ spin_unlock_bh(&ar->data_lock);
+}
+
+int ath10k_txrx_tx_unref(struct ath10k_htt *htt,
+ const struct htt_tx_done *tx_done)
+{
+ struct ieee80211_tx_status status;
+ struct ath10k *ar = htt->ar;
+ struct device *dev = ar->dev;
+ struct ieee80211_tx_info *info;
+ struct ieee80211_txq *txq;
+ struct ath10k_skb_cb *skb_cb;
+ struct ath10k_txq *artxq;
+ struct sk_buff *msdu;
+ u8 flags;
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT,
+ "htt tx completion msdu_id %u status %d\n",
+ tx_done->msdu_id, tx_done->status);
+
+ if (tx_done->msdu_id >= htt->max_num_pending_tx) {
+ ath10k_warn(ar, "warning: msdu_id %d too big, ignoring\n",
+ tx_done->msdu_id);
+ return -EINVAL;
+ }
+
+ spin_lock_bh(&htt->tx_lock);
+ msdu = idr_find(&htt->pending_tx, tx_done->msdu_id);
+ if (!msdu) {
+ ath10k_warn(ar, "received tx completion for invalid msdu_id: %d\n",
+ tx_done->msdu_id);
+ spin_unlock_bh(&htt->tx_lock);
+ return -ENOENT;
+ }
+
+ skb_cb = ATH10K_SKB_CB(msdu);
+ txq = skb_cb->txq;
+
+ if (txq) {
+ artxq = (void *)txq->drv_priv;
+ artxq->num_fw_queued--;
+ }
+
+ flags = skb_cb->flags;
+ ath10k_htt_tx_free_msdu_id(htt, tx_done->msdu_id);
+ ath10k_htt_tx_dec_pending(htt);
+ spin_unlock_bh(&htt->tx_lock);
+
+ rcu_read_lock();
+ if (txq && txq->sta && skb_cb->airtime_est)
+ ieee80211_sta_register_airtime(txq->sta, txq->tid,
+ skb_cb->airtime_est, 0);
+ rcu_read_unlock();
+
+ if (ar->bus_param.dev_type != ATH10K_DEV_TYPE_HL)
+ dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
+
+ ath10k_report_offchan_tx(htt->ar, msdu);
+
+ info = IEEE80211_SKB_CB(msdu);
+ memset(&info->status, 0, sizeof(info->status));
+ info->status.rates[0].idx = -1;
+
+ trace_ath10k_txrx_tx_unref(ar, tx_done->msdu_id);
+
+ if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) &&
+ !(flags & ATH10K_SKB_F_NOACK_TID))
+ info->flags |= IEEE80211_TX_STAT_ACK;
+
+ if (tx_done->status == HTT_TX_COMPL_STATE_NOACK)
+ info->flags &= ~IEEE80211_TX_STAT_ACK;
+
+ if ((tx_done->status == HTT_TX_COMPL_STATE_ACK) &&
+ ((info->flags & IEEE80211_TX_CTL_NO_ACK) ||
+ (flags & ATH10K_SKB_F_NOACK_TID)))
+ info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
+
+ if (tx_done->status == HTT_TX_COMPL_STATE_DISCARD) {
+ if ((info->flags & IEEE80211_TX_CTL_NO_ACK) ||
+ (flags & ATH10K_SKB_F_NOACK_TID))
+ info->flags &= ~IEEE80211_TX_STAT_NOACK_TRANSMITTED;
+ else
+ info->flags &= ~IEEE80211_TX_STAT_ACK;
+ }
+
+ if (tx_done->status == HTT_TX_COMPL_STATE_ACK &&
+ tx_done->ack_rssi != ATH10K_INVALID_RSSI) {
+ info->status.ack_signal = ATH10K_DEFAULT_NOISE_FLOOR +
+ tx_done->ack_rssi;
+ info->status.flags |= IEEE80211_TX_STATUS_ACK_SIGNAL_VALID;
+ }
+
+ memset(&status, 0, sizeof(status));
+ status.skb = msdu;
+ status.info = info;
+
+ rcu_read_lock();
+
+ if (txq)
+ status.sta = txq->sta;
+
+ ieee80211_tx_status_ext(htt->ar->hw, &status);
+
+ rcu_read_unlock();
+
+ /* we do not own the msdu anymore */
+
+ return 0;
+}
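+
+/*
+ * Sketch of the expected caller, the HTT TX completion path (the
+ * surrounding parsing code is elided and illustrative): the handler
+ * fills a struct htt_tx_done per completed MSDU and hands it off here.
+ *
+ *	struct htt_tx_done tx_done = {
+ *		.msdu_id = msdu_id,
+ *		.status = HTT_TX_COMPL_STATE_ACK,
+ *	};
+ *
+ *	ath10k_txrx_tx_unref(htt, &tx_done);
+ */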
+
+struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id,
+ const u8 *addr)
+{
+ struct ath10k_peer *peer;
+
+ lockdep_assert_held(&ar->data_lock);
+
+ list_for_each_entry(peer, &ar->peers, list) {
+ if (peer->vdev_id != vdev_id)
+ continue;
+ if (!ether_addr_equal(peer->addr, addr))
+ continue;
+
+ return peer;
+ }
+
+ return NULL;
+}
+
+struct ath10k_peer *ath10k_peer_find_by_id(struct ath10k *ar, int peer_id)
+{
+ struct ath10k_peer *peer;
+
+ if (peer_id >= BITS_PER_TYPE(peer->peer_ids))
+ return NULL;
+
+ lockdep_assert_held(&ar->data_lock);
+
+ list_for_each_entry(peer, &ar->peers, list)
+ if (test_bit(peer_id, peer->peer_ids))
+ return peer;
+
+ return NULL;
+}
+
+static int ath10k_wait_for_peer_common(struct ath10k *ar, int vdev_id,
+ const u8 *addr, bool expect_mapped)
+{
+ long time_left;
+
+ time_left = wait_event_timeout(ar->peer_mapping_wq, ({
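+ /* The condition block below is a GCC statement expression; its value
+ * is the last expression, so the wait also terminates early once a
+ * crash flush is flagged, not only when the peer reaches the
+ * expected mapping state.
+ */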
+ bool mapped;
+
+ spin_lock_bh(&ar->data_lock);
+ mapped = !!ath10k_peer_find(ar, vdev_id, addr);
+ spin_unlock_bh(&ar->data_lock);
+
+ (mapped == expect_mapped ||
+ test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags));
+ }), 3 * HZ);
+
+ if (time_left == 0)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+int ath10k_wait_for_peer_created(struct ath10k *ar, int vdev_id, const u8 *addr)
+{
+ return ath10k_wait_for_peer_common(ar, vdev_id, addr, true);
+}
+
+int ath10k_wait_for_peer_deleted(struct ath10k *ar, int vdev_id, const u8 *addr)
+{
+ return ath10k_wait_for_peer_common(ar, vdev_id, addr, false);
+}
+
+void ath10k_peer_map_event(struct ath10k_htt *htt,
+ struct htt_peer_map_event *ev)
+{
+ struct ath10k *ar = htt->ar;
+ struct ath10k_peer *peer;
+
+ if (ev->peer_id >= ATH10K_MAX_NUM_PEER_IDS) {
+ ath10k_warn(ar,
+ "received htt peer map event with idx out of bounds: %u\n",
+ ev->peer_id);
+ return;
+ }
+
+ spin_lock_bh(&ar->data_lock);
+ peer = ath10k_peer_find(ar, ev->vdev_id, ev->addr);
+ if (!peer) {
+ peer = kzalloc(sizeof(*peer), GFP_ATOMIC);
+ if (!peer)
+ goto exit;
+
+ peer->vdev_id = ev->vdev_id;
+ ether_addr_copy(peer->addr, ev->addr);
+ list_add(&peer->list, &ar->peers);
+ wake_up(&ar->peer_mapping_wq);
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt peer map vdev %d peer %pM id %d\n",
+ ev->vdev_id, ev->addr, ev->peer_id);
+
+ WARN_ON(ar->peer_map[ev->peer_id] && (ar->peer_map[ev->peer_id] != peer));
+ ar->peer_map[ev->peer_id] = peer;
+ set_bit(ev->peer_id, peer->peer_ids);
+exit:
+ spin_unlock_bh(&ar->data_lock);
+}
+
+void ath10k_peer_unmap_event(struct ath10k_htt *htt,
+ struct htt_peer_unmap_event *ev)
+{
+ struct ath10k *ar = htt->ar;
+ struct ath10k_peer *peer;
+
+ if (ev->peer_id >= ATH10K_MAX_NUM_PEER_IDS) {
+ ath10k_warn(ar,
+ "received htt peer unmap event with idx out of bounds: %u\n",
+ ev->peer_id);
+ return;
+ }
+
+ spin_lock_bh(&ar->data_lock);
+ peer = ath10k_peer_find_by_id(ar, ev->peer_id);
+ if (!peer) {
+ ath10k_warn(ar, "peer-unmap-event: unknown peer id %d\n",
+ ev->peer_id);
+ goto exit;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt peer unmap vdev %d peer %pM id %d\n",
+ peer->vdev_id, peer->addr, ev->peer_id);
+
+ ar->peer_map[ev->peer_id] = NULL;
+ clear_bit(ev->peer_id, peer->peer_ids);
+
+ if (bitmap_empty(peer->peer_ids, ATH10K_MAX_NUM_PEER_IDS)) {
+ list_del(&peer->list);
+ kfree(peer);
+ wake_up(&ar->peer_mapping_wq);
+ }
+
+exit:
+ spin_unlock_bh(&ar->data_lock);
+}
diff --git a/drivers/net/wireless/ath/ath10k/txrx.h b/drivers/net/wireless/ath/ath10k/txrx.h
new file mode 100644
index 000000000000..ecac441d83a7
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/txrx.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: ISC */
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2014,2016 Qualcomm Atheros, Inc.
+ */
+#ifndef _TXRX_H_
+#define _TXRX_H_
+
+#include "htt.h"
+
+int ath10k_txrx_tx_unref(struct ath10k_htt *htt,
+ const struct htt_tx_done *tx_done);
+
+struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id,
+ const u8 *addr);
+struct ath10k_peer *ath10k_peer_find_by_id(struct ath10k *ar, int peer_id);
+int ath10k_wait_for_peer_created(struct ath10k *ar, int vdev_id,
+ const u8 *addr);
+int ath10k_wait_for_peer_deleted(struct ath10k *ar, int vdev_id,
+ const u8 *addr);
+
+void ath10k_peer_map_event(struct ath10k_htt *htt,
+ struct htt_peer_map_event *ev);
+void ath10k_peer_unmap_event(struct ath10k_htt *htt,
+ struct htt_peer_unmap_event *ev);
+
+#endif
diff --git a/drivers/net/wireless/ath/ath10k/usb.c b/drivers/net/wireless/ath/ath10k/usb.c
new file mode 100644
index 000000000000..1732a4f98418
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/usb.c
@@ -0,0 +1,1130 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (c) 2007-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2012,2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2016-2017 Erik Stromdahl <erik.stromdahl@gmail.com>
+ */
+
+#include <linux/module.h>
+#include <linux/usb.h>
+
+#include "debug.h"
+#include "core.h"
+#include "bmi.h"
+#include "hif.h"
+#include "htc.h"
+#include "usb.h"
+
+static void ath10k_usb_post_recv_transfers(struct ath10k *ar,
+ struct ath10k_usb_pipe *recv_pipe);
+
+/* inlined helper functions */
+
+static inline enum ath10k_htc_ep_id
+eid_from_htc_hdr(struct ath10k_htc_hdr *htc_hdr)
+{
+ return (enum ath10k_htc_ep_id)htc_hdr->eid;
+}
+
+static inline bool is_trailer_only_msg(struct ath10k_htc_hdr *htc_hdr)
+{
+ return __le16_to_cpu(htc_hdr->len) == htc_hdr->trailer_len;
+}
+
+/* pipe/urb operations */
+static struct ath10k_urb_context *
+ath10k_usb_alloc_urb_from_pipe(struct ath10k_usb_pipe *pipe)
+{
+ struct ath10k_urb_context *urb_context = NULL;
+ unsigned long flags;
+
+ /* bail if this pipe is not initialized */
+ if (!pipe->ar_usb)
+ return NULL;
+
+ spin_lock_irqsave(&pipe->ar_usb->cs_lock, flags);
+ if (!list_empty(&pipe->urb_list_head)) {
+ urb_context = list_first_entry(&pipe->urb_list_head,
+ struct ath10k_urb_context, link);
+ list_del(&urb_context->link);
+ pipe->urb_cnt--;
+ }
+ spin_unlock_irqrestore(&pipe->ar_usb->cs_lock, flags);
+
+ return urb_context;
+}
+
+static void ath10k_usb_free_urb_to_pipe(struct ath10k_usb_pipe *pipe,
+ struct ath10k_urb_context *urb_context)
+{
+ unsigned long flags;
+
+ /* bail if this pipe is not initialized */
+ if (!pipe->ar_usb)
+ return;
+
+ spin_lock_irqsave(&pipe->ar_usb->cs_lock, flags);
+
+ pipe->urb_cnt++;
+ list_add(&urb_context->link, &pipe->urb_list_head);
+
+ spin_unlock_irqrestore(&pipe->ar_usb->cs_lock, flags);
+}
+
+static void ath10k_usb_cleanup_recv_urb(struct ath10k_urb_context *urb_context)
+{
+ dev_kfree_skb(urb_context->skb);
+ urb_context->skb = NULL;
+
+ ath10k_usb_free_urb_to_pipe(urb_context->pipe, urb_context);
+}
+
+static void ath10k_usb_free_pipe_resources(struct ath10k *ar,
+ struct ath10k_usb_pipe *pipe)
+{
+ struct ath10k_urb_context *urb_context;
+
+ if (!pipe->ar_usb) {
+ /* nothing allocated for this pipe */
+ return;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_USB,
+ "usb free resources lpipe %d hpipe 0x%x urbs %d avail %d\n",
+ pipe->logical_pipe_num, pipe->usb_pipe_handle,
+ pipe->urb_alloc, pipe->urb_cnt);
+
+ if (pipe->urb_alloc != pipe->urb_cnt) {
+ ath10k_dbg(ar, ATH10K_DBG_USB,
+ "usb urb leak lpipe %d hpipe 0x%x urbs %d avail %d\n",
+ pipe->logical_pipe_num, pipe->usb_pipe_handle,
+ pipe->urb_alloc, pipe->urb_cnt);
+ }
+
+ for (;;) {
+ urb_context = ath10k_usb_alloc_urb_from_pipe(pipe);
+
+ if (!urb_context)
+ break;
+
+ kfree(urb_context);
+ }
+}
+
+static void ath10k_usb_cleanup_pipe_resources(struct ath10k *ar)
+{
+ struct ath10k_usb *ar_usb = ath10k_usb_priv(ar);
+ int i;
+
+ for (i = 0; i < ATH10K_USB_PIPE_MAX; i++)
+ ath10k_usb_free_pipe_resources(ar, &ar_usb->pipes[i]);
+}
+
+/* hif usb rx/tx completion functions */
+
+static void ath10k_usb_recv_complete(struct urb *urb)
+{
+ struct ath10k_urb_context *urb_context = urb->context;
+ struct ath10k_usb_pipe *pipe = urb_context->pipe;
+ struct ath10k *ar = pipe->ar_usb->ar;
+ struct sk_buff *skb;
+ int status = 0;
+
+ ath10k_dbg(ar, ATH10K_DBG_USB_BULK,
+ "usb recv pipe %d stat %d len %d urb 0x%p\n",
+ pipe->logical_pipe_num, urb->status, urb->actual_length,
+ urb);
+
+ if (urb->status != 0) {
+ status = -EIO;
+ switch (urb->status) {
+ case -ECONNRESET:
+ case -ENOENT:
+ case -ESHUTDOWN:
+ /* no need to spew these errors when the device
+ * is removed or the urb is killed during driver
+ * shutdown
+ */
+ status = -ECANCELED;
+ break;
+ default:
+ ath10k_dbg(ar, ATH10K_DBG_USB_BULK,
+ "usb recv pipe %d ep 0x%2.2x failed: %d\n",
+ pipe->logical_pipe_num,
+ pipe->ep_address, urb->status);
+ break;
+ }
+ goto cleanup_recv_urb;
+ }
+
+ if (urb->actual_length == 0)
+ goto cleanup_recv_urb;
+
+ skb = urb_context->skb;
+
+ /* we are going to pass it up */
+ urb_context->skb = NULL;
+ skb_put(skb, urb->actual_length);
+
+ /* note: queue implements a lock */
+ skb_queue_tail(&pipe->io_comp_queue, skb);
+ schedule_work(&pipe->io_complete_work);
+
+cleanup_recv_urb:
+ ath10k_usb_cleanup_recv_urb(urb_context);
+
+ if (status == 0 &&
+ pipe->urb_cnt >= pipe->urb_cnt_thresh) {
+ /* our free urbs are piling up, post more transfers */
+ ath10k_usb_post_recv_transfers(ar, pipe);
+ }
+}
+
+static void ath10k_usb_transmit_complete(struct urb *urb)
+{
+ struct ath10k_urb_context *urb_context = urb->context;
+ struct ath10k_usb_pipe *pipe = urb_context->pipe;
+ struct ath10k *ar = pipe->ar_usb->ar;
+ struct sk_buff *skb;
+
+ if (urb->status != 0) {
+ ath10k_dbg(ar, ATH10K_DBG_USB_BULK,
+ "pipe: %d, failed:%d\n",
+ pipe->logical_pipe_num, urb->status);
+ }
+
+ skb = urb_context->skb;
+ urb_context->skb = NULL;
+ ath10k_usb_free_urb_to_pipe(urb_context->pipe, urb_context);
+
+ /* note: queue implements a lock */
+ skb_queue_tail(&pipe->io_comp_queue, skb);
+ schedule_work(&pipe->io_complete_work);
+}
+
+/* pipe operations */
+static void ath10k_usb_post_recv_transfers(struct ath10k *ar,
+ struct ath10k_usb_pipe *recv_pipe)
+{
+ struct ath10k_urb_context *urb_context;
+ struct urb *urb;
+ int usb_status;
+
+ for (;;) {
+ urb_context = ath10k_usb_alloc_urb_from_pipe(recv_pipe);
+ if (!urb_context)
+ break;
+
+ urb_context->skb = dev_alloc_skb(ATH10K_USB_RX_BUFFER_SIZE);
+ if (!urb_context->skb)
+ goto err;
+
+ urb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (!urb)
+ goto err;
+
+ usb_fill_bulk_urb(urb,
+ recv_pipe->ar_usb->udev,
+ recv_pipe->usb_pipe_handle,
+ urb_context->skb->data,
+ ATH10K_USB_RX_BUFFER_SIZE,
+ ath10k_usb_recv_complete, urb_context);
+
+ ath10k_dbg(ar, ATH10K_DBG_USB_BULK,
+ "usb bulk recv submit %d 0x%x ep 0x%2.2x len %d buf 0x%p\n",
+ recv_pipe->logical_pipe_num,
+ recv_pipe->usb_pipe_handle, recv_pipe->ep_address,
+ ATH10K_USB_RX_BUFFER_SIZE, urb_context->skb);
+
+ usb_anchor_urb(urb, &recv_pipe->urb_submitted);
+ usb_status = usb_submit_urb(urb, GFP_ATOMIC);
+
+ if (usb_status) {
+ ath10k_dbg(ar, ATH10K_DBG_USB_BULK,
+ "usb bulk recv failed: %d\n",
+ usb_status);
+ usb_unanchor_urb(urb);
+ usb_free_urb(urb);
+ goto err;
+ }
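+ /* The anchor and the USB core each hold their own reference
+ * to the urb while the transfer is in flight, so dropping our
+ * allocation reference here does not free it early.
+ */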
+ usb_free_urb(urb);
+ }
+
+ return;
+
+err:
+ ath10k_usb_cleanup_recv_urb(urb_context);
+}
+
+static void ath10k_usb_flush_all(struct ath10k *ar)
+{
+ struct ath10k_usb *ar_usb = ath10k_usb_priv(ar);
+ int i;
+
+ for (i = 0; i < ATH10K_USB_PIPE_MAX; i++) {
+ if (ar_usb->pipes[i].ar_usb) {
+ usb_kill_anchored_urbs(&ar_usb->pipes[i].urb_submitted);
+ cancel_work_sync(&ar_usb->pipes[i].io_complete_work);
+ }
+ }
+}
+
+static void ath10k_usb_start_recv_pipes(struct ath10k *ar)
+{
+ struct ath10k_usb *ar_usb = ath10k_usb_priv(ar);
+
+ ar_usb->pipes[ATH10K_USB_PIPE_RX_DATA].urb_cnt_thresh = 1;
+
+ ath10k_usb_post_recv_transfers(ar,
+ &ar_usb->pipes[ATH10K_USB_PIPE_RX_DATA]);
+}
+
+static void ath10k_usb_tx_complete(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct ath10k_htc_hdr *htc_hdr;
+ struct ath10k_htc_ep *ep;
+
+ htc_hdr = (struct ath10k_htc_hdr *)skb->data;
+ ep = &ar->htc.endpoint[htc_hdr->eid];
+ ath10k_htc_notify_tx_completion(ep, skb);
+ /* The TX complete handler now owns the skb... */
+}
+
+static void ath10k_usb_rx_complete(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct ath10k_htc *htc = &ar->htc;
+ struct ath10k_htc_hdr *htc_hdr;
+ enum ath10k_htc_ep_id eid;
+ struct ath10k_htc_ep *ep;
+ u16 payload_len;
+ u8 *trailer;
+ int ret;
+
+ htc_hdr = (struct ath10k_htc_hdr *)skb->data;
+ eid = eid_from_htc_hdr(htc_hdr);
+ ep = &ar->htc.endpoint[eid];
+
+ if (ep->service_id == 0) {
+ ath10k_warn(ar, "ep %d is not connected\n", eid);
+ goto out_free_skb;
+ }
+
+ payload_len = le16_to_cpu(htc_hdr->len);
+ if (!payload_len) {
+ ath10k_warn(ar, "zero length frame received, firmware crashed?\n");
+ goto out_free_skb;
+ }
+
+ if (payload_len < htc_hdr->trailer_len) {
+ ath10k_warn(ar, "malformed frame received, firmware crashed?\n");
+ goto out_free_skb;
+ }
+
+ if (htc_hdr->flags & ATH10K_HTC_FLAG_TRAILER_PRESENT) {
+ trailer = skb->data + sizeof(*htc_hdr) + payload_len -
+ htc_hdr->trailer_len;
+
+ ret = ath10k_htc_process_trailer(htc,
+ trailer,
+ htc_hdr->trailer_len,
+ eid,
+ NULL,
+ NULL);
+ if (ret)
+ goto out_free_skb;
+
+ if (is_trailer_only_msg(htc_hdr))
+ goto out_free_skb;
+
+ /* strip off the trailer from the skb since it should not
+ * be passed on to upper layers
+ */
+ skb_trim(skb, skb->len - htc_hdr->trailer_len);
+ }
+
+ skb_pull(skb, sizeof(*htc_hdr));
+ ep->ep_ops.ep_rx_complete(ar, skb);
+ /* The RX complete handler now owns the skb... */
+
+ if (test_bit(ATH10K_FLAG_CORE_REGISTERED, &ar->dev_flags)) {
+ local_bh_disable();
+ napi_schedule(&ar->napi);
+ local_bh_enable();
+ }
+
+ return;
+
+out_free_skb:
+ dev_kfree_skb(skb);
+}
+
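+/*
+ * The URB completion callbacks above run in interrupt context, so they only
+ * queue the skb and schedule this work item; the HTC/HIF processing then
+ * happens here in process context.
+ */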
+static void ath10k_usb_io_comp_work(struct work_struct *work)
+{
+ struct ath10k_usb_pipe *pipe = container_of(work,
+ struct ath10k_usb_pipe,
+ io_complete_work);
+ struct ath10k *ar = pipe->ar_usb->ar;
+ struct sk_buff *skb;
+
+ while ((skb = skb_dequeue(&pipe->io_comp_queue))) {
+ if (pipe->flags & ATH10K_USB_PIPE_FLAG_TX)
+ ath10k_usb_tx_complete(ar, skb);
+ else
+ ath10k_usb_rx_complete(ar, skb);
+ }
+}
+
+#define ATH10K_USB_MAX_DIAG_CMD (sizeof(struct ath10k_usb_ctrl_diag_cmd_write))
+#define ATH10K_USB_MAX_DIAG_RESP (sizeof(struct ath10k_usb_ctrl_diag_resp_read))
+
+static void ath10k_usb_destroy(struct ath10k *ar)
+{
+ struct ath10k_usb *ar_usb = ath10k_usb_priv(ar);
+
+ ath10k_usb_flush_all(ar);
+ ath10k_usb_cleanup_pipe_resources(ar);
+ usb_set_intfdata(ar_usb->interface, NULL);
+
+ kfree(ar_usb->diag_cmd_buffer);
+ kfree(ar_usb->diag_resp_buffer);
+}
+
+static int ath10k_usb_hif_start(struct ath10k *ar)
+{
+ int i;
+ struct ath10k_usb *ar_usb = ath10k_usb_priv(ar);
+
+ ath10k_core_napi_enable(ar);
+ ath10k_usb_start_recv_pipes(ar);
+
+ /* set the TX resource avail threshold for each TX pipe */
+ for (i = ATH10K_USB_PIPE_TX_CTRL;
+ i <= ATH10K_USB_PIPE_TX_DATA_HP; i++) {
+ ar_usb->pipes[i].urb_cnt_thresh =
+ ar_usb->pipes[i].urb_alloc / 2;
+ }
+
+ return 0;
+}
+
+static int ath10k_usb_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
+ struct ath10k_hif_sg_item *items, int n_items)
+{
+ struct ath10k_usb *ar_usb = ath10k_usb_priv(ar);
+ struct ath10k_usb_pipe *pipe = &ar_usb->pipes[pipe_id];
+ struct ath10k_urb_context *urb_context;
+ struct sk_buff *skb;
+ struct urb *urb;
+ int ret, i;
+
+ for (i = 0; i < n_items; i++) {
+ urb_context = ath10k_usb_alloc_urb_from_pipe(pipe);
+ if (!urb_context) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ skb = items[i].transfer_context;
+ urb_context->skb = skb;
+
+ urb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (!urb) {
+ ret = -ENOMEM;
+ goto err_free_urb_to_pipe;
+ }
+
+ usb_fill_bulk_urb(urb,
+ ar_usb->udev,
+ pipe->usb_pipe_handle,
+ skb->data,
+ skb->len,
+ ath10k_usb_transmit_complete, urb_context);
+
+ if (!(skb->len % pipe->max_packet_size)) {
+ /* hit a max packet boundary on this pipe */
+ urb->transfer_flags |= URB_ZERO_PACKET;
+ }
+
+ usb_anchor_urb(urb, &pipe->urb_submitted);
+ ret = usb_submit_urb(urb, GFP_ATOMIC);
+ if (ret) {
+ ath10k_dbg(ar, ATH10K_DBG_USB_BULK,
+ "usb bulk transmit failed: %d\n", ret);
+ usb_unanchor_urb(urb);
+ usb_free_urb(urb);
+ ret = -EINVAL;
+ goto err_free_urb_to_pipe;
+ }
+
+ usb_free_urb(urb);
+ }
+
+ return 0;
+
+err_free_urb_to_pipe:
+ ath10k_usb_free_urb_to_pipe(urb_context->pipe, urb_context);
+err:
+ return ret;
+}
+
+static void ath10k_usb_hif_stop(struct ath10k *ar)
+{
+ ath10k_usb_flush_all(ar);
+ ath10k_core_napi_sync_disable(ar);
+}
+
+static u16 ath10k_usb_hif_get_free_queue_number(struct ath10k *ar, u8 pipe_id)
+{
+ struct ath10k_usb *ar_usb = ath10k_usb_priv(ar);
+
+ return ar_usb->pipes[pipe_id].urb_cnt;
+}
+
+static int ath10k_usb_submit_ctrl_out(struct ath10k *ar,
+ u8 req, u16 value, u16 index, void *data,
+ u32 size)
+{
+ struct ath10k_usb *ar_usb = ath10k_usb_priv(ar);
+ u8 *buf = NULL;
+ int ret;
+
+ if (size > 0) {
+ buf = kmemdup(data, size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ }
+
+ /* note: on success usb_control_msg() returns the number of bytes transferred */
+ ret = usb_control_msg(ar_usb->udev,
+ usb_sndctrlpipe(ar_usb->udev, 0),
+ req,
+ USB_DIR_OUT | USB_TYPE_VENDOR |
+ USB_RECIP_DEVICE, value, index, buf,
+ size, 1000);
+
+ if (ret < 0) {
+ ath10k_warn(ar, "Failed to submit usb control message: %d\n",
+ ret);
+ kfree(buf);
+ return ret;
+ }
+
+ kfree(buf);
+
+ return 0;
+}
+
+static int ath10k_usb_submit_ctrl_in(struct ath10k *ar,
+ u8 req, u16 value, u16 index, void *data,
+ u32 size)
+{
+ struct ath10k_usb *ar_usb = ath10k_usb_priv(ar);
+ u8 *buf = NULL;
+ int ret;
+
+ if (size > 0) {
+ buf = kmalloc(size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ }
+
+ /* note: on success usb_control_msg() returns the number of bytes transferred */
+ ret = usb_control_msg(ar_usb->udev,
+ usb_rcvctrlpipe(ar_usb->udev, 0),
+ req,
+ USB_DIR_IN | USB_TYPE_VENDOR |
+ USB_RECIP_DEVICE, value, index, buf,
+ size, 2000);
+
+ if (ret < 0) {
+ ath10k_warn(ar, "Failed to read usb control message: %d\n",
+ ret);
+ kfree(buf);
+ return ret;
+ }
+
+ memcpy(data, buf, size);
+
+ kfree(buf);
+
+ return 0;
+}
+
+static int ath10k_usb_ctrl_msg_exchange(struct ath10k *ar,
+ u8 req_val, u8 *req_buf, u32 req_len,
+ u8 resp_val, u8 *resp_buf,
+ u32 *resp_len)
+{
+ int ret;
+
+ /* send command */
+ ret = ath10k_usb_submit_ctrl_out(ar, req_val, 0, 0,
+ req_buf, req_len);
+ if (ret)
+ goto err;
+
+ /* get response */
+ if (resp_buf) {
+ ret = ath10k_usb_submit_ctrl_in(ar, resp_val, 0, 0,
+ resp_buf, *resp_len);
+ if (ret)
+ goto err;
+ }
+
+ return 0;
+err:
+ return ret;
+}
+
+static int ath10k_usb_hif_diag_read(struct ath10k *ar, u32 address, void *buf,
+ size_t buf_len)
+{
+ struct ath10k_usb *ar_usb = ath10k_usb_priv(ar);
+ struct ath10k_usb_ctrl_diag_cmd_read *cmd;
+ u32 resp_len;
+ int ret;
+
+ if (buf_len < sizeof(struct ath10k_usb_ctrl_diag_resp_read))
+ return -EINVAL;
+
+ cmd = (struct ath10k_usb_ctrl_diag_cmd_read *)ar_usb->diag_cmd_buffer;
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->cmd = ATH10K_USB_CTRL_DIAG_CC_READ;
+ cmd->address = cpu_to_le32(address);
+ resp_len = sizeof(struct ath10k_usb_ctrl_diag_resp_read);
+
+ ret = ath10k_usb_ctrl_msg_exchange(ar,
+ ATH10K_USB_CONTROL_REQ_DIAG_CMD,
+ (u8 *)cmd,
+ sizeof(*cmd),
+ ATH10K_USB_CONTROL_REQ_DIAG_RESP,
+ ar_usb->diag_resp_buffer, &resp_len);
+ if (ret)
+ return ret;
+
+ if (resp_len != sizeof(struct ath10k_usb_ctrl_diag_resp_read))
+ return -EMSGSIZE;
+
+ memcpy(buf, ar_usb->diag_resp_buffer,
+ sizeof(struct ath10k_usb_ctrl_diag_resp_read));
+
+ return 0;
+}
+
+static int ath10k_usb_hif_diag_write(struct ath10k *ar, u32 address,
+ const void *data, int nbytes)
+{
+ struct ath10k_usb *ar_usb = ath10k_usb_priv(ar);
+ struct ath10k_usb_ctrl_diag_cmd_write *cmd;
+ int ret;
+
+ if (nbytes != sizeof(cmd->value))
+ return -EINVAL;
+
+ cmd = (struct ath10k_usb_ctrl_diag_cmd_write *)ar_usb->diag_cmd_buffer;
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->cmd = cpu_to_le32(ATH10K_USB_CTRL_DIAG_CC_WRITE);
+ cmd->address = cpu_to_le32(address);
+ memcpy(&cmd->value, data, nbytes);
+
+ ret = ath10k_usb_ctrl_msg_exchange(ar,
+ ATH10K_USB_CONTROL_REQ_DIAG_CMD,
+ (u8 *)cmd,
+ sizeof(*cmd),
+ 0, NULL, NULL);
+ if (ret)
+ return ret;
+
+ return 0;
+}
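+
+/*
+ * A minimal usage sketch for the diag pair above (the target address is a
+ * placeholder; real callers reach these through the HIF ops wired up
+ * below). Both helpers operate on a single 32-bit firmware word:
+ *
+ *	u32 address = 0x0;		// placeholder register address
+ *	__le32 val = cpu_to_le32(1);
+ *	int ret;
+ *
+ *	ret = ath10k_usb_hif_diag_write(ar, address, &val, sizeof(val));
+ *	if (!ret)
+ *		ret = ath10k_usb_hif_diag_read(ar, address, &val, sizeof(val));
+ */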
+
+static int ath10k_usb_bmi_exchange_msg(struct ath10k *ar,
+ void *req, u32 req_len,
+ void *resp, u32 *resp_len)
+{
+ int ret;
+
+ if (req) {
+ ret = ath10k_usb_submit_ctrl_out(ar,
+ ATH10K_USB_CONTROL_REQ_SEND_BMI_CMD,
+ 0, 0, req, req_len);
+ if (ret) {
+ ath10k_warn(ar,
+ "unable to send the bmi data to the device: %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ if (resp) {
+ ret = ath10k_usb_submit_ctrl_in(ar,
+ ATH10K_USB_CONTROL_REQ_RECV_BMI_RESP,
+ 0, 0, resp, *resp_len);
+ if (ret) {
+ ath10k_warn(ar,
+ "Unable to read the bmi data from the device: %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void ath10k_usb_hif_get_default_pipe(struct ath10k *ar,
+ u8 *ul_pipe, u8 *dl_pipe)
+{
+ *ul_pipe = ATH10K_USB_PIPE_TX_CTRL;
+ *dl_pipe = ATH10K_USB_PIPE_RX_CTRL;
+}
+
+static int ath10k_usb_hif_map_service_to_pipe(struct ath10k *ar, u16 svc_id,
+ u8 *ul_pipe, u8 *dl_pipe)
+{
+ switch (svc_id) {
+ case ATH10K_HTC_SVC_ID_RSVD_CTRL:
+ case ATH10K_HTC_SVC_ID_WMI_CONTROL:
+ *ul_pipe = ATH10K_USB_PIPE_TX_CTRL;
+ /* due to large control packets, shift to data pipe */
+ *dl_pipe = ATH10K_USB_PIPE_RX_DATA;
+ break;
+ case ATH10K_HTC_SVC_ID_HTT_DATA_MSG:
+ *ul_pipe = ATH10K_USB_PIPE_TX_DATA_LP;
+ /* Disable rxdata2 for now; it will be enabled
+ * if the firmware enables rxdata2
+ */
+ *dl_pipe = ATH10K_USB_PIPE_RX_DATA;
+ break;
+ default:
+ return -EPERM;
+ }
+
+ return 0;
+}
+
+static int ath10k_usb_hif_power_up(struct ath10k *ar,
+ enum ath10k_firmware_mode fw_mode)
+{
+ return 0;
+}
+
+static void ath10k_usb_hif_power_down(struct ath10k *ar)
+{
+ ath10k_usb_flush_all(ar);
+}
+
+#ifdef CONFIG_PM
+
+static int ath10k_usb_hif_suspend(struct ath10k *ar)
+{
+ return -EOPNOTSUPP;
+}
+
+static int ath10k_usb_hif_resume(struct ath10k *ar)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
+static const struct ath10k_hif_ops ath10k_usb_hif_ops = {
+ .tx_sg = ath10k_usb_hif_tx_sg,
+ .diag_read = ath10k_usb_hif_diag_read,
+ .diag_write = ath10k_usb_hif_diag_write,
+ .exchange_bmi_msg = ath10k_usb_bmi_exchange_msg,
+ .start = ath10k_usb_hif_start,
+ .stop = ath10k_usb_hif_stop,
+ .map_service_to_pipe = ath10k_usb_hif_map_service_to_pipe,
+ .get_default_pipe = ath10k_usb_hif_get_default_pipe,
+ .get_free_queue_number = ath10k_usb_hif_get_free_queue_number,
+ .power_up = ath10k_usb_hif_power_up,
+ .power_down = ath10k_usb_hif_power_down,
+#ifdef CONFIG_PM
+ .suspend = ath10k_usb_hif_suspend,
+ .resume = ath10k_usb_hif_resume,
+#endif
+};
+
+static u8 ath10k_usb_get_logical_pipe_num(u8 ep_address, int *urb_count)
+{
+ u8 pipe_num = ATH10K_USB_PIPE_INVALID;
+
+ switch (ep_address) {
+ case ATH10K_USB_EP_ADDR_APP_CTRL_IN:
+ pipe_num = ATH10K_USB_PIPE_RX_CTRL;
+ *urb_count = RX_URB_COUNT;
+ break;
+ case ATH10K_USB_EP_ADDR_APP_DATA_IN:
+ pipe_num = ATH10K_USB_PIPE_RX_DATA;
+ *urb_count = RX_URB_COUNT;
+ break;
+ case ATH10K_USB_EP_ADDR_APP_INT_IN:
+ pipe_num = ATH10K_USB_PIPE_RX_INT;
+ *urb_count = RX_URB_COUNT;
+ break;
+ case ATH10K_USB_EP_ADDR_APP_DATA2_IN:
+ pipe_num = ATH10K_USB_PIPE_RX_DATA2;
+ *urb_count = RX_URB_COUNT;
+ break;
+ case ATH10K_USB_EP_ADDR_APP_CTRL_OUT:
+ pipe_num = ATH10K_USB_PIPE_TX_CTRL;
+ *urb_count = TX_URB_COUNT;
+ break;
+ case ATH10K_USB_EP_ADDR_APP_DATA_LP_OUT:
+ pipe_num = ATH10K_USB_PIPE_TX_DATA_LP;
+ *urb_count = TX_URB_COUNT;
+ break;
+ case ATH10K_USB_EP_ADDR_APP_DATA_MP_OUT:
+ pipe_num = ATH10K_USB_PIPE_TX_DATA_MP;
+ *urb_count = TX_URB_COUNT;
+ break;
+ case ATH10K_USB_EP_ADDR_APP_DATA_HP_OUT:
+ pipe_num = ATH10K_USB_PIPE_TX_DATA_HP;
+ *urb_count = TX_URB_COUNT;
+ break;
+ default:
+ /* note: there may be endpoints not currently used */
+ break;
+ }
+
+ return pipe_num;
+}
+
+static int ath10k_usb_alloc_pipe_resources(struct ath10k *ar,
+ struct ath10k_usb_pipe *pipe,
+ int urb_cnt)
+{
+ struct ath10k_urb_context *urb_context;
+ int i;
+
+ INIT_LIST_HEAD(&pipe->urb_list_head);
+ init_usb_anchor(&pipe->urb_submitted);
+
+ for (i = 0; i < urb_cnt; i++) {
+ urb_context = kzalloc(sizeof(*urb_context), GFP_KERNEL);
+ if (!urb_context)
+ return -ENOMEM;
+
+ urb_context->pipe = pipe;
+
+ /* we only allocate the urb contexts here; the actual URBs
+ * are allocated from the kernel as needed for each transaction
+ */
+ pipe->urb_alloc++;
+ ath10k_usb_free_urb_to_pipe(pipe, urb_context);
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_USB,
+ "usb alloc resources lpipe %d hpipe 0x%x urbs %d\n",
+ pipe->logical_pipe_num, pipe->usb_pipe_handle,
+ pipe->urb_alloc);
+
+ return 0;
+}
+
+static int ath10k_usb_setup_pipe_resources(struct ath10k *ar,
+ struct usb_interface *interface)
+{
+ struct ath10k_usb *ar_usb = ath10k_usb_priv(ar);
+ struct usb_host_interface *iface_desc = interface->cur_altsetting;
+ struct usb_endpoint_descriptor *endpoint;
+ struct ath10k_usb_pipe *pipe;
+ int ret, i, urbcount;
+ u8 pipe_num;
+
+ ath10k_dbg(ar, ATH10K_DBG_USB, "usb setting up pipes using interface\n");
+
+ /* walk descriptors and setup pipes */
+ for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
+ endpoint = &iface_desc->endpoint[i].desc;
+
+ if (ATH10K_USB_IS_BULK_EP(endpoint->bmAttributes)) {
+ ath10k_dbg(ar, ATH10K_DBG_USB,
+ "usb %s bulk ep 0x%2.2x maxpktsz %d\n",
+ ATH10K_USB_IS_DIR_IN
+ (endpoint->bEndpointAddress) ?
+ "rx" : "tx", endpoint->bEndpointAddress,
+ le16_to_cpu(endpoint->wMaxPacketSize));
+ } else if (ATH10K_USB_IS_INT_EP(endpoint->bmAttributes)) {
+ ath10k_dbg(ar, ATH10K_DBG_USB,
+ "usb %s int ep 0x%2.2x maxpktsz %d interval %d\n",
+ ATH10K_USB_IS_DIR_IN
+ (endpoint->bEndpointAddress) ?
+ "rx" : "tx", endpoint->bEndpointAddress,
+ le16_to_cpu(endpoint->wMaxPacketSize),
+ endpoint->bInterval);
+ } else if (ATH10K_USB_IS_ISOC_EP(endpoint->bmAttributes)) {
+ /* TODO for ISO */
+ ath10k_dbg(ar, ATH10K_DBG_USB,
+ "usb %s isoc ep 0x%2.2x maxpktsz %d interval %d\n",
+ ATH10K_USB_IS_DIR_IN
+ (endpoint->bEndpointAddress) ?
+ "rx" : "tx", endpoint->bEndpointAddress,
+ le16_to_cpu(endpoint->wMaxPacketSize),
+ endpoint->bInterval);
+ }
+
+ /* Ignore broken descriptors. */
+ if (usb_endpoint_maxp(endpoint) == 0)
+ continue;
+
+ urbcount = 0;
+
+ pipe_num =
+ ath10k_usb_get_logical_pipe_num(endpoint->bEndpointAddress,
+ &urbcount);
+ if (pipe_num == ATH10K_USB_PIPE_INVALID)
+ continue;
+
+ pipe = &ar_usb->pipes[pipe_num];
+ if (pipe->ar_usb)
+ /* pipe was already set up */
+ continue;
+
+ pipe->ar_usb = ar_usb;
+ pipe->logical_pipe_num = pipe_num;
+ pipe->ep_address = endpoint->bEndpointAddress;
+ pipe->max_packet_size = le16_to_cpu(endpoint->wMaxPacketSize);
+
+ if (ATH10K_USB_IS_BULK_EP(endpoint->bmAttributes)) {
+ if (ATH10K_USB_IS_DIR_IN(pipe->ep_address)) {
+ pipe->usb_pipe_handle =
+ usb_rcvbulkpipe(ar_usb->udev,
+ pipe->ep_address);
+ } else {
+ pipe->usb_pipe_handle =
+ usb_sndbulkpipe(ar_usb->udev,
+ pipe->ep_address);
+ }
+ } else if (ATH10K_USB_IS_INT_EP(endpoint->bmAttributes)) {
+ if (ATH10K_USB_IS_DIR_IN(pipe->ep_address)) {
+ pipe->usb_pipe_handle =
+ usb_rcvintpipe(ar_usb->udev,
+ pipe->ep_address);
+ } else {
+ pipe->usb_pipe_handle =
+ usb_sndintpipe(ar_usb->udev,
+ pipe->ep_address);
+ }
+ } else if (ATH10K_USB_IS_ISOC_EP(endpoint->bmAttributes)) {
+ /* TODO for ISO */
+ if (ATH10K_USB_IS_DIR_IN(pipe->ep_address)) {
+ pipe->usb_pipe_handle =
+ usb_rcvisocpipe(ar_usb->udev,
+ pipe->ep_address);
+ } else {
+ pipe->usb_pipe_handle =
+ usb_sndisocpipe(ar_usb->udev,
+ pipe->ep_address);
+ }
+ }
+
+ pipe->ep_desc = endpoint;
+
+ if (!ATH10K_USB_IS_DIR_IN(pipe->ep_address))
+ pipe->flags |= ATH10K_USB_PIPE_FLAG_TX;
+
+ ret = ath10k_usb_alloc_pipe_resources(ar, pipe, urbcount);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath10k_usb_create(struct ath10k *ar,
+ struct usb_interface *interface)
+{
+ struct ath10k_usb *ar_usb = ath10k_usb_priv(ar);
+ struct usb_device *dev = interface_to_usbdev(interface);
+ struct ath10k_usb_pipe *pipe;
+ int ret, i;
+
+ usb_set_intfdata(interface, ar_usb);
+ spin_lock_init(&ar_usb->cs_lock);
+ ar_usb->udev = dev;
+ ar_usb->interface = interface;
+
+ for (i = 0; i < ATH10K_USB_PIPE_MAX; i++) {
+ pipe = &ar_usb->pipes[i];
+ INIT_WORK(&pipe->io_complete_work,
+ ath10k_usb_io_comp_work);
+ skb_queue_head_init(&pipe->io_comp_queue);
+ }
+
+ ar_usb->diag_cmd_buffer = kzalloc(ATH10K_USB_MAX_DIAG_CMD, GFP_KERNEL);
+ if (!ar_usb->diag_cmd_buffer) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ar_usb->diag_resp_buffer = kzalloc(ATH10K_USB_MAX_DIAG_RESP,
+ GFP_KERNEL);
+ if (!ar_usb->diag_resp_buffer) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ret = ath10k_usb_setup_pipe_resources(ar, interface);
+ if (ret)
+ goto err;
+
+ return 0;
+
+err:
+ ath10k_usb_destroy(ar);
+ return ret;
+}
+
+static int ath10k_usb_napi_poll(struct napi_struct *ctx, int budget)
+{
+ struct ath10k *ar = container_of(ctx, struct ath10k, napi);
+ int done;
+
+ done = ath10k_htt_rx_hl_indication(ar, budget);
+ ath10k_dbg(ar, ATH10K_DBG_USB, "napi poll: done: %d, budget:%d\n", done, budget);
+
+ if (done < budget)
+ napi_complete_done(ctx, done);
+
+ return done;
+}
+
+/* ath10k usb driver registered functions */
+static int ath10k_usb_probe(struct usb_interface *interface,
+ const struct usb_device_id *id)
+{
+ struct ath10k *ar;
+ struct ath10k_usb *ar_usb;
+ struct usb_device *dev = interface_to_usbdev(interface);
+ int ret, vendor_id, product_id;
+ enum ath10k_hw_rev hw_rev;
+ struct ath10k_bus_params bus_params = {};
+
+ /* Assumption: all USB based chipsets (so far) are QCA9377 based.
+ * If newer chipsets appear that do not use the hw reg setup as
+ * defined in qca6174_regs and qca6174_values, this assumption no
+ * longer holds and hw_rev must be set up differently depending on
+ * the chipset.
+ */
+ hw_rev = ATH10K_HW_QCA9377;
+
+ ar = ath10k_core_create(sizeof(*ar_usb), &dev->dev, ATH10K_BUS_USB,
+ hw_rev, &ath10k_usb_hif_ops);
+ if (!ar) {
+ dev_err(&dev->dev, "failed to allocate core\n");
+ return -ENOMEM;
+ }
+
+ netif_napi_add(ar->napi_dev, &ar->napi, ath10k_usb_napi_poll);
+
+ usb_get_dev(dev);
+ vendor_id = le16_to_cpu(dev->descriptor.idVendor);
+ product_id = le16_to_cpu(dev->descriptor.idProduct);
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "usb new func vendor 0x%04x product 0x%04x\n",
+ vendor_id, product_id);
+
+ ar_usb = ath10k_usb_priv(ar);
+ ret = ath10k_usb_create(ar, interface);
+ if (ret)
+ goto err;
+ ar_usb->ar = ar;
+
+ ar->dev_id = product_id;
+ ar->id.vendor = vendor_id;
+ ar->id.device = product_id;
+
+ bus_params.dev_type = ATH10K_DEV_TYPE_HL;
+ /* TODO: don't know yet how to get chip_id with USB */
+ bus_params.chip_id = 0;
+ bus_params.hl_msdu_ids = true;
+ ret = ath10k_core_register(ar, &bus_params);
+ if (ret) {
+ ath10k_warn(ar, "failed to register driver core: %d\n", ret);
+ goto err_usb_destroy;
+ }
+
+ /* TODO: remove this once USB support is fully implemented */
+ ath10k_warn(ar, "Warning: ath10k USB support is incomplete, don't expect anything to work!\n");
+
+ return 0;
+
+err_usb_destroy:
+ ath10k_usb_destroy(ar);
+
+err:
+ ath10k_core_destroy(ar);
+
+ usb_put_dev(dev);
+
+ return ret;
+}
+
+static void ath10k_usb_remove(struct usb_interface *interface)
+{
+ struct ath10k_usb *ar_usb;
+
+ ar_usb = usb_get_intfdata(interface);
+ if (!ar_usb)
+ return;
+
+ ath10k_core_unregister(ar_usb->ar);
+ netif_napi_del(&ar_usb->ar->napi);
+ ath10k_usb_destroy(ar_usb->ar);
+ usb_put_dev(interface_to_usbdev(interface));
+ ath10k_core_destroy(ar_usb->ar);
+}
+
+#ifdef CONFIG_PM
+
+static int ath10k_usb_pm_suspend(struct usb_interface *interface,
+ pm_message_t message)
+{
+ struct ath10k_usb *ar_usb = usb_get_intfdata(interface);
+
+ ath10k_usb_flush_all(ar_usb->ar);
+ return 0;
+}
+
+static int ath10k_usb_pm_resume(struct usb_interface *interface)
+{
+ struct ath10k_usb *ar_usb = usb_get_intfdata(interface);
+ struct ath10k *ar = ar_usb->ar;
+
+ ath10k_usb_post_recv_transfers(ar,
+ &ar_usb->pipes[ATH10K_USB_PIPE_RX_DATA]);
+
+ return 0;
+}
+
+#else
+
+#define ath10k_usb_pm_suspend NULL
+#define ath10k_usb_pm_resume NULL
+
+#endif
+
+/* table of devices that work with this driver */
+static const struct usb_device_id ath10k_usb_ids[] = {
+ {USB_DEVICE(0x13b1, 0x0042)}, /* Linksys WUSB6100M */
+ { /* Terminating entry */ },
+};
+
+MODULE_DEVICE_TABLE(usb, ath10k_usb_ids);
+
+static struct usb_driver ath10k_usb_driver = {
+ .name = "ath10k_usb",
+ .probe = ath10k_usb_probe,
+ .suspend = ath10k_usb_pm_suspend,
+ .resume = ath10k_usb_pm_resume,
+ .disconnect = ath10k_usb_remove,
+ .id_table = ath10k_usb_ids,
+ .supports_autosuspend = true,
+ .disable_hub_initiated_lpm = 1,
+};
+
+module_usb_driver(ath10k_usb_driver);
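+
+/*
+ * module_usb_driver() expands to module_init()/module_exit() stubs that
+ * call usb_register() and usb_deregister() on ath10k_usb_driver, so no
+ * explicit init/exit functions are needed.
+ */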
+
+MODULE_AUTHOR("Atheros Communications, Inc.");
+MODULE_DESCRIPTION("Driver support for Qualcomm Atheros USB 802.11ac WLAN devices");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/ath/ath10k/usb.h b/drivers/net/wireless/ath/ath10k/usb.h
new file mode 100644
index 000000000000..7e4cfbb673c9
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/usb.h
@@ -0,0 +1,118 @@
+/* SPDX-License-Identifier: ISC */
+/*
+ * Copyright (c) 2004-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
+ * Copyright (c) 2016-2017 Erik Stromdahl <erik.stromdahl@gmail.com>
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _USB_H_
+#define _USB_H_
+
+/* constants */
+#define TX_URB_COUNT 32
+#define RX_URB_COUNT 32
+#define ATH10K_USB_RX_BUFFER_SIZE 4096
+
+#define ATH10K_USB_PIPE_INVALID ATH10K_USB_PIPE_MAX
+
+/* USB endpoint definitions */
+#define ATH10K_USB_EP_ADDR_APP_CTRL_IN 0x81
+#define ATH10K_USB_EP_ADDR_APP_DATA_IN 0x82
+#define ATH10K_USB_EP_ADDR_APP_DATA2_IN 0x83
+#define ATH10K_USB_EP_ADDR_APP_INT_IN 0x84
+
+#define ATH10K_USB_EP_ADDR_APP_CTRL_OUT 0x01
+#define ATH10K_USB_EP_ADDR_APP_DATA_LP_OUT 0x02
+#define ATH10K_USB_EP_ADDR_APP_DATA_MP_OUT 0x03
+#define ATH10K_USB_EP_ADDR_APP_DATA_HP_OUT 0x04
+
+/* diagnostic command definitions */
+#define ATH10K_USB_CONTROL_REQ_SEND_BMI_CMD 1
+#define ATH10K_USB_CONTROL_REQ_RECV_BMI_RESP 2
+#define ATH10K_USB_CONTROL_REQ_DIAG_CMD 3
+#define ATH10K_USB_CONTROL_REQ_DIAG_RESP 4
+
+#define ATH10K_USB_CTRL_DIAG_CC_READ 0
+#define ATH10K_USB_CTRL_DIAG_CC_WRITE 1
+
+#define ATH10K_USB_IS_BULK_EP(attr) (((attr) & 3) == 0x02)
+#define ATH10K_USB_IS_INT_EP(attr) (((attr) & 3) == 0x03)
+#define ATH10K_USB_IS_ISOC_EP(attr) (((attr) & 3) == 0x01)
+#define ATH10K_USB_IS_DIR_IN(addr) ((addr) & 0x80)
+
+struct ath10k_usb_ctrl_diag_cmd_write {
+ __le32 cmd;
+ __le32 address;
+ __le32 value;
+ __le32 padding;
+} __packed;
+
+struct ath10k_usb_ctrl_diag_cmd_read {
+ __le32 cmd;
+ __le32 address;
+} __packed;
+
+struct ath10k_usb_ctrl_diag_resp_read {
+ u8 value[4];
+} __packed;
+
+/* tx/rx pipes for usb */
+enum ath10k_usb_pipe_id {
+ ATH10K_USB_PIPE_TX_CTRL = 0,
+ ATH10K_USB_PIPE_TX_DATA_LP,
+ ATH10K_USB_PIPE_TX_DATA_MP,
+ ATH10K_USB_PIPE_TX_DATA_HP,
+ ATH10K_USB_PIPE_RX_CTRL,
+ ATH10K_USB_PIPE_RX_DATA,
+ ATH10K_USB_PIPE_RX_DATA2,
+ ATH10K_USB_PIPE_RX_INT,
+ ATH10K_USB_PIPE_MAX
+};
+
+struct ath10k_usb_pipe {
+ struct list_head urb_list_head;
+ struct usb_anchor urb_submitted;
+ u32 urb_alloc;
+ u32 urb_cnt;
+ u32 urb_cnt_thresh;
+ unsigned int usb_pipe_handle;
+ u32 flags;
+ u8 ep_address;
+ u8 logical_pipe_num;
+ struct ath10k_usb *ar_usb;
+ u16 max_packet_size;
+ struct work_struct io_complete_work;
+ struct sk_buff_head io_comp_queue;
+ struct usb_endpoint_descriptor *ep_desc;
+};
+
+#define ATH10K_USB_PIPE_FLAG_TX BIT(0)
+
+/* usb device object */
+struct ath10k_usb {
+ /* protects pipe->urb_list_head and pipe->urb_cnt */
+ spinlock_t cs_lock;
+
+ struct usb_device *udev;
+ struct usb_interface *interface;
+ struct ath10k_usb_pipe pipes[ATH10K_USB_PIPE_MAX];
+ u8 *diag_cmd_buffer;
+ u8 *diag_resp_buffer;
+ struct ath10k *ar;
+};
+
+/* usb urb object */
+struct ath10k_urb_context {
+ struct list_head link;
+ struct ath10k_usb_pipe *pipe;
+ struct sk_buff *skb;
+ struct ath10k *ar;
+};
+
+static inline struct ath10k_usb *ath10k_usb_priv(struct ath10k *ar)
+{
+ return (struct ath10k_usb *)ar->drv_priv;
+}
+
+#endif
diff --git a/drivers/net/wireless/ath/ath10k/wmi-ops.h b/drivers/net/wireless/ath/ath10k/wmi-ops.h
new file mode 100644
index 000000000000..f3f6b5954b27
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/wmi-ops.h
@@ -0,0 +1,1710 @@
+/* SPDX-License-Identifier: ISC */
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _WMI_OPS_H_
+#define _WMI_OPS_H_
+
+struct ath10k;
+struct sk_buff;
+
+struct wmi_ops {
+ void (*rx)(struct ath10k *ar, struct sk_buff *skb);
+ void (*map_svc)(const __le32 *in, unsigned long *out, size_t len);
+ void (*map_svc_ext)(const __le32 *in, unsigned long *out, size_t len);
+
+ int (*pull_scan)(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_scan_ev_arg *arg);
+ int (*pull_mgmt_rx)(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_mgmt_rx_ev_arg *arg);
+ int (*pull_mgmt_tx_compl)(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_tlv_mgmt_tx_compl_ev_arg *arg);
+ int (*pull_mgmt_tx_bundle_compl)(
+ struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_tlv_mgmt_tx_bundle_compl_ev_arg *arg);
+ int (*pull_ch_info)(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_ch_info_ev_arg *arg);
+ int (*pull_vdev_start)(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_vdev_start_ev_arg *arg);
+ int (*pull_peer_kick)(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_peer_kick_ev_arg *arg);
+ int (*pull_swba)(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_swba_ev_arg *arg);
+ int (*pull_phyerr_hdr)(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_phyerr_hdr_arg *arg);
+ int (*pull_phyerr)(struct ath10k *ar, const void *phyerr_buf,
+ int left_len, struct wmi_phyerr_ev_arg *arg);
+ int (*pull_svc_rdy)(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_svc_rdy_ev_arg *arg);
+ int (*pull_rdy)(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_rdy_ev_arg *arg);
+ int (*pull_fw_stats)(struct ath10k *ar, struct sk_buff *skb,
+ struct ath10k_fw_stats *stats);
+ int (*pull_roam_ev)(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_roam_ev_arg *arg);
+ int (*pull_wow_event)(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_wow_ev_arg *arg);
+ int (*pull_echo_ev)(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_echo_ev_arg *arg);
+ int (*pull_dfs_status_ev)(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_dfs_status_ev_arg *arg);
+ int (*pull_svc_avail)(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_svc_avail_ev_arg *arg);
+
+ enum wmi_txbf_conf (*get_txbf_conf_scheme)(struct ath10k *ar);
+
+ struct sk_buff *(*gen_pdev_suspend)(struct ath10k *ar, u32 suspend_opt);
+ struct sk_buff *(*gen_pdev_resume)(struct ath10k *ar);
+ struct sk_buff *(*gen_pdev_set_base_macaddr)(struct ath10k *ar,
+ const u8 macaddr[ETH_ALEN]);
+ struct sk_buff *(*gen_pdev_set_rd)(struct ath10k *ar, u16 rd, u16 rd2g,
+ u16 rd5g, u16 ctl2g, u16 ctl5g,
+ enum wmi_dfs_region dfs_reg);
+ struct sk_buff *(*gen_pdev_set_param)(struct ath10k *ar, u32 id,
+ u32 value);
+ struct sk_buff *(*gen_init)(struct ath10k *ar);
+ struct sk_buff *(*gen_start_scan)(struct ath10k *ar,
+ const struct wmi_start_scan_arg *arg);
+ struct sk_buff *(*gen_stop_scan)(struct ath10k *ar,
+ const struct wmi_stop_scan_arg *arg);
+ struct sk_buff *(*gen_vdev_create)(struct ath10k *ar, u32 vdev_id,
+ enum wmi_vdev_type type,
+ enum wmi_vdev_subtype subtype,
+ const u8 macaddr[ETH_ALEN]);
+ struct sk_buff *(*gen_vdev_delete)(struct ath10k *ar, u32 vdev_id);
+ struct sk_buff *(*gen_vdev_start)(struct ath10k *ar,
+ const struct wmi_vdev_start_request_arg *arg,
+ bool restart);
+ struct sk_buff *(*gen_vdev_stop)(struct ath10k *ar, u32 vdev_id);
+ struct sk_buff *(*gen_vdev_up)(struct ath10k *ar, u32 vdev_id, u32 aid,
+ const u8 *bssid);
+ struct sk_buff *(*gen_vdev_down)(struct ath10k *ar, u32 vdev_id);
+ struct sk_buff *(*gen_vdev_set_param)(struct ath10k *ar, u32 vdev_id,
+ u32 param_id, u32 param_value);
+ struct sk_buff *(*gen_vdev_install_key)(struct ath10k *ar,
+ const struct wmi_vdev_install_key_arg *arg);
+ struct sk_buff *(*gen_vdev_spectral_conf)(struct ath10k *ar,
+ const struct wmi_vdev_spectral_conf_arg *arg);
+ struct sk_buff *(*gen_vdev_spectral_enable)(struct ath10k *ar, u32 vdev_id,
+ u32 trigger, u32 enable);
+ struct sk_buff *(*gen_vdev_wmm_conf)(struct ath10k *ar, u32 vdev_id,
+ const struct wmi_wmm_params_all_arg *arg);
+ struct sk_buff *(*gen_peer_create)(struct ath10k *ar, u32 vdev_id,
+ const u8 peer_addr[ETH_ALEN],
+ enum wmi_peer_type peer_type);
+ struct sk_buff *(*gen_peer_delete)(struct ath10k *ar, u32 vdev_id,
+ const u8 peer_addr[ETH_ALEN]);
+ struct sk_buff *(*gen_peer_flush)(struct ath10k *ar, u32 vdev_id,
+ const u8 peer_addr[ETH_ALEN],
+ u32 tid_bitmap);
+ struct sk_buff *(*gen_peer_set_param)(struct ath10k *ar, u32 vdev_id,
+ const u8 *peer_addr,
+ enum wmi_peer_param param_id,
+ u32 param_value);
+ struct sk_buff *(*gen_peer_assoc)(struct ath10k *ar,
+ const struct wmi_peer_assoc_complete_arg *arg);
+ struct sk_buff *(*gen_set_psmode)(struct ath10k *ar, u32 vdev_id,
+ enum wmi_sta_ps_mode psmode);
+ struct sk_buff *(*gen_set_sta_ps)(struct ath10k *ar, u32 vdev_id,
+ enum wmi_sta_powersave_param param_id,
+ u32 value);
+ struct sk_buff *(*gen_set_ap_ps)(struct ath10k *ar, u32 vdev_id,
+ const u8 *mac,
+ enum wmi_ap_ps_peer_param param_id,
+ u32 value);
+ struct sk_buff *(*gen_scan_chan_list)(struct ath10k *ar,
+ const struct wmi_scan_chan_list_arg *arg);
+ struct sk_buff *(*gen_scan_prob_req_oui)(struct ath10k *ar,
+ u32 prob_req_oui);
+ struct sk_buff *(*gen_beacon_dma)(struct ath10k *ar, u32 vdev_id,
+ const void *bcn, size_t bcn_len,
+ u32 bcn_paddr, bool dtim_zero,
+ bool deliver_cab);
+ struct sk_buff *(*gen_pdev_set_wmm)(struct ath10k *ar,
+ const struct wmi_wmm_params_all_arg *arg);
+ struct sk_buff *(*gen_request_stats)(struct ath10k *ar, u32 stats_mask);
+ struct sk_buff *(*gen_request_peer_stats_info)(struct ath10k *ar,
+ u32 vdev_id,
+ enum wmi_peer_stats_info_request_type type,
+ u8 *addr,
+ u32 reset);
+ struct sk_buff *(*gen_force_fw_hang)(struct ath10k *ar,
+ enum wmi_force_fw_hang_type type,
+ u32 delay_ms);
+ struct sk_buff *(*gen_mgmt_tx)(struct ath10k *ar, struct sk_buff *skb);
+ struct sk_buff *(*gen_mgmt_tx_send)(struct ath10k *ar,
+ struct sk_buff *skb,
+ dma_addr_t paddr);
+ int (*cleanup_mgmt_tx_send)(struct ath10k *ar, struct sk_buff *msdu);
+ struct sk_buff *(*gen_dbglog_cfg)(struct ath10k *ar, u64 module_enable,
+ u32 log_level);
+ struct sk_buff *(*gen_pktlog_enable)(struct ath10k *ar, u32 filter);
+ struct sk_buff *(*gen_pktlog_disable)(struct ath10k *ar);
+ struct sk_buff *(*gen_pdev_set_quiet_mode)(struct ath10k *ar,
+ u32 period, u32 duration,
+ u32 next_offset,
+ u32 enabled);
+ struct sk_buff *(*gen_pdev_get_temperature)(struct ath10k *ar);
+ struct sk_buff *(*gen_addba_clear_resp)(struct ath10k *ar, u32 vdev_id,
+ const u8 *mac);
+ struct sk_buff *(*gen_addba_send)(struct ath10k *ar, u32 vdev_id,
+ const u8 *mac, u32 tid, u32 buf_size);
+ struct sk_buff *(*gen_addba_set_resp)(struct ath10k *ar, u32 vdev_id,
+ const u8 *mac, u32 tid,
+ u32 status);
+ struct sk_buff *(*gen_delba_send)(struct ath10k *ar, u32 vdev_id,
+ const u8 *mac, u32 tid, u32 initiator,
+ u32 reason);
+ struct sk_buff *(*gen_bcn_tmpl)(struct ath10k *ar, u32 vdev_id,
+ u32 tim_ie_offset, struct sk_buff *bcn,
+ u32 prb_caps, u32 prb_erp,
+ void *prb_ies, size_t prb_ies_len);
+ struct sk_buff *(*gen_prb_tmpl)(struct ath10k *ar, u32 vdev_id,
+ struct sk_buff *bcn);
+ struct sk_buff *(*gen_p2p_go_bcn_ie)(struct ath10k *ar, u32 vdev_id,
+ const u8 *p2p_ie);
+ struct sk_buff *(*gen_vdev_sta_uapsd)(struct ath10k *ar, u32 vdev_id,
+ const u8 peer_addr[ETH_ALEN],
+ const struct wmi_sta_uapsd_auto_trig_arg *args,
+ u32 num_ac);
+ struct sk_buff *(*gen_sta_keepalive)(struct ath10k *ar,
+ const struct wmi_sta_keepalive_arg *arg);
+ struct sk_buff *(*gen_wow_enable)(struct ath10k *ar);
+ struct sk_buff *(*gen_wow_add_wakeup_event)(struct ath10k *ar, u32 vdev_id,
+ enum wmi_wow_wakeup_event event,
+ u32 enable);
+ struct sk_buff *(*gen_wow_host_wakeup_ind)(struct ath10k *ar);
+ struct sk_buff *(*gen_wow_add_pattern)(struct ath10k *ar, u32 vdev_id,
+ u32 pattern_id,
+ const u8 *pattern,
+ const u8 *mask,
+ int pattern_len,
+ int pattern_offset);
+ struct sk_buff *(*gen_wow_del_pattern)(struct ath10k *ar, u32 vdev_id,
+ u32 pattern_id);
+ struct sk_buff *(*gen_update_fw_tdls_state)(struct ath10k *ar,
+ u32 vdev_id,
+ enum wmi_tdls_state state);
+ struct sk_buff *(*gen_tdls_peer_update)(struct ath10k *ar,
+ const struct wmi_tdls_peer_update_cmd_arg *arg,
+ const struct wmi_tdls_peer_capab_arg *cap,
+ const struct wmi_channel_arg *chan);
+ struct sk_buff *(*gen_radar_found)
+ (struct ath10k *ar,
+ const struct ath10k_radar_found_info *arg);
+ struct sk_buff *(*gen_adaptive_qcs)(struct ath10k *ar, bool enable);
+ struct sk_buff *(*gen_pdev_get_tpc_config)(struct ath10k *ar,
+ u32 param);
+ void (*fw_stats_fill)(struct ath10k *ar,
+ struct ath10k_fw_stats *fw_stats,
+ char *buf);
+ struct sk_buff *(*gen_pdev_enable_adaptive_cca)(struct ath10k *ar,
+ u8 enable,
+ u32 detect_level,
+ u32 detect_margin);
+ struct sk_buff *(*ext_resource_config)(struct ath10k *ar,
+ enum wmi_host_platform_type type,
+ u32 fw_feature_bitmap);
+ int (*get_vdev_subtype)(struct ath10k *ar,
+ enum wmi_vdev_subtype subtype);
+ struct sk_buff *(*gen_wow_config_pno)(struct ath10k *ar,
+ u32 vdev_id,
+ struct wmi_pno_scan_req *pno_scan);
+ struct sk_buff *(*gen_pdev_bss_chan_info_req)
+ (struct ath10k *ar,
+ enum wmi_bss_survey_req_type type);
+ struct sk_buff *(*gen_echo)(struct ath10k *ar, u32 value);
+ struct sk_buff *(*gen_pdev_get_tpc_table_cmdid)(struct ath10k *ar,
+ u32 param);
+ struct sk_buff *(*gen_bb_timing)
+ (struct ath10k *ar,
+ const struct wmi_bb_timing_cfg_arg *arg);
+ struct sk_buff *(*gen_per_peer_per_tid_cfg)(struct ath10k *ar,
+ const struct wmi_per_peer_per_tid_cfg_arg *arg);
+ struct sk_buff *(*gen_gpio_config)(struct ath10k *ar, u32 gpio_num,
+ u32 input, u32 pull_type, u32 intr_mode);
+
+ struct sk_buff *(*gen_gpio_output)(struct ath10k *ar, u32 gpio_num, u32 set);
+};
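+
+/*
+ * Each WMI ABI flavour supplies one of these tables and the core dispatches
+ * through it via the inline wrappers below; a NULL op means the flavour
+ * does not implement the call and is reported uniformly as -EOPNOTSUPP.
+ * A minimal sketch of such a table (the member function names here are
+ * illustrative, not the real wmi.c/wmi-tlv.c symbols):
+ *
+ *	static const struct wmi_ops wmi_example_ops = {
+ *		.rx = example_op_rx,
+ *		.pull_scan = example_op_pull_scan,
+ *		.gen_init = example_op_gen_init,
+ *	};
+ */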
+
+int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);
+
+static inline int
+ath10k_wmi_rx(struct ath10k *ar, struct sk_buff *skb)
+{
+ if (WARN_ON_ONCE(!ar->wmi.ops->rx))
+ return -EOPNOTSUPP;
+
+ ar->wmi.ops->rx(ar, skb);
+ return 0;
+}
+
+static inline int
+ath10k_wmi_map_svc(struct ath10k *ar, const __le32 *in, unsigned long *out,
+ size_t len)
+{
+ if (!ar->wmi.ops->map_svc)
+ return -EOPNOTSUPP;
+
+ ar->wmi.ops->map_svc(in, out, len);
+ return 0;
+}
+
+static inline int
+ath10k_wmi_map_svc_ext(struct ath10k *ar, const __le32 *in, unsigned long *out,
+ size_t len)
+{
+ if (!ar->wmi.ops->map_svc_ext)
+ return -EOPNOTSUPP;
+
+ ar->wmi.ops->map_svc_ext(in, out, len);
+ return 0;
+}
+
+static inline int
+ath10k_wmi_pull_scan(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_scan_ev_arg *arg)
+{
+ if (!ar->wmi.ops->pull_scan)
+ return -EOPNOTSUPP;
+
+ return ar->wmi.ops->pull_scan(ar, skb, arg);
+}
+
+static inline int
+ath10k_wmi_pull_mgmt_tx_compl(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_tlv_mgmt_tx_compl_ev_arg *arg)
+{
+ if (!ar->wmi.ops->pull_mgmt_tx_compl)
+ return -EOPNOTSUPP;
+
+ return ar->wmi.ops->pull_mgmt_tx_compl(ar, skb, arg);
+}
+
+static inline int
+ath10k_wmi_pull_mgmt_tx_bundle_compl(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_tlv_mgmt_tx_bundle_compl_ev_arg *arg)
+{
+ if (!ar->wmi.ops->pull_mgmt_tx_bundle_compl)
+ return -EOPNOTSUPP;
+
+ return ar->wmi.ops->pull_mgmt_tx_bundle_compl(ar, skb, arg);
+}
+
+static inline int
+ath10k_wmi_pull_mgmt_rx(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_mgmt_rx_ev_arg *arg)
+{
+ if (!ar->wmi.ops->pull_mgmt_rx)
+ return -EOPNOTSUPP;
+
+ return ar->wmi.ops->pull_mgmt_rx(ar, skb, arg);
+}
+
+static inline int
+ath10k_wmi_pull_ch_info(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_ch_info_ev_arg *arg)
+{
+ if (!ar->wmi.ops->pull_ch_info)
+ return -EOPNOTSUPP;
+
+ return ar->wmi.ops->pull_ch_info(ar, skb, arg);
+}
+
+static inline int
+ath10k_wmi_pull_vdev_start(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_vdev_start_ev_arg *arg)
+{
+ if (!ar->wmi.ops->pull_vdev_start)
+ return -EOPNOTSUPP;
+
+ return ar->wmi.ops->pull_vdev_start(ar, skb, arg);
+}
+
+static inline int
+ath10k_wmi_pull_peer_kick(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_peer_kick_ev_arg *arg)
+{
+ if (!ar->wmi.ops->pull_peer_kick)
+ return -EOPNOTSUPP;
+
+ return ar->wmi.ops->pull_peer_kick(ar, skb, arg);
+}
+
+static inline int
+ath10k_wmi_pull_swba(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_swba_ev_arg *arg)
+{
+ if (!ar->wmi.ops->pull_swba)
+ return -EOPNOTSUPP;
+
+ return ar->wmi.ops->pull_swba(ar, skb, arg);
+}
+
+static inline int
+ath10k_wmi_pull_phyerr_hdr(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_phyerr_hdr_arg *arg)
+{
+ if (!ar->wmi.ops->pull_phyerr_hdr)
+ return -EOPNOTSUPP;
+
+ return ar->wmi.ops->pull_phyerr_hdr(ar, skb, arg);
+}
+
+static inline int
+ath10k_wmi_pull_phyerr(struct ath10k *ar, const void *phyerr_buf,
+ int left_len, struct wmi_phyerr_ev_arg *arg)
+{
+ if (!ar->wmi.ops->pull_phyerr)
+ return -EOPNOTSUPP;
+
+ return ar->wmi.ops->pull_phyerr(ar, phyerr_buf, left_len, arg);
+}
+
+static inline int
+ath10k_wmi_pull_svc_rdy(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_svc_rdy_ev_arg *arg)
+{
+ if (!ar->wmi.ops->pull_svc_rdy)
+ return -EOPNOTSUPP;
+
+ return ar->wmi.ops->pull_svc_rdy(ar, skb, arg);
+}
+
+static inline int
+ath10k_wmi_pull_rdy(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_rdy_ev_arg *arg)
+{
+ if (!ar->wmi.ops->pull_rdy)
+ return -EOPNOTSUPP;
+
+ return ar->wmi.ops->pull_rdy(ar, skb, arg);
+}
+
+static inline int
+ath10k_wmi_pull_svc_avail(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_svc_avail_ev_arg *arg)
+{
+ if (!ar->wmi.ops->pull_svc_avail)
+ return -EOPNOTSUPP;
+ return ar->wmi.ops->pull_svc_avail(ar, skb, arg);
+}
+
+static inline int
+ath10k_wmi_pull_fw_stats(struct ath10k *ar, struct sk_buff *skb,
+ struct ath10k_fw_stats *stats)
+{
+ if (!ar->wmi.ops->pull_fw_stats)
+ return -EOPNOTSUPP;
+
+ return ar->wmi.ops->pull_fw_stats(ar, skb, stats);
+}
+
+static inline int
+ath10k_wmi_pull_roam_ev(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_roam_ev_arg *arg)
+{
+ if (!ar->wmi.ops->pull_roam_ev)
+ return -EOPNOTSUPP;
+
+ return ar->wmi.ops->pull_roam_ev(ar, skb, arg);
+}
+
+static inline int
+ath10k_wmi_pull_wow_event(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_wow_ev_arg *arg)
+{
+ if (!ar->wmi.ops->pull_wow_event)
+ return -EOPNOTSUPP;
+
+ return ar->wmi.ops->pull_wow_event(ar, skb, arg);
+}
+
+static inline int
+ath10k_wmi_pull_echo_ev(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_echo_ev_arg *arg)
+{
+ if (!ar->wmi.ops->pull_echo_ev)
+ return -EOPNOTSUPP;
+
+ return ar->wmi.ops->pull_echo_ev(ar, skb, arg);
+}
+
+static inline int
+ath10k_wmi_pull_dfs_status(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_dfs_status_ev_arg *arg)
+{
+ if (!ar->wmi.ops->pull_dfs_status_ev)
+ return -EOPNOTSUPP;
+
+ return ar->wmi.ops->pull_dfs_status_ev(ar, skb, arg);
+}
+
+static inline enum wmi_txbf_conf
+ath10k_wmi_get_txbf_conf_scheme(struct ath10k *ar)
+{
+ if (!ar->wmi.ops->get_txbf_conf_scheme)
+ return WMI_TXBF_CONF_UNSUPPORTED;
+
+ return ar->wmi.ops->get_txbf_conf_scheme(ar);
+}
+
+static inline int
+ath10k_wmi_cleanup_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu)
+{
+ if (!ar->wmi.ops->cleanup_mgmt_tx_send)
+ return -EOPNOTSUPP;
+
+ return ar->wmi.ops->cleanup_mgmt_tx_send(ar, msdu);
+}
+
+static inline int
+ath10k_wmi_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu,
+ dma_addr_t paddr)
+{
+ struct sk_buff *skb;
+ int ret;
+
+ if (!ar->wmi.ops->gen_mgmt_tx_send)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_mgmt_tx_send(ar, msdu, paddr);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ ret = ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->mgmt_tx_send_cmdid);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static inline int
+ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
+ struct sk_buff *skb;
+ int ret;
+
+ if (!ar->wmi.ops->gen_mgmt_tx)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_mgmt_tx(ar, msdu);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ ret = ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->mgmt_tx_cmdid);
+ if (ret)
+ return ret;
+
+ /* FIXME There's no ACK event for Management Tx. This probably
+ * shouldn't be called here either.
+ */
+ info->flags |= IEEE80211_TX_STAT_ACK;
+ ieee80211_tx_status_irqsafe(ar->hw, msdu);
+
+ return 0;
+}
+
+static inline int
+ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g, u16 rd5g,
+ u16 ctl2g, u16 ctl5g,
+ enum wmi_dfs_region dfs_reg)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_pdev_set_rd)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_pdev_set_rd(ar, rd, rd2g, rd5g, ctl2g, ctl5g,
+ dfs_reg);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->pdev_set_regdomain_cmdid);
+}
+
+static inline int
+ath10k_wmi_pdev_set_base_macaddr(struct ath10k *ar, const u8 macaddr[ETH_ALEN])
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_pdev_set_base_macaddr)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_pdev_set_base_macaddr(ar, macaddr);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->pdev_set_base_macaddr_cmdid);
+}
+
+static inline int
+ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_pdev_suspend)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_pdev_suspend(ar, suspend_opt);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_suspend_cmdid);
+}
+
+static inline int
+ath10k_wmi_pdev_resume_target(struct ath10k *ar)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_pdev_resume)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_pdev_resume(ar);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_resume_cmdid);
+}
+
+static inline int
+ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_pdev_set_param)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_pdev_set_param(ar, id, value);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_set_param_cmdid);
+}
+
+static inline int
+ath10k_wmi_cmd_init(struct ath10k *ar)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_init)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_init(ar);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->init_cmdid);
+}
+
+static inline int
+ath10k_wmi_start_scan(struct ath10k *ar,
+ const struct wmi_start_scan_arg *arg)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_start_scan)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_start_scan(ar, arg);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->start_scan_cmdid);
+}
+
+static inline int
+ath10k_wmi_stop_scan(struct ath10k *ar, const struct wmi_stop_scan_arg *arg)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_stop_scan)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_stop_scan(ar, arg);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->stop_scan_cmdid);
+}
+
+static inline int
+ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id,
+ enum wmi_vdev_type type,
+ enum wmi_vdev_subtype subtype,
+ const u8 macaddr[ETH_ALEN])
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_vdev_create)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_vdev_create(ar, vdev_id, type, subtype, macaddr);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_create_cmdid);
+}
+
+static inline int
+ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_vdev_delete)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_vdev_delete(ar, vdev_id);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_delete_cmdid);
+}
+
+static inline int
+ath10k_wmi_vdev_start(struct ath10k *ar,
+ const struct wmi_vdev_start_request_arg *arg)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_vdev_start)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_vdev_start(ar, arg, false);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->vdev_start_request_cmdid);
+}
+
+static inline int
+ath10k_wmi_vdev_restart(struct ath10k *ar,
+ const struct wmi_vdev_start_request_arg *arg)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_vdev_start)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_vdev_start(ar, arg, true);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->vdev_restart_request_cmdid);
+}
+
+static inline int
+ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_vdev_stop)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_vdev_stop(ar, vdev_id);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_stop_cmdid);
+}
+
+static inline int
+ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_vdev_up)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_vdev_up(ar, vdev_id, aid, bssid);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_up_cmdid);
+}
+
+static inline int
+ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_vdev_down)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_vdev_down(ar, vdev_id);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_down_cmdid);
+}
+
+static inline int
+ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id, u32 param_id,
+ u32 param_value)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_vdev_set_param)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_vdev_set_param(ar, vdev_id, param_id,
+ param_value);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_set_param_cmdid);
+}
+
+static inline int
+ath10k_wmi_vdev_install_key(struct ath10k *ar,
+ const struct wmi_vdev_install_key_arg *arg)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_vdev_install_key)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_vdev_install_key(ar, arg);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->vdev_install_key_cmdid);
+}
+
+static inline int
+ath10k_wmi_vdev_spectral_conf(struct ath10k *ar,
+ const struct wmi_vdev_spectral_conf_arg *arg)
+{
+ struct sk_buff *skb;
+ u32 cmd_id;
+
+ if (!ar->wmi.ops->gen_vdev_spectral_conf)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_vdev_spectral_conf(ar, arg);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ cmd_id = ar->wmi.cmd->vdev_spectral_scan_configure_cmdid;
+ return ath10k_wmi_cmd_send(ar, skb, cmd_id);
+}
+
+static inline int
+ath10k_wmi_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id, u32 trigger,
+ u32 enable)
+{
+ struct sk_buff *skb;
+ u32 cmd_id;
+
+ if (!ar->wmi.ops->gen_vdev_spectral_enable)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_vdev_spectral_enable(ar, vdev_id, trigger,
+ enable);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ cmd_id = ar->wmi.cmd->vdev_spectral_scan_enable_cmdid;
+ return ath10k_wmi_cmd_send(ar, skb, cmd_id);
+}
+
+static inline int
+ath10k_wmi_vdev_sta_uapsd(struct ath10k *ar, u32 vdev_id,
+ const u8 peer_addr[ETH_ALEN],
+ const struct wmi_sta_uapsd_auto_trig_arg *args,
+ u32 num_ac)
+{
+ struct sk_buff *skb;
+ u32 cmd_id;
+
+ if (!ar->wmi.ops->gen_vdev_sta_uapsd)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_vdev_sta_uapsd(ar, vdev_id, peer_addr, args,
+ num_ac);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ cmd_id = ar->wmi.cmd->sta_uapsd_auto_trig_cmdid;
+ return ath10k_wmi_cmd_send(ar, skb, cmd_id);
+}
+
+static inline int
+ath10k_wmi_vdev_wmm_conf(struct ath10k *ar, u32 vdev_id,
+ const struct wmi_wmm_params_all_arg *arg)
+{
+ struct sk_buff *skb;
+ u32 cmd_id;
+
+ if (!ar->wmi.ops->gen_vdev_wmm_conf)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_vdev_wmm_conf(ar, vdev_id, arg);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ cmd_id = ar->wmi.cmd->vdev_set_wmm_params_cmdid;
+ return ath10k_wmi_cmd_send(ar, skb, cmd_id);
+}
+
+static inline int
+ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
+ const u8 peer_addr[ETH_ALEN],
+ enum wmi_peer_type peer_type)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_peer_create)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_peer_create(ar, vdev_id, peer_addr, peer_type);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_create_cmdid);
+}
+
+static inline int
+ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id,
+ const u8 peer_addr[ETH_ALEN])
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_peer_delete)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_peer_delete(ar, vdev_id, peer_addr);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_delete_cmdid);
+}
+
+static inline int
+ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id,
+ const u8 peer_addr[ETH_ALEN], u32 tid_bitmap)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_peer_flush)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_peer_flush(ar, vdev_id, peer_addr, tid_bitmap);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_flush_tids_cmdid);
+}
+
+static inline int
+ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id, const u8 *peer_addr,
+ enum wmi_peer_param param_id, u32 param_value)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_peer_set_param)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_peer_set_param(ar, vdev_id, peer_addr, param_id,
+ param_value);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_set_param_cmdid);
+}
+
+static inline int
+ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id,
+ enum wmi_sta_ps_mode psmode)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_set_psmode)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_set_psmode(ar, vdev_id, psmode);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->sta_powersave_mode_cmdid);
+}
+
+static inline int
+ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id,
+ enum wmi_sta_powersave_param param_id, u32 value)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_set_sta_ps)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_set_sta_ps(ar, vdev_id, param_id, value);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->sta_powersave_param_cmdid);
+}
+
+static inline int
+ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
+ enum wmi_ap_ps_peer_param param_id, u32 value)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_set_ap_ps)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_set_ap_ps(ar, vdev_id, mac, param_id, value);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->ap_ps_peer_param_cmdid);
+}
+
+static inline int
+ath10k_wmi_scan_chan_list(struct ath10k *ar,
+ const struct wmi_scan_chan_list_arg *arg)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_scan_chan_list)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_scan_chan_list(ar, arg);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->scan_chan_list_cmdid);
+}
+
+static inline int
+ath10k_wmi_scan_prob_req_oui(struct ath10k *ar, const u8 mac_addr[ETH_ALEN])
+{
+ struct sk_buff *skb;
+ u32 prob_req_oui;
+
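+ /* The probe request OUI is the first three octets of the MAC address. */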
+ prob_req_oui = (((u32)mac_addr[0]) << 16) |
+ (((u32)mac_addr[1]) << 8) | mac_addr[2];
+
+ if (!ar->wmi.ops->gen_scan_prob_req_oui)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_scan_prob_req_oui(ar, prob_req_oui);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->scan_prob_req_oui_cmdid);
+}
+
+static inline int
+ath10k_wmi_peer_assoc(struct ath10k *ar,
+ const struct wmi_peer_assoc_complete_arg *arg)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_peer_assoc)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_peer_assoc(ar, arg);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_assoc_cmdid);
+}
+
+static inline int
+ath10k_wmi_beacon_send_ref_nowait(struct ath10k *ar, u32 vdev_id,
+ const void *bcn, size_t bcn_len,
+ u32 bcn_paddr, bool dtim_zero,
+ bool deliver_cab)
+{
+ struct sk_buff *skb;
+ int ret;
+
+ if (!ar->wmi.ops->gen_beacon_dma)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_beacon_dma(ar, vdev_id, bcn, bcn_len, bcn_paddr,
+ dtim_zero, deliver_cab);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ ret = ath10k_wmi_cmd_send_nowait(ar, skb,
+ ar->wmi.cmd->pdev_send_bcn_cmdid);
+ if (ret) {
+ dev_kfree_skb(skb);
+ return ret;
+ }
+
+ return 0;
+}
+
+static inline int
+ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
+ const struct wmi_wmm_params_all_arg *arg)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_pdev_set_wmm)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_pdev_set_wmm(ar, arg);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->pdev_set_wmm_params_cmdid);
+}
+
+static inline int
+ath10k_wmi_request_stats(struct ath10k *ar, u32 stats_mask)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_request_stats)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_request_stats(ar, stats_mask);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->request_stats_cmdid);
+}
+
+static inline int
+ath10k_wmi_request_peer_stats_info(struct ath10k *ar,
+ u32 vdev_id,
+ enum wmi_peer_stats_info_request_type type,
+ u8 *addr,
+ u32 reset)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_request_peer_stats_info)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_request_peer_stats_info(ar,
+ vdev_id,
+ type,
+ addr,
+ reset);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->request_peer_stats_info_cmdid);
+}
+
+static inline int
+ath10k_wmi_force_fw_hang(struct ath10k *ar,
+ enum wmi_force_fw_hang_type type, u32 delay_ms)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_force_fw_hang)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_force_fw_hang(ar, type, delay_ms);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->force_fw_hang_cmdid);
+}
+
+static inline int ath10k_wmi_gpio_config(struct ath10k *ar, u32 gpio_num,
+ u32 input, u32 pull_type, u32 intr_mode)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_gpio_config)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_gpio_config(ar, gpio_num, input, pull_type, intr_mode);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->gpio_config_cmdid);
+}
+
+static inline int ath10k_wmi_gpio_output(struct ath10k *ar, u32 gpio_num, u32 set)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_gpio_output)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_gpio_output(ar, gpio_num, set);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->gpio_output_cmdid);
+}
+
+static inline int
+ath10k_wmi_dbglog_cfg(struct ath10k *ar, u64 module_enable, u32 log_level)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_dbglog_cfg)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_dbglog_cfg(ar, module_enable, log_level);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->dbglog_cfg_cmdid);
+}
+
+static inline int
+ath10k_wmi_pdev_pktlog_enable(struct ath10k *ar, u32 filter)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_pktlog_enable)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_pktlog_enable(ar, filter);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_pktlog_enable_cmdid);
+}
+
+static inline int
+ath10k_wmi_pdev_pktlog_disable(struct ath10k *ar)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_pktlog_disable)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_pktlog_disable(ar);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->pdev_pktlog_disable_cmdid);
+}
+
+static inline int
+ath10k_wmi_pdev_set_quiet_mode(struct ath10k *ar, u32 period, u32 duration,
+ u32 next_offset, u32 enabled)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_pdev_set_quiet_mode)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_pdev_set_quiet_mode(ar, period, duration,
+ next_offset, enabled);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->pdev_set_quiet_mode_cmdid);
+}
+
+static inline int
+ath10k_wmi_pdev_get_temperature(struct ath10k *ar)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_pdev_get_temperature)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_pdev_get_temperature(ar);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->pdev_get_temperature_cmdid);
+}
+
+static inline int
+ath10k_wmi_addba_clear_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_addba_clear_resp)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_addba_clear_resp(ar, vdev_id, mac);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->addba_clear_resp_cmdid);
+}
+
+static inline int
+ath10k_wmi_addba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
+ u32 tid, u32 buf_size)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_addba_send)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_addba_send(ar, vdev_id, mac, tid, buf_size);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->addba_send_cmdid);
+}
+
+static inline int
+ath10k_wmi_addba_set_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac,
+ u32 tid, u32 status)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_addba_set_resp)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_addba_set_resp(ar, vdev_id, mac, tid, status);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->addba_set_resp_cmdid);
+}
+
+static inline int
+ath10k_wmi_delba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
+ u32 tid, u32 initiator, u32 reason)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_delba_send)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_delba_send(ar, vdev_id, mac, tid, initiator,
+ reason);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->delba_send_cmdid);
+}
+
+static inline int
+ath10k_wmi_bcn_tmpl(struct ath10k *ar, u32 vdev_id, u32 tim_ie_offset,
+ struct sk_buff *bcn, u32 prb_caps, u32 prb_erp,
+ void *prb_ies, size_t prb_ies_len)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_bcn_tmpl)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_bcn_tmpl(ar, vdev_id, tim_ie_offset, bcn,
+ prb_caps, prb_erp, prb_ies,
+ prb_ies_len);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->bcn_tmpl_cmdid);
+}
+
+static inline int
+ath10k_wmi_prb_tmpl(struct ath10k *ar, u32 vdev_id, struct sk_buff *prb)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_prb_tmpl)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_prb_tmpl(ar, vdev_id, prb);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->prb_tmpl_cmdid);
+}
+
+static inline int
+ath10k_wmi_p2p_go_bcn_ie(struct ath10k *ar, u32 vdev_id, const u8 *p2p_ie)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_p2p_go_bcn_ie)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_p2p_go_bcn_ie(ar, vdev_id, p2p_ie);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->p2p_go_set_beacon_ie);
+}
+
+static inline int
+ath10k_wmi_sta_keepalive(struct ath10k *ar,
+ const struct wmi_sta_keepalive_arg *arg)
+{
+ struct sk_buff *skb;
+ u32 cmd_id;
+
+ if (!ar->wmi.ops->gen_sta_keepalive)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_sta_keepalive(ar, arg);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ cmd_id = ar->wmi.cmd->sta_keepalive_cmd;
+ return ath10k_wmi_cmd_send(ar, skb, cmd_id);
+}
+
+static inline int
+ath10k_wmi_wow_enable(struct ath10k *ar)
+{
+ struct sk_buff *skb;
+ u32 cmd_id;
+
+ if (!ar->wmi.ops->gen_wow_enable)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_wow_enable(ar);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ cmd_id = ar->wmi.cmd->wow_enable_cmdid;
+ return ath10k_wmi_cmd_send(ar, skb, cmd_id);
+}
+
+static inline int
+ath10k_wmi_wow_add_wakeup_event(struct ath10k *ar, u32 vdev_id,
+ enum wmi_wow_wakeup_event event,
+ u32 enable)
+{
+ struct sk_buff *skb;
+ u32 cmd_id;
+
+ if (!ar->wmi.ops->gen_wow_add_wakeup_event)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_wow_add_wakeup_event(ar, vdev_id, event, enable);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ cmd_id = ar->wmi.cmd->wow_enable_disable_wake_event_cmdid;
+ return ath10k_wmi_cmd_send(ar, skb, cmd_id);
+}
+
+static inline int
+ath10k_wmi_wow_host_wakeup_ind(struct ath10k *ar)
+{
+ struct sk_buff *skb;
+ u32 cmd_id;
+
+ if (!ar->wmi.ops->gen_wow_host_wakeup_ind)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_wow_host_wakeup_ind(ar);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ cmd_id = ar->wmi.cmd->wow_hostwakeup_from_sleep_cmdid;
+ return ath10k_wmi_cmd_send(ar, skb, cmd_id);
+}
+
+static inline int
+ath10k_wmi_wow_add_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id,
+ const u8 *pattern, const u8 *mask,
+ int pattern_len, int pattern_offset)
+{
+ struct sk_buff *skb;
+ u32 cmd_id;
+
+ if (!ar->wmi.ops->gen_wow_add_pattern)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_wow_add_pattern(ar, vdev_id, pattern_id,
+ pattern, mask, pattern_len,
+ pattern_offset);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ cmd_id = ar->wmi.cmd->wow_add_wake_pattern_cmdid;
+ return ath10k_wmi_cmd_send(ar, skb, cmd_id);
+}
+
+static inline int
+ath10k_wmi_wow_del_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id)
+{
+ struct sk_buff *skb;
+ u32 cmd_id;
+
+ if (!ar->wmi.ops->gen_wow_del_pattern)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_wow_del_pattern(ar, vdev_id, pattern_id);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ cmd_id = ar->wmi.cmd->wow_del_wake_pattern_cmdid;
+ return ath10k_wmi_cmd_send(ar, skb, cmd_id);
+}
+
+static inline int
+ath10k_wmi_wow_config_pno(struct ath10k *ar, u32 vdev_id,
+ struct wmi_pno_scan_req *pno_scan)
+{
+ struct sk_buff *skb;
+ u32 cmd_id;
+
+ if (!ar->wmi.ops->gen_wow_config_pno)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_wow_config_pno(ar, vdev_id, pno_scan);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ cmd_id = ar->wmi.cmd->network_list_offload_config_cmdid;
+ return ath10k_wmi_cmd_send(ar, skb, cmd_id);
+}
+
+static inline int
+ath10k_wmi_update_fw_tdls_state(struct ath10k *ar, u32 vdev_id,
+ enum wmi_tdls_state state)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_update_fw_tdls_state)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_update_fw_tdls_state(ar, vdev_id, state);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->tdls_set_state_cmdid);
+}
+
+static inline int
+ath10k_wmi_tdls_peer_update(struct ath10k *ar,
+ const struct wmi_tdls_peer_update_cmd_arg *arg,
+ const struct wmi_tdls_peer_capab_arg *cap,
+ const struct wmi_channel_arg *chan)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_tdls_peer_update)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_tdls_peer_update(ar, arg, cap, chan);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->tdls_peer_update_cmdid);
+}
+
+static inline int
+ath10k_wmi_adaptive_qcs(struct ath10k *ar, bool enable)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_adaptive_qcs)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_adaptive_qcs(ar, enable);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->adaptive_qcs_cmdid);
+}
+
+static inline int
+ath10k_wmi_pdev_get_tpc_config(struct ath10k *ar, u32 param)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_pdev_get_tpc_config)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_pdev_get_tpc_config(ar, param);
+
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->pdev_get_tpc_config_cmdid);
+}
+
+static inline int
+ath10k_wmi_fw_stats_fill(struct ath10k *ar, struct ath10k_fw_stats *fw_stats,
+ char *buf)
+{
+ if (!ar->wmi.ops->fw_stats_fill)
+ return -EOPNOTSUPP;
+
+ ar->wmi.ops->fw_stats_fill(ar, fw_stats, buf);
+ return 0;
+}
+
+static inline int
+ath10k_wmi_pdev_enable_adaptive_cca(struct ath10k *ar, u8 enable,
+ u32 detect_level, u32 detect_margin)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_pdev_enable_adaptive_cca)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_pdev_enable_adaptive_cca(ar, enable,
+ detect_level,
+ detect_margin);
+
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->pdev_enable_adaptive_cca_cmdid);
+}
+
+static inline int
+ath10k_wmi_ext_resource_config(struct ath10k *ar,
+ enum wmi_host_platform_type type,
+ u32 fw_feature_bitmap)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->ext_resource_config)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->ext_resource_config(ar, type,
+ fw_feature_bitmap);
+
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->ext_resource_cfg_cmdid);
+}
+
+static inline int
+ath10k_wmi_get_vdev_subtype(struct ath10k *ar, enum wmi_vdev_subtype subtype)
+{
+ if (!ar->wmi.ops->get_vdev_subtype)
+ return -EOPNOTSUPP;
+
+ return ar->wmi.ops->get_vdev_subtype(ar, subtype);
+}
+
+static inline int
+ath10k_wmi_pdev_bss_chan_info_request(struct ath10k *ar,
+ enum wmi_bss_survey_req_type type)
+{
+ struct ath10k_wmi *wmi = &ar->wmi;
+ struct sk_buff *skb;
+
+ if (!wmi->ops->gen_pdev_bss_chan_info_req)
+ return -EOPNOTSUPP;
+
+ skb = wmi->ops->gen_pdev_bss_chan_info_req(ar, type);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb,
+ wmi->cmd->pdev_bss_chan_info_request_cmdid);
+}
+
+static inline int
+ath10k_wmi_echo(struct ath10k *ar, u32 value)
+{
+ struct ath10k_wmi *wmi = &ar->wmi;
+ struct sk_buff *skb;
+
+ if (!wmi->ops->gen_echo)
+ return -EOPNOTSUPP;
+
+ skb = wmi->ops->gen_echo(ar, value);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, wmi->cmd->echo_cmdid);
+}
+
+static inline int
+ath10k_wmi_pdev_get_tpc_table_cmdid(struct ath10k *ar, u32 param)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_pdev_get_tpc_table_cmdid)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_pdev_get_tpc_table_cmdid(ar, param);
+
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->pdev_get_tpc_table_cmdid);
+}
+
+static inline int
+ath10k_wmi_report_radar_found(struct ath10k *ar,
+ const struct ath10k_radar_found_info *arg)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_radar_found)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_radar_found(ar, arg);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->radar_found_cmdid);
+}
+
+static inline int
+ath10k_wmi_pdev_bb_timing(struct ath10k *ar,
+ const struct wmi_bb_timing_cfg_arg *arg)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_bb_timing)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_bb_timing(ar, arg);
+
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->set_bb_timing_cmdid);
+}
+
+static inline int
+ath10k_wmi_set_per_peer_per_tid_cfg(struct ath10k *ar,
+ const struct wmi_per_peer_per_tid_cfg_arg *arg)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_per_peer_per_tid_cfg)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_per_peer_per_tid_cfg(ar, arg);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->per_peer_per_tid_config_cmdid);
+}
+#endif
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
new file mode 100644
index 000000000000..16d07d619b4d
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
@@ -0,0 +1,4649 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+#include "core.h"
+#include "debug.h"
+#include "mac.h"
+#include "hw.h"
+#include "wmi.h"
+#include "wmi-ops.h"
+#include "wmi-tlv.h"
+#include "p2p.h"
+#include "testmode.h"
+#include <linux/bitfield.h>
+
+/***************/
+/* TLV helpers */
+/***************/
+
+struct wmi_tlv_policy {
+ size_t min_len;
+};
+
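+/*
+ * Minimum payload sizes for TLV tags whose payloads the driver
+ * dereferences; ath10k_wmi_tlv_iter() rejects any TLV shorter than its
+ * policy before the per-event parsers see it.
+ */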
+static const struct wmi_tlv_policy wmi_tlv_policies[] = {
+ [WMI_TLV_TAG_ARRAY_BYTE]
+ = { .min_len = 0 },
+ [WMI_TLV_TAG_ARRAY_UINT32]
+ = { .min_len = 0 },
+ [WMI_TLV_TAG_STRUCT_SCAN_EVENT]
+ = { .min_len = sizeof(struct wmi_scan_event) },
+ [WMI_TLV_TAG_STRUCT_MGMT_RX_HDR]
+ = { .min_len = sizeof(struct wmi_tlv_mgmt_rx_ev) },
+ [WMI_TLV_TAG_STRUCT_CHAN_INFO_EVENT]
+ = { .min_len = sizeof(struct wmi_chan_info_event) },
+ [WMI_TLV_TAG_STRUCT_VDEV_START_RESPONSE_EVENT]
+ = { .min_len = sizeof(struct wmi_vdev_start_response_event) },
+ [WMI_TLV_TAG_STRUCT_PEER_STA_KICKOUT_EVENT]
+ = { .min_len = sizeof(struct wmi_peer_sta_kickout_event) },
+ [WMI_TLV_TAG_STRUCT_HOST_SWBA_EVENT]
+ = { .min_len = sizeof(struct wmi_host_swba_event) },
+ [WMI_TLV_TAG_STRUCT_TIM_INFO]
+ = { .min_len = sizeof(struct wmi_tim_info) },
+ [WMI_TLV_TAG_STRUCT_P2P_NOA_INFO]
+ = { .min_len = sizeof(struct wmi_p2p_noa_info) },
+ [WMI_TLV_TAG_STRUCT_SERVICE_READY_EVENT]
+ = { .min_len = sizeof(struct wmi_tlv_svc_rdy_ev) },
+ [WMI_TLV_TAG_STRUCT_HAL_REG_CAPABILITIES]
+ = { .min_len = sizeof(struct hal_reg_capabilities) },
+ [WMI_TLV_TAG_STRUCT_WLAN_HOST_MEM_REQ]
+ = { .min_len = sizeof(struct wlan_host_mem_req) },
+ [WMI_TLV_TAG_STRUCT_READY_EVENT]
+ = { .min_len = sizeof(struct wmi_tlv_rdy_ev) },
+ [WMI_TLV_TAG_STRUCT_OFFLOAD_BCN_TX_STATUS_EVENT]
+ = { .min_len = sizeof(struct wmi_tlv_bcn_tx_status_ev) },
+ [WMI_TLV_TAG_STRUCT_DIAG_DATA_CONTAINER_EVENT]
+ = { .min_len = sizeof(struct wmi_tlv_diag_data_ev) },
+ [WMI_TLV_TAG_STRUCT_P2P_NOA_EVENT]
+ = { .min_len = sizeof(struct wmi_tlv_p2p_noa_ev) },
+ [WMI_TLV_TAG_STRUCT_ROAM_EVENT]
+ = { .min_len = sizeof(struct wmi_tlv_roam_ev) },
+ [WMI_TLV_TAG_STRUCT_WOW_EVENT_INFO]
+ = { .min_len = sizeof(struct wmi_tlv_wow_event_info) },
+ [WMI_TLV_TAG_STRUCT_TX_PAUSE_EVENT]
+ = { .min_len = sizeof(struct wmi_tlv_tx_pause_ev) },
+};
+
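+/*
+ * Walk a buffer of TLVs: each element is a 4-byte header (little-endian
+ * 16-bit tag and 16-bit length) followed by the payload. The callback is
+ * invoked with a pointer to the payload once the header and policy checks
+ * pass.
+ */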
+static int
+ath10k_wmi_tlv_iter(struct ath10k *ar, const void *ptr, size_t len,
+ int (*iter)(struct ath10k *ar, u16 tag, u16 len,
+ const void *ptr, void *data),
+ void *data)
+{
+ const void *begin = ptr;
+ const struct wmi_tlv *tlv;
+ u16 tlv_tag, tlv_len;
+ int ret;
+
+ while (len > 0) {
+ if (len < sizeof(*tlv)) {
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n",
+ ptr - begin, len, sizeof(*tlv));
+ return -EINVAL;
+ }
+
+ tlv = ptr;
+ tlv_tag = __le16_to_cpu(tlv->tag);
+ tlv_len = __le16_to_cpu(tlv->len);
+ ptr += sizeof(*tlv);
+ len -= sizeof(*tlv);
+
+ if (tlv_len > len) {
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi tlv parse failure of tag %u at byte %zd (%zu bytes left, %u expected)\n",
+ tlv_tag, ptr - begin, len, tlv_len);
+ return -EINVAL;
+ }
+
+ if (tlv_tag < ARRAY_SIZE(wmi_tlv_policies) &&
+ wmi_tlv_policies[tlv_tag].min_len &&
+ wmi_tlv_policies[tlv_tag].min_len > tlv_len) {
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi tlv parse failure of tag %u at byte %zd (%u bytes is less than min length %zu)\n",
+ tlv_tag, ptr - begin, tlv_len,
+ wmi_tlv_policies[tlv_tag].min_len);
+ return -EINVAL;
+ }
+
+ ret = iter(ar, tlv_tag, tlv_len, ptr, data);
+ if (ret)
+ return ret;
+
+ ptr += tlv_len;
+ len -= tlv_len;
+ }
+
+ return 0;
+}
+
+static int ath10k_wmi_tlv_iter_parse(struct ath10k *ar, u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ const void **tb = data;
+
+ if (tag < WMI_TLV_TAG_MAX)
+ tb[tag] = ptr;
+
+ return 0;
+}
+
+static int ath10k_wmi_tlv_parse(struct ath10k *ar, const void **tb,
+ const void *ptr, size_t len)
+{
+ return ath10k_wmi_tlv_iter(ar, ptr, len, ath10k_wmi_tlv_iter_parse,
+ (void *)tb);
+}
+
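+/*
+ * Parse a TLV blob into a freshly allocated table indexed by tag, so event
+ * handlers can look structures up as tb[WMI_TLV_TAG_...]. The caller owns
+ * the table and must kfree() it.
+ */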
+static const void **
+ath10k_wmi_tlv_parse_alloc(struct ath10k *ar, const void *ptr,
+ size_t len, gfp_t gfp)
+{
+ const void **tb;
+ int ret;
+
+ tb = kcalloc(WMI_TLV_TAG_MAX, sizeof(*tb), gfp);
+ if (!tb)
+ return ERR_PTR(-ENOMEM);
+
+ ret = ath10k_wmi_tlv_parse(ar, tb, ptr, len);
+ if (ret) {
+ kfree(tb);
+ return ERR_PTR(ret);
+ }
+
+ return tb;
+}
+
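+/* Recover the length of a TLV payload by stepping back to its header. */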
+static u16 ath10k_wmi_tlv_len(const void *ptr)
+{
+ return __le16_to_cpu((((const struct wmi_tlv *)ptr) - 1)->len);
+}
+
+/**************/
+/* TLV events */
+/**************/
+
+static int ath10k_wmi_tlv_event_bcn_tx_status(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ const void **tb;
+ const struct wmi_tlv_bcn_tx_status_ev *ev;
+ struct ath10k_vif *arvif;
+ u32 vdev_id, tx_status;
+ int ret;
+
+ tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TLV_TAG_STRUCT_OFFLOAD_BCN_TX_STATUS_EVENT];
+ if (!ev) {
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ tx_status = __le32_to_cpu(ev->tx_status);
+ vdev_id = __le32_to_cpu(ev->vdev_id);
+
+ switch (tx_status) {
+ case WMI_TLV_BCN_TX_STATUS_OK:
+ break;
+ case WMI_TLV_BCN_TX_STATUS_XRETRY:
+ case WMI_TLV_BCN_TX_STATUS_DROP:
+ case WMI_TLV_BCN_TX_STATUS_FILTERED:
+ /* FIXME: It's probably worth telling mac80211 to stop the
+ * interface as it is crippled.
+ */
+ ath10k_warn(ar, "received bcn tmpl tx status on vdev %i: %d",
+ vdev_id, tx_status);
+ break;
+ }
+
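+ /* If a channel switch is pending on this vdev, a transmitted (or
+ * dropped) beacon is the cue to let the CSA worker finish the switch.
+ */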
+ arvif = ath10k_get_arvif(ar, vdev_id);
+ if (arvif && arvif->is_up && arvif->vif->bss_conf.csa_active)
+ ieee80211_queue_work(ar->hw, &arvif->ap_csa_work);
+
+ kfree(tb);
+ return 0;
+}
+
+static void ath10k_wmi_tlv_event_vdev_delete_resp(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_DELETE_RESP_EVENTID\n");
+ complete(&ar->vdev_delete_done);
+}
+
+static int ath10k_wmi_tlv_parse_peer_stats_info(struct ath10k *ar, u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ const struct wmi_tlv_peer_stats_info *stat = ptr;
+ struct ieee80211_sta *sta;
+ struct ath10k_sta *arsta;
+
+ if (tag != WMI_TLV_TAG_STRUCT_PEER_STATS_INFO)
+ return -EPROTO;
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi tlv stats peer addr %pMF rx rate code 0x%x bit rate %d kbps\n",
+ stat->peer_macaddr.addr,
+ __le32_to_cpu(stat->last_rx_rate_code),
+ __le32_to_cpu(stat->last_rx_bitrate_kbps));
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi tlv stats tx rate code 0x%x bit rate %d kbps\n",
+ __le32_to_cpu(stat->last_tx_rate_code),
+ __le32_to_cpu(stat->last_tx_bitrate_kbps));
+
+ rcu_read_lock();
+ sta = ieee80211_find_sta_by_ifaddr(ar->hw, stat->peer_macaddr.addr, NULL);
+ if (!sta) {
+ rcu_read_unlock();
+ ath10k_warn(ar, "not found station for peer stats\n");
+ return -EINVAL;
+ }
+
+ arsta = (struct ath10k_sta *)sta->drv_priv;
+ arsta->rx_rate_code = __le32_to_cpu(stat->last_rx_rate_code);
+ arsta->rx_bitrate_kbps = __le32_to_cpu(stat->last_rx_bitrate_kbps);
+ arsta->tx_rate_code = __le32_to_cpu(stat->last_tx_rate_code);
+ arsta->tx_bitrate_kbps = __le32_to_cpu(stat->last_tx_bitrate_kbps);
+ rcu_read_unlock();
+
+ return 0;
+}
+
+static int ath10k_wmi_tlv_op_pull_peer_stats_info(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ const void **tb;
+ const struct wmi_tlv_peer_stats_info_ev *ev;
+ const void *data;
+ u32 num_peer_stats;
+ int ret;
+
+ tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TLV_TAG_STRUCT_PEER_STATS_INFO_EVENT];
+ data = tb[WMI_TLV_TAG_ARRAY_STRUCT];
+
+ if (!ev || !data) {
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ num_peer_stats = __le32_to_cpu(ev->num_peers);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi tlv peer stats info update peer vdev id %d peers %i more data %d\n",
+ __le32_to_cpu(ev->vdev_id),
+ num_peer_stats,
+ __le32_to_cpu(ev->more_data));
+
+ ret = ath10k_wmi_tlv_iter(ar, data, ath10k_wmi_tlv_len(data),
+ ath10k_wmi_tlv_parse_peer_stats_info, NULL);
+ if (ret)
+ ath10k_warn(ar, "failed to parse stats info tlv: %d\n", ret);
+
+ kfree(tb);
+ return 0;
+}
+
+static void ath10k_wmi_tlv_event_peer_stats_info(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PEER_STATS_INFO_EVENTID\n");
+ ath10k_wmi_tlv_op_pull_peer_stats_info(ar, skb);
+ complete(&ar->peer_stats_info_complete);
+}
+
+static int ath10k_wmi_tlv_event_diag_data(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ const void **tb;
+ const struct wmi_tlv_diag_data_ev *ev;
+ const struct wmi_tlv_diag_item *item;
+ const void *data;
+ int ret, num_items, len;
+
+ tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TLV_TAG_STRUCT_DIAG_DATA_CONTAINER_EVENT];
+ data = tb[WMI_TLV_TAG_ARRAY_BYTE];
+ if (!ev || !data) {
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ num_items = __le32_to_cpu(ev->num_items);
+ len = ath10k_wmi_tlv_len(data);
+
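+ /* Items are packed back to back; each payload is padded to a 4-byte
+ * boundary, so the walk below advances by the padded length.
+ */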
+ while (num_items--) {
+ if (len == 0)
+ break;
+ if (len < sizeof(*item)) {
+ ath10k_warn(ar, "failed to parse diag data: can't fit item header\n");
+ break;
+ }
+
+ item = data;
+
+ if (len < sizeof(*item) + __le16_to_cpu(item->len)) {
+ ath10k_warn(ar, "failed to parse diag data: item is too long\n");
+ break;
+ }
+
+ trace_ath10k_wmi_diag_container(ar,
+ item->type,
+ __le32_to_cpu(item->timestamp),
+ __le32_to_cpu(item->code),
+ __le16_to_cpu(item->len),
+ item->payload);
+
+ len -= sizeof(*item);
+ len -= roundup(__le16_to_cpu(item->len), 4);
+
+ data += sizeof(*item);
+ data += roundup(__le16_to_cpu(item->len), 4);
+ }
+
+ if (num_items != -1 || len != 0)
+ ath10k_warn(ar, "failed to parse diag data event: num_items %d len %d\n",
+ num_items, len);
+
+ kfree(tb);
+ return 0;
+}
+
+static int ath10k_wmi_tlv_event_diag(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ const void **tb;
+ const void *data;
+ int ret, len;
+
+ tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ data = tb[WMI_TLV_TAG_ARRAY_BYTE];
+ if (!data) {
+ kfree(tb);
+ return -EPROTO;
+ }
+ len = ath10k_wmi_tlv_len(data);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv diag event len %d\n", len);
+ trace_ath10k_wmi_diag(ar, data, len);
+
+ kfree(tb);
+ return 0;
+}
+
+static int ath10k_wmi_tlv_event_p2p_noa(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ const void **tb;
+ const struct wmi_tlv_p2p_noa_ev *ev;
+ const struct wmi_p2p_noa_info *noa;
+ int ret, vdev_id;
+
+ tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TLV_TAG_STRUCT_P2P_NOA_EVENT];
+ noa = tb[WMI_TLV_TAG_STRUCT_P2P_NOA_INFO];
+
+ if (!ev || !noa) {
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ vdev_id = __le32_to_cpu(ev->vdev_id);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi tlv p2p noa vdev_id %i descriptors %u\n",
+ vdev_id, noa->num_descriptors);
+
+ ath10k_p2p_noa_update_by_vdev_id(ar, vdev_id, noa);
+ kfree(tb);
+ return 0;
+}
+
+static int ath10k_wmi_tlv_event_tx_pause(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ const void **tb;
+ const struct wmi_tlv_tx_pause_ev *ev;
+ int ret, vdev_id;
+ u32 pause_id, action, vdev_map, peer_id, tid_map;
+
+ tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TLV_TAG_STRUCT_TX_PAUSE_EVENT];
+ if (!ev) {
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ pause_id = __le32_to_cpu(ev->pause_id);
+ action = __le32_to_cpu(ev->action);
+ vdev_map = __le32_to_cpu(ev->vdev_map);
+ peer_id = __le32_to_cpu(ev->peer_id);
+ tid_map = __le32_to_cpu(ev->tid_map);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi tlv tx pause pause_id %u action %u vdev_map 0x%08x peer_id %u tid_map 0x%08x\n",
+ pause_id, action, vdev_map, peer_id, tid_map);
+
+ switch (pause_id) {
+ case WMI_TLV_TX_PAUSE_ID_MCC:
+ case WMI_TLV_TX_PAUSE_ID_P2P_CLI_NOA:
+ case WMI_TLV_TX_PAUSE_ID_P2P_GO_PS:
+ case WMI_TLV_TX_PAUSE_ID_AP_PS:
+ case WMI_TLV_TX_PAUSE_ID_IBSS_PS:
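+ /* vdev_map is a bitmask of affected vdevs; pause each one in turn. */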
+ for (vdev_id = 0; vdev_map; vdev_id++) {
+ if (!(vdev_map & BIT(vdev_id)))
+ continue;
+
+ vdev_map &= ~BIT(vdev_id);
+ ath10k_mac_handle_tx_pause_vdev(ar, vdev_id, pause_id,
+ action);
+ }
+ break;
+ case WMI_TLV_TX_PAUSE_ID_AP_PEER_PS:
+ case WMI_TLV_TX_PAUSE_ID_AP_PEER_UAPSD:
+ case WMI_TLV_TX_PAUSE_ID_STA_ADD_BA:
+ case WMI_TLV_TX_PAUSE_ID_HOST:
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac ignoring unsupported tx pause id %d\n",
+ pause_id);
+ break;
+ default:
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac ignoring unknown tx pause vdev %d\n",
+ pause_id);
+ break;
+ }
+
+ kfree(tb);
+ return 0;
+}
+
+static void ath10k_wmi_tlv_event_rfkill_state_change(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ const struct wmi_tlv_rfkill_state_change_ev *ev;
+ const void **tb;
+ bool radio;
+ int ret;
+
+ tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath10k_warn(ar,
+ "failed to parse rfkill state change event: %d\n",
+ ret);
+ return;
+ }
+
+ ev = tb[WMI_TLV_TAG_STRUCT_RFKILL_EVENT];
+ if (!ev) {
+ kfree(tb);
+ return;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "wmi tlv rfkill state change gpio %d type %d radio_state %d\n",
+ __le32_to_cpu(ev->gpio_pin_num),
+ __le32_to_cpu(ev->int_type),
+ __le32_to_cpu(ev->radio_state));
+
+ radio = (__le32_to_cpu(ev->radio_state) == WMI_TLV_RFKILL_RADIO_STATE_ON);
+
+ spin_lock_bh(&ar->data_lock);
+
+ if (!radio)
+ ar->hw_rfkill_on = true;
+
+ spin_unlock_bh(&ar->data_lock);
+
+ /* notify cfg80211 radio state change */
+ ath10k_mac_rfkill_enable_radio(ar, radio);
+ wiphy_rfkill_set_hw_state(ar->hw->wiphy, !radio);
+}
+
+static int ath10k_wmi_tlv_event_temperature(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ const struct wmi_tlv_pdev_temperature_event *ev;
+
+ ev = (struct wmi_tlv_pdev_temperature_event *)skb->data;
+ if (WARN_ON(skb->len < sizeof(*ev)))
+ return -EPROTO;
+
+ ath10k_thermal_event_temperature(ar, __le32_to_cpu(ev->temperature));
+ return 0;
+}
+
+static void ath10k_wmi_event_tdls_peer(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct ieee80211_sta *station;
+ const struct wmi_tlv_tdls_peer_event *ev;
+ const void **tb;
+ struct ath10k_vif *arvif;
+
+ tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ath10k_warn(ar, "tdls peer failed to parse tlv");
+ return;
+ }
+ ev = tb[WMI_TLV_TAG_STRUCT_TDLS_PEER_EVENT];
+ if (!ev) {
+ kfree(tb);
+ ath10k_warn(ar, "tdls peer NULL event");
+ return;
+ }
+
+ switch (__le32_to_cpu(ev->peer_reason)) {
+ case WMI_TDLS_TEARDOWN_REASON_TX:
+ case WMI_TDLS_TEARDOWN_REASON_RSSI:
+ case WMI_TDLS_TEARDOWN_REASON_PTR_TIMEOUT:
+ rcu_read_lock();
+ station = ieee80211_find_sta_by_ifaddr(ar->hw,
+ ev->peer_macaddr.addr,
+ NULL);
+ if (!station) {
+ ath10k_warn(ar, "did not find station from tdls peer event");
+ goto exit;
+ }
+
+ arvif = ath10k_get_arvif(ar, __le32_to_cpu(ev->vdev_id));
+ if (!arvif) {
+ ath10k_warn(ar, "no vif for vdev_id %d found",
+ __le32_to_cpu(ev->vdev_id));
+ goto exit;
+ }
+
+ ieee80211_tdls_oper_request(
+ arvif->vif, station->addr,
+ NL80211_TDLS_TEARDOWN,
+ WLAN_REASON_TDLS_TEARDOWN_UNREACHABLE,
+ GFP_ATOMIC
+ );
+ break;
+ default:
+ kfree(tb);
+ return;
+ }
+
+exit:
+ rcu_read_unlock();
+ kfree(tb);
+}
+
+static int ath10k_wmi_tlv_event_peer_delete_resp(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ struct wmi_peer_delete_resp_ev_arg *arg;
+ struct wmi_tlv *tlv_hdr;
+
+ tlv_hdr = (struct wmi_tlv *)skb->data;
+ arg = (struct wmi_peer_delete_resp_ev_arg *)tlv_hdr->value;
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "vdev id %d", arg->vdev_id);
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "peer mac addr %pM", &arg->peer_addr);
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer delete response\n");
+
+ complete(&ar->peer_delete_done);
+
+ return 0;
+}
+
+/***********/
+/* TLV ops */
+/***********/
+
+static void ath10k_wmi_tlv_op_rx(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct wmi_cmd_hdr *cmd_hdr;
+ enum wmi_tlv_event_id id;
+ bool consumed;
+
+ cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
+ id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
+
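+ /* Strip the WMI command header so handlers see only the event payload. */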
+ if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
+ goto out;
+
+ trace_ath10k_wmi_event(ar, id, skb->data, skb->len);
+
+ consumed = ath10k_tm_event_wmi(ar, id, skb);
+
+ /* The ready event must be handled normally also in UTF mode so that we
+ * know the UTF firmware has booted; all other events are simply passed
+ * through to testmode.
+ */
+ if (consumed && id != WMI_TLV_READY_EVENTID) {
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi tlv testmode consumed 0x%x\n", id);
+ goto out;
+ }
+
+ switch (id) {
+ case WMI_TLV_MGMT_RX_EVENTID:
+ ath10k_wmi_event_mgmt_rx(ar, skb);
+ /* mgmt_rx() owns the skb now! */
+ return;
+ case WMI_TLV_SCAN_EVENTID:
+ ath10k_wmi_event_scan(ar, skb);
+ break;
+ case WMI_TLV_CHAN_INFO_EVENTID:
+ ath10k_wmi_event_chan_info(ar, skb);
+ break;
+ case WMI_TLV_ECHO_EVENTID:
+ ath10k_wmi_event_echo(ar, skb);
+ break;
+ case WMI_TLV_DEBUG_MESG_EVENTID:
+ ath10k_wmi_event_debug_mesg(ar, skb);
+ break;
+ case WMI_TLV_UPDATE_STATS_EVENTID:
+ ath10k_wmi_event_update_stats(ar, skb);
+ break;
+ case WMI_TLV_PEER_STATS_INFO_EVENTID:
+ ath10k_wmi_tlv_event_peer_stats_info(ar, skb);
+ break;
+ case WMI_TLV_VDEV_START_RESP_EVENTID:
+ ath10k_wmi_event_vdev_start_resp(ar, skb);
+ break;
+ case WMI_TLV_VDEV_STOPPED_EVENTID:
+ ath10k_wmi_event_vdev_stopped(ar, skb);
+ break;
+ case WMI_TLV_VDEV_DELETE_RESP_EVENTID:
+ ath10k_wmi_tlv_event_vdev_delete_resp(ar, skb);
+ break;
+ case WMI_TLV_PEER_STA_KICKOUT_EVENTID:
+ ath10k_wmi_event_peer_sta_kickout(ar, skb);
+ break;
+ case WMI_TLV_HOST_SWBA_EVENTID:
+ ath10k_wmi_event_host_swba(ar, skb);
+ break;
+ case WMI_TLV_TBTTOFFSET_UPDATE_EVENTID:
+ ath10k_wmi_event_tbttoffset_update(ar, skb);
+ break;
+ case WMI_TLV_PHYERR_EVENTID:
+ ath10k_wmi_event_phyerr(ar, skb);
+ break;
+ case WMI_TLV_ROAM_EVENTID:
+ ath10k_wmi_event_roam(ar, skb);
+ break;
+ case WMI_TLV_PROFILE_MATCH:
+ ath10k_wmi_event_profile_match(ar, skb);
+ break;
+ case WMI_TLV_DEBUG_PRINT_EVENTID:
+ ath10k_wmi_event_debug_print(ar, skb);
+ break;
+ case WMI_TLV_PDEV_QVIT_EVENTID:
+ ath10k_wmi_event_pdev_qvit(ar, skb);
+ break;
+ case WMI_TLV_WLAN_PROFILE_DATA_EVENTID:
+ ath10k_wmi_event_wlan_profile_data(ar, skb);
+ break;
+ case WMI_TLV_RTT_MEASUREMENT_REPORT_EVENTID:
+ ath10k_wmi_event_rtt_measurement_report(ar, skb);
+ break;
+ case WMI_TLV_TSF_MEASUREMENT_REPORT_EVENTID:
+ ath10k_wmi_event_tsf_measurement_report(ar, skb);
+ break;
+ case WMI_TLV_RTT_ERROR_REPORT_EVENTID:
+ ath10k_wmi_event_rtt_error_report(ar, skb);
+ break;
+ case WMI_TLV_WOW_WAKEUP_HOST_EVENTID:
+ ath10k_wmi_event_wow_wakeup_host(ar, skb);
+ break;
+ case WMI_TLV_DCS_INTERFERENCE_EVENTID:
+ ath10k_wmi_event_dcs_interference(ar, skb);
+ break;
+ case WMI_TLV_PDEV_TPC_CONFIG_EVENTID:
+ ath10k_wmi_event_pdev_tpc_config(ar, skb);
+ break;
+ case WMI_TLV_PDEV_FTM_INTG_EVENTID:
+ ath10k_wmi_event_pdev_ftm_intg(ar, skb);
+ break;
+ case WMI_TLV_GTK_OFFLOAD_STATUS_EVENTID:
+ ath10k_wmi_event_gtk_offload_status(ar, skb);
+ break;
+ case WMI_TLV_GTK_REKEY_FAIL_EVENTID:
+ ath10k_wmi_event_gtk_rekey_fail(ar, skb);
+ break;
+ case WMI_TLV_TX_DELBA_COMPLETE_EVENTID:
+ ath10k_wmi_event_delba_complete(ar, skb);
+ break;
+ case WMI_TLV_TX_ADDBA_COMPLETE_EVENTID:
+ ath10k_wmi_event_addba_complete(ar, skb);
+ break;
+ case WMI_TLV_VDEV_INSTALL_KEY_COMPLETE_EVENTID:
+ ath10k_wmi_event_vdev_install_key_complete(ar, skb);
+ break;
+ case WMI_TLV_SERVICE_READY_EVENTID:
+ ath10k_wmi_event_service_ready(ar, skb);
+ return;
+ case WMI_TLV_READY_EVENTID:
+ ath10k_wmi_event_ready(ar, skb);
+ break;
+ case WMI_TLV_SERVICE_AVAILABLE_EVENTID:
+ ath10k_wmi_event_service_available(ar, skb);
+ break;
+ case WMI_TLV_OFFLOAD_BCN_TX_STATUS_EVENTID:
+ ath10k_wmi_tlv_event_bcn_tx_status(ar, skb);
+ break;
+ case WMI_TLV_DIAG_DATA_CONTAINER_EVENTID:
+ ath10k_wmi_tlv_event_diag_data(ar, skb);
+ break;
+ case WMI_TLV_DIAG_EVENTID:
+ ath10k_wmi_tlv_event_diag(ar, skb);
+ break;
+ case WMI_TLV_P2P_NOA_EVENTID:
+ ath10k_wmi_tlv_event_p2p_noa(ar, skb);
+ break;
+ case WMI_TLV_TX_PAUSE_EVENTID:
+ ath10k_wmi_tlv_event_tx_pause(ar, skb);
+ break;
+ case WMI_TLV_RFKILL_STATE_CHANGE_EVENTID:
+ ath10k_wmi_tlv_event_rfkill_state_change(ar, skb);
+ break;
+ case WMI_TLV_PDEV_TEMPERATURE_EVENTID:
+ ath10k_wmi_tlv_event_temperature(ar, skb);
+ break;
+ case WMI_TLV_TDLS_PEER_EVENTID:
+ ath10k_wmi_event_tdls_peer(ar, skb);
+ break;
+ case WMI_TLV_PEER_DELETE_RESP_EVENTID:
+ ath10k_wmi_tlv_event_peer_delete_resp(ar, skb);
+ break;
+ case WMI_TLV_MGMT_TX_COMPLETION_EVENTID:
+ ath10k_wmi_event_mgmt_tx_compl(ar, skb);
+ break;
+ case WMI_TLV_MGMT_TX_BUNDLE_COMPLETION_EVENTID:
+ ath10k_wmi_event_mgmt_tx_bundle_compl(ar, skb);
+ break;
+ default:
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "Unknown eventid: %d\n", id);
+ break;
+ }
+
+out:
+ dev_kfree_skb(skb);
+}
+
+static int ath10k_wmi_tlv_op_pull_scan_ev(struct ath10k *ar,
+ struct sk_buff *skb,
+ struct wmi_scan_ev_arg *arg)
+{
+ const void **tb;
+ const struct wmi_scan_event *ev;
+ int ret;
+
+ tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TLV_TAG_STRUCT_SCAN_EVENT];
+ if (!ev) {
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ arg->event_type = ev->event_type;
+ arg->reason = ev->reason;
+ arg->channel_freq = ev->channel_freq;
+ arg->scan_req_id = ev->scan_req_id;
+ arg->scan_id = ev->scan_id;
+ arg->vdev_id = ev->vdev_id;
+
+ kfree(tb);
+ return 0;
+}
+
+static int
+ath10k_wmi_tlv_op_pull_mgmt_tx_compl_ev(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_tlv_mgmt_tx_compl_ev_arg *arg)
+{
+ const void **tb;
+ const struct wmi_tlv_mgmt_tx_compl_ev *ev;
+ int ret;
+
+ tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TLV_TAG_STRUCT_MGMT_TX_COMPL_EVENT];
+ if (!ev) {
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ arg->desc_id = ev->desc_id;
+ arg->status = ev->status;
+ arg->pdev_id = ev->pdev_id;
+ arg->ppdu_id = ev->ppdu_id;
+
+ if (test_bit(WMI_SERVICE_TX_DATA_ACK_RSSI, ar->wmi.svc_map))
+ arg->ack_rssi = ev->ack_rssi;
+
+ kfree(tb);
+ return 0;
+}
+
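+/*
+ * A bundle completion event carries four WMI_TLV_TAG_ARRAY_UINT32 arrays in
+ * a fixed order (desc ids, status, ppdu ids, ack rssi); the *_done flags
+ * track which arrays have already been consumed while iterating.
+ */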
+struct wmi_tlv_tx_bundle_compl_parse {
+ const __le32 *num_reports;
+ const __le32 *desc_ids;
+ const __le32 *status;
+ const __le32 *ppdu_ids;
+ const __le32 *ack_rssi;
+ bool desc_ids_done;
+ bool status_done;
+ bool ppdu_ids_done;
+ bool ack_rssi_done;
+};
+
+static int
+ath10k_wmi_tlv_mgmt_tx_bundle_compl_parse(struct ath10k *ar, u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct wmi_tlv_tx_bundle_compl_parse *bundle_tx_compl = data;
+
+ switch (tag) {
+ case WMI_TLV_TAG_STRUCT_MGMT_TX_COMPL_BUNDLE_EVENT:
+ bundle_tx_compl->num_reports = ptr;
+ break;
+ case WMI_TLV_TAG_ARRAY_UINT32:
+ if (!bundle_tx_compl->desc_ids_done) {
+ bundle_tx_compl->desc_ids_done = true;
+ bundle_tx_compl->desc_ids = ptr;
+ } else if (!bundle_tx_compl->status_done) {
+ bundle_tx_compl->status_done = true;
+ bundle_tx_compl->status = ptr;
+ } else if (!bundle_tx_compl->ppdu_ids_done) {
+ bundle_tx_compl->ppdu_ids_done = true;
+ bundle_tx_compl->ppdu_ids = ptr;
+ } else if (!bundle_tx_compl->ack_rssi_done) {
+ bundle_tx_compl->ack_rssi_done = true;
+ bundle_tx_compl->ack_rssi = ptr;
+ }
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static int ath10k_wmi_tlv_op_pull_mgmt_tx_bundle_compl_ev(
+ struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_tlv_mgmt_tx_bundle_compl_ev_arg *arg)
+{
+ struct wmi_tlv_tx_bundle_compl_parse bundle_tx_compl = { };
+ int ret;
+
+ ret = ath10k_wmi_tlv_iter(ar, skb->data, skb->len,
+ ath10k_wmi_tlv_mgmt_tx_bundle_compl_parse,
+ &bundle_tx_compl);
+ if (ret) {
+ ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ if (!bundle_tx_compl.num_reports || !bundle_tx_compl.desc_ids ||
+ !bundle_tx_compl.status)
+ return -EPROTO;
+
+ arg->num_reports = *bundle_tx_compl.num_reports;
+ arg->desc_ids = bundle_tx_compl.desc_ids;
+ arg->status = bundle_tx_compl.status;
+ arg->ppdu_ids = bundle_tx_compl.ppdu_ids;
+
+ if (test_bit(WMI_SERVICE_TX_DATA_ACK_RSSI, ar->wmi.svc_map))
+ arg->ack_rssi = bundle_tx_compl.ack_rssi;
+
+ return 0;
+}
+
+static int ath10k_wmi_tlv_op_pull_mgmt_rx_ev(struct ath10k *ar,
+ struct sk_buff *skb,
+ struct wmi_mgmt_rx_ev_arg *arg)
+{
+ const void **tb;
+ const struct wmi_tlv_mgmt_rx_ev *ev;
+ const u8 *frame;
+ u32 msdu_len;
+ int ret, i;
+
+ tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TLV_TAG_STRUCT_MGMT_RX_HDR];
+ frame = tb[WMI_TLV_TAG_ARRAY_BYTE];
+
+ if (!ev || !frame) {
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ arg->channel = ev->channel;
+ arg->buf_len = ev->buf_len;
+ arg->status = ev->status;
+ arg->snr = ev->snr;
+ arg->phy_mode = ev->phy_mode;
+ arg->rate = ev->rate;
+
+ for (i = 0; i < ARRAY_SIZE(ev->rssi); i++)
+ arg->rssi[i] = ev->rssi[i];
+
+ msdu_len = __le32_to_cpu(arg->buf_len);
+
+ if (skb->len < (frame - skb->data) + msdu_len) {
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ /* Shift the sk_buff so that skb->data points at `frame` and skb->len
+ * covers exactly msdu_len bytes: reset the length to zero, then use
+ * put/pull to advance both the data and tail pointers.
+ */
+ skb_trim(skb, 0);
+ skb_put(skb, frame - skb->data);
+ skb_pull(skb, frame - skb->data);
+ skb_put(skb, msdu_len);
+
+ kfree(tb);
+ return 0;
+}
+
+static int ath10k_wmi_tlv_op_pull_ch_info_ev(struct ath10k *ar,
+ struct sk_buff *skb,
+ struct wmi_ch_info_ev_arg *arg)
+{
+ const void **tb;
+ const struct wmi_tlv_chan_info_event *ev;
+ int ret;
+
+ tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TLV_TAG_STRUCT_CHAN_INFO_EVENT];
+ if (!ev) {
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ arg->err_code = ev->err_code;
+ arg->freq = ev->freq;
+ arg->cmd_flags = ev->cmd_flags;
+ arg->noise_floor = ev->noise_floor;
+ arg->rx_clear_count = ev->rx_clear_count;
+ arg->cycle_count = ev->cycle_count;
+ if (test_bit(ATH10K_FW_FEATURE_SINGLE_CHAN_INFO_PER_CHANNEL,
+ ar->running_fw->fw_file.fw_features))
+ arg->mac_clk_mhz = ev->mac_clk_mhz;
+
+ kfree(tb);
+ return 0;
+}
+
+static int
+ath10k_wmi_tlv_op_pull_vdev_start_ev(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_vdev_start_ev_arg *arg)
+{
+ const void **tb;
+ const struct wmi_vdev_start_response_event *ev;
+ int ret;
+
+ tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TLV_TAG_STRUCT_VDEV_START_RESPONSE_EVENT];
+ if (!ev) {
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ skb_pull(skb, sizeof(*ev));
+ arg->vdev_id = ev->vdev_id;
+ arg->req_id = ev->req_id;
+ arg->resp_type = ev->resp_type;
+ arg->status = ev->status;
+
+ kfree(tb);
+ return 0;
+}
+
+static int ath10k_wmi_tlv_op_pull_peer_kick_ev(struct ath10k *ar,
+ struct sk_buff *skb,
+ struct wmi_peer_kick_ev_arg *arg)
+{
+ const void **tb;
+ const struct wmi_peer_sta_kickout_event *ev;
+ int ret;
+
+ tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TLV_TAG_STRUCT_PEER_STA_KICKOUT_EVENT];
+ if (!ev) {
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ arg->mac_addr = ev->peer_macaddr.addr;
+
+ kfree(tb);
+ return 0;
+}
+
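+/* The SWBA (software beacon alert) event carries the host SWBA header plus
+ * two ARRAY_STRUCT TLVs: a list of TIM info structs followed by a list of
+ * P2P NoA info structs, one entry each per beaconing vdev.
+ */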
+struct wmi_tlv_swba_parse {
+ const struct wmi_host_swba_event *ev;
+ bool tim_done;
+ bool noa_done;
+ size_t n_tim;
+ size_t n_noa;
+ struct wmi_swba_ev_arg *arg;
+};
+
+static int ath10k_wmi_tlv_swba_tim_parse(struct ath10k *ar, u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct wmi_tlv_swba_parse *swba = data;
+ struct wmi_tim_info_arg *tim_info_arg;
+ const struct wmi_tim_info *tim_info_ev = ptr;
+
+ if (tag != WMI_TLV_TAG_STRUCT_TIM_INFO)
+ return -EPROTO;
+
+ if (swba->n_tim >= ARRAY_SIZE(swba->arg->tim_info))
+ return -ENOBUFS;
+
+ if (__le32_to_cpu(tim_info_ev->tim_len) >
+ sizeof(tim_info_ev->tim_bitmap)) {
+ ath10k_warn(ar, "refusing to parse invalid swba structure\n");
+ return -EPROTO;
+ }
+
+ tim_info_arg = &swba->arg->tim_info[swba->n_tim];
+ tim_info_arg->tim_len = tim_info_ev->tim_len;
+ tim_info_arg->tim_mcast = tim_info_ev->tim_mcast;
+ tim_info_arg->tim_bitmap = tim_info_ev->tim_bitmap;
+ tim_info_arg->tim_changed = tim_info_ev->tim_changed;
+ tim_info_arg->tim_num_ps_pending = tim_info_ev->tim_num_ps_pending;
+
+ swba->n_tim++;
+
+ return 0;
+}
+
+static int ath10k_wmi_tlv_swba_noa_parse(struct ath10k *ar, u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct wmi_tlv_swba_parse *swba = data;
+
+ if (tag != WMI_TLV_TAG_STRUCT_P2P_NOA_INFO)
+ return -EPROTO;
+
+ if (swba->n_noa >= ARRAY_SIZE(swba->arg->noa_info))
+ return -ENOBUFS;
+
+ swba->arg->noa_info[swba->n_noa++] = ptr;
+ return 0;
+}
+
+static int ath10k_wmi_tlv_swba_parse(struct ath10k *ar, u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct wmi_tlv_swba_parse *swba = data;
+ int ret;
+
+ switch (tag) {
+ case WMI_TLV_TAG_STRUCT_HOST_SWBA_EVENT:
+ swba->ev = ptr;
+ break;
+ case WMI_TLV_TAG_ARRAY_STRUCT:
+ if (!swba->tim_done) {
+ swba->tim_done = true;
+ ret = ath10k_wmi_tlv_iter(ar, ptr, len,
+ ath10k_wmi_tlv_swba_tim_parse,
+ swba);
+ if (ret)
+ return ret;
+ } else if (!swba->noa_done) {
+ swba->noa_done = true;
+ ret = ath10k_wmi_tlv_iter(ar, ptr, len,
+ ath10k_wmi_tlv_swba_noa_parse,
+ swba);
+ if (ret)
+ return ret;
+ }
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static int ath10k_wmi_tlv_op_pull_swba_ev(struct ath10k *ar,
+ struct sk_buff *skb,
+ struct wmi_swba_ev_arg *arg)
+{
+ struct wmi_tlv_swba_parse swba = { .arg = arg };
+ u32 map;
+ size_t n_vdevs;
+ int ret;
+
+ ret = ath10k_wmi_tlv_iter(ar, skb->data, skb->len,
+ ath10k_wmi_tlv_swba_parse, &swba);
+ if (ret) {
+ ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ if (!swba.ev)
+ return -EPROTO;
+
+ arg->vdev_map = swba.ev->vdev_map;
+
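+ /* Each bit set in vdev_map denotes one beaconing vdev; the event must
+ * carry exactly one TIM entry and one NoA entry per such vdev.
+ */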
+ for (map = __le32_to_cpu(arg->vdev_map), n_vdevs = 0; map; map >>= 1)
+ if (map & BIT(0))
+ n_vdevs++;
+
+ if (n_vdevs != swba.n_tim || n_vdevs != swba.n_noa)
+ return -EPROTO;
+
+ return 0;
+}
+
+static int ath10k_wmi_tlv_op_pull_phyerr_ev_hdr(struct ath10k *ar,
+ struct sk_buff *skb,
+ struct wmi_phyerr_hdr_arg *arg)
+{
+ const void **tb;
+ const struct wmi_tlv_phyerr_ev *ev;
+ const void *phyerrs;
+ int ret;
+
+ tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TLV_TAG_STRUCT_COMB_PHYERR_RX_HDR];
+ phyerrs = tb[WMI_TLV_TAG_ARRAY_BYTE];
+
+ if (!ev || !phyerrs) {
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ arg->num_phyerrs = __le32_to_cpu(ev->num_phyerrs);
+ arg->tsf_l32 = __le32_to_cpu(ev->tsf_l32);
+ arg->tsf_u32 = __le32_to_cpu(ev->tsf_u32);
+ arg->buf_len = __le32_to_cpu(ev->buf_len);
+ arg->phyerrs = phyerrs;
+
+ kfree(tb);
+ return 0;
+}
+
+#define WMI_TLV_ABI_VER_NS0 0x5F414351
+#define WMI_TLV_ABI_VER_NS1 0x00004C4D
+#define WMI_TLV_ABI_VER_NS2 0x00000000
+#define WMI_TLV_ABI_VER_NS3 0x00000000
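+
+/* The namespace words above appear to encode the ASCII string "QCA_ML" in
+ * little-endian byte order (0x5F414351 = "QCA_", 0x00004C4D = "ML").
+ */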
+
+#define WMI_TLV_ABI_VER0_MAJOR 1
+#define WMI_TLV_ABI_VER0_MINOR 0
+#define WMI_TLV_ABI_VER0 ((((WMI_TLV_ABI_VER0_MAJOR) << 24) & 0xFF000000) | \
+ (((WMI_TLV_ABI_VER0_MINOR) << 0) & 0x00FFFFFF))
+#define WMI_TLV_ABI_VER1 53
+
+static int
+ath10k_wmi_tlv_parse_mem_reqs(struct ath10k *ar, u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct wmi_svc_rdy_ev_arg *arg = data;
+ int i;
+
+ if (tag != WMI_TLV_TAG_STRUCT_WLAN_HOST_MEM_REQ)
+ return -EPROTO;
+
+ for (i = 0; i < ARRAY_SIZE(arg->mem_reqs); i++) {
+ if (!arg->mem_reqs[i]) {
+ arg->mem_reqs[i] = ptr;
+ return 0;
+ }
+ }
+
+ return -ENOMEM;
+}
+
+struct wmi_tlv_svc_rdy_parse {
+ const struct hal_reg_capabilities *reg;
+ const struct wmi_tlv_svc_rdy_ev *ev;
+ const __le32 *svc_bmap;
+ const struct wlan_host_mem_req *mem_reqs;
+ bool svc_bmap_done;
+ bool dbs_hw_mode_done;
+};
+
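+/* As with the bundle completion parser above, the two ARRAY_UINT32 TLVs of
+ * the service ready event arrive in a fixed order: the service bitmap
+ * first, then the DBS hardware mode list (which is not used here).
+ */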
+static int ath10k_wmi_tlv_svc_rdy_parse(struct ath10k *ar, u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct wmi_tlv_svc_rdy_parse *svc_rdy = data;
+
+ switch (tag) {
+ case WMI_TLV_TAG_STRUCT_SERVICE_READY_EVENT:
+ svc_rdy->ev = ptr;
+ break;
+ case WMI_TLV_TAG_STRUCT_HAL_REG_CAPABILITIES:
+ svc_rdy->reg = ptr;
+ break;
+ case WMI_TLV_TAG_ARRAY_STRUCT:
+ svc_rdy->mem_reqs = ptr;
+ break;
+ case WMI_TLV_TAG_ARRAY_UINT32:
+ if (!svc_rdy->svc_bmap_done) {
+ svc_rdy->svc_bmap_done = true;
+ svc_rdy->svc_bmap = ptr;
+ } else if (!svc_rdy->dbs_hw_mode_done) {
+ svc_rdy->dbs_hw_mode_done = true;
+ }
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static int ath10k_wmi_tlv_op_pull_svc_rdy_ev(struct ath10k *ar,
+ struct sk_buff *skb,
+ struct wmi_svc_rdy_ev_arg *arg)
+{
+ const struct hal_reg_capabilities *reg;
+ const struct wmi_tlv_svc_rdy_ev *ev;
+ const __le32 *svc_bmap;
+ const struct wlan_host_mem_req *mem_reqs;
+ struct wmi_tlv_svc_rdy_parse svc_rdy = { };
+ int ret;
+
+ ret = ath10k_wmi_tlv_iter(ar, skb->data, skb->len,
+ ath10k_wmi_tlv_svc_rdy_parse, &svc_rdy);
+ if (ret) {
+ ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = svc_rdy.ev;
+ reg = svc_rdy.reg;
+ svc_bmap = svc_rdy.svc_bmap;
+ mem_reqs = svc_rdy.mem_reqs;
+
+ if (!ev || !reg || !svc_bmap || !mem_reqs)
+ return -EPROTO;
+
+ /* This is an internal ABI compatibility check for WMI TLV, so it is
+ * checked here instead of in the generic WMI code.
+ */
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi tlv abi 0x%08x ?= 0x%08x, 0x%08x ?= 0x%08x, 0x%08x ?= 0x%08x, 0x%08x ?= 0x%08x, 0x%08x ?= 0x%08x\n",
+ __le32_to_cpu(ev->abi.abi_ver0), WMI_TLV_ABI_VER0,
+ __le32_to_cpu(ev->abi.abi_ver_ns0), WMI_TLV_ABI_VER_NS0,
+ __le32_to_cpu(ev->abi.abi_ver_ns1), WMI_TLV_ABI_VER_NS1,
+ __le32_to_cpu(ev->abi.abi_ver_ns2), WMI_TLV_ABI_VER_NS2,
+ __le32_to_cpu(ev->abi.abi_ver_ns3), WMI_TLV_ABI_VER_NS3);
+
+ if (__le32_to_cpu(ev->abi.abi_ver0) != WMI_TLV_ABI_VER0 ||
+ __le32_to_cpu(ev->abi.abi_ver_ns0) != WMI_TLV_ABI_VER_NS0 ||
+ __le32_to_cpu(ev->abi.abi_ver_ns1) != WMI_TLV_ABI_VER_NS1 ||
+ __le32_to_cpu(ev->abi.abi_ver_ns2) != WMI_TLV_ABI_VER_NS2 ||
+ __le32_to_cpu(ev->abi.abi_ver_ns3) != WMI_TLV_ABI_VER_NS3) {
+ return -EOPNOTSUPP;
+ }
+
+ arg->min_tx_power = ev->hw_min_tx_power;
+ arg->max_tx_power = ev->hw_max_tx_power;
+ arg->ht_cap = ev->ht_cap_info;
+ arg->vht_cap = ev->vht_cap_info;
+ arg->vht_supp_mcs = ev->vht_supp_mcs;
+ arg->sw_ver0 = ev->abi.abi_ver0;
+ arg->sw_ver1 = ev->abi.abi_ver1;
+ arg->fw_build = ev->fw_build_vers;
+ arg->phy_capab = ev->phy_capability;
+ arg->num_rf_chains = ev->num_rf_chains;
+ arg->eeprom_rd = reg->eeprom_rd;
+ arg->low_2ghz_chan = reg->low_2ghz_chan;
+ arg->high_2ghz_chan = reg->high_2ghz_chan;
+ arg->low_5ghz_chan = reg->low_5ghz_chan;
+ arg->high_5ghz_chan = reg->high_5ghz_chan;
+ arg->num_mem_reqs = ev->num_mem_reqs;
+ arg->service_map = svc_bmap;
+ arg->service_map_len = ath10k_wmi_tlv_len(svc_bmap);
+ arg->sys_cap_info = ev->sys_cap_info;
+
+ ret = ath10k_wmi_tlv_iter(ar, mem_reqs, ath10k_wmi_tlv_len(mem_reqs),
+ ath10k_wmi_tlv_parse_mem_reqs, arg);
+ if (ret) {
+ ath10k_warn(ar, "failed to parse mem_reqs tlv: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath10k_wmi_tlv_op_pull_rdy_ev(struct ath10k *ar,
+ struct sk_buff *skb,
+ struct wmi_rdy_ev_arg *arg)
+{
+ const void **tb;
+ const struct wmi_tlv_rdy_ev *ev;
+ int ret;
+
+ tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TLV_TAG_STRUCT_READY_EVENT];
+ if (!ev) {
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ arg->sw_version = ev->abi.abi_ver0;
+ arg->abi_version = ev->abi.abi_ver1;
+ arg->status = ev->status;
+ arg->mac_addr = ev->mac_addr.addr;
+
+ kfree(tb);
+ return 0;
+}
+
+static int ath10k_wmi_tlv_svc_avail_parse(struct ath10k *ar, u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct wmi_svc_avail_ev_arg *arg = data;
+
+ switch (tag) {
+ case WMI_TLV_TAG_STRUCT_SERVICE_AVAILABLE_EVENT:
+ arg->service_map_ext_valid = true;
+ arg->service_map_ext_len = *(__le32 *)ptr;
+ arg->service_map_ext = ptr + sizeof(__le32);
+ return 0;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int ath10k_wmi_tlv_op_pull_svc_avail(struct ath10k *ar,
+ struct sk_buff *skb,
+ struct wmi_svc_avail_ev_arg *arg)
+{
+ int ret;
+
+ ret = ath10k_wmi_tlv_iter(ar, skb->data, skb->len,
+ ath10k_wmi_tlv_svc_avail_parse, arg);
+
+ if (ret) {
+ ath10k_warn(ar, "failed to parse svc_avail tlv: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void ath10k_wmi_tlv_pull_vdev_stats(const struct wmi_tlv_vdev_stats *src,
+ struct ath10k_fw_stats_vdev *dst)
+{
+ int i;
+
+ dst->vdev_id = __le32_to_cpu(src->vdev_id);
+ dst->beacon_snr = __le32_to_cpu(src->beacon_snr);
+ dst->data_snr = __le32_to_cpu(src->data_snr);
+ dst->num_rx_frames = __le32_to_cpu(src->num_rx_frames);
+ dst->num_rts_fail = __le32_to_cpu(src->num_rts_fail);
+ dst->num_rts_success = __le32_to_cpu(src->num_rts_success);
+ dst->num_rx_err = __le32_to_cpu(src->num_rx_err);
+ dst->num_rx_discard = __le32_to_cpu(src->num_rx_discard);
+ dst->num_tx_not_acked = __le32_to_cpu(src->num_tx_not_acked);
+
+ for (i = 0; i < ARRAY_SIZE(src->num_tx_frames); i++)
+ dst->num_tx_frames[i] =
+ __le32_to_cpu(src->num_tx_frames[i]);
+
+ for (i = 0; i < ARRAY_SIZE(src->num_tx_frames_retries); i++)
+ dst->num_tx_frames_retries[i] =
+ __le32_to_cpu(src->num_tx_frames_retries[i]);
+
+ for (i = 0; i < ARRAY_SIZE(src->num_tx_frames_failures); i++)
+ dst->num_tx_frames_failures[i] =
+ __le32_to_cpu(src->num_tx_frames_failures[i]);
+
+ for (i = 0; i < ARRAY_SIZE(src->tx_rate_history); i++)
+ dst->tx_rate_history[i] =
+ __le32_to_cpu(src->tx_rate_history[i]);
+
+ for (i = 0; i < ARRAY_SIZE(src->beacon_rssi_history); i++)
+ dst->beacon_rssi_history[i] =
+ __le32_to_cpu(src->beacon_rssi_history[i]);
+}
+
+static int ath10k_wmi_tlv_op_pull_fw_stats(struct ath10k *ar,
+ struct sk_buff *skb,
+ struct ath10k_fw_stats *stats)
+{
+ const void **tb;
+ const struct wmi_tlv_stats_ev *ev;
+ u32 num_peer_stats_extd;
+ const void *data;
+ u32 num_pdev_stats;
+ u32 num_vdev_stats;
+ u32 num_peer_stats;
+ u32 num_bcnflt_stats;
+ u32 num_chan_stats;
+ size_t data_len;
+ u32 stats_id;
+ int ret;
+ int i;
+
+ tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TLV_TAG_STRUCT_STATS_EVENT];
+ data = tb[WMI_TLV_TAG_ARRAY_BYTE];
+
+ if (!ev || !data) {
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ data_len = ath10k_wmi_tlv_len(data);
+ num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
+ num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats);
+ num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
+ num_bcnflt_stats = __le32_to_cpu(ev->num_bcnflt_stats);
+ num_chan_stats = __le32_to_cpu(ev->num_chan_stats);
+ stats_id = __le32_to_cpu(ev->stats_id);
+ num_peer_stats_extd = __le32_to_cpu(ev->num_peer_stats_extd);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi tlv stats update pdev %i vdev %i peer %i bcnflt %i chan %i peer_extd %i\n",
+ num_pdev_stats, num_vdev_stats, num_peer_stats,
+ num_bcnflt_stats, num_chan_stats, num_peer_stats_extd);
+
+ for (i = 0; i < num_pdev_stats; i++) {
+ const struct wmi_pdev_stats *src;
+ struct ath10k_fw_stats_pdev *dst;
+
+ src = data;
+ if (data_len < sizeof(*src)) {
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ data += sizeof(*src);
+ data_len -= sizeof(*src);
+
+ dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+ if (!dst)
+ continue;
+
+ ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
+ ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst);
+ ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
+ list_add_tail(&dst->list, &stats->pdevs);
+ }
+
+ for (i = 0; i < num_vdev_stats; i++) {
+ const struct wmi_tlv_vdev_stats *src;
+ struct ath10k_fw_stats_vdev *dst;
+
+ src = data;
+ if (data_len < sizeof(*src)) {
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ data += sizeof(*src);
+ data_len -= sizeof(*src);
+
+ dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+ if (!dst)
+ continue;
+
+ ath10k_wmi_tlv_pull_vdev_stats(src, dst);
+ list_add_tail(&dst->list, &stats->vdevs);
+ }
+
+ for (i = 0; i < num_peer_stats; i++) {
+ const struct wmi_10x_peer_stats *src;
+ struct ath10k_fw_stats_peer *dst;
+
+ src = data;
+ if (data_len < sizeof(*src)) {
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ data += sizeof(*src);
+ data_len -= sizeof(*src);
+
+ dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+ if (!dst)
+ continue;
+
+ ath10k_wmi_pull_peer_stats(&src->old, dst);
+ dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate);
+
+ if (stats_id & WMI_TLV_STAT_PEER_EXTD) {
+ const struct wmi_tlv_peer_stats_extd *extd;
+ unsigned long rx_duration_high;
+
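+ /* Extended peer stats records follow the array of base
+ * peer stats: skip the base records not yet consumed,
+ * then index into the extd array.
+ */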
+ extd = data + sizeof(*src) * (num_peer_stats - i - 1)
+ + sizeof(*extd) * i;
+
+ dst->rx_duration = __le32_to_cpu(extd->rx_duration);
+ rx_duration_high =
+ __le32_to_cpu(extd->rx_duration_high);
+
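+ /* rx_duration_high holds a validity bit plus the upper
+ * bits of a 64-bit rx duration; fold them in above the
+ * 32-bit low word when valid.
+ */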
+ if (test_bit(WMI_TLV_PEER_RX_DURATION_HIGH_VALID_BIT,
+ &rx_duration_high)) {
+ rx_duration_high =
+ FIELD_GET(WMI_TLV_PEER_RX_DURATION_HIGH_MASK,
+ rx_duration_high);
+ dst->rx_duration |= (u64)rx_duration_high <<
+ WMI_TLV_PEER_RX_DURATION_SHIFT;
+ }
+ }
+
+ list_add_tail(&dst->list, &stats->peers);
+ }
+
+ kfree(tb);
+ return 0;
+}
+
+static int ath10k_wmi_tlv_op_pull_roam_ev(struct ath10k *ar,
+ struct sk_buff *skb,
+ struct wmi_roam_ev_arg *arg)
+{
+ const void **tb;
+ const struct wmi_tlv_roam_ev *ev;
+ int ret;
+
+ tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TLV_TAG_STRUCT_ROAM_EVENT];
+ if (!ev) {
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ arg->vdev_id = ev->vdev_id;
+ arg->reason = ev->reason;
+ arg->rssi = ev->rssi;
+
+ kfree(tb);
+ return 0;
+}
+
+static int
+ath10k_wmi_tlv_op_pull_wow_ev(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_wow_ev_arg *arg)
+{
+ const void **tb;
+ const struct wmi_tlv_wow_event_info *ev;
+ int ret;
+
+ tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TLV_TAG_STRUCT_WOW_EVENT_INFO];
+ if (!ev) {
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ arg->vdev_id = __le32_to_cpu(ev->vdev_id);
+ arg->flag = __le32_to_cpu(ev->flag);
+ arg->wake_reason = __le32_to_cpu(ev->wake_reason);
+ arg->data_len = __le32_to_cpu(ev->data_len);
+
+ kfree(tb);
+ return 0;
+}
+
+static int ath10k_wmi_tlv_op_pull_echo_ev(struct ath10k *ar,
+ struct sk_buff *skb,
+ struct wmi_echo_ev_arg *arg)
+{
+ const void **tb;
+ const struct wmi_echo_event *ev;
+ int ret;
+
+ tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TLV_TAG_STRUCT_ECHO_EVENT];
+ if (!ev) {
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ arg->value = ev->value;
+
+ kfree(tb);
+ return 0;
+}
+
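+/* The gen helpers below build WMI command buffers. The simple ones carry a
+ * single TLV: one wmi_tlv header followed by the fixed-size command struct,
+ * with all multi-byte fields converted to little-endian.
+ */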
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_pdev_suspend(struct ath10k *ar, u32 opt)
+{
+ struct wmi_tlv_pdev_suspend *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (void *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_SUSPEND_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->opt = __cpu_to_le32(opt);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev suspend\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_pdev_resume(struct ath10k *ar)
+{
+ struct wmi_tlv_resume_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (void *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_RESUME_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->reserved = __cpu_to_le32(0);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev resume\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_pdev_set_rd(struct ath10k *ar,
+ u16 rd, u16 rd2g, u16 rd5g,
+ u16 ctl2g, u16 ctl5g,
+ enum wmi_dfs_region dfs_reg)
+{
+ struct wmi_tlv_pdev_set_rd_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (void *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_SET_REGDOMAIN_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->regd = __cpu_to_le32(rd);
+ cmd->regd_2ghz = __cpu_to_le32(rd2g);
+ cmd->regd_5ghz = __cpu_to_le32(rd5g);
+ cmd->conform_limit_2ghz = __cpu_to_le32(ctl2g);
+ cmd->conform_limit_5ghz = __cpu_to_le32(ctl5g);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev set rd\n");
+ return skb;
+}
+
+static enum wmi_txbf_conf ath10k_wmi_tlv_txbf_conf_scheme(struct ath10k *ar)
+{
+ return WMI_TXBF_CONF_AFTER_ASSOC;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_pdev_set_param(struct ath10k *ar, u32 param_id,
+ u32 param_value)
+{
+ struct wmi_tlv_pdev_set_param_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (void *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_SET_PARAM_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->param_id = __cpu_to_le32(param_id);
+ cmd->param_value = __cpu_to_le32(param_value);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev set param %d value 0x%x\n",
+ param_id, param_value);
+ return skb;
+}
+
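+/* Serialize the host memory chunks granted to the firmware as a flat
+ * sequence of [wmi_tlv][host_memory_chunk_tlv] pairs at host_mem_chunks.
+ */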
+static void
+ath10k_wmi_tlv_put_host_mem_chunks(struct ath10k *ar, void *host_mem_chunks)
+{
+ struct host_memory_chunk_tlv *chunk;
+ struct wmi_tlv *tlv;
+ dma_addr_t paddr;
+ int i;
+ __le16 tlv_len, tlv_tag;
+
+ tlv_tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WLAN_HOST_MEMORY_CHUNK);
+ tlv_len = __cpu_to_le16(sizeof(*chunk));
+ for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
+ tlv = host_mem_chunks;
+ tlv->tag = tlv_tag;
+ tlv->len = tlv_len;
+ chunk = (void *)tlv->value;
+
+ chunk->ptr = __cpu_to_le32(ar->wmi.mem_chunks[i].paddr);
+ chunk->size = __cpu_to_le32(ar->wmi.mem_chunks[i].len);
+ chunk->req_id = __cpu_to_le32(ar->wmi.mem_chunks[i].req_id);
+
+ if (test_bit(WMI_SERVICE_SUPPORT_EXTEND_ADDRESS,
+ ar->wmi.svc_map)) {
+ paddr = ar->wmi.mem_chunks[i].paddr;
+ chunk->ptr_high = __cpu_to_le32(upper_32_bits(paddr));
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi-tlv chunk %d len %d, addr 0x%llx, id 0x%x\n",
+ i,
+ ar->wmi.mem_chunks[i].len,
+ (unsigned long long)ar->wmi.mem_chunks[i].paddr,
+ ar->wmi.mem_chunks[i].req_id);
+
+ host_mem_chunks += sizeof(*tlv);
+ host_mem_chunks += sizeof(*chunk);
+ }
+}
+
+static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar)
+{
+ struct sk_buff *skb;
+ struct wmi_tlv *tlv;
+ struct wmi_tlv_init_cmd *cmd;
+ struct wmi_tlv_resource_config *cfg;
+ void *chunks;
+ size_t len, chunks_len;
+ void *ptr;
+
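+ /* Buffer layout:
+ * [tlv][init_cmd][tlv][resource_config][tlv][host mem chunk TLVs]
+ */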
+ chunks_len = ar->wmi.num_mem_chunks *
+ (sizeof(struct host_memory_chunk_tlv) + sizeof(*tlv));
+ len = (sizeof(*tlv) + sizeof(*cmd)) +
+ (sizeof(*tlv) + sizeof(*cfg)) +
+ (sizeof(*tlv) + chunks_len);
+
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = skb->data;
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_INIT_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*cmd);
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_RESOURCE_CONFIG);
+ tlv->len = __cpu_to_le16(sizeof(*cfg));
+ cfg = (void *)tlv->value;
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*cfg);
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
+ tlv->len = __cpu_to_le16(chunks_len);
+ chunks = (void *)tlv->value;
+
+ ptr += sizeof(*tlv);
+ ptr += chunks_len;
+
+ cmd->abi.abi_ver0 = __cpu_to_le32(WMI_TLV_ABI_VER0);
+ cmd->abi.abi_ver1 = __cpu_to_le32(WMI_TLV_ABI_VER1);
+ cmd->abi.abi_ver_ns0 = __cpu_to_le32(WMI_TLV_ABI_VER_NS0);
+ cmd->abi.abi_ver_ns1 = __cpu_to_le32(WMI_TLV_ABI_VER_NS1);
+ cmd->abi.abi_ver_ns2 = __cpu_to_le32(WMI_TLV_ABI_VER_NS2);
+ cmd->abi.abi_ver_ns3 = __cpu_to_le32(WMI_TLV_ABI_VER_NS3);
+ cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks);
+
+ cfg->num_vdevs = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
+
+ if (ar->hw_params.num_peers)
+ cfg->num_peers = __cpu_to_le32(ar->hw_params.num_peers);
+ else
+ cfg->num_peers = __cpu_to_le32(TARGET_TLV_NUM_PEERS);
+ cfg->ast_skid_limit = __cpu_to_le32(ar->hw_params.ast_skid_limit);
+ cfg->num_wds_entries = __cpu_to_le32(ar->hw_params.num_wds_entries);
+
+ if (test_bit(WMI_SERVICE_RX_FULL_REORDER, ar->wmi.svc_map)) {
+ cfg->num_offload_peers = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
+ cfg->num_offload_reorder_bufs = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
+ } else {
+ cfg->num_offload_peers = __cpu_to_le32(0);
+ cfg->num_offload_reorder_bufs = __cpu_to_le32(0);
+ }
+
+ cfg->num_peer_keys = __cpu_to_le32(2);
+ if (ar->hw_params.num_peers)
+ cfg->num_tids = __cpu_to_le32(ar->hw_params.num_peers * 2);
+ else
+ cfg->num_tids = __cpu_to_le32(TARGET_TLV_NUM_TIDS);
+ cfg->tx_chain_mask = __cpu_to_le32(0x7);
+ cfg->rx_chain_mask = __cpu_to_le32(0x7);
+ cfg->rx_timeout_pri[0] = __cpu_to_le32(0x64);
+ cfg->rx_timeout_pri[1] = __cpu_to_le32(0x64);
+ cfg->rx_timeout_pri[2] = __cpu_to_le32(0x64);
+ cfg->rx_timeout_pri[3] = __cpu_to_le32(0x28);
+ cfg->rx_decap_mode = __cpu_to_le32(ar->wmi.rx_decap_mode);
+ cfg->scan_max_pending_reqs = __cpu_to_le32(4);
+ cfg->bmiss_offload_max_vdev = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
+ cfg->roam_offload_max_vdev = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
+ cfg->roam_offload_max_ap_profiles = __cpu_to_le32(8);
+ cfg->num_mcast_groups = __cpu_to_le32(0);
+ cfg->num_mcast_table_elems = __cpu_to_le32(0);
+ cfg->mcast2ucast_mode = __cpu_to_le32(0);
+ cfg->tx_dbg_log_size = __cpu_to_le32(0x400);
+ cfg->dma_burst_size = __cpu_to_le32(0);
+ cfg->mac_aggr_delim = __cpu_to_le32(0);
+ cfg->rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(0);
+ cfg->vow_config = __cpu_to_le32(0);
+ cfg->gtk_offload_max_vdev = __cpu_to_le32(2);
+ cfg->num_msdu_desc = __cpu_to_le32(ar->htt.max_num_pending_tx);
+ cfg->max_frag_entries = __cpu_to_le32(2);
+ cfg->num_tdls_vdevs = __cpu_to_le32(TARGET_TLV_NUM_TDLS_VDEVS);
+ cfg->num_tdls_conn_table_entries = __cpu_to_le32(0x20);
+ cfg->beacon_tx_offload_max_vdev = __cpu_to_le32(2);
+ cfg->num_multicast_filter_entries = __cpu_to_le32(5);
+ cfg->num_wow_filters = __cpu_to_le32(ar->wow.max_num_patterns);
+ cfg->num_keep_alive_pattern = __cpu_to_le32(6);
+ cfg->keep_alive_pattern_size = __cpu_to_le32(0);
+ cfg->max_tdls_concurrent_sleep_sta = __cpu_to_le32(1);
+ cfg->max_tdls_concurrent_buffer_sta = __cpu_to_le32(1);
+ cfg->wmi_send_separate = __cpu_to_le32(0);
+ cfg->num_ocb_vdevs = __cpu_to_le32(0);
+ cfg->num_ocb_channels = __cpu_to_le32(0);
+ cfg->num_ocb_schedules = __cpu_to_le32(0);
+ cfg->host_capab = __cpu_to_le32(WMI_TLV_FLAG_MGMT_BUNDLE_TX_COMPL);
+
+ if (test_bit(WMI_SERVICE_TX_DATA_ACK_RSSI, ar->wmi.svc_map))
+ cfg->host_capab |= __cpu_to_le32(WMI_RSRC_CFG_FLAG_TX_ACK_RSSI);
+
+ ath10k_wmi_tlv_put_host_mem_chunks(ar, chunks);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv init\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_start_scan(struct ath10k *ar,
+ const struct wmi_start_scan_arg *arg)
+{
+ struct wmi_tlv_start_scan_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ size_t len, chan_len, ssid_len, bssid_len, ie_len;
+ __le32 *chans;
+ struct wmi_ssid *ssids;
+ struct wmi_mac_addr *addrs;
+ void *ptr;
+ int i, ret;
+
+ ret = ath10k_wmi_start_scan_verify(arg);
+ if (ret)
+ return ERR_PTR(ret);
+
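+ /* Buffer layout:
+ * [tlv][start_scan_cmd][tlv][channel list][tlv][ssid list]
+ * [tlv][bssid list][tlv][ie bytes, padded to 4 bytes]
+ */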
+ chan_len = arg->n_channels * sizeof(__le32);
+ ssid_len = arg->n_ssids * sizeof(struct wmi_ssid);
+ bssid_len = arg->n_bssids * sizeof(struct wmi_mac_addr);
+ ie_len = roundup(arg->ie_len, 4);
+ len = (sizeof(*tlv) + sizeof(*cmd)) +
+ sizeof(*tlv) + chan_len +
+ sizeof(*tlv) + ssid_len +
+ sizeof(*tlv) + bssid_len +
+ sizeof(*tlv) + ie_len;
+
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = (void *)skb->data;
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_START_SCAN_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+
+ ath10k_wmi_put_start_scan_common(&cmd->common, arg);
+ cmd->burst_duration_ms = __cpu_to_le32(arg->burst_duration_ms);
+ cmd->num_channels = __cpu_to_le32(arg->n_channels);
+ cmd->num_ssids = __cpu_to_le32(arg->n_ssids);
+ cmd->num_bssids = __cpu_to_le32(arg->n_bssids);
+ cmd->ie_len = __cpu_to_le32(arg->ie_len);
+ cmd->num_probes = __cpu_to_le32(3);
+ ether_addr_copy(cmd->mac_addr.addr, arg->mac_addr.addr);
+ ether_addr_copy(cmd->mac_mask.addr, arg->mac_mask.addr);
+
+ /* FIXME: There are some scan flag inconsistencies across firmwares,
+ * e.g. WMI-TLV inverts the logic behind the following flag.
+ */
+ cmd->common.scan_ctrl_flags ^= __cpu_to_le32(WMI_SCAN_FILTER_PROBE_REQ);
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*cmd);
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
+ tlv->len = __cpu_to_le16(chan_len);
+ chans = (void *)tlv->value;
+ for (i = 0; i < arg->n_channels; i++)
+ chans[i] = __cpu_to_le32(arg->channels[i]);
+
+ ptr += sizeof(*tlv);
+ ptr += chan_len;
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_FIXED_STRUCT);
+ tlv->len = __cpu_to_le16(ssid_len);
+ ssids = (void *)tlv->value;
+ for (i = 0; i < arg->n_ssids; i++) {
+ ssids[i].ssid_len = __cpu_to_le32(arg->ssids[i].len);
+ memcpy(ssids[i].ssid, arg->ssids[i].ssid, arg->ssids[i].len);
+ }
+
+ ptr += sizeof(*tlv);
+ ptr += ssid_len;
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_FIXED_STRUCT);
+ tlv->len = __cpu_to_le16(bssid_len);
+ addrs = (void *)tlv->value;
+ for (i = 0; i < arg->n_bssids; i++)
+ ether_addr_copy(addrs[i].addr, arg->bssids[i].bssid);
+
+ ptr += sizeof(*tlv);
+ ptr += bssid_len;
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
+ tlv->len = __cpu_to_le16(ie_len);
+ memcpy(tlv->value, arg->ie, arg->ie_len);
+
+ ptr += sizeof(*tlv);
+ ptr += ie_len;
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv start scan\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_stop_scan(struct ath10k *ar,
+ const struct wmi_stop_scan_arg *arg)
+{
+ struct wmi_stop_scan_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ u32 scan_id;
+ u32 req_id;
+
+ if (arg->req_id > 0xFFF)
+ return ERR_PTR(-EINVAL);
+ if (arg->req_type == WMI_SCAN_STOP_ONE && arg->u.scan_id > 0xFFF)
+ return ERR_PTR(-EINVAL);
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ scan_id = arg->u.scan_id;
+ scan_id |= WMI_HOST_SCAN_REQ_ID_PREFIX;
+
+ req_id = arg->req_id;
+ req_id |= WMI_HOST_SCAN_REQUESTOR_ID_PREFIX;
+
+ tlv = (void *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STOP_SCAN_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->req_type = __cpu_to_le32(arg->req_type);
+ cmd->vdev_id = __cpu_to_le32(arg->u.vdev_id);
+ cmd->scan_id = __cpu_to_le32(scan_id);
+ cmd->scan_req_id = __cpu_to_le32(req_id);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv stop scan\n");
+ return skb;
+}
+
+static int ath10k_wmi_tlv_op_get_vdev_subtype(struct ath10k *ar,
+ enum wmi_vdev_subtype subtype)
+{
+ switch (subtype) {
+ case WMI_VDEV_SUBTYPE_NONE:
+ return WMI_TLV_VDEV_SUBTYPE_NONE;
+ case WMI_VDEV_SUBTYPE_P2P_DEVICE:
+ return WMI_TLV_VDEV_SUBTYPE_P2P_DEV;
+ case WMI_VDEV_SUBTYPE_P2P_CLIENT:
+ return WMI_TLV_VDEV_SUBTYPE_P2P_CLI;
+ case WMI_VDEV_SUBTYPE_P2P_GO:
+ return WMI_TLV_VDEV_SUBTYPE_P2P_GO;
+ case WMI_VDEV_SUBTYPE_PROXY_STA:
+ return WMI_TLV_VDEV_SUBTYPE_PROXY_STA;
+ case WMI_VDEV_SUBTYPE_MESH_11S:
+ return WMI_TLV_VDEV_SUBTYPE_MESH_11S;
+ case WMI_VDEV_SUBTYPE_MESH_NON_11S:
+ return -EOPNOTSUPP;
+ }
+ return -EOPNOTSUPP;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_vdev_create(struct ath10k *ar,
+ u32 vdev_id,
+ enum wmi_vdev_type vdev_type,
+ enum wmi_vdev_subtype vdev_subtype,
+ const u8 mac_addr[ETH_ALEN])
+{
+ struct wmi_vdev_create_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (void *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_CREATE_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->vdev_type = __cpu_to_le32(vdev_type);
+ cmd->vdev_subtype = __cpu_to_le32(vdev_subtype);
+ ether_addr_copy(cmd->vdev_macaddr.addr, mac_addr);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev create\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_vdev_delete(struct ath10k *ar, u32 vdev_id)
+{
+ struct wmi_vdev_delete_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (void *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_DELETE_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev delete\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_vdev_start(struct ath10k *ar,
+ const struct wmi_vdev_start_request_arg *arg,
+ bool restart)
+{
+ struct wmi_tlv_vdev_start_cmd *cmd;
+ struct wmi_channel *ch;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ size_t len;
+ void *ptr;
+ u32 flags = 0;
+
+ if (WARN_ON(arg->hidden_ssid && !arg->ssid))
+ return ERR_PTR(-EINVAL);
+ if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
+ return ERR_PTR(-EINVAL);
+
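+ /* Buffer layout:
+ * [tlv][vdev_start_cmd][tlv][wmi_channel][tlv][empty NoA array]
+ */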
+ len = (sizeof(*tlv) + sizeof(*cmd)) +
+ (sizeof(*tlv) + sizeof(*ch)) +
+ (sizeof(*tlv) + 0);
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ if (arg->hidden_ssid)
+ flags |= WMI_VDEV_START_HIDDEN_SSID;
+ if (arg->pmf_enabled)
+ flags |= WMI_VDEV_START_PMF_ENABLED;
+
+ ptr = (void *)skb->data;
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_START_REQUEST_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
+ cmd->bcn_intval = __cpu_to_le32(arg->bcn_intval);
+ cmd->dtim_period = __cpu_to_le32(arg->dtim_period);
+ cmd->flags = __cpu_to_le32(flags);
+ cmd->bcn_tx_rate = __cpu_to_le32(arg->bcn_tx_rate);
+ cmd->bcn_tx_power = __cpu_to_le32(arg->bcn_tx_power);
+ cmd->disable_hw_ack = __cpu_to_le32(arg->disable_hw_ack);
+
+ if (arg->ssid) {
+ cmd->ssid.ssid_len = __cpu_to_le32(arg->ssid_len);
+ memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len);
+ }
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*cmd);
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_CHANNEL);
+ tlv->len = __cpu_to_le16(sizeof(*ch));
+ ch = (void *)tlv->value;
+ ath10k_wmi_put_wmi_channel(ar, ch, &arg->channel);
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*ch);
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
+ tlv->len = 0;
+
+ /* Note: This is a zero-length nested TLV array; if populated it would
+ * contain:
+ * [wmi_tlv][wmi_p2p_noa_descriptor][wmi_tlv]...
+ */
+
+ ptr += sizeof(*tlv);
+ ptr += 0;
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev start\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_vdev_stop(struct ath10k *ar, u32 vdev_id)
+{
+ struct wmi_vdev_stop_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (void *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_STOP_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev stop\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid,
+ const u8 *bssid)
+{
+ struct wmi_vdev_up_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (void *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_UP_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->vdev_assoc_id = __cpu_to_le32(aid);
+ ether_addr_copy(cmd->vdev_bssid.addr, bssid);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev up\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_vdev_down(struct ath10k *ar, u32 vdev_id)
+{
+ struct wmi_vdev_down_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (void *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_DOWN_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev down\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_vdev_set_param(struct ath10k *ar, u32 vdev_id,
+ u32 param_id, u32 param_value)
+{
+ struct wmi_vdev_set_param_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (void *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_SET_PARAM_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->param_id = __cpu_to_le32(param_id);
+ cmd->param_value = __cpu_to_le32(param_value);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev %d set param %d value 0x%x\n",
+ vdev_id, param_id, param_value);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_vdev_install_key(struct ath10k *ar,
+ const struct wmi_vdev_install_key_arg *arg)
+{
+ struct wmi_vdev_install_key_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ size_t len;
+ void *ptr;
+
+ if (arg->key_cipher == ar->wmi_key_cipher[WMI_CIPHER_NONE] &&
+ arg->key_data)
+ return ERR_PTR(-EINVAL);
+ if (arg->key_cipher != ar->wmi_key_cipher[WMI_CIPHER_NONE] &&
+ !arg->key_data)
+ return ERR_PTR(-EINVAL);
+
+ len = sizeof(*tlv) + sizeof(*cmd) +
+ sizeof(*tlv) + roundup(arg->key_len, sizeof(__le32));
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = (void *)skb->data;
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_INSTALL_KEY_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
+ cmd->key_idx = __cpu_to_le32(arg->key_idx);
+ cmd->key_flags = __cpu_to_le32(arg->key_flags);
+ cmd->key_cipher = __cpu_to_le32(arg->key_cipher);
+ cmd->key_len = __cpu_to_le32(arg->key_len);
+ cmd->key_txmic_len = __cpu_to_le32(arg->key_txmic_len);
+ cmd->key_rxmic_len = __cpu_to_le32(arg->key_rxmic_len);
+
+ if (arg->macaddr)
+ ether_addr_copy(cmd->peer_macaddr.addr, arg->macaddr);
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*cmd);
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
+ tlv->len = __cpu_to_le16(roundup(arg->key_len, sizeof(__le32)));
+ if (arg->key_data)
+ memcpy(tlv->value, arg->key_data, arg->key_len);
+
+ ptr += sizeof(*tlv);
+ ptr += roundup(arg->key_len, sizeof(__le32));
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev install key\n");
+ return skb;
+}
+
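+/* Append one UAPSD auto-trigger TLV for a single access category and
+ * return the write pointer advanced past it.
+ */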
+static void *ath10k_wmi_tlv_put_uapsd_ac(struct ath10k *ar, void *ptr,
+ const struct wmi_sta_uapsd_auto_trig_arg *arg)
+{
+ struct wmi_sta_uapsd_auto_trig_param *ac;
+ struct wmi_tlv *tlv;
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_UAPSD_AUTO_TRIG_PARAM);
+ tlv->len = __cpu_to_le16(sizeof(*ac));
+ ac = (void *)tlv->value;
+
+ ac->wmm_ac = __cpu_to_le32(arg->wmm_ac);
+ ac->user_priority = __cpu_to_le32(arg->user_priority);
+ ac->service_interval = __cpu_to_le32(arg->service_interval);
+ ac->suspend_interval = __cpu_to_le32(arg->suspend_interval);
+ ac->delay_interval = __cpu_to_le32(arg->delay_interval);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi tlv vdev sta uapsd auto trigger ac %d prio %d svc int %d susp int %d delay int %d\n",
+ arg->wmm_ac, arg->user_priority, arg->service_interval,
+ arg->suspend_interval, arg->delay_interval);
+
+ return ptr + sizeof(*tlv) + sizeof(*ac);
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_vdev_sta_uapsd(struct ath10k *ar, u32 vdev_id,
+ const u8 peer_addr[ETH_ALEN],
+ const struct wmi_sta_uapsd_auto_trig_arg *args,
+ u32 num_ac)
+{
+ struct wmi_sta_uapsd_auto_trig_cmd_fixed_param *cmd;
+ struct wmi_sta_uapsd_auto_trig_param *ac;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ size_t len;
+ size_t ac_tlv_len;
+ void *ptr;
+ int i;
+
+ ac_tlv_len = num_ac * (sizeof(*tlv) + sizeof(*ac));
+ len = sizeof(*tlv) + sizeof(*cmd) +
+ sizeof(*tlv) + ac_tlv_len;
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = (void *)skb->data;
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_UAPSD_AUTO_TRIG_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->num_ac = __cpu_to_le32(num_ac);
+ ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*cmd);
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
+ tlv->len = __cpu_to_le16(ac_tlv_len);
+ ac = (void *)tlv->value;
+
+ ptr += sizeof(*tlv);
+ for (i = 0; i < num_ac; i++)
+ ptr = ath10k_wmi_tlv_put_uapsd_ac(ar, ptr, &args[i]);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev sta uapsd auto trigger\n");
+ return skb;
+}
+
+static void *ath10k_wmi_tlv_put_wmm(void *ptr,
+ const struct wmi_wmm_params_arg *arg)
+{
+ struct wmi_wmm_params *wmm;
+ struct wmi_tlv *tlv;
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WMM_PARAMS);
+ tlv->len = __cpu_to_le16(sizeof(*wmm));
+ wmm = (void *)tlv->value;
+ ath10k_wmi_set_wmm_param(wmm, arg);
+
+ return ptr + sizeof(*tlv) + sizeof(*wmm);
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_vdev_wmm_conf(struct ath10k *ar, u32 vdev_id,
+ const struct wmi_wmm_params_all_arg *arg)
+{
+ struct wmi_tlv_vdev_set_wmm_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ size_t len;
+ void *ptr;
+
+ len = sizeof(*tlv) + sizeof(*cmd);
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = (void *)skb->data;
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_SET_WMM_PARAMS_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+
+ ath10k_wmi_set_wmm_param(&cmd->vdev_wmm_params[0].params, &arg->ac_be);
+ ath10k_wmi_set_wmm_param(&cmd->vdev_wmm_params[1].params, &arg->ac_bk);
+ ath10k_wmi_set_wmm_param(&cmd->vdev_wmm_params[2].params, &arg->ac_vi);
+ ath10k_wmi_set_wmm_param(&cmd->vdev_wmm_params[3].params, &arg->ac_vo);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev wmm conf\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_sta_keepalive(struct ath10k *ar,
+ const struct wmi_sta_keepalive_arg *arg)
+{
+ struct wmi_tlv_sta_keepalive_cmd *cmd;
+ struct wmi_sta_keepalive_arp_resp *arp;
+ struct sk_buff *skb;
+ struct wmi_tlv *tlv;
+ void *ptr;
+ size_t len;
+
+ len = sizeof(*tlv) + sizeof(*cmd) +
+ sizeof(*tlv) + sizeof(*arp);
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = (void *)skb->data;
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_KEEPALIVE_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
+ cmd->enabled = __cpu_to_le32(arg->enabled);
+ cmd->method = __cpu_to_le32(arg->method);
+ cmd->interval = __cpu_to_le32(arg->interval);
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*cmd);
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_KEEPALVE_ARP_RESPONSE);
+ tlv->len = __cpu_to_le16(sizeof(*arp));
+ arp = (void *)tlv->value;
+
+ arp->src_ip4_addr = arg->src_ip4_addr;
+ arp->dest_ip4_addr = arg->dest_ip4_addr;
+ ether_addr_copy(arp->dest_mac_addr.addr, arg->dest_mac_addr);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv sta keepalive vdev %d enabled %d method %d interval %d\n",
+ arg->vdev_id, arg->enabled, arg->method, arg->interval);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_peer_create(struct ath10k *ar, u32 vdev_id,
+ const u8 peer_addr[ETH_ALEN],
+ enum wmi_peer_type peer_type)
+{
+ struct wmi_tlv_peer_create_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (void *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PEER_CREATE_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->peer_type = __cpu_to_le32(peer_type);
+ ether_addr_copy(cmd->peer_addr.addr, peer_addr);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer create\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_peer_delete(struct ath10k *ar, u32 vdev_id,
+ const u8 peer_addr[ETH_ALEN])
+{
+ struct wmi_peer_delete_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (void *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PEER_DELETE_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer delete\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_peer_flush(struct ath10k *ar, u32 vdev_id,
+ const u8 peer_addr[ETH_ALEN], u32 tid_bitmap)
+{
+ struct wmi_peer_flush_tids_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (void *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PEER_FLUSH_TIDS_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->peer_tid_bitmap = __cpu_to_le32(tid_bitmap);
+ ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer flush\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_peer_set_param(struct ath10k *ar, u32 vdev_id,
+ const u8 *peer_addr,
+ enum wmi_peer_param param_id,
+ u32 param_value)
+{
+ struct wmi_peer_set_param_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (void *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PEER_SET_PARAM_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->param_id = __cpu_to_le32(param_id);
+ cmd->param_value = __cpu_to_le32(param_value);
+ ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi tlv vdev %d peer %pM set param %d value 0x%x\n",
+ vdev_id, peer_addr, param_id, param_value);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_peer_assoc(struct ath10k *ar,
+ const struct wmi_peer_assoc_complete_arg *arg)
+{
+ struct wmi_tlv_peer_assoc_cmd *cmd;
+ struct wmi_vht_rate_set *vht_rate;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ size_t len, legacy_rate_len, ht_rate_len;
+ void *ptr;
+
+ if (arg->peer_mpdu_density > 16)
+ return ERR_PTR(-EINVAL);
+ if (arg->peer_legacy_rates.num_rates > MAX_SUPPORTED_RATES)
+ return ERR_PTR(-EINVAL);
+ if (arg->peer_ht_rates.num_rates > MAX_SUPPORTED_RATES)
+ return ERR_PTR(-EINVAL);
+
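+ /* Rate lists are byte arrays padded to a 4-byte boundary. Buffer layout:
+ * [tlv][peer_assoc_cmd][tlv][legacy rates][tlv][ht rates][tlv][vht_rate_set]
+ */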
+ legacy_rate_len = roundup(arg->peer_legacy_rates.num_rates,
+ sizeof(__le32));
+ ht_rate_len = roundup(arg->peer_ht_rates.num_rates, sizeof(__le32));
+ len = (sizeof(*tlv) + sizeof(*cmd)) +
+ (sizeof(*tlv) + legacy_rate_len) +
+ (sizeof(*tlv) + ht_rate_len) +
+ (sizeof(*tlv) + sizeof(*vht_rate));
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = (void *)skb->data;
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PEER_ASSOC_COMPLETE_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+
+ cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
+ cmd->new_assoc = __cpu_to_le32(arg->peer_reassoc ? 0 : 1);
+ cmd->assoc_id = __cpu_to_le32(arg->peer_aid);
+ cmd->flags = __cpu_to_le32(arg->peer_flags);
+ cmd->caps = __cpu_to_le32(arg->peer_caps);
+ cmd->listen_intval = __cpu_to_le32(arg->peer_listen_intval);
+ cmd->ht_caps = __cpu_to_le32(arg->peer_ht_caps);
+ cmd->max_mpdu = __cpu_to_le32(arg->peer_max_mpdu);
+ cmd->mpdu_density = __cpu_to_le32(arg->peer_mpdu_density);
+ cmd->rate_caps = __cpu_to_le32(arg->peer_rate_caps);
+ cmd->nss = __cpu_to_le32(arg->peer_num_spatial_streams);
+ cmd->vht_caps = __cpu_to_le32(arg->peer_vht_caps);
+ cmd->phy_mode = __cpu_to_le32(arg->peer_phymode);
+ cmd->num_legacy_rates = __cpu_to_le32(arg->peer_legacy_rates.num_rates);
+ cmd->num_ht_rates = __cpu_to_le32(arg->peer_ht_rates.num_rates);
+ ether_addr_copy(cmd->mac_addr.addr, arg->addr);
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*cmd);
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
+ tlv->len = __cpu_to_le16(legacy_rate_len);
+ memcpy(tlv->value, arg->peer_legacy_rates.rates,
+ arg->peer_legacy_rates.num_rates);
+
+ ptr += sizeof(*tlv);
+ ptr += legacy_rate_len;
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
+ tlv->len = __cpu_to_le16(ht_rate_len);
+ memcpy(tlv->value, arg->peer_ht_rates.rates,
+ arg->peer_ht_rates.num_rates);
+
+ ptr += sizeof(*tlv);
+ ptr += ht_rate_len;
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VHT_RATE_SET);
+ tlv->len = __cpu_to_le16(sizeof(*vht_rate));
+ vht_rate = (void *)tlv->value;
+
+ vht_rate->rx_max_rate = __cpu_to_le32(arg->peer_vht_rates.rx_max_rate);
+ vht_rate->rx_mcs_set = __cpu_to_le32(arg->peer_vht_rates.rx_mcs_set);
+ vht_rate->tx_max_rate = __cpu_to_le32(arg->peer_vht_rates.tx_max_rate);
+ vht_rate->tx_mcs_set = __cpu_to_le32(arg->peer_vht_rates.tx_mcs_set);
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*vht_rate);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer assoc\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_set_psmode(struct ath10k *ar, u32 vdev_id,
+ enum wmi_sta_ps_mode psmode)
+{
+ struct wmi_sta_powersave_mode_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (void *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_POWERSAVE_MODE_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->sta_ps_mode = __cpu_to_le32(psmode);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv set psmode\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_set_sta_ps(struct ath10k *ar, u32 vdev_id,
+ enum wmi_sta_powersave_param param_id,
+ u32 param_value)
+{
+ struct wmi_sta_powersave_param_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (void *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_POWERSAVE_PARAM_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->param_id = __cpu_to_le32(param_id);
+ cmd->param_value = __cpu_to_le32(param_value);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv set sta ps\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_set_ap_ps(struct ath10k *ar, u32 vdev_id, const u8 *mac,
+ enum wmi_ap_ps_peer_param param_id, u32 value)
+{
+ struct wmi_ap_ps_peer_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+
+ if (!mac)
+ return ERR_PTR(-EINVAL);
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (void *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_AP_PS_PEER_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->param_id = __cpu_to_le32(param_id);
+ cmd->param_value = __cpu_to_le32(value);
+ ether_addr_copy(cmd->peer_macaddr.addr, mac);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv ap ps param\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_scan_chan_list(struct ath10k *ar,
+ const struct wmi_scan_chan_list_arg *arg)
+{
+ struct wmi_tlv_scan_chan_list_cmd *cmd;
+ struct wmi_channel *ci;
+ struct wmi_channel_arg *ch;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ size_t chans_len, len;
+ int i;
+ void *ptr, *chans;
+
+ chans_len = arg->n_channels * (sizeof(*tlv) + sizeof(*ci));
+ len = (sizeof(*tlv) + sizeof(*cmd)) +
+ (sizeof(*tlv) + chans_len);
+
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = (void *)skb->data;
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_SCAN_CHAN_LIST_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->num_scan_chans = __cpu_to_le32(arg->n_channels);
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*cmd);
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
+ tlv->len = __cpu_to_le16(chans_len);
+ chans = (void *)tlv->value;
+
+ for (i = 0; i < arg->n_channels; i++) {
+ ch = &arg->channels[i];
+
+ tlv = chans;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_CHANNEL);
+ tlv->len = __cpu_to_le16(sizeof(*ci));
+ ci = (void *)tlv->value;
+
+ ath10k_wmi_put_wmi_channel(ar, ci, ch);
+
+ chans += sizeof(*tlv);
+ chans += sizeof(*ci);
+ }
+
+ ptr += sizeof(*tlv);
+ ptr += chans_len;
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv scan chan list\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_scan_prob_req_oui(struct ath10k *ar, u32 prob_req_oui)
+{
+ struct wmi_scan_prob_req_oui_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (void *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_SCAN_PROB_REQ_OUI_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->prob_req_oui = __cpu_to_le32(prob_req_oui);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv scan prob req oui\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_beacon_dma(struct ath10k *ar, u32 vdev_id,
+ const void *bcn, size_t bcn_len,
+ u32 bcn_paddr, bool dtim_zero,
+ bool deliver_cab)
+{
+ struct wmi_bcn_tx_ref_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ struct ieee80211_hdr *hdr;
+ u16 fc;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ hdr = (struct ieee80211_hdr *)bcn;
+ fc = le16_to_cpu(hdr->frame_control);
+
+ tlv = (void *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_BCN_SEND_FROM_HOST_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->data_len = __cpu_to_le32(bcn_len);
+ cmd->data_ptr = __cpu_to_le32(bcn_paddr);
+ cmd->msdu_id = 0;
+ cmd->frame_control = __cpu_to_le32(fc);
+ cmd->flags = 0;
+
+ if (dtim_zero)
+ cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DTIM_ZERO);
+
+ if (deliver_cab)
+ cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DELIVER_CAB);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv beacon dma\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_pdev_set_wmm(struct ath10k *ar,
+ const struct wmi_wmm_params_all_arg *arg)
+{
+ struct wmi_tlv_pdev_set_wmm_cmd *cmd;
+ struct wmi_wmm_params *wmm;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ size_t len;
+ void *ptr;
+
+ len = (sizeof(*tlv) + sizeof(*cmd)) +
+ (4 * (sizeof(*tlv) + sizeof(*wmm)));
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = (void *)skb->data;
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_SET_WMM_PARAMS_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+
+ /* nothing to set here */
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*cmd);
+
+ ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_be);
+ ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_bk);
+ ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_vi);
+ ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_vo);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev set wmm\n");
+ return skb;
+}
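+
+/* Note the fixed serialization order above: the per-AC WMM parameter sets are
+ * appended as best effort, background, video, voice, each as its own TLV via
+ * ath10k_wmi_tlv_put_wmm() - presumably the order the firmware expects for
+ * this command.
+ */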
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_request_stats(struct ath10k *ar, u32 stats_mask)
+{
+ struct wmi_request_stats_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (void *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_REQUEST_STATS_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->stats_id = __cpu_to_le32(stats_mask);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv request stats\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_request_peer_stats_info(struct ath10k *ar,
+ u32 vdev_id,
+ enum wmi_peer_stats_info_request_type type,
+ u8 *addr,
+ u32 reset)
+{
+ struct wmi_tlv_request_peer_stats_info *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (void *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_REQUEST_PEER_STATS_INFO_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->request_type = __cpu_to_le32(type);
+
+ if (type == WMI_REQUEST_ONE_PEER_STATS_INFO)
+ ether_addr_copy(cmd->peer_macaddr.addr, addr);
+
+ cmd->reset_after_request = __cpu_to_le32(reset);
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv request peer stats info\n");
+ return skb;
+}
+
+static int
+ath10k_wmi_tlv_op_cleanup_mgmt_tx_send(struct ath10k *ar,
+ struct sk_buff *msdu)
+{
+ struct ath10k_skb_cb *cb = ATH10K_SKB_CB(msdu);
+ struct ath10k_mgmt_tx_pkt_addr *pkt_addr;
+ struct ath10k_wmi *wmi = &ar->wmi;
+
+ spin_lock_bh(&ar->data_lock);
+ pkt_addr = idr_remove(&wmi->mgmt_pending_tx, cb->msdu_id);
+ spin_unlock_bh(&ar->data_lock);
+
+ kfree(pkt_addr);
+
+ return 0;
+}
+
+static int
+ath10k_wmi_mgmt_tx_alloc_msdu_id(struct ath10k *ar, struct sk_buff *skb,
+ dma_addr_t paddr)
+{
+ struct ath10k_wmi *wmi = &ar->wmi;
+ struct ath10k_mgmt_tx_pkt_addr *pkt_addr;
+ int ret;
+
+ pkt_addr = kmalloc(sizeof(*pkt_addr), GFP_ATOMIC);
+ if (!pkt_addr)
+ return -ENOMEM;
+
+ pkt_addr->vaddr = skb;
+ pkt_addr->paddr = paddr;
+
+ spin_lock_bh(&ar->data_lock);
+ ret = idr_alloc(&wmi->mgmt_pending_tx, pkt_addr, 0,
+ wmi->mgmt_max_num_pending_tx, GFP_ATOMIC);
+ spin_unlock_bh(&ar->data_lock);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi mgmt tx alloc msdu_id ret %d\n", ret);
+ return ret;
+}
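+
+/* The idr under ar->data_lock hands out descriptor ids in the range
+ * [0, mgmt_max_num_pending_tx); the id is carried as desc_id in the mgmt tx
+ * command so that the completion path can look the skb/paddr pair up again
+ * and release it via the cleanup helper above.
+ */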
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu,
+ dma_addr_t paddr)
+{
+ struct ath10k_skb_cb *cb = ATH10K_SKB_CB(msdu);
+ struct wmi_tlv_mgmt_tx_cmd *cmd;
+ struct ieee80211_hdr *hdr;
+ struct ath10k_vif *arvif;
+ u32 buf_len = msdu->len;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ int len, desc_id;
+ u32 vdev_id;
+ void *ptr;
+
+ if (!cb->vif)
+ return ERR_PTR(-EINVAL);
+
+ hdr = (struct ieee80211_hdr *)msdu->data;
+ arvif = (void *)cb->vif->drv_priv;
+ vdev_id = arvif->vdev_id;
+
+ if (WARN_ON_ONCE(!ieee80211_is_mgmt(hdr->frame_control) &&
+ (!(ieee80211_is_nullfunc(hdr->frame_control) ||
+ ieee80211_is_qos_nullfunc(hdr->frame_control)))))
+ return ERR_PTR(-EINVAL);
+
+ len = sizeof(*cmd) + 2 * sizeof(*tlv);
+
+ if ((ieee80211_is_action(hdr->frame_control) ||
+ ieee80211_is_deauth(hdr->frame_control) ||
+ ieee80211_is_disassoc(hdr->frame_control)) &&
+ ieee80211_has_protected(hdr->frame_control)) {
+ skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
+ buf_len += IEEE80211_CCMP_MIC_LEN;
+ }
+
+ buf_len = min_t(u32, buf_len, WMI_TLV_MGMT_TX_FRAME_MAX_LEN);
+ buf_len = round_up(buf_len, 4);
+
+ len += buf_len;
+ len = round_up(len, 4);
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ desc_id = ath10k_wmi_mgmt_tx_alloc_msdu_id(ar, msdu, paddr);
+ if (desc_id < 0)
+ goto err_free_skb;
+
+ cb->msdu_id = desc_id;
+
+ ptr = (void *)skb->data;
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_MGMT_TX_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->desc_id = __cpu_to_le32(desc_id);
+ cmd->chanfreq = 0;
+ cmd->buf_len = __cpu_to_le32(buf_len);
+ cmd->frame_len = __cpu_to_le32(msdu->len);
+ cmd->paddr = __cpu_to_le64(paddr);
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*cmd);
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
+ tlv->len = __cpu_to_le16(buf_len);
+
+ ptr += sizeof(*tlv);
+ memcpy(ptr, msdu->data, buf_len);
+
+ return skb;
+
+err_free_skb:
+ dev_kfree_skb(skb);
+ return ERR_PTR(desc_id);
+}
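+
+/* Layout sketch for the command built above:
+ *
+ *   [TLV: MGMT_TX_CMD | wmi_tlv_mgmt_tx_cmd]
+ *   [TLV: ARRAY_BYTE  | first buf_len bytes of the frame, padded to 4 bytes]
+ *
+ * buf_len is capped at WMI_TLV_MGMT_TX_FRAME_MAX_LEN, so only the head of the
+ * frame travels inline; frame_len and paddr describe the full DMA-mapped
+ * frame, which the firmware can presumably fetch directly. Protected
+ * action/deauth/disassoc frames get IEEE80211_CCMP_MIC_LEN bytes of tailroom
+ * so the firmware has room to append the MIC.
+ */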
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_force_fw_hang(struct ath10k *ar,
+ enum wmi_force_fw_hang_type type,
+ u32 delay_ms)
+{
+ struct wmi_force_fw_hang_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (void *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_FORCE_FW_HANG_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->type = __cpu_to_le32(type);
+ cmd->delay_ms = __cpu_to_le32(delay_ms);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv force fw hang\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_dbglog_cfg(struct ath10k *ar, u64 module_enable,
+ u32 log_level)
+{
+ struct wmi_tlv_dbglog_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ size_t len, bmap_len;
+ u32 value;
+ void *ptr;
+
+ if (module_enable) {
+ value = WMI_TLV_DBGLOG_LOG_LEVEL_VALUE(
+ module_enable,
+ WMI_TLV_DBGLOG_LOG_LEVEL_VERBOSE);
+ } else {
+ value = WMI_TLV_DBGLOG_LOG_LEVEL_VALUE(
+ WMI_TLV_DBGLOG_ALL_MODULES,
+ WMI_TLV_DBGLOG_LOG_LEVEL_WARN);
+ }
+
+ bmap_len = 0;
+ len = sizeof(*tlv) + sizeof(*cmd) + sizeof(*tlv) + bmap_len;
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = (void *)skb->data;
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_DEBUG_LOG_CONFIG_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->param = __cpu_to_le32(WMI_TLV_DBGLOG_PARAM_LOG_LEVEL);
+ cmd->value = __cpu_to_le32(value);
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*cmd);
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
+ tlv->len = __cpu_to_le16(bmap_len);
+
+ /* nothing to do here */
+
+ ptr += sizeof(*tlv);
+ ptr += bmap_len;
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv dbglog value 0x%08x\n", value);
+ return skb;
+}
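+
+/* The trailing ARRAY_UINT32 TLV above is a zero-length module bitmap
+ * (bmap_len is hardwired to 0); the log level is selected purely through the
+ * param/value pair - verbose for the requested modules, or warn for all
+ * modules when module_enable is 0.
+ */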
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_pktlog_enable(struct ath10k *ar, u32 filter)
+{
+ struct wmi_tlv_pktlog_enable *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ void *ptr;
+ size_t len;
+
+ len = sizeof(*tlv) + sizeof(*cmd);
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = (void *)skb->data;
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_PKTLOG_ENABLE_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->filter = __cpu_to_le32(filter);
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*cmd);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pktlog enable filter 0x%08x\n",
+ filter);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_pdev_get_temperature(struct ath10k *ar)
+{
+ struct wmi_tlv_pdev_get_temp_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (void *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_GET_TEMPERATURE_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev get temperature\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_pktlog_disable(struct ath10k *ar)
+{
+ struct wmi_tlv_pktlog_disable *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ void *ptr;
+ size_t len;
+
+ len = sizeof(*tlv) + sizeof(*cmd);
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = (void *)skb->data;
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_PKTLOG_DISABLE_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*cmd);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pktlog disable\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_bcn_tmpl(struct ath10k *ar, u32 vdev_id,
+ u32 tim_ie_offset, struct sk_buff *bcn,
+ u32 prb_caps, u32 prb_erp, void *prb_ies,
+ size_t prb_ies_len)
+{
+ struct wmi_tlv_bcn_tmpl_cmd *cmd;
+ struct wmi_tlv_bcn_prb_info *info;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ void *ptr;
+ size_t len;
+
+ if (WARN_ON(prb_ies_len > 0 && !prb_ies))
+ return ERR_PTR(-EINVAL);
+
+ len = sizeof(*tlv) + sizeof(*cmd) +
+ sizeof(*tlv) + sizeof(*info) + prb_ies_len +
+ sizeof(*tlv) + roundup(bcn->len, 4);
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = (void *)skb->data;
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_BCN_TMPL_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->tim_ie_offset = __cpu_to_le32(tim_ie_offset);
+ cmd->buf_len = __cpu_to_le32(bcn->len);
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*cmd);
+
+ /* FIXME: prb_ies_len should probably be aligned to a 4-byte boundary,
+ * but then it becomes impossible to pass the original IE length.
+ * This chunk is not used yet, so if setting the probe response
+ * template causes beaconing problems or crashes the firmware, look
+ * here.
+ */
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_BCN_PRB_INFO);
+ tlv->len = __cpu_to_le16(sizeof(*info) + prb_ies_len);
+ info = (void *)tlv->value;
+ info->caps = __cpu_to_le32(prb_caps);
+ info->erp = __cpu_to_le32(prb_erp);
+ memcpy(info->ies, prb_ies, prb_ies_len);
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*info);
+ ptr += prb_ies_len;
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
+ tlv->len = __cpu_to_le16(roundup(bcn->len, 4));
+ memcpy(tlv->value, bcn->data, bcn->len);
+
+ /* FIXME: Adjust TSF? */
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv bcn tmpl vdev_id %i\n",
+ vdev_id);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_prb_tmpl(struct ath10k *ar, u32 vdev_id,
+ struct sk_buff *prb)
+{
+ struct wmi_tlv_prb_tmpl_cmd *cmd;
+ struct wmi_tlv_bcn_prb_info *info;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ void *ptr;
+ size_t len;
+
+ len = sizeof(*tlv) + sizeof(*cmd) +
+ sizeof(*tlv) + sizeof(*info) +
+ sizeof(*tlv) + roundup(prb->len, 4);
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = (void *)skb->data;
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PRB_TMPL_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->buf_len = __cpu_to_le32(prb->len);
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*cmd);
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_BCN_PRB_INFO);
+ tlv->len = __cpu_to_le16(sizeof(*info));
+ info = (void *)tlv->value;
+ info->caps = 0;
+ info->erp = 0;
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*info);
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
+ tlv->len = __cpu_to_le16(roundup(prb->len, 4));
+ memcpy(tlv->value, prb->data, prb->len);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv prb tmpl vdev_id %i\n",
+ vdev_id);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_p2p_go_bcn_ie(struct ath10k *ar, u32 vdev_id,
+ const u8 *p2p_ie)
+{
+ struct wmi_tlv_p2p_go_bcn_ie *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ void *ptr;
+ size_t len;
+
+ len = sizeof(*tlv) + sizeof(*cmd) +
+ sizeof(*tlv) + roundup(p2p_ie[1] + 2, 4);
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = (void *)skb->data;
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_P2P_GO_SET_BEACON_IE);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->ie_len = __cpu_to_le32(p2p_ie[1] + 2);
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*cmd);
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
+ tlv->len = __cpu_to_le16(roundup(p2p_ie[1] + 2, 4));
+ memcpy(tlv->value, p2p_ie, p2p_ie[1] + 2);
+
+ ptr += sizeof(*tlv);
+ ptr += roundup(p2p_ie[1] + 2, 4);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv p2p go bcn ie for vdev %i\n",
+ vdev_id);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_update_fw_tdls_state(struct ath10k *ar, u32 vdev_id,
+ enum wmi_tdls_state state)
+{
+ struct wmi_tdls_set_state_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ void *ptr;
+ size_t len;
+ /* Options are taken from enum wmi_tlv_tdls_options; none are
+ * enabled here unless the firmware advertises the matching
+ * service below.
+ */
+ u32 options = 0;
+
+ if (test_bit(WMI_SERVICE_TDLS_UAPSD_BUFFER_STA, ar->wmi.svc_map))
+ options |= WMI_TLV_TDLS_BUFFER_STA_EN;
+
+ /* WMI_TDLS_ENABLE_ACTIVE_EXTERNAL_CONTROL means the firmware will
+ * handle the TDLS link inactivity detection logic.
+ */
+ if (state == WMI_TDLS_ENABLE_ACTIVE)
+ state = WMI_TDLS_ENABLE_ACTIVE_EXTERNAL_CONTROL;
+
+ len = sizeof(*tlv) + sizeof(*cmd);
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = (void *)skb->data;
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_TDLS_SET_STATE_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->state = __cpu_to_le32(state);
+ cmd->notification_interval_ms = __cpu_to_le32(5000);
+ cmd->tx_discovery_threshold = __cpu_to_le32(100);
+ cmd->tx_teardown_threshold = __cpu_to_le32(5);
+ cmd->rssi_teardown_threshold = __cpu_to_le32(-75);
+ cmd->rssi_delta = __cpu_to_le32(-20);
+ cmd->tdls_options = __cpu_to_le32(options);
+ cmd->tdls_peer_traffic_ind_window = __cpu_to_le32(2);
+ cmd->tdls_peer_traffic_response_timeout_ms = __cpu_to_le32(5000);
+ cmd->tdls_puapsd_mask = __cpu_to_le32(0xf);
+ cmd->tdls_puapsd_inactivity_time_ms = __cpu_to_le32(0);
+ cmd->tdls_puapsd_rx_frame_threshold = __cpu_to_le32(10);
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*cmd);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv update fw tdls state %d for vdev %i\n",
+ state, vdev_id);
+ return skb;
+}
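+
+/* The notification interval, discovery/teardown thresholds, RSSI limits and
+ * U-APSD settings above are fixed host-side defaults rather than values
+ * plumbed through from the caller; only vdev_id, state and the option bits
+ * vary per invocation.
+ */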
+
+static u32 ath10k_wmi_tlv_prepare_peer_qos(u8 uapsd_queues, u8 sp)
+{
+ u32 peer_qos = 0;
+
+ if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
+ peer_qos |= WMI_TLV_TDLS_PEER_QOS_AC_VO;
+ if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
+ peer_qos |= WMI_TLV_TDLS_PEER_QOS_AC_VI;
+ if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
+ peer_qos |= WMI_TLV_TDLS_PEER_QOS_AC_BK;
+ if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
+ peer_qos |= WMI_TLV_TDLS_PEER_QOS_AC_BE;
+
+ peer_qos |= SM(sp, WMI_TLV_TDLS_PEER_SP);
+
+ return peer_qos;
+}
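+
+/* peer_qos packs the four U-APSD AC enable bits into the low bits and the
+ * max service period into the WMI_TLV_TDLS_PEER_SP field via SM(), mirroring
+ * the QoS-info octet of the peer's WMM IE.
+ */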
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_tdls_peer_update(struct ath10k *ar,
+ const struct wmi_tdls_peer_update_cmd_arg *arg,
+ const struct wmi_tdls_peer_capab_arg *cap,
+ const struct wmi_channel_arg *chan_arg)
+{
+ struct wmi_tdls_peer_update_cmd *cmd;
+ struct wmi_tdls_peer_capab *peer_cap;
+ struct wmi_channel *chan;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ u32 peer_qos;
+ void *ptr;
+ int len;
+ int i;
+
+ len = sizeof(*tlv) + sizeof(*cmd) +
+ sizeof(*tlv) + sizeof(*peer_cap) +
+ sizeof(*tlv) + cap->peer_chan_len * sizeof(*chan);
+
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = (void *)skb->data;
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_TDLS_PEER_UPDATE_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
+ ether_addr_copy(cmd->peer_macaddr.addr, arg->addr);
+ cmd->peer_state = __cpu_to_le32(arg->peer_state);
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*cmd);
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_TDLS_PEER_CAPABILITIES);
+ tlv->len = __cpu_to_le16(sizeof(*peer_cap));
+ peer_cap = (void *)tlv->value;
+ peer_qos = ath10k_wmi_tlv_prepare_peer_qos(cap->peer_uapsd_queues,
+ cap->peer_max_sp);
+ peer_cap->peer_qos = __cpu_to_le32(peer_qos);
+ peer_cap->buff_sta_support = __cpu_to_le32(cap->buff_sta_support);
+ peer_cap->off_chan_support = __cpu_to_le32(cap->off_chan_support);
+ peer_cap->peer_curr_operclass = __cpu_to_le32(cap->peer_curr_operclass);
+ peer_cap->self_curr_operclass = __cpu_to_le32(cap->self_curr_operclass);
+ peer_cap->peer_chan_len = __cpu_to_le32(cap->peer_chan_len);
+ peer_cap->peer_operclass_len = __cpu_to_le32(cap->peer_operclass_len);
+
+ for (i = 0; i < WMI_TDLS_MAX_SUPP_OPER_CLASSES; i++)
+ peer_cap->peer_operclass[i] = cap->peer_operclass[i];
+
+ peer_cap->is_peer_responder = __cpu_to_le32(cap->is_peer_responder);
+ peer_cap->pref_offchan_num = __cpu_to_le32(cap->pref_offchan_num);
+ peer_cap->pref_offchan_bw = __cpu_to_le32(cap->pref_offchan_bw);
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*peer_cap);
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
+ tlv->len = __cpu_to_le16(cap->peer_chan_len * sizeof(*chan));
+
+ ptr += sizeof(*tlv);
+
+ for (i = 0; i < cap->peer_chan_len; i++) {
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_CHANNEL);
+ tlv->len = __cpu_to_le16(sizeof(*chan));
+ chan = (void *)tlv->value;
+ ath10k_wmi_put_wmi_channel(ar, chan, &chan_arg[i]);
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*chan);
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi tlv tdls peer update vdev %i state %d n_chans %u\n",
+ arg->vdev_id, arg->peer_state, cap->peer_chan_len);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_pdev_set_quiet_mode(struct ath10k *ar, u32 period,
+ u32 duration, u32 next_offset,
+ u32 enabled)
+{
+ struct wmi_tlv_set_quiet_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (void *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_SET_QUIET_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+
+ /* vdev_id is not in use, set to 0 */
+ cmd->vdev_id = __cpu_to_le32(0);
+ cmd->period = __cpu_to_le32(period);
+ cmd->duration = __cpu_to_le32(duration);
+ cmd->next_start = __cpu_to_le32(next_offset);
+ cmd->enabled = __cpu_to_le32(enabled);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi tlv quiet param: period %u duration %u enabled %d\n",
+ period, duration, enabled);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_wow_enable(struct ath10k *ar)
+{
+ struct wmi_tlv_wow_enable_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ size_t len;
+
+ len = sizeof(*tlv) + sizeof(*cmd);
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (struct wmi_tlv *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_ENABLE_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+
+ cmd->enable = __cpu_to_le32(1);
+ if (!ar->bus_param.link_can_suspend)
+ cmd->pause_iface_config = __cpu_to_le32(WOW_IFACE_PAUSE_DISABLED);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow enable\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_wow_add_wakeup_event(struct ath10k *ar,
+ u32 vdev_id,
+ enum wmi_wow_wakeup_event event,
+ u32 enable)
+{
+ struct wmi_tlv_wow_add_del_event_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ size_t len;
+
+ len = sizeof(*tlv) + sizeof(*cmd);
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (struct wmi_tlv *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_ADD_DEL_EVT_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->is_add = __cpu_to_le32(enable);
+ cmd->event_bitmap = __cpu_to_le32(1 << event);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow add wakeup event %s enable %d vdev_id %d\n",
+ wow_wakeup_event(event), enable, vdev_id);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_gen_wow_host_wakeup_ind(struct ath10k *ar)
+{
+ struct wmi_tlv_wow_host_wakeup_ind *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ size_t len;
+
+ len = sizeof(*tlv) + sizeof(*cmd);
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (struct wmi_tlv *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_HOSTWAKEUP_FROM_SLEEP_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow host wakeup ind\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_wow_add_pattern(struct ath10k *ar, u32 vdev_id,
+ u32 pattern_id, const u8 *pattern,
+ const u8 *bitmask, int pattern_len,
+ int pattern_offset)
+{
+ struct wmi_tlv_wow_add_pattern_cmd *cmd;
+ struct wmi_tlv_wow_bitmap_pattern *bitmap;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ void *ptr;
+ size_t len;
+
+ len = sizeof(*tlv) + sizeof(*cmd) +
+ sizeof(*tlv) + /* array struct */
+ sizeof(*tlv) + sizeof(*bitmap) + /* bitmap */
+ sizeof(*tlv) + /* empty ipv4 sync */
+ sizeof(*tlv) + /* empty ipv6 sync */
+ sizeof(*tlv) + /* empty magic */
+ sizeof(*tlv) + /* empty info timeout */
+ sizeof(*tlv) + sizeof(u32); /* ratelimit interval */
+
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ /* cmd */
+ ptr = (void *)skb->data;
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_ADD_PATTERN_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->pattern_id = __cpu_to_le32(pattern_id);
+ cmd->pattern_type = __cpu_to_le32(WOW_BITMAP_PATTERN);
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*cmd);
+
+ /* bitmap */
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
+ tlv->len = __cpu_to_le16(sizeof(*tlv) + sizeof(*bitmap));
+
+ ptr += sizeof(*tlv);
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_BITMAP_PATTERN_T);
+ tlv->len = __cpu_to_le16(sizeof(*bitmap));
+ bitmap = (void *)tlv->value;
+
+ memcpy(bitmap->patternbuf, pattern, pattern_len);
+ memcpy(bitmap->bitmaskbuf, bitmask, pattern_len);
+ bitmap->pattern_offset = __cpu_to_le32(pattern_offset);
+ bitmap->pattern_len = __cpu_to_le32(pattern_len);
+ bitmap->bitmask_len = __cpu_to_le32(pattern_len);
+ bitmap->pattern_id = __cpu_to_le32(pattern_id);
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*bitmap);
+
+ /* ipv4 sync */
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
+ tlv->len = __cpu_to_le16(0);
+
+ ptr += sizeof(*tlv);
+
+ /* ipv6 sync */
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
+ tlv->len = __cpu_to_le16(0);
+
+ ptr += sizeof(*tlv);
+
+ /* magic */
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
+ tlv->len = __cpu_to_le16(0);
+
+ ptr += sizeof(*tlv);
+
+ /* pattern info timeout */
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
+ tlv->len = __cpu_to_le16(0);
+
+ ptr += sizeof(*tlv);
+
+ /* ratelimit interval */
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
+ tlv->len = __cpu_to_le16(sizeof(u32));
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow add pattern vdev_id %d pattern_id %d, pattern_offset %d\n",
+ vdev_id, pattern_id, pattern_offset);
+ return skb;
+}
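+
+/* Only the bitmap pattern TLV above carries data; the ipv4/ipv6 sync, magic
+ * and timeout arrays are emitted as zero-length TLVs, and the ratelimit
+ * interval TLV merely reserves space for a u32 that is never written
+ * explicitly. The firmware parser seemingly requires every array to be
+ * present even when unused.
+ */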
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_wow_del_pattern(struct ath10k *ar, u32 vdev_id,
+ u32 pattern_id)
+{
+ struct wmi_tlv_wow_del_pattern_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ size_t len;
+
+ len = sizeof(*tlv) + sizeof(*cmd);
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (struct wmi_tlv *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_DEL_PATTERN_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->pattern_id = __cpu_to_le32(pattern_id);
+ cmd->pattern_type = __cpu_to_le32(WOW_BITMAP_PATTERN);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow del pattern vdev_id %d pattern_id %d\n",
+ vdev_id, pattern_id);
+ return skb;
+}
+
+/* Request FW to start PNO operation */
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_config_pno_start(struct ath10k *ar,
+ u32 vdev_id,
+ struct wmi_pno_scan_req *pno)
+{
+ struct nlo_configured_parameters *nlo_list;
+ struct wmi_tlv_wow_nlo_config_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ __le32 *channel_list;
+ u16 tlv_len;
+ size_t len;
+ void *ptr;
+ u32 i;
+
+ /* header TLV + cmd, plus two placeholder TLV headers: one for the
+ * array of nlo_configured_parameters (nlo_list) and one for the
+ * array of u32 channel_list
+ */
+ len = sizeof(*tlv) + sizeof(*cmd) +
+ sizeof(*tlv) +
+ sizeof(*tlv);
+
+ len += sizeof(u32) * min_t(u8, pno->a_networks[0].channel_count,
+ WMI_NLO_MAX_CHAN);
+ len += sizeof(struct nlo_configured_parameters) *
+ min_t(u8, pno->uc_networks_count, WMI_NLO_MAX_SSIDS);
+
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = (void *)skb->data;
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_NLO_CONFIG_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+
+ /* wmi_tlv_wow_nlo_config_cmd parameters */
+ cmd->vdev_id = __cpu_to_le32(pno->vdev_id);
+ cmd->flags = __cpu_to_le32(WMI_NLO_CONFIG_START | WMI_NLO_CONFIG_SSID_HIDE_EN);
+
+ /* current FW does not support min-max range for dwell time */
+ cmd->active_dwell_time = __cpu_to_le32(pno->active_max_time);
+ cmd->passive_dwell_time = __cpu_to_le32(pno->passive_max_time);
+
+ if (pno->do_passive_scan)
+ cmd->flags |= __cpu_to_le32(WMI_NLO_CONFIG_SCAN_PASSIVE);
+
+ /* copy scan interval */
+ cmd->fast_scan_period = __cpu_to_le32(pno->fast_scan_period);
+ cmd->slow_scan_period = __cpu_to_le32(pno->slow_scan_period);
+ cmd->fast_scan_max_cycles = __cpu_to_le32(pno->fast_scan_max_cycles);
+ cmd->delay_start_time = __cpu_to_le32(pno->delay_start_time);
+
+ if (pno->enable_pno_scan_randomization) {
+ cmd->flags |= __cpu_to_le32(WMI_NLO_CONFIG_SPOOFED_MAC_IN_PROBE_REQ |
+ WMI_NLO_CONFIG_RANDOM_SEQ_NO_IN_PROBE_REQ);
+ ether_addr_copy(cmd->mac_addr.addr, pno->mac_addr);
+ ether_addr_copy(cmd->mac_mask.addr, pno->mac_addr_mask);
+ }
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*cmd);
+
+ /* nlo_configured_parameters(nlo_list) */
+ cmd->no_of_ssids = __cpu_to_le32(min_t(u8, pno->uc_networks_count,
+ WMI_NLO_MAX_SSIDS));
+ tlv_len = __le32_to_cpu(cmd->no_of_ssids) *
+ sizeof(struct nlo_configured_parameters);
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
+ tlv->len = __cpu_to_le16(tlv_len);
+
+ ptr += sizeof(*tlv);
+ nlo_list = ptr;
+ for (i = 0; i < __le32_to_cpu(cmd->no_of_ssids); i++) {
+ tlv = (struct wmi_tlv *)(&nlo_list[i].tlv_header);
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
+ tlv->len = __cpu_to_le16(sizeof(struct nlo_configured_parameters) -
+ sizeof(*tlv));
+
+ /* copy the SSID and its length */
+ nlo_list[i].ssid.valid = __cpu_to_le32(true);
+ nlo_list[i].ssid.ssid.ssid_len = pno->a_networks[i].ssid.ssid_len;
+ memcpy(nlo_list[i].ssid.ssid.ssid,
+ pno->a_networks[i].ssid.ssid,
+ __le32_to_cpu(nlo_list[i].ssid.ssid.ssid_len));
+
+ /* copy rssi threshold */
+ if (pno->a_networks[i].rssi_threshold &&
+ pno->a_networks[i].rssi_threshold > -300) {
+ nlo_list[i].rssi_cond.valid = __cpu_to_le32(true);
+ nlo_list[i].rssi_cond.rssi =
+ __cpu_to_le32(pno->a_networks[i].rssi_threshold);
+ }
+
+ nlo_list[i].bcast_nw_type.valid = __cpu_to_le32(true);
+ nlo_list[i].bcast_nw_type.bcast_nw_type =
+ __cpu_to_le32(pno->a_networks[i].bcast_nw_type);
+ }
+
+ ptr += __le32_to_cpu(cmd->no_of_ssids) * sizeof(struct nlo_configured_parameters);
+
+ /* copy channel info */
+ cmd->num_of_channels = __cpu_to_le32(min_t(u8,
+ pno->a_networks[0].channel_count,
+ WMI_NLO_MAX_CHAN));
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
+ tlv->len = __cpu_to_le16(__le32_to_cpu(cmd->num_of_channels) *
+ sizeof(u32));
+ ptr += sizeof(*tlv);
+
+ channel_list = (__le32 *)ptr;
+ for (i = 0; i < __le32_to_cpu(cmd->num_of_channels); i++)
+ channel_list[i] = __cpu_to_le32(pno->a_networks[0].channels[i]);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv start pno config vdev_id %d\n",
+ vdev_id);
+
+ return skb;
+}
+
+/* Request FW to stop ongoing PNO operation */
+static struct sk_buff *ath10k_wmi_tlv_op_gen_config_pno_stop(struct ath10k *ar,
+ u32 vdev_id)
+{
+ struct wmi_tlv_wow_nlo_config_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ void *ptr;
+ size_t len;
+
+ /* header TLV + cmd, plus two placeholder TLV headers: one for the
+ * array of nlo_configured_parameters (nlo_list) and one for the
+ * array of u32 channel_list
+ */
+ len = sizeof(*tlv) + sizeof(*cmd) +
+ sizeof(*tlv) +
+ sizeof(*tlv);
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = (void *)skb->data;
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_NLO_CONFIG_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->flags = __cpu_to_le32(WMI_NLO_CONFIG_STOP);
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*cmd);
+
+ /* nlo_configured_parameters(nlo_list) */
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
+ tlv->len = __cpu_to_le16(0);
+
+ ptr += sizeof(*tlv);
+
+ /* channel list */
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
+ tlv->len = __cpu_to_le16(0);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv stop pno config vdev_id %d\n", vdev_id);
+ return skb;
+}
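+
+/* Even the stop request carries both array TLVs, just with zero length: the
+ * firmware parser apparently expects the full NLO_CONFIG_CMD TLV sequence
+ * regardless of whether the flags say start or stop.
+ */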
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_config_pno(struct ath10k *ar, u32 vdev_id,
+ struct wmi_pno_scan_req *pno_scan)
+{
+ if (pno_scan->enable)
+ return ath10k_wmi_tlv_op_gen_config_pno_start(ar, vdev_id, pno_scan);
+ else
+ return ath10k_wmi_tlv_op_gen_config_pno_stop(ar, vdev_id);
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_adaptive_qcs(struct ath10k *ar, bool enable)
+{
+ struct wmi_tlv_adaptive_qcs *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ void *ptr;
+ size_t len;
+
+ len = sizeof(*tlv) + sizeof(*cmd);
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = (void *)skb->data;
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_RESMGR_ADAPTIVE_OCS_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->enable = __cpu_to_le32(enable ? 1 : 0);
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*cmd);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv adaptive qcs %d\n", enable);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_echo(struct ath10k *ar, u32 value)
+{
+ struct wmi_echo_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ void *ptr;
+ size_t len;
+
+ len = sizeof(*tlv) + sizeof(*cmd);
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = (void *)skb->data;
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_ECHO_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->value = cpu_to_le32(value);
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*cmd);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv echo value 0x%08x\n", value);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_vdev_spectral_conf(struct ath10k *ar,
+ const struct wmi_vdev_spectral_conf_arg *arg)
+{
+ struct wmi_vdev_spectral_conf_cmd *cmd;
+ struct sk_buff *skb;
+ struct wmi_tlv *tlv;
+ void *ptr;
+ size_t len;
+
+ len = sizeof(*tlv) + sizeof(*cmd);
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = (void *)skb->data;
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_SPECTRAL_CONFIGURE_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
+ cmd->scan_count = __cpu_to_le32(arg->scan_count);
+ cmd->scan_period = __cpu_to_le32(arg->scan_period);
+ cmd->scan_priority = __cpu_to_le32(arg->scan_priority);
+ cmd->scan_fft_size = __cpu_to_le32(arg->scan_fft_size);
+ cmd->scan_gc_ena = __cpu_to_le32(arg->scan_gc_ena);
+ cmd->scan_restart_ena = __cpu_to_le32(arg->scan_restart_ena);
+ cmd->scan_noise_floor_ref = __cpu_to_le32(arg->scan_noise_floor_ref);
+ cmd->scan_init_delay = __cpu_to_le32(arg->scan_init_delay);
+ cmd->scan_nb_tone_thr = __cpu_to_le32(arg->scan_nb_tone_thr);
+ cmd->scan_str_bin_thr = __cpu_to_le32(arg->scan_str_bin_thr);
+ cmd->scan_wb_rpt_mode = __cpu_to_le32(arg->scan_wb_rpt_mode);
+ cmd->scan_rssi_rpt_mode = __cpu_to_le32(arg->scan_rssi_rpt_mode);
+ cmd->scan_rssi_thr = __cpu_to_le32(arg->scan_rssi_thr);
+ cmd->scan_pwr_format = __cpu_to_le32(arg->scan_pwr_format);
+ cmd->scan_rpt_mode = __cpu_to_le32(arg->scan_rpt_mode);
+ cmd->scan_bin_scale = __cpu_to_le32(arg->scan_bin_scale);
+ cmd->scan_dbm_adj = __cpu_to_le32(arg->scan_dbm_adj);
+ cmd->scan_chn_mask = __cpu_to_le32(arg->scan_chn_mask);
+
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id,
+ u32 trigger, u32 enable)
+{
+ struct wmi_vdev_spectral_enable_cmd *cmd;
+ struct sk_buff *skb;
+ struct wmi_tlv *tlv;
+ void *ptr;
+ size_t len;
+
+ len = sizeof(*tlv) + sizeof(*cmd);
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = (void *)skb->data;
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_SPECTRAL_ENABLE_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->trigger_cmd = __cpu_to_le32(trigger);
+ cmd->enable_cmd = __cpu_to_le32(enable);
+
+ return skb;
+}
+
+/****************/
+/* TLV mappings */
+/****************/
+
+static struct wmi_cmd_map wmi_tlv_cmd_map = {
+ .init_cmdid = WMI_TLV_INIT_CMDID,
+ .start_scan_cmdid = WMI_TLV_START_SCAN_CMDID,
+ .stop_scan_cmdid = WMI_TLV_STOP_SCAN_CMDID,
+ .scan_chan_list_cmdid = WMI_TLV_SCAN_CHAN_LIST_CMDID,
+ .scan_sch_prio_tbl_cmdid = WMI_TLV_SCAN_SCH_PRIO_TBL_CMDID,
+ .scan_prob_req_oui_cmdid = WMI_TLV_SCAN_PROB_REQ_OUI_CMDID,
+ .pdev_set_regdomain_cmdid = WMI_TLV_PDEV_SET_REGDOMAIN_CMDID,
+ .pdev_set_channel_cmdid = WMI_TLV_PDEV_SET_CHANNEL_CMDID,
+ .pdev_set_param_cmdid = WMI_TLV_PDEV_SET_PARAM_CMDID,
+ .pdev_pktlog_enable_cmdid = WMI_TLV_PDEV_PKTLOG_ENABLE_CMDID,
+ .pdev_pktlog_disable_cmdid = WMI_TLV_PDEV_PKTLOG_DISABLE_CMDID,
+ .pdev_set_wmm_params_cmdid = WMI_TLV_PDEV_SET_WMM_PARAMS_CMDID,
+ .pdev_set_ht_cap_ie_cmdid = WMI_TLV_PDEV_SET_HT_CAP_IE_CMDID,
+ .pdev_set_vht_cap_ie_cmdid = WMI_TLV_PDEV_SET_VHT_CAP_IE_CMDID,
+ .pdev_set_dscp_tid_map_cmdid = WMI_TLV_PDEV_SET_DSCP_TID_MAP_CMDID,
+ .pdev_set_quiet_mode_cmdid = WMI_TLV_PDEV_SET_QUIET_MODE_CMDID,
+ .pdev_green_ap_ps_enable_cmdid = WMI_TLV_PDEV_GREEN_AP_PS_ENABLE_CMDID,
+ .pdev_get_tpc_config_cmdid = WMI_TLV_PDEV_GET_TPC_CONFIG_CMDID,
+ .pdev_set_base_macaddr_cmdid = WMI_TLV_PDEV_SET_BASE_MACADDR_CMDID,
+ .vdev_create_cmdid = WMI_TLV_VDEV_CREATE_CMDID,
+ .vdev_delete_cmdid = WMI_TLV_VDEV_DELETE_CMDID,
+ .vdev_start_request_cmdid = WMI_TLV_VDEV_START_REQUEST_CMDID,
+ .vdev_restart_request_cmdid = WMI_TLV_VDEV_RESTART_REQUEST_CMDID,
+ .vdev_up_cmdid = WMI_TLV_VDEV_UP_CMDID,
+ .vdev_stop_cmdid = WMI_TLV_VDEV_STOP_CMDID,
+ .vdev_down_cmdid = WMI_TLV_VDEV_DOWN_CMDID,
+ .vdev_set_param_cmdid = WMI_TLV_VDEV_SET_PARAM_CMDID,
+ .vdev_install_key_cmdid = WMI_TLV_VDEV_INSTALL_KEY_CMDID,
+ .peer_create_cmdid = WMI_TLV_PEER_CREATE_CMDID,
+ .peer_delete_cmdid = WMI_TLV_PEER_DELETE_CMDID,
+ .peer_flush_tids_cmdid = WMI_TLV_PEER_FLUSH_TIDS_CMDID,
+ .peer_set_param_cmdid = WMI_TLV_PEER_SET_PARAM_CMDID,
+ .peer_assoc_cmdid = WMI_TLV_PEER_ASSOC_CMDID,
+ .peer_add_wds_entry_cmdid = WMI_TLV_PEER_ADD_WDS_ENTRY_CMDID,
+ .peer_remove_wds_entry_cmdid = WMI_TLV_PEER_REMOVE_WDS_ENTRY_CMDID,
+ .peer_mcast_group_cmdid = WMI_TLV_PEER_MCAST_GROUP_CMDID,
+ .bcn_tx_cmdid = WMI_TLV_BCN_TX_CMDID,
+ .pdev_send_bcn_cmdid = WMI_TLV_PDEV_SEND_BCN_CMDID,
+ .bcn_tmpl_cmdid = WMI_TLV_BCN_TMPL_CMDID,
+ .bcn_filter_rx_cmdid = WMI_TLV_BCN_FILTER_RX_CMDID,
+ .prb_req_filter_rx_cmdid = WMI_TLV_PRB_REQ_FILTER_RX_CMDID,
+ .mgmt_tx_cmdid = WMI_TLV_MGMT_TX_CMDID,
+ .mgmt_tx_send_cmdid = WMI_TLV_MGMT_TX_SEND_CMD,
+ .prb_tmpl_cmdid = WMI_TLV_PRB_TMPL_CMDID,
+ .addba_clear_resp_cmdid = WMI_TLV_ADDBA_CLEAR_RESP_CMDID,
+ .addba_send_cmdid = WMI_TLV_ADDBA_SEND_CMDID,
+ .addba_status_cmdid = WMI_TLV_ADDBA_STATUS_CMDID,
+ .delba_send_cmdid = WMI_TLV_DELBA_SEND_CMDID,
+ .addba_set_resp_cmdid = WMI_TLV_ADDBA_SET_RESP_CMDID,
+ .send_singleamsdu_cmdid = WMI_TLV_SEND_SINGLEAMSDU_CMDID,
+ .sta_powersave_mode_cmdid = WMI_TLV_STA_POWERSAVE_MODE_CMDID,
+ .sta_powersave_param_cmdid = WMI_TLV_STA_POWERSAVE_PARAM_CMDID,
+ .sta_mimo_ps_mode_cmdid = WMI_TLV_STA_MIMO_PS_MODE_CMDID,
+ .pdev_dfs_enable_cmdid = WMI_TLV_PDEV_DFS_ENABLE_CMDID,
+ .pdev_dfs_disable_cmdid = WMI_TLV_PDEV_DFS_DISABLE_CMDID,
+ .roam_scan_mode = WMI_TLV_ROAM_SCAN_MODE,
+ .roam_scan_rssi_threshold = WMI_TLV_ROAM_SCAN_RSSI_THRESHOLD,
+ .roam_scan_period = WMI_TLV_ROAM_SCAN_PERIOD,
+ .roam_scan_rssi_change_threshold =
+ WMI_TLV_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
+ .roam_ap_profile = WMI_TLV_ROAM_AP_PROFILE,
+ .ofl_scan_add_ap_profile = WMI_TLV_ROAM_AP_PROFILE,
+ .ofl_scan_remove_ap_profile = WMI_TLV_OFL_SCAN_REMOVE_AP_PROFILE,
+ .ofl_scan_period = WMI_TLV_OFL_SCAN_PERIOD,
+ .p2p_dev_set_device_info = WMI_TLV_P2P_DEV_SET_DEVICE_INFO,
+ .p2p_dev_set_discoverability = WMI_TLV_P2P_DEV_SET_DISCOVERABILITY,
+ .p2p_go_set_beacon_ie = WMI_TLV_P2P_GO_SET_BEACON_IE,
+ .p2p_go_set_probe_resp_ie = WMI_TLV_P2P_GO_SET_PROBE_RESP_IE,
+ .p2p_set_vendor_ie_data_cmdid = WMI_TLV_P2P_SET_VENDOR_IE_DATA_CMDID,
+ .ap_ps_peer_param_cmdid = WMI_TLV_AP_PS_PEER_PARAM_CMDID,
+ .ap_ps_peer_uapsd_coex_cmdid = WMI_TLV_AP_PS_PEER_UAPSD_COEX_CMDID,
+ .peer_rate_retry_sched_cmdid = WMI_TLV_PEER_RATE_RETRY_SCHED_CMDID,
+ .wlan_profile_trigger_cmdid = WMI_TLV_WLAN_PROFILE_TRIGGER_CMDID,
+ .wlan_profile_set_hist_intvl_cmdid =
+ WMI_TLV_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
+ .wlan_profile_get_profile_data_cmdid =
+ WMI_TLV_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
+ .wlan_profile_enable_profile_id_cmdid =
+ WMI_TLV_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
+ .wlan_profile_list_profile_id_cmdid =
+ WMI_TLV_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
+ .pdev_suspend_cmdid = WMI_TLV_PDEV_SUSPEND_CMDID,
+ .pdev_resume_cmdid = WMI_TLV_PDEV_RESUME_CMDID,
+ .add_bcn_filter_cmdid = WMI_TLV_ADD_BCN_FILTER_CMDID,
+ .rmv_bcn_filter_cmdid = WMI_TLV_RMV_BCN_FILTER_CMDID,
+ .wow_add_wake_pattern_cmdid = WMI_TLV_WOW_ADD_WAKE_PATTERN_CMDID,
+ .wow_del_wake_pattern_cmdid = WMI_TLV_WOW_DEL_WAKE_PATTERN_CMDID,
+ .wow_enable_disable_wake_event_cmdid =
+ WMI_TLV_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
+ .wow_enable_cmdid = WMI_TLV_WOW_ENABLE_CMDID,
+ .wow_hostwakeup_from_sleep_cmdid =
+ WMI_TLV_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
+ .rtt_measreq_cmdid = WMI_TLV_RTT_MEASREQ_CMDID,
+ .rtt_tsf_cmdid = WMI_TLV_RTT_TSF_CMDID,
+ .vdev_spectral_scan_configure_cmdid = WMI_TLV_SPECTRAL_SCAN_CONF_CMDID,
+ .vdev_spectral_scan_enable_cmdid = WMI_TLV_SPECTRAL_SCAN_ENABLE_CMDID,
+ .request_stats_cmdid = WMI_TLV_REQUEST_STATS_CMDID,
+ .request_peer_stats_info_cmdid = WMI_TLV_REQUEST_PEER_STATS_INFO_CMDID,
+ .set_arp_ns_offload_cmdid = WMI_TLV_SET_ARP_NS_OFFLOAD_CMDID,
+ .network_list_offload_config_cmdid =
+ WMI_TLV_NETWORK_LIST_OFFLOAD_CONFIG_CMDID,
+ .gtk_offload_cmdid = WMI_TLV_GTK_OFFLOAD_CMDID,
+ .csa_offload_enable_cmdid = WMI_TLV_CSA_OFFLOAD_ENABLE_CMDID,
+ .csa_offload_chanswitch_cmdid = WMI_TLV_CSA_OFFLOAD_CHANSWITCH_CMDID,
+ .chatter_set_mode_cmdid = WMI_TLV_CHATTER_SET_MODE_CMDID,
+ .peer_tid_addba_cmdid = WMI_TLV_PEER_TID_ADDBA_CMDID,
+ .peer_tid_delba_cmdid = WMI_TLV_PEER_TID_DELBA_CMDID,
+ .sta_dtim_ps_method_cmdid = WMI_TLV_STA_DTIM_PS_METHOD_CMDID,
+ .sta_uapsd_auto_trig_cmdid = WMI_TLV_STA_UAPSD_AUTO_TRIG_CMDID,
+ .sta_keepalive_cmd = WMI_TLV_STA_KEEPALIVE_CMDID,
+ .echo_cmdid = WMI_TLV_ECHO_CMDID,
+ .pdev_utf_cmdid = WMI_TLV_PDEV_UTF_CMDID,
+ .dbglog_cfg_cmdid = WMI_TLV_DBGLOG_CFG_CMDID,
+ .pdev_qvit_cmdid = WMI_TLV_PDEV_QVIT_CMDID,
+ .pdev_ftm_intg_cmdid = WMI_TLV_PDEV_FTM_INTG_CMDID,
+ .vdev_set_keepalive_cmdid = WMI_TLV_VDEV_SET_KEEPALIVE_CMDID,
+ .vdev_get_keepalive_cmdid = WMI_TLV_VDEV_GET_KEEPALIVE_CMDID,
+ .force_fw_hang_cmdid = WMI_TLV_FORCE_FW_HANG_CMDID,
+ .gpio_config_cmdid = WMI_TLV_GPIO_CONFIG_CMDID,
+ .gpio_output_cmdid = WMI_TLV_GPIO_OUTPUT_CMDID,
+ .pdev_get_temperature_cmdid = WMI_TLV_PDEV_GET_TEMPERATURE_CMDID,
+ .vdev_set_wmm_params_cmdid = WMI_TLV_VDEV_SET_WMM_PARAMS_CMDID,
+ .tdls_set_state_cmdid = WMI_TLV_TDLS_SET_STATE_CMDID,
+ .tdls_peer_update_cmdid = WMI_TLV_TDLS_PEER_UPDATE_CMDID,
+ .adaptive_qcs_cmdid = WMI_TLV_RESMGR_ADAPTIVE_OCS_CMDID,
+ .scan_update_request_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED,
+ .wlan_peer_caching_add_peer_cmdid = WMI_CMD_UNSUPPORTED,
+ .wlan_peer_caching_evict_peer_cmdid = WMI_CMD_UNSUPPORTED,
+ .wlan_peer_caching_restore_peer_cmdid = WMI_CMD_UNSUPPORTED,
+ .wlan_peer_caching_print_all_peers_info_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_update_wds_entry_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_add_proxy_sta_entry_cmdid = WMI_CMD_UNSUPPORTED,
+ .rtt_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
+ .oem_req_cmdid = WMI_CMD_UNSUPPORTED,
+ .nan_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_ratemask_cmdid = WMI_CMD_UNSUPPORTED,
+ .qboost_cfg_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_smart_ant_enable_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_smart_ant_set_rx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_smart_ant_set_tx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_smart_ant_set_train_info_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_smart_ant_set_node_config_ops_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_set_antenna_switch_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_set_ctl_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_set_mimogain_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_ratepwr_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_ratepwr_chainmsk_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_fips_cmdid = WMI_CMD_UNSUPPORTED,
+ .tt_set_conf_cmdid = WMI_CMD_UNSUPPORTED,
+ .fwtest_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_get_ani_cck_config_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED,
+};
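+
+/* Entries mapped to WMI_CMD_UNSUPPORTED (0) are not part of the TLV firmware
+ * interface; the common WMI send path is expected to reject such ids rather
+ * than put a bogus command on the wire.
+ */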
+
+static struct wmi_pdev_param_map wmi_tlv_pdev_param_map = {
+ .tx_chain_mask = WMI_TLV_PDEV_PARAM_TX_CHAIN_MASK,
+ .rx_chain_mask = WMI_TLV_PDEV_PARAM_RX_CHAIN_MASK,
+ .txpower_limit2g = WMI_TLV_PDEV_PARAM_TXPOWER_LIMIT2G,
+ .txpower_limit5g = WMI_TLV_PDEV_PARAM_TXPOWER_LIMIT5G,
+ .txpower_scale = WMI_TLV_PDEV_PARAM_TXPOWER_SCALE,
+ .beacon_gen_mode = WMI_TLV_PDEV_PARAM_BEACON_GEN_MODE,
+ .beacon_tx_mode = WMI_TLV_PDEV_PARAM_BEACON_TX_MODE,
+ .resmgr_offchan_mode = WMI_TLV_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
+ .protection_mode = WMI_TLV_PDEV_PARAM_PROTECTION_MODE,
+ .dynamic_bw = WMI_TLV_PDEV_PARAM_DYNAMIC_BW,
+ .non_agg_sw_retry_th = WMI_TLV_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
+ .agg_sw_retry_th = WMI_TLV_PDEV_PARAM_AGG_SW_RETRY_TH,
+ .sta_kickout_th = WMI_TLV_PDEV_PARAM_STA_KICKOUT_TH,
+ .ac_aggrsize_scaling = WMI_TLV_PDEV_PARAM_AC_AGGRSIZE_SCALING,
+ .ltr_enable = WMI_TLV_PDEV_PARAM_LTR_ENABLE,
+ .ltr_ac_latency_be = WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_BE,
+ .ltr_ac_latency_bk = WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_BK,
+ .ltr_ac_latency_vi = WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_VI,
+ .ltr_ac_latency_vo = WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_VO,
+ .ltr_ac_latency_timeout = WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
+ .ltr_sleep_override = WMI_TLV_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
+ .ltr_rx_override = WMI_TLV_PDEV_PARAM_LTR_RX_OVERRIDE,
+ .ltr_tx_activity_timeout = WMI_TLV_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
+ .l1ss_enable = WMI_TLV_PDEV_PARAM_L1SS_ENABLE,
+ .dsleep_enable = WMI_TLV_PDEV_PARAM_DSLEEP_ENABLE,
+ .pcielp_txbuf_flush = WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_FLUSH,
+ .pcielp_txbuf_watermark = WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
+ .pcielp_txbuf_tmo_en = WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
+ .pcielp_txbuf_tmo_value = WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE,
+ .pdev_stats_update_period = WMI_TLV_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
+ .vdev_stats_update_period = WMI_TLV_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
+ .peer_stats_update_period = WMI_TLV_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
+ .bcnflt_stats_update_period =
+ WMI_TLV_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
+ .pmf_qos = WMI_TLV_PDEV_PARAM_PMF_QOS,
+ .arp_ac_override = WMI_TLV_PDEV_PARAM_ARP_AC_OVERRIDE,
+ .dcs = WMI_TLV_PDEV_PARAM_DCS,
+ .ani_enable = WMI_TLV_PDEV_PARAM_ANI_ENABLE,
+ .ani_poll_period = WMI_TLV_PDEV_PARAM_ANI_POLL_PERIOD,
+ .ani_listen_period = WMI_TLV_PDEV_PARAM_ANI_LISTEN_PERIOD,
+ .ani_ofdm_level = WMI_TLV_PDEV_PARAM_ANI_OFDM_LEVEL,
+ .ani_cck_level = WMI_TLV_PDEV_PARAM_ANI_CCK_LEVEL,
+ .dyntxchain = WMI_TLV_PDEV_PARAM_DYNTXCHAIN,
+ .proxy_sta = WMI_TLV_PDEV_PARAM_PROXY_STA,
+ .idle_ps_config = WMI_TLV_PDEV_PARAM_IDLE_PS_CONFIG,
+ .power_gating_sleep = WMI_TLV_PDEV_PARAM_POWER_GATING_SLEEP,
+ .fast_channel_reset = WMI_TLV_PDEV_PARAM_UNSUPPORTED,
+ .burst_dur = WMI_TLV_PDEV_PARAM_BURST_DUR,
+ .burst_enable = WMI_TLV_PDEV_PARAM_BURST_ENABLE,
+ .cal_period = WMI_PDEV_PARAM_UNSUPPORTED,
+ .aggr_burst = WMI_PDEV_PARAM_UNSUPPORTED,
+ .rx_decap_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+ .smart_antenna_default_antenna = WMI_PDEV_PARAM_UNSUPPORTED,
+ .igmpmld_override = WMI_PDEV_PARAM_UNSUPPORTED,
+ .igmpmld_tid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .antenna_gain = WMI_PDEV_PARAM_UNSUPPORTED,
+ .rx_filter = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_mcast_to_ucast_tid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .proxy_sta_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_mcast2ucast_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
+ .remove_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
+ .peer_sta_ps_statechg_enable = WMI_PDEV_PARAM_UNSUPPORTED,
+ .igmpmld_ac_override = WMI_PDEV_PARAM_UNSUPPORTED,
+ .block_interbss = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_disable_reset_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_msdu_ttl_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_ppdu_duration_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .txbf_sound_period_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_promisc_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_burst_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .en_stats = WMI_PDEV_PARAM_UNSUPPORTED,
+ .mu_group_policy = WMI_PDEV_PARAM_UNSUPPORTED,
+ .noise_detection = WMI_PDEV_PARAM_UNSUPPORTED,
+ .noise_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
+ .dpd_enable = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_mcast_bcast_echo = WMI_PDEV_PARAM_UNSUPPORTED,
+ .atf_strict_sch = WMI_PDEV_PARAM_UNSUPPORTED,
+ .atf_sched_duration = WMI_PDEV_PARAM_UNSUPPORTED,
+ .ant_plzn = WMI_PDEV_PARAM_UNSUPPORTED,
+ .mgmt_retry_limit = WMI_PDEV_PARAM_UNSUPPORTED,
+ .sensitivity_level = WMI_PDEV_PARAM_UNSUPPORTED,
+ .signed_txpower_2g = WMI_PDEV_PARAM_UNSUPPORTED,
+ .signed_txpower_5g = WMI_PDEV_PARAM_UNSUPPORTED,
+ .enable_per_tid_amsdu = WMI_PDEV_PARAM_UNSUPPORTED,
+ .enable_per_tid_ampdu = WMI_PDEV_PARAM_UNSUPPORTED,
+ .cca_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
+ .rts_fixed_rate = WMI_PDEV_PARAM_UNSUPPORTED,
+ .pdev_reset = WMI_PDEV_PARAM_UNSUPPORTED,
+ .wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED,
+ .arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED,
+ .arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED,
+ .rfkill_config = WMI_TLV_PDEV_PARAM_HW_RFKILL_CONFIG,
+ .rfkill_enable = WMI_TLV_PDEV_PARAM_RFKILL_ENABLE,
+ .peer_stats_info_enable = WMI_TLV_PDEV_PARAM_PEER_STATS_INFO_ENABLE,
+};
+
+static struct wmi_peer_param_map wmi_tlv_peer_param_map = {
+ .smps_state = WMI_TLV_PEER_SMPS_STATE,
+ .ampdu = WMI_TLV_PEER_AMPDU,
+ .authorize = WMI_TLV_PEER_AUTHORIZE,
+ .chan_width = WMI_TLV_PEER_CHAN_WIDTH,
+ .nss = WMI_TLV_PEER_NSS,
+ .use_4addr = WMI_TLV_PEER_USE_4ADDR,
+ .membership = WMI_TLV_PEER_MEMBERSHIP,
+ .user_pos = WMI_TLV_PEER_USERPOS,
+ .crit_proto_hint_enabled = WMI_TLV_PEER_CRIT_PROTO_HINT_ENABLED,
+ .tx_fail_cnt_thr = WMI_TLV_PEER_TX_FAIL_CNT_THR,
+ .set_hw_retry_cts2s = WMI_TLV_PEER_SET_HW_RETRY_CTS2S,
+ .ibss_atim_win_len = WMI_TLV_PEER_IBSS_ATIM_WINDOW_LENGTH,
+ .phymode = WMI_TLV_PEER_PHYMODE,
+ .use_fixed_power = WMI_TLV_PEER_USE_FIXED_PWR,
+ .dummy_var = WMI_TLV_PEER_DUMMY_VAR,
+};
+
+static struct wmi_vdev_param_map wmi_tlv_vdev_param_map = {
+ .rts_threshold = WMI_TLV_VDEV_PARAM_RTS_THRESHOLD,
+ .fragmentation_threshold = WMI_TLV_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
+ .beacon_interval = WMI_TLV_VDEV_PARAM_BEACON_INTERVAL,
+ .listen_interval = WMI_TLV_VDEV_PARAM_LISTEN_INTERVAL,
+ .multicast_rate = WMI_TLV_VDEV_PARAM_MULTICAST_RATE,
+ .mgmt_tx_rate = WMI_TLV_VDEV_PARAM_MGMT_TX_RATE,
+ .slot_time = WMI_TLV_VDEV_PARAM_SLOT_TIME,
+ .preamble = WMI_TLV_VDEV_PARAM_PREAMBLE,
+ .swba_time = WMI_TLV_VDEV_PARAM_SWBA_TIME,
+ .wmi_vdev_stats_update_period = WMI_TLV_VDEV_STATS_UPDATE_PERIOD,
+ .wmi_vdev_pwrsave_ageout_time = WMI_TLV_VDEV_PWRSAVE_AGEOUT_TIME,
+ .wmi_vdev_host_swba_interval = WMI_TLV_VDEV_HOST_SWBA_INTERVAL,
+ .dtim_period = WMI_TLV_VDEV_PARAM_DTIM_PERIOD,
+ .wmi_vdev_oc_scheduler_air_time_limit =
+ WMI_TLV_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
+ .wds = WMI_TLV_VDEV_PARAM_WDS,
+ .atim_window = WMI_TLV_VDEV_PARAM_ATIM_WINDOW,
+ .bmiss_count_max = WMI_TLV_VDEV_PARAM_BMISS_COUNT_MAX,
+ .bmiss_first_bcnt = WMI_TLV_VDEV_PARAM_BMISS_FIRST_BCNT,
+ .bmiss_final_bcnt = WMI_TLV_VDEV_PARAM_BMISS_FINAL_BCNT,
+ .feature_wmm = WMI_TLV_VDEV_PARAM_FEATURE_WMM,
+ .chwidth = WMI_TLV_VDEV_PARAM_CHWIDTH,
+ .chextoffset = WMI_TLV_VDEV_PARAM_CHEXTOFFSET,
+ .disable_htprotection = WMI_TLV_VDEV_PARAM_DISABLE_HTPROTECTION,
+ .sta_quickkickout = WMI_TLV_VDEV_PARAM_STA_QUICKKICKOUT,
+ .mgmt_rate = WMI_TLV_VDEV_PARAM_MGMT_RATE,
+ .protection_mode = WMI_TLV_VDEV_PARAM_PROTECTION_MODE,
+ .fixed_rate = WMI_TLV_VDEV_PARAM_FIXED_RATE,
+ .sgi = WMI_TLV_VDEV_PARAM_SGI,
+ .ldpc = WMI_TLV_VDEV_PARAM_LDPC,
+ .tx_stbc = WMI_TLV_VDEV_PARAM_TX_STBC,
+ .rx_stbc = WMI_TLV_VDEV_PARAM_RX_STBC,
+ .intra_bss_fwd = WMI_TLV_VDEV_PARAM_INTRA_BSS_FWD,
+ .def_keyid = WMI_TLV_VDEV_PARAM_DEF_KEYID,
+ .nss = WMI_TLV_VDEV_PARAM_NSS,
+ .bcast_data_rate = WMI_TLV_VDEV_PARAM_BCAST_DATA_RATE,
+ .mcast_data_rate = WMI_TLV_VDEV_PARAM_MCAST_DATA_RATE,
+ .mcast_indicate = WMI_TLV_VDEV_PARAM_MCAST_INDICATE,
+ .dhcp_indicate = WMI_TLV_VDEV_PARAM_DHCP_INDICATE,
+ .unknown_dest_indicate = WMI_TLV_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
+ .ap_keepalive_min_idle_inactive_time_secs =
+ WMI_TLV_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
+ .ap_keepalive_max_idle_inactive_time_secs =
+ WMI_TLV_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
+ .ap_keepalive_max_unresponsive_time_secs =
+ WMI_TLV_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
+ .ap_enable_nawds = WMI_TLV_VDEV_PARAM_AP_ENABLE_NAWDS,
+ .mcast2ucast_set = WMI_TLV_VDEV_PARAM_UNSUPPORTED,
+ .enable_rtscts = WMI_TLV_VDEV_PARAM_ENABLE_RTSCTS,
+ .txbf = WMI_TLV_VDEV_PARAM_TXBF,
+ .packet_powersave = WMI_TLV_VDEV_PARAM_PACKET_POWERSAVE,
+ .drop_unencry = WMI_TLV_VDEV_PARAM_DROP_UNENCRY,
+ .tx_encap_type = WMI_TLV_VDEV_PARAM_TX_ENCAP_TYPE,
+ .ap_detect_out_of_sync_sleeping_sta_time_secs =
+ WMI_TLV_VDEV_PARAM_UNSUPPORTED,
+ .rc_num_retries = WMI_VDEV_PARAM_UNSUPPORTED,
+ .cabq_maxdur = WMI_VDEV_PARAM_UNSUPPORTED,
+ .mfptest_set = WMI_VDEV_PARAM_UNSUPPORTED,
+ .rts_fixed_rate = WMI_VDEV_PARAM_UNSUPPORTED,
+ .vht_sgimask = WMI_VDEV_PARAM_UNSUPPORTED,
+ .vht80_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
+ .early_rx_adjust_enable = WMI_VDEV_PARAM_UNSUPPORTED,
+ .early_rx_tgt_bmiss_num = WMI_VDEV_PARAM_UNSUPPORTED,
+ .early_rx_bmiss_sample_cycle = WMI_VDEV_PARAM_UNSUPPORTED,
+ .early_rx_slop_step = WMI_VDEV_PARAM_UNSUPPORTED,
+ .early_rx_init_slop = WMI_VDEV_PARAM_UNSUPPORTED,
+ .early_rx_adjust_pause = WMI_VDEV_PARAM_UNSUPPORTED,
+ .proxy_sta = WMI_VDEV_PARAM_UNSUPPORTED,
+ .meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
+ .rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
+ .bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
+};
+
+static const struct wmi_ops wmi_tlv_ops = {
+ .rx = ath10k_wmi_tlv_op_rx,
+ .map_svc = wmi_tlv_svc_map,
+ .map_svc_ext = wmi_tlv_svc_map_ext,
+
+ .pull_scan = ath10k_wmi_tlv_op_pull_scan_ev,
+ .pull_mgmt_rx = ath10k_wmi_tlv_op_pull_mgmt_rx_ev,
+ .pull_mgmt_tx_compl = ath10k_wmi_tlv_op_pull_mgmt_tx_compl_ev,
+ .pull_mgmt_tx_bundle_compl = ath10k_wmi_tlv_op_pull_mgmt_tx_bundle_compl_ev,
+ .pull_ch_info = ath10k_wmi_tlv_op_pull_ch_info_ev,
+ .pull_vdev_start = ath10k_wmi_tlv_op_pull_vdev_start_ev,
+ .pull_peer_kick = ath10k_wmi_tlv_op_pull_peer_kick_ev,
+ .pull_swba = ath10k_wmi_tlv_op_pull_swba_ev,
+ .pull_phyerr_hdr = ath10k_wmi_tlv_op_pull_phyerr_ev_hdr,
+ .pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
+ .pull_svc_rdy = ath10k_wmi_tlv_op_pull_svc_rdy_ev,
+ .pull_rdy = ath10k_wmi_tlv_op_pull_rdy_ev,
+ .pull_svc_avail = ath10k_wmi_tlv_op_pull_svc_avail,
+ .pull_fw_stats = ath10k_wmi_tlv_op_pull_fw_stats,
+ .pull_roam_ev = ath10k_wmi_tlv_op_pull_roam_ev,
+ .pull_wow_event = ath10k_wmi_tlv_op_pull_wow_ev,
+ .pull_echo_ev = ath10k_wmi_tlv_op_pull_echo_ev,
+ .get_txbf_conf_scheme = ath10k_wmi_tlv_txbf_conf_scheme,
+
+ .gen_pdev_suspend = ath10k_wmi_tlv_op_gen_pdev_suspend,
+ .gen_pdev_resume = ath10k_wmi_tlv_op_gen_pdev_resume,
+ .gen_pdev_set_rd = ath10k_wmi_tlv_op_gen_pdev_set_rd,
+ .gen_pdev_set_param = ath10k_wmi_tlv_op_gen_pdev_set_param,
+ .gen_init = ath10k_wmi_tlv_op_gen_init,
+ .gen_start_scan = ath10k_wmi_tlv_op_gen_start_scan,
+ .gen_stop_scan = ath10k_wmi_tlv_op_gen_stop_scan,
+ .gen_vdev_create = ath10k_wmi_tlv_op_gen_vdev_create,
+ .gen_vdev_delete = ath10k_wmi_tlv_op_gen_vdev_delete,
+ .gen_vdev_start = ath10k_wmi_tlv_op_gen_vdev_start,
+ .gen_vdev_stop = ath10k_wmi_tlv_op_gen_vdev_stop,
+ .gen_vdev_up = ath10k_wmi_tlv_op_gen_vdev_up,
+ .gen_vdev_down = ath10k_wmi_tlv_op_gen_vdev_down,
+ .gen_vdev_set_param = ath10k_wmi_tlv_op_gen_vdev_set_param,
+ .gen_vdev_install_key = ath10k_wmi_tlv_op_gen_vdev_install_key,
+ .gen_vdev_wmm_conf = ath10k_wmi_tlv_op_gen_vdev_wmm_conf,
+ .gen_peer_create = ath10k_wmi_tlv_op_gen_peer_create,
+ .gen_peer_delete = ath10k_wmi_tlv_op_gen_peer_delete,
+ .gen_peer_flush = ath10k_wmi_tlv_op_gen_peer_flush,
+ .gen_peer_set_param = ath10k_wmi_tlv_op_gen_peer_set_param,
+ .gen_peer_assoc = ath10k_wmi_tlv_op_gen_peer_assoc,
+ .gen_set_psmode = ath10k_wmi_tlv_op_gen_set_psmode,
+ .gen_set_sta_ps = ath10k_wmi_tlv_op_gen_set_sta_ps,
+ .gen_set_ap_ps = ath10k_wmi_tlv_op_gen_set_ap_ps,
+ .gen_scan_chan_list = ath10k_wmi_tlv_op_gen_scan_chan_list,
+ .gen_scan_prob_req_oui = ath10k_wmi_tlv_op_gen_scan_prob_req_oui,
+ .gen_beacon_dma = ath10k_wmi_tlv_op_gen_beacon_dma,
+ .gen_pdev_set_wmm = ath10k_wmi_tlv_op_gen_pdev_set_wmm,
+ .gen_request_stats = ath10k_wmi_tlv_op_gen_request_stats,
+ .gen_request_peer_stats_info = ath10k_wmi_tlv_op_gen_request_peer_stats_info,
+ .gen_force_fw_hang = ath10k_wmi_tlv_op_gen_force_fw_hang,
+ /* .gen_mgmt_tx = not implemented; HTT is used */
+ .gen_mgmt_tx_send = ath10k_wmi_tlv_op_gen_mgmt_tx_send,
+ .cleanup_mgmt_tx_send = ath10k_wmi_tlv_op_cleanup_mgmt_tx_send,
+ .gen_dbglog_cfg = ath10k_wmi_tlv_op_gen_dbglog_cfg,
+ .gen_pktlog_enable = ath10k_wmi_tlv_op_gen_pktlog_enable,
+ .gen_pktlog_disable = ath10k_wmi_tlv_op_gen_pktlog_disable,
+ .gen_pdev_set_quiet_mode = ath10k_wmi_tlv_op_gen_pdev_set_quiet_mode,
+ .gen_pdev_get_temperature = ath10k_wmi_tlv_op_gen_pdev_get_temperature,
+ /* .gen_addba_clear_resp not implemented */
+ /* .gen_addba_send not implemented */
+ /* .gen_addba_set_resp not implemented */
+ /* .gen_delba_send not implemented */
+ .gen_bcn_tmpl = ath10k_wmi_tlv_op_gen_bcn_tmpl,
+ .gen_prb_tmpl = ath10k_wmi_tlv_op_gen_prb_tmpl,
+ .gen_p2p_go_bcn_ie = ath10k_wmi_tlv_op_gen_p2p_go_bcn_ie,
+ .gen_vdev_sta_uapsd = ath10k_wmi_tlv_op_gen_vdev_sta_uapsd,
+ .gen_sta_keepalive = ath10k_wmi_tlv_op_gen_sta_keepalive,
+ .gen_wow_enable = ath10k_wmi_tlv_op_gen_wow_enable,
+ .gen_wow_add_wakeup_event = ath10k_wmi_tlv_op_gen_wow_add_wakeup_event,
+ .gen_wow_host_wakeup_ind = ath10k_wmi_tlv_gen_wow_host_wakeup_ind,
+ .gen_wow_add_pattern = ath10k_wmi_tlv_op_gen_wow_add_pattern,
+ .gen_wow_del_pattern = ath10k_wmi_tlv_op_gen_wow_del_pattern,
+ .gen_wow_config_pno = ath10k_wmi_tlv_op_gen_config_pno,
+ .gen_update_fw_tdls_state = ath10k_wmi_tlv_op_gen_update_fw_tdls_state,
+ .gen_tdls_peer_update = ath10k_wmi_tlv_op_gen_tdls_peer_update,
+ .gen_adaptive_qcs = ath10k_wmi_tlv_op_gen_adaptive_qcs,
+ .fw_stats_fill = ath10k_wmi_main_op_fw_stats_fill,
+ .get_vdev_subtype = ath10k_wmi_tlv_op_get_vdev_subtype,
+ .gen_echo = ath10k_wmi_tlv_op_gen_echo,
+ .gen_vdev_spectral_conf = ath10k_wmi_tlv_op_gen_vdev_spectral_conf,
+ .gen_vdev_spectral_enable = ath10k_wmi_tlv_op_gen_vdev_spectral_enable,
+ /* .gen_gpio_config not implemented */
+ /* .gen_gpio_output not implemented */
+};
+
+static const struct wmi_peer_flags_map wmi_tlv_peer_flags_map = {
+ .auth = WMI_TLV_PEER_AUTH,
+ .qos = WMI_TLV_PEER_QOS,
+ .need_ptk_4_way = WMI_TLV_PEER_NEED_PTK_4_WAY,
+ .need_gtk_2_way = WMI_TLV_PEER_NEED_GTK_2_WAY,
+ .apsd = WMI_TLV_PEER_APSD,
+ .ht = WMI_TLV_PEER_HT,
+ .bw40 = WMI_TLV_PEER_40MHZ,
+ .stbc = WMI_TLV_PEER_STBC,
+ .ldbc = WMI_TLV_PEER_LDPC,
+ .dyn_mimops = WMI_TLV_PEER_DYN_MIMOPS,
+ .static_mimops = WMI_TLV_PEER_STATIC_MIMOPS,
+ .spatial_mux = WMI_TLV_PEER_SPATIAL_MUX,
+ .vht = WMI_TLV_PEER_VHT,
+ .bw80 = WMI_TLV_PEER_80MHZ,
+ .pmf = WMI_TLV_PEER_PMF,
+ .bw160 = WMI_TLV_PEER_160MHZ,
+};
+
+/************/
+/* TLV init */
+/************/
+
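+/*
+ * Wire up the TLV-flavoured command, parameter, op and peer-flag tables.
+ * The core WMI attach path calls this when the firmware advertises the
+ * TLV variant of the WMI interface.
+ */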
+void ath10k_wmi_tlv_attach(struct ath10k *ar)
+{
+ ar->wmi.cmd = &wmi_tlv_cmd_map;
+ ar->wmi.vdev_param = &wmi_tlv_vdev_param_map;
+ ar->wmi.pdev_param = &wmi_tlv_pdev_param_map;
+ ar->wmi.peer_param = &wmi_tlv_peer_param_map;
+ ar->wmi.ops = &wmi_tlv_ops;
+ ar->wmi.peer_flags = &wmi_tlv_peer_flags_map;
+}
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.h b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
new file mode 100644
index 000000000000..8a2f87d0a3a3
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
@@ -0,0 +1,2683 @@
+/* SPDX-License-Identifier: ISC */
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+#ifndef _WMI_TLV_H
+#define _WMI_TLV_H
+
+#include <linux/bitops.h>
+
+#define WMI_TLV_CMD(grp_id) (((grp_id) << 12) | 0x1)
+#define WMI_TLV_EV(grp_id) (((grp_id) << 12) | 0x1)
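+/*
+ * Each group therefore owns a 0x1000-wide ID space. For example,
+ * WMI_TLV_CMD(WMI_TLV_GRP_SCAN) evaluates to (0x3 << 12) | 0x1 = 0x3001,
+ * so WMI_TLV_START_SCAN_CMDID below is 0x3001 and the enumerators that
+ * follow it count up from there (WMI_TLV_STOP_SCAN_CMDID == 0x3002, and
+ * so on). WMI_TLV_EV() applies the same encoding to event IDs.
+ */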
+#define WMI_TLV_CMD_UNSUPPORTED 0
+#define WMI_TLV_PDEV_PARAM_UNSUPPORTED 0
+#define WMI_TLV_VDEV_PARAM_UNSUPPORTED 0
+#define WMI_TLV_MGMT_TX_FRAME_MAX_LEN 64
+
+#define WMI_RSRC_CFG_FLAG_TX_ACK_RSSI BIT(18)
+
+enum wmi_tlv_grp_id {
+ WMI_TLV_GRP_START = 0x3,
+ WMI_TLV_GRP_SCAN = WMI_TLV_GRP_START,
+ WMI_TLV_GRP_PDEV,
+ WMI_TLV_GRP_VDEV,
+ WMI_TLV_GRP_PEER,
+ WMI_TLV_GRP_MGMT,
+ WMI_TLV_GRP_BA_NEG,
+ WMI_TLV_GRP_STA_PS,
+ WMI_TLV_GRP_DFS,
+ WMI_TLV_GRP_ROAM,
+ WMI_TLV_GRP_OFL_SCAN,
+ WMI_TLV_GRP_P2P,
+ WMI_TLV_GRP_AP_PS,
+ WMI_TLV_GRP_RATECTL,
+ WMI_TLV_GRP_PROFILE,
+ WMI_TLV_GRP_SUSPEND,
+ WMI_TLV_GRP_BCN_FILTER,
+ WMI_TLV_GRP_WOW,
+ WMI_TLV_GRP_RTT,
+ WMI_TLV_GRP_SPECTRAL,
+ WMI_TLV_GRP_STATS,
+ WMI_TLV_GRP_ARP_NS_OFL,
+ WMI_TLV_GRP_NLO_OFL,
+ WMI_TLV_GRP_GTK_OFL,
+ WMI_TLV_GRP_CSA_OFL,
+ WMI_TLV_GRP_CHATTER,
+ WMI_TLV_GRP_TID_ADDBA,
+ WMI_TLV_GRP_MISC,
+ WMI_TLV_GRP_GPIO,
+ WMI_TLV_GRP_FWTEST,
+ WMI_TLV_GRP_TDLS,
+ WMI_TLV_GRP_RESMGR,
+ WMI_TLV_GRP_STA_SMPS,
+ WMI_TLV_GRP_WLAN_HB,
+ WMI_TLV_GRP_RMC,
+ WMI_TLV_GRP_MHF_OFL,
+ WMI_TLV_GRP_LOCATION_SCAN,
+ WMI_TLV_GRP_OEM,
+ WMI_TLV_GRP_NAN,
+ WMI_TLV_GRP_COEX,
+ WMI_TLV_GRP_OBSS_OFL,
+ WMI_TLV_GRP_LPI,
+ WMI_TLV_GRP_EXTSCAN,
+ WMI_TLV_GRP_DHCP_OFL,
+ WMI_TLV_GRP_IPA,
+ WMI_TLV_GRP_MDNS_OFL,
+ WMI_TLV_GRP_SAP_OFL,
+};
+
+enum wmi_tlv_cmd_id {
+ WMI_TLV_INIT_CMDID = 0x1,
+ WMI_TLV_START_SCAN_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_SCAN),
+ WMI_TLV_STOP_SCAN_CMDID,
+ WMI_TLV_SCAN_CHAN_LIST_CMDID,
+ WMI_TLV_SCAN_SCH_PRIO_TBL_CMDID,
+ WMI_TLV_SCAN_UPDATE_REQUEST_CMDID,
+ WMI_TLV_SCAN_PROB_REQ_OUI_CMDID,
+ WMI_TLV_PDEV_SET_REGDOMAIN_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_PDEV),
+ WMI_TLV_PDEV_SET_CHANNEL_CMDID,
+ WMI_TLV_PDEV_SET_PARAM_CMDID,
+ WMI_TLV_PDEV_PKTLOG_ENABLE_CMDID,
+ WMI_TLV_PDEV_PKTLOG_DISABLE_CMDID,
+ WMI_TLV_PDEV_SET_WMM_PARAMS_CMDID,
+ WMI_TLV_PDEV_SET_HT_CAP_IE_CMDID,
+ WMI_TLV_PDEV_SET_VHT_CAP_IE_CMDID,
+ WMI_TLV_PDEV_SET_DSCP_TID_MAP_CMDID,
+ WMI_TLV_PDEV_SET_QUIET_MODE_CMDID,
+ WMI_TLV_PDEV_GREEN_AP_PS_ENABLE_CMDID,
+ WMI_TLV_PDEV_GET_TPC_CONFIG_CMDID,
+ WMI_TLV_PDEV_SET_BASE_MACADDR_CMDID,
+ WMI_TLV_PDEV_DUMP_CMDID,
+ WMI_TLV_PDEV_SET_LED_CONFIG_CMDID,
+ WMI_TLV_PDEV_GET_TEMPERATURE_CMDID,
+ WMI_TLV_PDEV_SET_LED_FLASHING_CMDID,
+ WMI_TLV_VDEV_CREATE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_VDEV),
+ WMI_TLV_VDEV_DELETE_CMDID,
+ WMI_TLV_VDEV_START_REQUEST_CMDID,
+ WMI_TLV_VDEV_RESTART_REQUEST_CMDID,
+ WMI_TLV_VDEV_UP_CMDID,
+ WMI_TLV_VDEV_STOP_CMDID,
+ WMI_TLV_VDEV_DOWN_CMDID,
+ WMI_TLV_VDEV_SET_PARAM_CMDID,
+ WMI_TLV_VDEV_INSTALL_KEY_CMDID,
+ WMI_TLV_VDEV_WNM_SLEEPMODE_CMDID,
+ WMI_TLV_VDEV_WMM_ADDTS_CMDID,
+ WMI_TLV_VDEV_WMM_DELTS_CMDID,
+ WMI_TLV_VDEV_SET_WMM_PARAMS_CMDID,
+ WMI_TLV_VDEV_SET_GTX_PARAMS_CMDID,
+ WMI_TLV_VDEV_IPSEC_NATKEEPALIVE_FILTER_CMDID,
+ WMI_TLV_VDEV_PLMREQ_START_CMDID,
+ WMI_TLV_VDEV_PLMREQ_STOP_CMDID,
+ WMI_TLV_PEER_CREATE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_PEER),
+ WMI_TLV_PEER_DELETE_CMDID,
+ WMI_TLV_PEER_FLUSH_TIDS_CMDID,
+ WMI_TLV_PEER_SET_PARAM_CMDID,
+ WMI_TLV_PEER_ASSOC_CMDID,
+ WMI_TLV_PEER_ADD_WDS_ENTRY_CMDID,
+ WMI_TLV_PEER_REMOVE_WDS_ENTRY_CMDID,
+ WMI_TLV_PEER_MCAST_GROUP_CMDID,
+ WMI_TLV_PEER_INFO_REQ_CMDID,
+ WMI_TLV_PEER_GET_ESTIMATED_LINKSPEED_CMDID,
+ WMI_TLV_BCN_TX_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_MGMT),
+ WMI_TLV_PDEV_SEND_BCN_CMDID,
+ WMI_TLV_BCN_TMPL_CMDID,
+ WMI_TLV_BCN_FILTER_RX_CMDID,
+ WMI_TLV_PRB_REQ_FILTER_RX_CMDID,
+ WMI_TLV_MGMT_TX_CMDID,
+ WMI_TLV_PRB_TMPL_CMDID,
+ WMI_TLV_MGMT_TX_SEND_CMD,
+ WMI_TLV_ADDBA_CLEAR_RESP_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_BA_NEG),
+ WMI_TLV_ADDBA_SEND_CMDID,
+ WMI_TLV_ADDBA_STATUS_CMDID,
+ WMI_TLV_DELBA_SEND_CMDID,
+ WMI_TLV_ADDBA_SET_RESP_CMDID,
+ WMI_TLV_SEND_SINGLEAMSDU_CMDID,
+ WMI_TLV_STA_POWERSAVE_MODE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_STA_PS),
+ WMI_TLV_STA_POWERSAVE_PARAM_CMDID,
+ WMI_TLV_STA_MIMO_PS_MODE_CMDID,
+ WMI_TLV_PDEV_DFS_ENABLE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_DFS),
+ WMI_TLV_PDEV_DFS_DISABLE_CMDID,
+ WMI_TLV_DFS_PHYERR_FILTER_ENA_CMDID,
+ WMI_TLV_DFS_PHYERR_FILTER_DIS_CMDID,
+ WMI_TLV_ROAM_SCAN_MODE = WMI_TLV_CMD(WMI_TLV_GRP_ROAM),
+ WMI_TLV_ROAM_SCAN_RSSI_THRESHOLD,
+ WMI_TLV_ROAM_SCAN_PERIOD,
+ WMI_TLV_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
+ WMI_TLV_ROAM_AP_PROFILE,
+ WMI_TLV_ROAM_CHAN_LIST,
+ WMI_TLV_ROAM_SCAN_CMD,
+ WMI_TLV_ROAM_SYNCH_COMPLETE,
+ WMI_TLV_ROAM_SET_RIC_REQUEST_CMDID,
+ WMI_TLV_ROAM_INVOKE_CMDID,
+ WMI_TLV_OFL_SCAN_ADD_AP_PROFILE = WMI_TLV_CMD(WMI_TLV_GRP_OFL_SCAN),
+ WMI_TLV_OFL_SCAN_REMOVE_AP_PROFILE,
+ WMI_TLV_OFL_SCAN_PERIOD,
+ WMI_TLV_P2P_DEV_SET_DEVICE_INFO = WMI_TLV_CMD(WMI_TLV_GRP_P2P),
+ WMI_TLV_P2P_DEV_SET_DISCOVERABILITY,
+ WMI_TLV_P2P_GO_SET_BEACON_IE,
+ WMI_TLV_P2P_GO_SET_PROBE_RESP_IE,
+ WMI_TLV_P2P_SET_VENDOR_IE_DATA_CMDID,
+ WMI_TLV_P2P_DISC_OFFLOAD_CONFIG_CMDID,
+ WMI_TLV_P2P_DISC_OFFLOAD_APPIE_CMDID,
+ WMI_TLV_P2P_DISC_OFFLOAD_PATTERN_CMDID,
+ WMI_TLV_P2P_SET_OPPPS_PARAM_CMDID,
+ WMI_TLV_AP_PS_PEER_PARAM_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_AP_PS),
+ WMI_TLV_AP_PS_PEER_UAPSD_COEX_CMDID,
+ WMI_TLV_PEER_RATE_RETRY_SCHED_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_RATECTL),
+ WMI_TLV_WLAN_PROFILE_TRIGGER_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_PROFILE),
+ WMI_TLV_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
+ WMI_TLV_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
+ WMI_TLV_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
+ WMI_TLV_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
+ WMI_TLV_PDEV_SUSPEND_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_SUSPEND),
+ WMI_TLV_PDEV_RESUME_CMDID,
+ WMI_TLV_ADD_BCN_FILTER_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_BCN_FILTER),
+ WMI_TLV_RMV_BCN_FILTER_CMDID,
+ WMI_TLV_WOW_ADD_WAKE_PATTERN_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_WOW),
+ WMI_TLV_WOW_DEL_WAKE_PATTERN_CMDID,
+ WMI_TLV_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
+ WMI_TLV_WOW_ENABLE_CMDID,
+ WMI_TLV_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
+ WMI_TLV_WOW_ACER_IOAC_ADD_KEEPALIVE_CMDID,
+ WMI_TLV_WOW_ACER_IOAC_DEL_KEEPALIVE_CMDID,
+ WMI_TLV_WOW_ACER_IOAC_ADD_WAKE_PATTERN_CMDID,
+ WMI_TLV_WOW_ACER_IOAC_DEL_WAKE_PATTERN_CMDID,
+ WMI_TLV_D0_WOW_ENABLE_DISABLE_CMDID,
+ WMI_TLV_EXTWOW_ENABLE_CMDID,
+ WMI_TLV_EXTWOW_SET_APP_TYPE1_PARAMS_CMDID,
+ WMI_TLV_EXTWOW_SET_APP_TYPE2_PARAMS_CMDID,
+ WMI_TLV_RTT_MEASREQ_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_RTT),
+ WMI_TLV_RTT_TSF_CMDID,
+ WMI_TLV_SPECTRAL_SCAN_CONF_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_SPECTRAL),
+ WMI_TLV_SPECTRAL_SCAN_ENABLE_CMDID,
+ WMI_TLV_REQUEST_STATS_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_STATS),
+ WMI_TLV_MCC_SCHED_TRAFFIC_STATS_CMDID,
+ WMI_TLV_REQUEST_STATS_EXT_CMDID,
+ WMI_TLV_REQUEST_LINK_STATS_CMDID,
+ WMI_TLV_START_LINK_STATS_CMDID,
+ WMI_TLV_CLEAR_LINK_STATS_CMDID,
+ WMI_TLV_CGET_FW_MEM_DUMP_CMDID,
+ WMI_TLV_CDEBUG_MESG_FLUSH_CMDID,
+ WMI_TLV_CDIAG_EVENT_LOG_CONFIG_CMDID,
+ WMI_TLV_CREQUEST_WLAN_STATS_CMDID,
+ WMI_TLV_CREQUEST_RCPI_CMDID,
+ WMI_TLV_REQUEST_PEER_STATS_INFO_CMDID,
+ WMI_TLV_SET_ARP_NS_OFFLOAD_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_ARP_NS_OFL),
+ WMI_TLV_ADD_PROACTIVE_ARP_RSP_PATTERN_CMDID,
+ WMI_TLV_DEL_PROACTIVE_ARP_RSP_PATTERN_CMDID,
+ WMI_TLV_NETWORK_LIST_OFFLOAD_CONFIG_CMDID =
+ WMI_TLV_CMD(WMI_TLV_GRP_NLO_OFL),
+ WMI_TLV_APFIND_CMDID,
+ WMI_TLV_GTK_OFFLOAD_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_GTK_OFL),
+ WMI_TLV_CSA_OFFLOAD_ENABLE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_CSA_OFL),
+ WMI_TLV_CSA_OFFLOAD_CHANSWITCH_CMDID,
+ WMI_TLV_CHATTER_SET_MODE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_CHATTER),
+ WMI_TLV_CHATTER_ADD_COALESCING_FILTER_CMDID,
+ WMI_TLV_CHATTER_DELETE_COALESCING_FILTER_CMDID,
+ WMI_TLV_CHATTER_COALESCING_QUERY_CMDID,
+ WMI_TLV_PEER_TID_ADDBA_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_TID_ADDBA),
+ WMI_TLV_PEER_TID_DELBA_CMDID,
+ WMI_TLV_STA_DTIM_PS_METHOD_CMDID,
+ WMI_TLV_STA_UAPSD_AUTO_TRIG_CMDID,
+ WMI_TLV_STA_KEEPALIVE_CMDID,
+ WMI_TLV_BA_REQ_SSN_CMDID,
+ WMI_TLV_ECHO_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_MISC),
+ WMI_TLV_PDEV_UTF_CMDID,
+ WMI_TLV_DBGLOG_CFG_CMDID,
+ WMI_TLV_PDEV_QVIT_CMDID,
+ WMI_TLV_PDEV_FTM_INTG_CMDID,
+ WMI_TLV_VDEV_SET_KEEPALIVE_CMDID,
+ WMI_TLV_VDEV_GET_KEEPALIVE_CMDID,
+ WMI_TLV_FORCE_FW_HANG_CMDID,
+ WMI_TLV_SET_MCASTBCAST_FILTER_CMDID,
+ WMI_TLV_THERMAL_MGMT_CMDID,
+ WMI_TLV_HOST_AUTO_SHUTDOWN_CFG_CMDID,
+ WMI_TLV_TPC_CHAINMASK_CONFIG_CMDID,
+ WMI_TLV_SET_ANTENNA_DIVERSITY_CMDID,
+ WMI_TLV_GPIO_CONFIG_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_GPIO),
+ WMI_TLV_GPIO_OUTPUT_CMDID,
+ WMI_TLV_TXBF_CMDID,
+ WMI_TLV_FWTEST_VDEV_MCC_SET_TBTT_MODE_CMDID =
+ WMI_TLV_CMD(WMI_TLV_GRP_FWTEST),
+ WMI_TLV_FWTEST_P2P_SET_NOA_PARAM_CMDID,
+ WMI_TLV_UNIT_TEST_CMDID,
+ WMI_TLV_TDLS_SET_STATE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_TDLS),
+ WMI_TLV_TDLS_PEER_UPDATE_CMDID,
+ WMI_TLV_TDLS_SET_OFFCHAN_MODE_CMDID,
+ WMI_TLV_RESMGR_ADAPTIVE_OCS_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_RESMGR),
+ WMI_TLV_RESMGR_SET_CHAN_TIME_QUOTA_CMDID,
+ WMI_TLV_RESMGR_SET_CHAN_LATENCY_CMDID,
+ WMI_TLV_STA_SMPS_FORCE_MODE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_STA_SMPS),
+ WMI_TLV_STA_SMPS_PARAM_CMDID,
+ WMI_TLV_HB_SET_ENABLE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_WLAN_HB),
+ WMI_TLV_HB_SET_TCP_PARAMS_CMDID,
+ WMI_TLV_HB_SET_TCP_PKT_FILTER_CMDID,
+ WMI_TLV_HB_SET_UDP_PARAMS_CMDID,
+ WMI_TLV_HB_SET_UDP_PKT_FILTER_CMDID,
+ WMI_TLV_RMC_SET_MODE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_RMC),
+ WMI_TLV_RMC_SET_ACTION_PERIOD_CMDID,
+ WMI_TLV_RMC_CONFIG_CMDID,
+ WMI_TLV_MHF_OFFLOAD_SET_MODE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_MHF_OFL),
+ WMI_TLV_MHF_OFFLOAD_PLUMB_ROUTING_TBL_CMDID,
+ WMI_TLV_BATCH_SCAN_ENABLE_CMDID =
+ WMI_TLV_CMD(WMI_TLV_GRP_LOCATION_SCAN),
+ WMI_TLV_BATCH_SCAN_DISABLE_CMDID,
+ WMI_TLV_BATCH_SCAN_TRIGGER_RESULT_CMDID,
+ WMI_TLV_OEM_REQ_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_OEM),
+ WMI_TLV_NAN_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_NAN),
+ WMI_TLV_MODEM_POWER_STATE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_COEX),
+ WMI_TLV_CHAN_AVOID_UPDATE_CMDID,
+ WMI_TLV_OBSS_SCAN_ENABLE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_OBSS_OFL),
+ WMI_TLV_OBSS_SCAN_DISABLE_CMDID,
+ WMI_TLV_LPI_MGMT_SNOOPING_CONFIG_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_LPI),
+ WMI_TLV_LPI_START_SCAN_CMDID,
+ WMI_TLV_LPI_STOP_SCAN_CMDID,
+ WMI_TLV_EXTSCAN_START_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_EXTSCAN),
+ WMI_TLV_EXTSCAN_STOP_CMDID,
+ WMI_TLV_EXTSCAN_CONFIGURE_WLAN_CHANGE_MONITOR_CMDID,
+ WMI_TLV_EXTSCAN_CONFIGURE_HOTLIST_MONITOR_CMDID,
+ WMI_TLV_EXTSCAN_GET_CACHED_RESULTS_CMDID,
+ WMI_TLV_EXTSCAN_GET_WLAN_CHANGE_RESULTS_CMDID,
+ WMI_TLV_EXTSCAN_SET_CAPABILITIES_CMDID,
+ WMI_TLV_EXTSCAN_GET_CAPABILITIES_CMDID,
+ WMI_TLV_SET_DHCP_SERVER_OFFLOAD_CMDID =
+ WMI_TLV_CMD(WMI_TLV_GRP_DHCP_OFL),
+ WMI_TLV_IPA_OFFLOAD_ENABLE_DISABLE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_IPA),
+ WMI_TLV_MDNS_OFFLOAD_ENABLE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_MDNS_OFL),
+ WMI_TLV_MDNS_SET_FQDN_CMDID,
+ WMI_TLV_MDNS_SET_RESPONSE_CMDID,
+ WMI_TLV_MDNS_GET_STATS_CMDID,
+ WMI_TLV_SAP_OFL_ENABLE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_SAP_OFL),
+};
+
+enum wmi_tlv_event_id {
+ WMI_TLV_SERVICE_READY_EVENTID = 0x1,
+ WMI_TLV_READY_EVENTID,
+ WMI_TLV_SERVICE_AVAILABLE_EVENTID,
+ WMI_TLV_SCAN_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_SCAN),
+ WMI_TLV_PDEV_TPC_CONFIG_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_PDEV),
+ WMI_TLV_CHAN_INFO_EVENTID,
+ WMI_TLV_PHYERR_EVENTID,
+ WMI_TLV_PDEV_DUMP_EVENTID,
+ WMI_TLV_TX_PAUSE_EVENTID,
+ WMI_TLV_DFS_RADAR_EVENTID,
+ WMI_TLV_PDEV_L1SS_TRACK_EVENTID,
+ WMI_TLV_PDEV_TEMPERATURE_EVENTID,
+ WMI_TLV_VDEV_START_RESP_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_VDEV),
+ WMI_TLV_VDEV_STOPPED_EVENTID,
+ WMI_TLV_VDEV_INSTALL_KEY_COMPLETE_EVENTID,
+ WMI_TLV_VDEV_MCC_BCN_INTERVAL_CHANGE_REQ_EVENTID,
+ WMI_TLV_VDEV_TSF_REPORT_EVENTID,
+ WMI_TLV_VDEV_DELETE_RESP_EVENTID,
+ WMI_TLV_PEER_STA_KICKOUT_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_PEER),
+ WMI_TLV_PEER_INFO_EVENTID,
+ WMI_TLV_PEER_TX_FAIL_CNT_THR_EVENTID,
+ WMI_TLV_PEER_ESTIMATED_LINKSPEED_EVENTID,
+ WMI_TLV_PEER_STATE_EVENTID,
+ WMI_TLV_PEER_ASSOC_CONF_EVENTID,
+ WMI_TLV_PEER_DELETE_RESP_EVENTID,
+ WMI_TLV_MGMT_RX_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_MGMT),
+ WMI_TLV_HOST_SWBA_EVENTID,
+ WMI_TLV_TBTTOFFSET_UPDATE_EVENTID,
+ WMI_TLV_OFFLOAD_BCN_TX_STATUS_EVENTID,
+ WMI_TLV_OFFLOAD_PROB_RESP_TX_STATUS_EVENTID,
+ WMI_TLV_MGMT_TX_COMPLETION_EVENTID,
+ WMI_TLV_MGMT_TX_BUNDLE_COMPLETION_EVENTID,
+ WMI_TLV_TX_DELBA_COMPLETE_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_BA_NEG),
+ WMI_TLV_TX_ADDBA_COMPLETE_EVENTID,
+ WMI_TLV_BA_RSP_SSN_EVENTID,
+ WMI_TLV_AGGR_STATE_TRIG_EVENTID,
+ WMI_TLV_ROAM_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_ROAM),
+ WMI_TLV_PROFILE_MATCH,
+ WMI_TLV_ROAM_SYNCH_EVENTID,
+ WMI_TLV_P2P_DISC_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_P2P),
+ WMI_TLV_P2P_NOA_EVENTID,
+ WMI_TLV_PDEV_RESUME_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_SUSPEND),
+ WMI_TLV_WOW_WAKEUP_HOST_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_WOW),
+ WMI_TLV_D0_WOW_DISABLE_ACK_EVENTID,
+ WMI_TLV_RTT_MEASUREMENT_REPORT_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_RTT),
+ WMI_TLV_TSF_MEASUREMENT_REPORT_EVENTID,
+ WMI_TLV_RTT_ERROR_REPORT_EVENTID,
+ WMI_TLV_STATS_EXT_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_STATS),
+ WMI_TLV_IFACE_LINK_STATS_EVENTID,
+ WMI_TLV_PEER_LINK_STATS_EVENTID,
+ WMI_TLV_RADIO_LINK_STATS_EVENTID,
+ WMI_TLV_UPDATE_FW_MEM_DUMP_EVENTID,
+ WMI_TLV_DIAG_EVENT_LOG_SUPPORTED_EVENTID,
+ WMI_TLV_INST_RSSI_STATS_EVENTID,
+ WMI_TLV_RADIO_TX_POWER_LEVEL_STATS_EVENTID,
+ WMI_TLV_REPORT_STATS_EVENTID,
+ WMI_TLV_UPDATE_RCPI_EVENTID,
+ WMI_TLV_PEER_STATS_INFO_EVENTID,
+ WMI_TLV_NLO_MATCH_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_NLO_OFL),
+ WMI_TLV_NLO_SCAN_COMPLETE_EVENTID,
+ WMI_TLV_APFIND_EVENTID,
+ WMI_TLV_GTK_OFFLOAD_STATUS_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_GTK_OFL),
+ WMI_TLV_GTK_REKEY_FAIL_EVENTID,
+ WMI_TLV_CSA_HANDLING_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_CSA_OFL),
+ WMI_TLV_CHATTER_PC_QUERY_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_CHATTER),
+ WMI_TLV_ECHO_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_MISC),
+ WMI_TLV_PDEV_UTF_EVENTID,
+ WMI_TLV_DEBUG_MESG_EVENTID,
+ WMI_TLV_UPDATE_STATS_EVENTID,
+ WMI_TLV_DEBUG_PRINT_EVENTID,
+ WMI_TLV_DCS_INTERFERENCE_EVENTID,
+ WMI_TLV_PDEV_QVIT_EVENTID,
+ WMI_TLV_WLAN_PROFILE_DATA_EVENTID,
+ WMI_TLV_PDEV_FTM_INTG_EVENTID,
+ WMI_TLV_WLAN_FREQ_AVOID_EVENTID,
+ WMI_TLV_VDEV_GET_KEEPALIVE_EVENTID,
+ WMI_TLV_THERMAL_MGMT_EVENTID,
+ WMI_TLV_DIAG_DATA_CONTAINER_EVENTID,
+ WMI_TLV_HOST_AUTO_SHUTDOWN_EVENTID,
+ WMI_TLV_UPDATE_WHAL_MIB_STATS_EVENTID,
+ WMI_TLV_UPDATE_VDEV_RATE_STATS_EVENTID,
+ WMI_TLV_DIAG_EVENTID,
+ WMI_TLV_GPIO_INPUT_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_GPIO),
+ WMI_TLV_UPLOADH_EVENTID,
+ WMI_TLV_CAPTUREH_EVENTID,
+ WMI_TLV_RFKILL_STATE_CHANGE_EVENTID,
+ WMI_TLV_TDLS_PEER_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_TDLS),
+ WMI_TLV_BATCH_SCAN_ENABLED_EVENTID =
+ WMI_TLV_EV(WMI_TLV_GRP_LOCATION_SCAN),
+ WMI_TLV_BATCH_SCAN_RESULT_EVENTID,
+ WMI_TLV_OEM_CAPABILITY_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_OEM),
+ WMI_TLV_OEM_MEASUREMENT_REPORT_EVENTID,
+ WMI_TLV_OEM_ERROR_REPORT_EVENTID,
+ WMI_TLV_NAN_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_NAN),
+ WMI_TLV_LPI_RESULT_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_LPI),
+ WMI_TLV_LPI_STATUS_EVENTID,
+ WMI_TLV_LPI_HANDOFF_EVENTID,
+ WMI_TLV_EXTSCAN_START_STOP_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_EXTSCAN),
+ WMI_TLV_EXTSCAN_OPERATION_EVENTID,
+ WMI_TLV_EXTSCAN_TABLE_USAGE_EVENTID,
+ WMI_TLV_EXTSCAN_CACHED_RESULTS_EVENTID,
+ WMI_TLV_EXTSCAN_WLAN_CHANGE_RESULTS_EVENTID,
+ WMI_TLV_EXTSCAN_HOTLIST_MATCH_EVENTID,
+ WMI_TLV_EXTSCAN_CAPABILITIES_EVENTID,
+ WMI_TLV_MDNS_STATS_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_MDNS_OFL),
+ WMI_TLV_SAP_OFL_ADD_STA_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_SAP_OFL),
+ WMI_TLV_SAP_OFL_DEL_STA_EVENTID,
+};
+
+enum wmi_tlv_pdev_param {
+ WMI_TLV_PDEV_PARAM_TX_CHAIN_MASK = 0x1,
+ WMI_TLV_PDEV_PARAM_RX_CHAIN_MASK,
+ WMI_TLV_PDEV_PARAM_TXPOWER_LIMIT2G,
+ WMI_TLV_PDEV_PARAM_TXPOWER_LIMIT5G,
+ WMI_TLV_PDEV_PARAM_TXPOWER_SCALE,
+ WMI_TLV_PDEV_PARAM_BEACON_GEN_MODE,
+ WMI_TLV_PDEV_PARAM_BEACON_TX_MODE,
+ WMI_TLV_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
+ WMI_TLV_PDEV_PARAM_PROTECTION_MODE,
+ WMI_TLV_PDEV_PARAM_DYNAMIC_BW,
+ WMI_TLV_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
+ WMI_TLV_PDEV_PARAM_AGG_SW_RETRY_TH,
+ WMI_TLV_PDEV_PARAM_STA_KICKOUT_TH,
+ WMI_TLV_PDEV_PARAM_AC_AGGRSIZE_SCALING,
+ WMI_TLV_PDEV_PARAM_LTR_ENABLE,
+ WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_BE,
+ WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_BK,
+ WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_VI,
+ WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_VO,
+ WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
+ WMI_TLV_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
+ WMI_TLV_PDEV_PARAM_LTR_RX_OVERRIDE,
+ WMI_TLV_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
+ WMI_TLV_PDEV_PARAM_L1SS_ENABLE,
+ WMI_TLV_PDEV_PARAM_DSLEEP_ENABLE,
+ WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_FLUSH,
+ WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_WATERMARK,
+ WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
+ WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE,
+ WMI_TLV_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
+ WMI_TLV_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
+ WMI_TLV_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
+ WMI_TLV_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
+ WMI_TLV_PDEV_PARAM_PMF_QOS,
+ WMI_TLV_PDEV_PARAM_ARP_AC_OVERRIDE,
+ WMI_TLV_PDEV_PARAM_DCS,
+ WMI_TLV_PDEV_PARAM_ANI_ENABLE,
+ WMI_TLV_PDEV_PARAM_ANI_POLL_PERIOD,
+ WMI_TLV_PDEV_PARAM_ANI_LISTEN_PERIOD,
+ WMI_TLV_PDEV_PARAM_ANI_OFDM_LEVEL,
+ WMI_TLV_PDEV_PARAM_ANI_CCK_LEVEL,
+ WMI_TLV_PDEV_PARAM_DYNTXCHAIN,
+ WMI_TLV_PDEV_PARAM_PROXY_STA,
+ WMI_TLV_PDEV_PARAM_IDLE_PS_CONFIG,
+ WMI_TLV_PDEV_PARAM_POWER_GATING_SLEEP,
+ WMI_TLV_PDEV_PARAM_RFKILL_ENABLE,
+ WMI_TLV_PDEV_PARAM_BURST_DUR,
+ WMI_TLV_PDEV_PARAM_BURST_ENABLE,
+ WMI_TLV_PDEV_PARAM_HW_RFKILL_CONFIG,
+ WMI_TLV_PDEV_PARAM_LOW_POWER_RF_ENABLE,
+ WMI_TLV_PDEV_PARAM_L1SS_TRACK,
+ WMI_TLV_PDEV_PARAM_HYST_EN,
+ WMI_TLV_PDEV_PARAM_POWER_COLLAPSE_ENABLE,
+ WMI_TLV_PDEV_PARAM_LED_SYS_STATE,
+ WMI_TLV_PDEV_PARAM_LED_ENABLE,
+ WMI_TLV_PDEV_PARAM_AUDIO_OVER_WLAN_LATENCY,
+ WMI_TLV_PDEV_PARAM_AUDIO_OVER_WLAN_ENABLE,
+ WMI_TLV_PDEV_PARAM_WHAL_MIB_STATS_UPDATE_ENABLE,
+ WMI_TLV_PDEV_PARAM_VDEV_RATE_STATS_UPDATE_PERIOD,
+ WMI_TLV_PDEV_PARAM_TXPOWER_REASON_NONE,
+ WMI_TLV_PDEV_PARAM_TXPOWER_REASON_SAR,
+ WMI_TLV_PDEV_PARAM_PEER_STATS_INFO_ENABLE = 0x8b,
+ WMI_TLV_PDEV_PARAM_TXPOWER_REASON_MAX,
+};
+
+enum wmi_tlv_vdev_param {
+ WMI_TLV_VDEV_PARAM_RTS_THRESHOLD = 0x1,
+ WMI_TLV_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
+ WMI_TLV_VDEV_PARAM_BEACON_INTERVAL,
+ WMI_TLV_VDEV_PARAM_LISTEN_INTERVAL,
+ WMI_TLV_VDEV_PARAM_MULTICAST_RATE,
+ WMI_TLV_VDEV_PARAM_MGMT_TX_RATE,
+ WMI_TLV_VDEV_PARAM_SLOT_TIME,
+ WMI_TLV_VDEV_PARAM_PREAMBLE,
+ WMI_TLV_VDEV_PARAM_SWBA_TIME,
+ WMI_TLV_VDEV_STATS_UPDATE_PERIOD,
+ WMI_TLV_VDEV_PWRSAVE_AGEOUT_TIME,
+ WMI_TLV_VDEV_HOST_SWBA_INTERVAL,
+ WMI_TLV_VDEV_PARAM_DTIM_PERIOD,
+ WMI_TLV_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
+ WMI_TLV_VDEV_PARAM_WDS,
+ WMI_TLV_VDEV_PARAM_ATIM_WINDOW,
+ WMI_TLV_VDEV_PARAM_BMISS_COUNT_MAX,
+ WMI_TLV_VDEV_PARAM_BMISS_FIRST_BCNT,
+ WMI_TLV_VDEV_PARAM_BMISS_FINAL_BCNT,
+ WMI_TLV_VDEV_PARAM_FEATURE_WMM,
+ WMI_TLV_VDEV_PARAM_CHWIDTH,
+ WMI_TLV_VDEV_PARAM_CHEXTOFFSET,
+ WMI_TLV_VDEV_PARAM_DISABLE_HTPROTECTION,
+ WMI_TLV_VDEV_PARAM_STA_QUICKKICKOUT,
+ WMI_TLV_VDEV_PARAM_MGMT_RATE,
+ WMI_TLV_VDEV_PARAM_PROTECTION_MODE,
+ WMI_TLV_VDEV_PARAM_FIXED_RATE,
+ WMI_TLV_VDEV_PARAM_SGI,
+ WMI_TLV_VDEV_PARAM_LDPC,
+ WMI_TLV_VDEV_PARAM_TX_STBC,
+ WMI_TLV_VDEV_PARAM_RX_STBC,
+ WMI_TLV_VDEV_PARAM_INTRA_BSS_FWD,
+ WMI_TLV_VDEV_PARAM_DEF_KEYID,
+ WMI_TLV_VDEV_PARAM_NSS,
+ WMI_TLV_VDEV_PARAM_BCAST_DATA_RATE,
+ WMI_TLV_VDEV_PARAM_MCAST_DATA_RATE,
+ WMI_TLV_VDEV_PARAM_MCAST_INDICATE,
+ WMI_TLV_VDEV_PARAM_DHCP_INDICATE,
+ WMI_TLV_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
+ WMI_TLV_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
+ WMI_TLV_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
+ WMI_TLV_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
+ WMI_TLV_VDEV_PARAM_AP_ENABLE_NAWDS,
+ WMI_TLV_VDEV_PARAM_ENABLE_RTSCTS,
+ WMI_TLV_VDEV_PARAM_TXBF,
+ WMI_TLV_VDEV_PARAM_PACKET_POWERSAVE,
+ WMI_TLV_VDEV_PARAM_DROP_UNENCRY,
+ WMI_TLV_VDEV_PARAM_TX_ENCAP_TYPE,
+ WMI_TLV_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
+ WMI_TLV_VDEV_PARAM_EARLY_RX_ADJUST_ENABLE,
+ WMI_TLV_VDEV_PARAM_EARLY_RX_TGT_BMISS_NUM,
+ WMI_TLV_VDEV_PARAM_EARLY_RX_BMISS_SAMPLE_CYCLE,
+ WMI_TLV_VDEV_PARAM_EARLY_RX_SLOP_STEP,
+ WMI_TLV_VDEV_PARAM_EARLY_RX_INIT_SLOP,
+ WMI_TLV_VDEV_PARAM_EARLY_RX_ADJUST_PAUSE,
+ WMI_TLV_VDEV_PARAM_TX_PWRLIMIT,
+ WMI_TLV_VDEV_PARAM_SNR_NUM_FOR_CAL,
+ WMI_TLV_VDEV_PARAM_ROAM_FW_OFFLOAD,
+ WMI_TLV_VDEV_PARAM_ENABLE_RMC,
+ WMI_TLV_VDEV_PARAM_IBSS_MAX_BCN_LOST_MS,
+ WMI_TLV_VDEV_PARAM_MAX_RATE,
+ WMI_TLV_VDEV_PARAM_EARLY_RX_DRIFT_SAMPLE,
+ WMI_TLV_VDEV_PARAM_SET_IBSS_TX_FAIL_CNT_THR,
+ WMI_TLV_VDEV_PARAM_EBT_RESYNC_TIMEOUT,
+ WMI_TLV_VDEV_PARAM_AGGR_TRIG_EVENT_ENABLE,
+ WMI_TLV_VDEV_PARAM_IS_IBSS_POWER_SAVE_ALLOWED,
+ WMI_TLV_VDEV_PARAM_IS_POWER_COLLAPSE_ALLOWED,
+ WMI_TLV_VDEV_PARAM_IS_AWAKE_ON_TXRX_ENABLED,
+ WMI_TLV_VDEV_PARAM_INACTIVITY_CNT,
+ WMI_TLV_VDEV_PARAM_TXSP_END_INACTIVITY_TIME_MS,
+ WMI_TLV_VDEV_PARAM_DTIM_POLICY,
+ WMI_TLV_VDEV_PARAM_IBSS_PS_WARMUP_TIME_SECS,
+ WMI_TLV_VDEV_PARAM_IBSS_PS_1RX_CHAIN_IN_ATIM_WINDOW_ENABLE,
+};
+
+enum wmi_tlv_peer_param {
+ WMI_TLV_PEER_SMPS_STATE = 0x1, /* see %wmi_peer_smps_state */
+ WMI_TLV_PEER_AMPDU = 0x2,
+ WMI_TLV_PEER_AUTHORIZE = 0x3,
+ WMI_TLV_PEER_CHAN_WIDTH = 0x4,
+ WMI_TLV_PEER_NSS = 0x5,
+ WMI_TLV_PEER_USE_4ADDR = 0x6,
+ WMI_TLV_PEER_MEMBERSHIP = 0x7,
+ WMI_TLV_PEER_USERPOS = 0x8,
+ WMI_TLV_PEER_CRIT_PROTO_HINT_ENABLED = 0x9,
+ WMI_TLV_PEER_TX_FAIL_CNT_THR = 0xa,
+ WMI_TLV_PEER_SET_HW_RETRY_CTS2S = 0xb,
+ WMI_TLV_PEER_IBSS_ATIM_WINDOW_LENGTH = 0xc,
+ WMI_TLV_PEER_PHYMODE = 0xd,
+ WMI_TLV_PEER_USE_FIXED_PWR = 0xe,
+ WMI_TLV_PEER_DUMMY_VAR = 0xff,
+};
+
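+/*
+ * Raw peer-flag bit values as defined by the TLV firmware interface.
+ * These are translated through wmi_tlv_peer_flags_map in wmi-tlv.c so the
+ * rest of ath10k can use firmware-agnostic flag names.
+ */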
+enum wmi_tlv_peer_flags {
+ WMI_TLV_PEER_AUTH = 0x00000001,
+ WMI_TLV_PEER_QOS = 0x00000002,
+ WMI_TLV_PEER_NEED_PTK_4_WAY = 0x00000004,
+ WMI_TLV_PEER_NEED_GTK_2_WAY = 0x00000010,
+ WMI_TLV_PEER_APSD = 0x00000800,
+ WMI_TLV_PEER_HT = 0x00001000,
+ WMI_TLV_PEER_40MHZ = 0x00002000,
+ WMI_TLV_PEER_STBC = 0x00008000,
+ WMI_TLV_PEER_LDPC = 0x00010000,
+ WMI_TLV_PEER_DYN_MIMOPS = 0x00020000,
+ WMI_TLV_PEER_STATIC_MIMOPS = 0x00040000,
+ WMI_TLV_PEER_SPATIAL_MUX = 0x00200000,
+ WMI_TLV_PEER_VHT = 0x02000000,
+ WMI_TLV_PEER_80MHZ = 0x04000000,
+ WMI_TLV_PEER_PMF = 0x08000000,
+ WMI_TLV_PEER_160MHZ = 0x20000000,
+};
+
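+/*
+ * TLV tag values identify the type of each TLV element in a command or
+ * event buffer. They mirror the firmware's own numbering and are assigned
+ * sequentially, so new tags may only be appended, never inserted in the
+ * middle.
+ */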
+enum wmi_tlv_tag {
+ WMI_TLV_TAG_LAST_RESERVED = 15,
+
+ WMI_TLV_TAG_FIRST_ARRAY_ENUM,
+ WMI_TLV_TAG_ARRAY_UINT32 = WMI_TLV_TAG_FIRST_ARRAY_ENUM,
+ WMI_TLV_TAG_ARRAY_BYTE,
+ WMI_TLV_TAG_ARRAY_STRUCT,
+ WMI_TLV_TAG_ARRAY_FIXED_STRUCT,
+ WMI_TLV_TAG_LAST_ARRAY_ENUM = 31,
+
+ WMI_TLV_TAG_STRUCT_SERVICE_READY_EVENT,
+ WMI_TLV_TAG_STRUCT_HAL_REG_CAPABILITIES,
+ WMI_TLV_TAG_STRUCT_WLAN_HOST_MEM_REQ,
+ WMI_TLV_TAG_STRUCT_READY_EVENT,
+ WMI_TLV_TAG_STRUCT_SCAN_EVENT,
+ WMI_TLV_TAG_STRUCT_PDEV_TPC_CONFIG_EVENT,
+ WMI_TLV_TAG_STRUCT_CHAN_INFO_EVENT,
+ WMI_TLV_TAG_STRUCT_COMB_PHYERR_RX_HDR,
+ WMI_TLV_TAG_STRUCT_VDEV_START_RESPONSE_EVENT,
+ WMI_TLV_TAG_STRUCT_VDEV_STOPPED_EVENT,
+ WMI_TLV_TAG_STRUCT_VDEV_INSTALL_KEY_COMPLETE_EVENT,
+ WMI_TLV_TAG_STRUCT_PEER_STA_KICKOUT_EVENT,
+ WMI_TLV_TAG_STRUCT_MGMT_RX_HDR,
+ WMI_TLV_TAG_STRUCT_TBTT_OFFSET_EVENT,
+ WMI_TLV_TAG_STRUCT_TX_DELBA_COMPLETE_EVENT,
+ WMI_TLV_TAG_STRUCT_TX_ADDBA_COMPLETE_EVENT,
+ WMI_TLV_TAG_STRUCT_ROAM_EVENT,
+ WMI_TLV_TAG_STRUCT_WOW_EVENT_INFO,
+ WMI_TLV_TAG_STRUCT_WOW_EVENT_INFO_SECTION_BITMAP,
+ WMI_TLV_TAG_STRUCT_RTT_EVENT_HEADER,
+ WMI_TLV_TAG_STRUCT_RTT_ERROR_REPORT_EVENT,
+ WMI_TLV_TAG_STRUCT_RTT_MEAS_EVENT,
+ WMI_TLV_TAG_STRUCT_ECHO_EVENT,
+ WMI_TLV_TAG_STRUCT_FTM_INTG_EVENT,
+ WMI_TLV_TAG_STRUCT_VDEV_GET_KEEPALIVE_EVENT,
+ WMI_TLV_TAG_STRUCT_GPIO_INPUT_EVENT,
+ WMI_TLV_TAG_STRUCT_CSA_EVENT,
+ WMI_TLV_TAG_STRUCT_GTK_OFFLOAD_STATUS_EVENT,
+ WMI_TLV_TAG_STRUCT_IGTK_INFO,
+ WMI_TLV_TAG_STRUCT_DCS_INTERFERENCE_EVENT,
+ WMI_TLV_TAG_STRUCT_ATH_DCS_CW_INT,
+ WMI_TLV_TAG_STRUCT_ATH_DCS_WLAN_INT_STAT,
+ WMI_TLV_TAG_STRUCT_WLAN_PROFILE_CTX_T,
+ WMI_TLV_TAG_STRUCT_WLAN_PROFILE_T,
+ WMI_TLV_TAG_STRUCT_PDEV_QVIT_EVENT,
+ WMI_TLV_TAG_STRUCT_HOST_SWBA_EVENT,
+ WMI_TLV_TAG_STRUCT_TIM_INFO,
+ WMI_TLV_TAG_STRUCT_P2P_NOA_INFO,
+ WMI_TLV_TAG_STRUCT_STATS_EVENT,
+ WMI_TLV_TAG_STRUCT_AVOID_FREQ_RANGES_EVENT,
+ WMI_TLV_TAG_STRUCT_AVOID_FREQ_RANGE_DESC,
+ WMI_TLV_TAG_STRUCT_GTK_REKEY_FAIL_EVENT,
+ WMI_TLV_TAG_STRUCT_INIT_CMD,
+ WMI_TLV_TAG_STRUCT_RESOURCE_CONFIG,
+ WMI_TLV_TAG_STRUCT_WLAN_HOST_MEMORY_CHUNK,
+ WMI_TLV_TAG_STRUCT_START_SCAN_CMD,
+ WMI_TLV_TAG_STRUCT_STOP_SCAN_CMD,
+ WMI_TLV_TAG_STRUCT_SCAN_CHAN_LIST_CMD,
+ WMI_TLV_TAG_STRUCT_CHANNEL,
+ WMI_TLV_TAG_STRUCT_PDEV_SET_REGDOMAIN_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_SET_PARAM_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_SET_WMM_PARAMS_CMD,
+ WMI_TLV_TAG_STRUCT_WMM_PARAMS,
+ WMI_TLV_TAG_STRUCT_PDEV_SET_QUIET_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_CREATE_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_DELETE_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_START_REQUEST_CMD,
+ WMI_TLV_TAG_STRUCT_P2P_NOA_DESCRIPTOR,
+ WMI_TLV_TAG_STRUCT_P2P_GO_SET_BEACON_IE,
+ WMI_TLV_TAG_STRUCT_GTK_OFFLOAD_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_UP_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_STOP_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_DOWN_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_SET_PARAM_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_INSTALL_KEY_CMD,
+ WMI_TLV_TAG_STRUCT_PEER_CREATE_CMD,
+ WMI_TLV_TAG_STRUCT_PEER_DELETE_CMD,
+ WMI_TLV_TAG_STRUCT_PEER_FLUSH_TIDS_CMD,
+ WMI_TLV_TAG_STRUCT_PEER_SET_PARAM_CMD,
+ WMI_TLV_TAG_STRUCT_PEER_ASSOC_COMPLETE_CMD,
+ WMI_TLV_TAG_STRUCT_VHT_RATE_SET,
+ WMI_TLV_TAG_STRUCT_BCN_TMPL_CMD,
+ WMI_TLV_TAG_STRUCT_PRB_TMPL_CMD,
+ WMI_TLV_TAG_STRUCT_BCN_PRB_INFO,
+ WMI_TLV_TAG_STRUCT_PEER_TID_ADDBA_CMD,
+ WMI_TLV_TAG_STRUCT_PEER_TID_DELBA_CMD,
+ WMI_TLV_TAG_STRUCT_STA_POWERSAVE_MODE_CMD,
+ WMI_TLV_TAG_STRUCT_STA_POWERSAVE_PARAM_CMD,
+ WMI_TLV_TAG_STRUCT_STA_DTIM_PS_METHOD_CMD,
+ WMI_TLV_TAG_STRUCT_ROAM_SCAN_MODE,
+ WMI_TLV_TAG_STRUCT_ROAM_SCAN_RSSI_THRESHOLD,
+ WMI_TLV_TAG_STRUCT_ROAM_SCAN_PERIOD,
+ WMI_TLV_TAG_STRUCT_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
+ WMI_TLV_TAG_STRUCT_PDEV_SUSPEND_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_RESUME_CMD,
+ WMI_TLV_TAG_STRUCT_ADD_BCN_FILTER_CMD,
+ WMI_TLV_TAG_STRUCT_RMV_BCN_FILTER_CMD,
+ WMI_TLV_TAG_STRUCT_WOW_ENABLE_CMD,
+ WMI_TLV_TAG_STRUCT_WOW_HOSTWAKEUP_FROM_SLEEP_CMD,
+ WMI_TLV_TAG_STRUCT_STA_UAPSD_AUTO_TRIG_CMD,
+ WMI_TLV_TAG_STRUCT_STA_UAPSD_AUTO_TRIG_PARAM,
+ WMI_TLV_TAG_STRUCT_SET_ARP_NS_OFFLOAD_CMD,
+ WMI_TLV_TAG_STRUCT_ARP_OFFLOAD_TUPLE,
+ WMI_TLV_TAG_STRUCT_NS_OFFLOAD_TUPLE,
+ WMI_TLV_TAG_STRUCT_FTM_INTG_CMD,
+ WMI_TLV_TAG_STRUCT_STA_KEEPALIVE_CMD,
+ WMI_TLV_TAG_STRUCT_STA_KEEPALVE_ARP_RESPONSE,
+ WMI_TLV_TAG_STRUCT_P2P_SET_VENDOR_IE_DATA_CMD,
+ WMI_TLV_TAG_STRUCT_AP_PS_PEER_CMD,
+ WMI_TLV_TAG_STRUCT_PEER_RATE_RETRY_SCHED_CMD,
+ WMI_TLV_TAG_STRUCT_WLAN_PROFILE_TRIGGER_CMD,
+ WMI_TLV_TAG_STRUCT_WLAN_PROFILE_SET_HIST_INTVL_CMD,
+ WMI_TLV_TAG_STRUCT_WLAN_PROFILE_GET_PROF_DATA_CMD,
+ WMI_TLV_TAG_STRUCT_WLAN_PROFILE_ENABLE_PROFILE_ID_CMD,
+ WMI_TLV_TAG_STRUCT_WOW_DEL_PATTERN_CMD,
+ WMI_TLV_TAG_STRUCT_WOW_ADD_DEL_EVT_CMD,
+ WMI_TLV_TAG_STRUCT_RTT_MEASREQ_HEAD,
+ WMI_TLV_TAG_STRUCT_RTT_MEASREQ_BODY,
+ WMI_TLV_TAG_STRUCT_RTT_TSF_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_SPECTRAL_CONFIGURE_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_SPECTRAL_ENABLE_CMD,
+ WMI_TLV_TAG_STRUCT_REQUEST_STATS_CMD,
+ WMI_TLV_TAG_STRUCT_NLO_CONFIG_CMD,
+ WMI_TLV_TAG_STRUCT_NLO_CONFIGURED_PARAMETERS,
+ WMI_TLV_TAG_STRUCT_CSA_OFFLOAD_ENABLE_CMD,
+ WMI_TLV_TAG_STRUCT_CSA_OFFLOAD_CHANSWITCH_CMD,
+ WMI_TLV_TAG_STRUCT_CHATTER_SET_MODE_CMD,
+ WMI_TLV_TAG_STRUCT_ECHO_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_SET_KEEPALIVE_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_GET_KEEPALIVE_CMD,
+ WMI_TLV_TAG_STRUCT_FORCE_FW_HANG_CMD,
+ WMI_TLV_TAG_STRUCT_GPIO_CONFIG_CMD,
+ WMI_TLV_TAG_STRUCT_GPIO_OUTPUT_CMD,
+ WMI_TLV_TAG_STRUCT_PEER_ADD_WDS_ENTRY_CMD,
+ WMI_TLV_TAG_STRUCT_PEER_REMOVE_WDS_ENTRY_CMD,
+ WMI_TLV_TAG_STRUCT_BCN_TX_HDR,
+ WMI_TLV_TAG_STRUCT_BCN_SEND_FROM_HOST_CMD,
+ WMI_TLV_TAG_STRUCT_MGMT_TX_HDR,
+ WMI_TLV_TAG_STRUCT_ADDBA_CLEAR_RESP_CMD,
+ WMI_TLV_TAG_STRUCT_ADDBA_SEND_CMD,
+ WMI_TLV_TAG_STRUCT_DELBA_SEND_CMD,
+ WMI_TLV_TAG_STRUCT_ADDBA_SETRESPONSE_CMD,
+ WMI_TLV_TAG_STRUCT_SEND_SINGLEAMSDU_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_PKTLOG_ENABLE_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_PKTLOG_DISABLE_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_SET_HT_IE_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_SET_VHT_IE_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_SET_DSCP_TID_MAP_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_GREEN_AP_PS_ENABLE_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_GET_TPC_CONFIG_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_SET_BASE_MACADDR_CMD,
+ WMI_TLV_TAG_STRUCT_PEER_MCAST_GROUP_CMD,
+ WMI_TLV_TAG_STRUCT_ROAM_AP_PROFILE,
+ WMI_TLV_TAG_STRUCT_AP_PROFILE,
+ WMI_TLV_TAG_STRUCT_SCAN_SCH_PRIORITY_TABLE_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_DFS_ENABLE_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_DFS_DISABLE_CMD,
+ WMI_TLV_TAG_STRUCT_WOW_ADD_PATTERN_CMD,
+ WMI_TLV_TAG_STRUCT_WOW_BITMAP_PATTERN_T,
+ WMI_TLV_TAG_STRUCT_WOW_IPV4_SYNC_PATTERN_T,
+ WMI_TLV_TAG_STRUCT_WOW_IPV6_SYNC_PATTERN_T,
+ WMI_TLV_TAG_STRUCT_WOW_MAGIC_PATTERN_CMD,
+ WMI_TLV_TAG_STRUCT_SCAN_UPDATE_REQUEST_CMD,
+ WMI_TLV_TAG_STRUCT_CHATTER_PKT_COALESCING_FILTER,
+ WMI_TLV_TAG_STRUCT_CHATTER_COALESCING_ADD_FILTER_CMD,
+ WMI_TLV_TAG_STRUCT_CHATTER_COALESCING_DELETE_FILTER_CMD,
+ WMI_TLV_TAG_STRUCT_CHATTER_COALESCING_QUERY_CMD,
+ WMI_TLV_TAG_STRUCT_TXBF_CMD,
+ WMI_TLV_TAG_STRUCT_DEBUG_LOG_CONFIG_CMD,
+ WMI_TLV_TAG_STRUCT_NLO_EVENT,
+ WMI_TLV_TAG_STRUCT_CHATTER_QUERY_REPLY_EVENT,
+ WMI_TLV_TAG_STRUCT_UPLOAD_H_HDR,
+ WMI_TLV_TAG_STRUCT_CAPTURE_H_EVENT_HDR,
+ WMI_TLV_TAG_STRUCT_VDEV_WNM_SLEEPMODE_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_IPSEC_NATKEEPALIVE_FILTER_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_WMM_ADDTS_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_WMM_DELTS_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_SET_WMM_PARAMS_CMD,
+ WMI_TLV_TAG_STRUCT_TDLS_SET_STATE_CMD,
+ WMI_TLV_TAG_STRUCT_TDLS_PEER_UPDATE_CMD,
+ WMI_TLV_TAG_STRUCT_TDLS_PEER_EVENT,
+ WMI_TLV_TAG_STRUCT_TDLS_PEER_CAPABILITIES,
+ WMI_TLV_TAG_STRUCT_VDEV_MCC_SET_TBTT_MODE_CMD,
+ WMI_TLV_TAG_STRUCT_ROAM_CHAN_LIST,
+ WMI_TLV_TAG_STRUCT_VDEV_MCC_BCN_INTVL_CHANGE_EVENT,
+ WMI_TLV_TAG_STRUCT_RESMGR_ADAPTIVE_OCS_CMD,
+ WMI_TLV_TAG_STRUCT_RESMGR_SET_CHAN_TIME_QUOTA_CMD,
+ WMI_TLV_TAG_STRUCT_RESMGR_SET_CHAN_LATENCY_CMD,
+ WMI_TLV_TAG_STRUCT_BA_REQ_SSN_CMD,
+ WMI_TLV_TAG_STRUCT_BA_RSP_SSN_EVENT,
+ WMI_TLV_TAG_STRUCT_STA_SMPS_FORCE_MODE_CMD,
+ WMI_TLV_TAG_STRUCT_SET_MCASTBCAST_FILTER_CMD,
+ WMI_TLV_TAG_STRUCT_P2P_SET_OPPPS_CMD,
+ WMI_TLV_TAG_STRUCT_P2P_SET_NOA_CMD,
+ WMI_TLV_TAG_STRUCT_BA_REQ_SSN_CMD_SUB_STRUCT_PARAM,
+ WMI_TLV_TAG_STRUCT_BA_REQ_SSN_EVENT_SUB_STRUCT_PARAM,
+ WMI_TLV_TAG_STRUCT_STA_SMPS_PARAM_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_SET_GTX_PARAMS_CMD,
+ WMI_TLV_TAG_STRUCT_MCC_SCHED_TRAFFIC_STATS_CMD,
+ WMI_TLV_TAG_STRUCT_MCC_SCHED_STA_TRAFFIC_STATS,
+ WMI_TLV_TAG_STRUCT_OFFLOAD_BCN_TX_STATUS_EVENT,
+ WMI_TLV_TAG_STRUCT_P2P_NOA_EVENT,
+ WMI_TLV_TAG_STRUCT_HB_SET_ENABLE_CMD,
+ WMI_TLV_TAG_STRUCT_HB_SET_TCP_PARAMS_CMD,
+ WMI_TLV_TAG_STRUCT_HB_SET_TCP_PKT_FILTER_CMD,
+ WMI_TLV_TAG_STRUCT_HB_SET_UDP_PARAMS_CMD,
+ WMI_TLV_TAG_STRUCT_HB_SET_UDP_PKT_FILTER_CMD,
+ WMI_TLV_TAG_STRUCT_HB_IND_EVENT,
+ WMI_TLV_TAG_STRUCT_TX_PAUSE_EVENT,
+ WMI_TLV_TAG_STRUCT_RFKILL_EVENT,
+ WMI_TLV_TAG_STRUCT_DFS_RADAR_EVENT,
+ WMI_TLV_TAG_STRUCT_DFS_PHYERR_FILTER_ENA_CMD,
+ WMI_TLV_TAG_STRUCT_DFS_PHYERR_FILTER_DIS_CMD,
+ WMI_TLV_TAG_STRUCT_BATCH_SCAN_RESULT_SCAN_LIST,
+ WMI_TLV_TAG_STRUCT_BATCH_SCAN_RESULT_NETWORK_INFO,
+ WMI_TLV_TAG_STRUCT_BATCH_SCAN_ENABLE_CMD,
+ WMI_TLV_TAG_STRUCT_BATCH_SCAN_DISABLE_CMD,
+ WMI_TLV_TAG_STRUCT_BATCH_SCAN_TRIGGER_RESULT_CMD,
+ WMI_TLV_TAG_STRUCT_BATCH_SCAN_ENABLED_EVENT,
+ WMI_TLV_TAG_STRUCT_BATCH_SCAN_RESULT_EVENT,
+ WMI_TLV_TAG_STRUCT_VDEV_PLMREQ_START_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_PLMREQ_STOP_CMD,
+ WMI_TLV_TAG_STRUCT_THERMAL_MGMT_CMD,
+ WMI_TLV_TAG_STRUCT_THERMAL_MGMT_EVENT,
+ WMI_TLV_TAG_STRUCT_PEER_INFO_REQ_CMD,
+ WMI_TLV_TAG_STRUCT_PEER_INFO_EVENT,
+ WMI_TLV_TAG_STRUCT_PEER_INFO,
+ WMI_TLV_TAG_STRUCT_PEER_TX_FAIL_CNT_THR_EVENT,
+ WMI_TLV_TAG_STRUCT_RMC_SET_MODE_CMD,
+ WMI_TLV_TAG_STRUCT_RMC_SET_ACTION_PERIOD_CMD,
+ WMI_TLV_TAG_STRUCT_RMC_CONFIG_CMD,
+ WMI_TLV_TAG_STRUCT_MHF_OFFLOAD_SET_MODE_CMD,
+ WMI_TLV_TAG_STRUCT_MHF_OFFLOAD_PLUMB_ROUTING_TABLE_CMD,
+ WMI_TLV_TAG_STRUCT_ADD_PROACTIVE_ARP_RSP_PATTERN_CMD,
+ WMI_TLV_TAG_STRUCT_DEL_PROACTIVE_ARP_RSP_PATTERN_CMD,
+ WMI_TLV_TAG_STRUCT_NAN_CMD_PARAM,
+ WMI_TLV_TAG_STRUCT_NAN_EVENT_HDR,
+ WMI_TLV_TAG_STRUCT_PDEV_L1SS_TRACK_EVENT,
+ WMI_TLV_TAG_STRUCT_DIAG_DATA_CONTAINER_EVENT,
+ WMI_TLV_TAG_STRUCT_MODEM_POWER_STATE_CMD_PARAM,
+ WMI_TLV_TAG_STRUCT_PEER_GET_ESTIMATED_LINKSPEED_CMD,
+ WMI_TLV_TAG_STRUCT_PEER_ESTIMATED_LINKSPEED_EVENT,
+ WMI_TLV_TAG_STRUCT_AGGR_STATE_TRIG_EVENT,
+ WMI_TLV_TAG_STRUCT_MHF_OFFLOAD_ROUTING_TABLE_ENTRY,
+ WMI_TLV_TAG_STRUCT_ROAM_SCAN_CMD,
+ WMI_TLV_TAG_STRUCT_REQ_STATS_EXT_CMD,
+ WMI_TLV_TAG_STRUCT_STATS_EXT_EVENT,
+ WMI_TLV_TAG_STRUCT_OBSS_SCAN_ENABLE_CMD,
+ WMI_TLV_TAG_STRUCT_OBSS_SCAN_DISABLE_CMD,
+ WMI_TLV_TAG_STRUCT_OFFLOAD_PRB_RSP_TX_STATUS_EVENT,
+ WMI_TLV_TAG_STRUCT_PDEV_SET_LED_CONFIG_CMD,
+ WMI_TLV_TAG_STRUCT_HOST_AUTO_SHUTDOWN_CFG_CMD,
+ WMI_TLV_TAG_STRUCT_HOST_AUTO_SHUTDOWN_EVENT,
+ WMI_TLV_TAG_STRUCT_UPDATE_WHAL_MIB_STATS_EVENT,
+ WMI_TLV_TAG_STRUCT_CHAN_AVOID_UPDATE_CMD_PARAM,
+ WMI_TLV_TAG_STRUCT_WOW_ACER_IOAC_PKT_PATTERN_T,
+ WMI_TLV_TAG_STRUCT_WOW_ACER_IOAC_TMR_PATTERN_T,
+ WMI_TLV_TAG_STRUCT_WOW_IOAC_ADD_KEEPALIVE_CMD,
+ WMI_TLV_TAG_STRUCT_WOW_IOAC_DEL_KEEPALIVE_CMD,
+ WMI_TLV_TAG_STRUCT_WOW_IOAC_KEEPALIVE_T,
+ WMI_TLV_TAG_STRUCT_WOW_ACER_IOAC_ADD_PATTERN_CMD,
+ WMI_TLV_TAG_STRUCT_WOW_ACER_IOAC_DEL_PATTERN_CMD,
+ WMI_TLV_TAG_STRUCT_START_LINK_STATS_CMD,
+ WMI_TLV_TAG_STRUCT_CLEAR_LINK_STATS_CMD,
+ WMI_TLV_TAG_STRUCT_REQUEST_LINK_STATS_CMD,
+ WMI_TLV_TAG_STRUCT_IFACE_LINK_STATS_EVENT,
+ WMI_TLV_TAG_STRUCT_RADIO_LINK_STATS_EVENT,
+ WMI_TLV_TAG_STRUCT_PEER_STATS_EVENT,
+ WMI_TLV_TAG_STRUCT_CHANNEL_STATS,
+ WMI_TLV_TAG_STRUCT_RADIO_LINK_STATS,
+ WMI_TLV_TAG_STRUCT_RATE_STATS,
+ WMI_TLV_TAG_STRUCT_PEER_LINK_STATS,
+ WMI_TLV_TAG_STRUCT_WMM_AC_STATS,
+ WMI_TLV_TAG_STRUCT_IFACE_LINK_STATS,
+ WMI_TLV_TAG_STRUCT_LPI_MGMT_SNOOPING_CONFIG_CMD,
+ WMI_TLV_TAG_STRUCT_LPI_START_SCAN_CMD,
+ WMI_TLV_TAG_STRUCT_LPI_STOP_SCAN_CMD,
+ WMI_TLV_TAG_STRUCT_LPI_RESULT_EVENT,
+ WMI_TLV_TAG_STRUCT_PEER_STATE_EVENT,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_BUCKET_CMD,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_BUCKET_CHANNEL_EVENT,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_START_CMD,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_STOP_CMD,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_CONFIGURE_WLAN_CHANGE_MONITOR_CMD,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_WLAN_CHANGE_BSSID_PARAM_CMD,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_CONFIGURE_HOTLIST_MONITOR_CMD,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_GET_CACHED_RESULTS_CMD,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_GET_WLAN_CHANGE_RESULTS_CMD,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_SET_CAPABILITIES_CMD,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_GET_CAPABILITIES_CMD,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_OPERATION_EVENT,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_START_STOP_EVENT,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_TABLE_USAGE_EVENT,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_WLAN_DESCRIPTOR_EVENT,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_RSSI_INFO_EVENT,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_CACHED_RESULTS_EVENT,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_WLAN_CHANGE_RESULTS_EVENT,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_WLAN_CHANGE_RESULT_BSSID_EVENT,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_HOTLIST_MATCH_EVENT,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_CAPABILITIES_EVENT,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_CACHE_CAPABILITIES_EVENT,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_WLAN_CHANGE_MONITOR_CAPABILITIES_EVENT,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_HOTLIST_MONITOR_CAPABILITIES_EVENT,
+ WMI_TLV_TAG_STRUCT_D0_WOW_ENABLE_DISABLE_CMD,
+ WMI_TLV_TAG_STRUCT_D0_WOW_DISABLE_ACK_EVENT,
+ WMI_TLV_TAG_STRUCT_UNIT_TEST_CMD,
+ WMI_TLV_TAG_STRUCT_ROAM_OFFLOAD_TLV_PARAM,
+ WMI_TLV_TAG_STRUCT_ROAM_11I_OFFLOAD_TLV_PARAM,
+ WMI_TLV_TAG_STRUCT_ROAM_11R_OFFLOAD_TLV_PARAM,
+ WMI_TLV_TAG_STRUCT_ROAM_ESE_OFFLOAD_TLV_PARAM,
+ WMI_TLV_TAG_STRUCT_ROAM_SYNCH_EVENT,
+ WMI_TLV_TAG_STRUCT_ROAM_SYNCH_COMPLETE,
+ WMI_TLV_TAG_STRUCT_EXTWOW_ENABLE_CMD,
+ WMI_TLV_TAG_STRUCT_EXTWOW_SET_APP_TYPE1_PARAMS_CMD,
+ WMI_TLV_TAG_STRUCT_EXTWOW_SET_APP_TYPE2_PARAMS_CMD,
+ WMI_TLV_TAG_STRUCT_LPI_STATUS_EVENT,
+ WMI_TLV_TAG_STRUCT_LPI_HANDOFF_EVENT,
+ WMI_TLV_TAG_STRUCT_VDEV_RATE_STATS_EVENT,
+ WMI_TLV_TAG_STRUCT_VDEV_RATE_HT_INFO,
+ WMI_TLV_TAG_STRUCT_RIC_REQUEST,
+ WMI_TLV_TAG_STRUCT_PDEV_GET_TEMPERATURE_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_TEMPERATURE_EVENT,
+ WMI_TLV_TAG_STRUCT_SET_DHCP_SERVER_OFFLOAD_CMD,
+ WMI_TLV_TAG_STRUCT_TPC_CHAINMASK_CONFIG_CMD,
+ WMI_TLV_TAG_STRUCT_RIC_TSPEC,
+ WMI_TLV_TAG_STRUCT_TPC_CHAINMASK_CONFIG,
+ WMI_TLV_TAG_STRUCT_IPA_OFFLOAD_CMD,
+ WMI_TLV_TAG_STRUCT_SCAN_PROB_REQ_OUI_CMD,
+ WMI_TLV_TAG_STRUCT_KEY_MATERIAL,
+ WMI_TLV_TAG_STRUCT_TDLS_SET_OFFCHAN_MODE_CMD,
+ WMI_TLV_TAG_STRUCT_SET_LED_FLASHING_CMD,
+ WMI_TLV_TAG_STRUCT_MDNS_OFFLOAD_CMD,
+ WMI_TLV_TAG_STRUCT_MDNS_SET_FQDN_CMD,
+ WMI_TLV_TAG_STRUCT_MDNS_SET_RESP_CMD,
+ WMI_TLV_TAG_STRUCT_MDNS_GET_STATS_CMD,
+ WMI_TLV_TAG_STRUCT_MDNS_STATS_EVENT,
+ WMI_TLV_TAG_STRUCT_ROAM_INVOKE_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_RESUME_EVENT,
+ WMI_TLV_TAG_STRUCT_PDEV_SET_ANTENNA_DIVERSITY_CMD,
+ WMI_TLV_TAG_STRUCT_SAP_OFL_ENABLE_CMD,
+ WMI_TLV_TAG_STRUCT_SAP_OFL_ADD_STA_EVENT,
+ WMI_TLV_TAG_STRUCT_SAP_OFL_DEL_STA_EVENT,
+ WMI_TLV_TAG_STRUCT_APFIND_CMD_PARAM,
+ WMI_TLV_TAG_STRUCT_APFIND_EVENT_HDR,
+ WMI_TLV_TAG_STRUCT_OCB_SET_SCHED_CMD,
+ WMI_TLV_TAG_STRUCT_OCB_SET_SCHED_EVENT,
+ WMI_TLV_TAG_STRUCT_OCB_SET_CONFIG_CMD,
+ WMI_TLV_TAG_STRUCT_OCB_SET_CONFIG_RESP_EVENT,
+ WMI_TLV_TAG_STRUCT_OCB_SET_UTC_TIME_CMD,
+ WMI_TLV_TAG_STRUCT_OCB_START_TIMING_ADVERT_CMD,
+ WMI_TLV_TAG_STRUCT_OCB_STOP_TIMING_ADVERT_CMD,
+ WMI_TLV_TAG_STRUCT_OCB_GET_TSF_TIMER_CMD,
+ WMI_TLV_TAG_STRUCT_OCB_GET_TSF_TIMER_RESP_EVENT,
+ WMI_TLV_TAG_STRUCT_DCC_GET_STATS_CMD,
+ WMI_TLV_TAG_STRUCT_DCC_CHANNEL_STATS_REQUEST,
+ WMI_TLV_TAG_STRUCT_DCC_GET_STATS_RESP_EVENT,
+ WMI_TLV_TAG_STRUCT_DCC_CLEAR_STATS_CMD,
+ WMI_TLV_TAG_STRUCT_DCC_UPDATE_NDL_CMD,
+ WMI_TLV_TAG_STRUCT_DCC_UPDATE_NDL_RESP_EVENT,
+ WMI_TLV_TAG_STRUCT_DCC_STATS_EVENT,
+ WMI_TLV_TAG_STRUCT_OCB_CHANNEL,
+ WMI_TLV_TAG_STRUCT_OCB_SCHEDULE_ELEMENT,
+ WMI_TLV_TAG_STRUCT_DCC_NDL_STATS_PER_CHANNEL,
+ WMI_TLV_TAG_STRUCT_DCC_NDL_CHAN,
+ WMI_TLV_TAG_STRUCT_QOS_PARAMETER,
+ WMI_TLV_TAG_STRUCT_DCC_NDL_ACTIVE_STATE_CONFIG,
+ WMI_TLV_TAG_STRUCT_ROAM_SCAN_EXTENDED_THRESHOLD_PARAM,
+ WMI_TLV_TAG_STRUCT_ROAM_FILTER_FIXED_PARAM,
+ WMI_TLV_TAG_STRUCT_PASSPOINT_CONFIG_CMD,
+ WMI_TLV_TAG_STRUCT_PASSPOINT_EVENT_HDR,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_CONFIGURE_HOTLIST_SSID_MONITOR_CMD,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_HOTLIST_SSID_MATCH_EVENT,
+ WMI_TLV_TAG_STRUCT_VDEV_TSF_TSTAMP_ACTION_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_TSF_REPORT_EVENT,
+ WMI_TLV_TAG_STRUCT_GET_FW_MEM_DUMP,
+ WMI_TLV_TAG_STRUCT_UPDATE_FW_MEM_DUMP,
+ WMI_TLV_TAG_STRUCT_FW_MEM_DUMP_PARAMS,
+ WMI_TLV_TAG_STRUCT_DEBUG_MESG_FLUSH,
+ WMI_TLV_TAG_STRUCT_DEBUG_MESG_FLUSH_COMPLETE,
+ WMI_TLV_TAG_STRUCT_PEER_SET_RATE_REPORT_CONDITION,
+ WMI_TLV_TAG_STRUCT_ROAM_SUBNET_CHANGE_CONFIG,
+ WMI_TLV_TAG_STRUCT_VDEV_SET_IE_CMD,
+ WMI_TLV_TAG_STRUCT_RSSI_BREACH_MONITOR_CONFIG,
+ WMI_TLV_TAG_STRUCT_RSSI_BREACH_EVENT,
+ WMI_TLV_TAG_STRUCT_EVENT_INITIAL_WAKEUP,
+ WMI_TLV_TAG_STRUCT_SOC_SET_PCL_CMD,
+ WMI_TLV_TAG_STRUCT_SOC_SET_HW_MODE_CMD,
+ WMI_TLV_TAG_STRUCT_SOC_SET_HW_MODE_RESPONSE_EVENT,
+ WMI_TLV_TAG_STRUCT_SOC_HW_MODE_TRANSITION_EVENT,
+ WMI_TLV_TAG_STRUCT_VDEV_TXRX_STREAMS,
+ WMI_TLV_TAG_STRUCT_SOC_SET_HW_MODE_RESPONSE_VDEV_MAC_ENTRY,
+ WMI_TLV_TAG_STRUCT_SOC_SET_DUAL_MAC_CONFIG_CMD,
+ WMI_TLV_TAG_STRUCT_SOC_SET_DUAL_MAC_CONFIG_RESPONSE_EVENT,
+ WMI_TLV_TAG_STRUCT_IOAC_SOCK_PATTERN_T,
+ WMI_TLV_TAG_STRUCT_WOW_ENABLE_ICMPV6_NA_FLT_CMD,
+ WMI_TLV_TAG_STRUCT_DIAG_EVENT_LOG_CONFIG,
+ WMI_TLV_TAG_STRUCT_DIAG_EVENT_LOG_SUPPORTED_EVENT,
+ WMI_TLV_TAG_STRUCT_PACKET_FILTER_CONFIG,
+ WMI_TLV_TAG_STRUCT_PACKET_FILTER_ENABLE,
+ WMI_TLV_TAG_STRUCT_SAP_SET_BLACKLIST_PARAM_CMD,
+ WMI_TLV_TAG_STRUCT_MGMT_TX_CMD,
+ WMI_TLV_TAG_STRUCT_MGMT_TX_COMPL_EVENT,
+ WMI_TLV_TAG_STRUCT_SOC_SET_ANTENNA_MODE_CMD,
+ WMI_TLV_TAG_STRUCT_WOW_UDP_SVC_OFLD_CMD,
+ WMI_TLV_TAG_STRUCT_LRO_INFO_CMD,
+ WMI_TLV_TAG_STRUCT_ROAM_EARLYSTOP_RSSI_THRES_PARAM,
+ WMI_TLV_TAG_STRUCT_SERVICE_READY_EXT_EVENT,
+ WMI_TLV_TAG_STRUCT_MAWC_SENSOR_REPORT_IND_CMD,
+ WMI_TLV_TAG_STRUCT_MAWC_ENABLE_SENSOR_EVENT,
+ WMI_TLV_TAG_STRUCT_ROAM_CONFIGURE_MAWC_CMD,
+ WMI_TLV_TAG_STRUCT_NLO_CONFIGURE_MAWC_CMD,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_CONFIGURE_MAWC_CMD,
+ WMI_TLV_TAG_STRUCT_PEER_ASSOC_CONF_EVENT,
+ WMI_TLV_TAG_STRUCT_WOW_HOSTWAKEUP_GPIO_PIN_PATTERN_CONFIG_CMD,
+ WMI_TLV_TAG_STRUCT_AP_PS_EGAP_PARAM_CMD,
+ WMI_TLV_TAG_STRUCT_AP_PS_EGAP_INFO_EVENT,
+ WMI_TLV_TAG_STRUCT_PMF_OFFLOAD_SET_SA_QUERY_CMD,
+ WMI_TLV_TAG_STRUCT_TRANSFER_DATA_TO_FLASH_CMD,
+ WMI_TLV_TAG_STRUCT_TRANSFER_DATA_TO_FLASH_COMPLETE_EVENT,
+ WMI_TLV_TAG_STRUCT_SCPC_EVENT,
+ WMI_TLV_TAG_STRUCT_AP_PS_EGAP_INFO_CHAINMASK_LIST,
+ WMI_TLV_TAG_STRUCT_STA_SMPS_FORCE_MODE_COMPLETE_EVENT,
+ WMI_TLV_TAG_STRUCT_BPF_GET_CAPABILITY_CMD,
+ WMI_TLV_TAG_STRUCT_BPF_CAPABILITY_INFO_EVT,
+ WMI_TLV_TAG_STRUCT_BPF_GET_VDEV_STATS_CMD,
+ WMI_TLV_TAG_STRUCT_BPF_VDEV_STATS_INFO_EVT,
+ WMI_TLV_TAG_STRUCT_BPF_SET_VDEV_INSTRUCTIONS_CMD,
+ WMI_TLV_TAG_STRUCT_BPF_DEL_VDEV_INSTRUCTIONS_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_DELETE_RESP_EVENT,
+ WMI_TLV_TAG_STRUCT_PEER_DELETE_RESP_EVENT,
+ WMI_TLV_TAG_STRUCT_ROAM_DENSE_THRES_PARAM,
+ WMI_TLV_TAG_STRUCT_ENLO_CANDIDATE_SCORE_PARAM,
+ WMI_TLV_TAG_STRUCT_PEER_UPDATE_WDS_ENTRY_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_CONFIG_RATEMASK,
+ WMI_TLV_TAG_STRUCT_PDEV_FIPS_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_SMART_ANT_ENABLE_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_SMART_ANT_SET_RX_ANTENNA_CMD,
+ WMI_TLV_TAG_STRUCT_PEER_SMART_ANT_SET_TX_ANTENNA_CMD,
+ WMI_TLV_TAG_STRUCT_PEER_SMART_ANT_SET_TRAIN_ANTENNA_CMD,
+ WMI_TLV_TAG_STRUCT_PEER_SMART_ANT_SET_NODE_CONFIG_OPS_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_SET_ANT_SWITCH_TBL_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_SET_CTL_TABLE_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_SET_MIMOGAIN_TABLE_CMD,
+ WMI_TLV_TAG_STRUCT_FWTEST_SET_PARAM_CMD,
+ WMI_TLV_TAG_STRUCT_PEER_ATF_REQUEST,
+ WMI_TLV_TAG_STRUCT_VDEV_ATF_REQUEST,
+ WMI_TLV_TAG_STRUCT_PDEV_GET_ANI_CCK_CONFIG_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_GET_ANI_OFDM_CONFIG_CMD,
+ WMI_TLV_TAG_STRUCT_INST_RSSI_STATS_RESP,
+ WMI_TLV_TAG_STRUCT_MED_UTIL_REPORT_EVENT,
+ WMI_TLV_TAG_STRUCT_PEER_STA_PS_STATECHANGE_EVENT,
+ WMI_TLV_TAG_STRUCT_WDS_ADDR_EVENT,
+ WMI_TLV_TAG_STRUCT_PEER_RATECODE_LIST_EVENT,
+ WMI_TLV_TAG_STRUCT_PDEV_NFCAL_POWER_ALL_CHANNELS_EVENT,
+ WMI_TLV_TAG_STRUCT_PDEV_TPC_EVENT,
+ WMI_TLV_TAG_STRUCT_ANI_OFDM_EVENT,
+ WMI_TLV_TAG_STRUCT_ANI_CCK_EVENT,
+ WMI_TLV_TAG_STRUCT_PDEV_CHANNEL_HOPPING_EVENT,
+ WMI_TLV_TAG_STRUCT_PDEV_FIPS_EVENT,
+ WMI_TLV_TAG_STRUCT_ATF_PEER_INFO,
+ WMI_TLV_TAG_STRUCT_PDEV_GET_TPC_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_FILTER_NRP_CONFIG_CMD,
+ WMI_TLV_TAG_STRUCT_QBOOST_CFG_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_SMART_ANT_GPIO_HANDLE,
+ WMI_TLV_TAG_STRUCT_PEER_SMART_ANT_SET_TX_ANTENNA_SERIES,
+ WMI_TLV_TAG_STRUCT_PEER_SMART_ANT_SET_TRAIN_ANTENNA_PARAM,
+ WMI_TLV_TAG_STRUCT_PDEV_SET_ANT_CTRL_CHAIN,
+ WMI_TLV_TAG_STRUCT_PEER_CCK_OFDM_RATE_INFO,
+ WMI_TLV_TAG_STRUCT_PEER_MCS_RATE_INFO,
+ WMI_TLV_TAG_STRUCT_PDEV_NFCAL_POWER_ALL_CHANNELS_NFDBR,
+ WMI_TLV_TAG_STRUCT_PDEV_NFCAL_POWER_ALL_CHANNELS_NFDBM,
+ WMI_TLV_TAG_STRUCT_PDEV_NFCAL_POWER_ALL_CHANNELS_FREQNUM,
+ WMI_TLV_TAG_STRUCT_MU_REPORT_TOTAL_MU,
+ WMI_TLV_TAG_STRUCT_VDEV_SET_DSCP_TID_MAP_CMD,
+ WMI_TLV_TAG_STRUCT_ROAM_SET_MBO,
+ WMI_TLV_TAG_STRUCT_MIB_STATS_ENABLE_CMD,
+ WMI_TLV_TAG_STRUCT_NAN_DISC_IFACE_CREATED_EVENT,
+ WMI_TLV_TAG_STRUCT_NAN_DISC_IFACE_DELETED_EVENT,
+ WMI_TLV_TAG_STRUCT_NAN_STARTED_CLUSTER_EVENT,
+ WMI_TLV_TAG_STRUCT_NAN_JOINED_CLUSTER_EVENT,
+ WMI_TLV_TAG_STRUCT_NDI_GET_CAP_REQ,
+ WMI_TLV_TAG_STRUCT_NDP_INITIATOR_REQ,
+ WMI_TLV_TAG_STRUCT_NDP_RESPONDER_REQ,
+ WMI_TLV_TAG_STRUCT_NDP_END_REQ,
+ WMI_TLV_TAG_STRUCT_NDI_CAP_RSP_EVENT,
+ WMI_TLV_TAG_STRUCT_NDP_INITIATOR_RSP_EVENT,
+ WMI_TLV_TAG_STRUCT_NDP_RESPONDER_RSP_EVENT,
+ WMI_TLV_TAG_STRUCT_NDP_END_RSP_EVENT,
+ WMI_TLV_TAG_STRUCT_NDP_INDICATION_EVENT,
+ WMI_TLV_TAG_STRUCT_NDP_CONFIRM_EVENT,
+ WMI_TLV_TAG_STRUCT_NDP_END_INDICATION_EVENT,
+ WMI_TLV_TAG_STRUCT_VDEV_SET_QUIET_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_SET_PCL_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_SET_HW_MODE_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_SET_MAC_CONFIG_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_SET_ANTENNA_MODE_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_SET_HW_MODE_RESPONSE_EVENT,
+ WMI_TLV_TAG_STRUCT_PDEV_HW_MODE_TRANSITION_EVENT,
+ WMI_TLV_TAG_STRUCT_PDEV_SET_HW_MODE_RESPONSE_VDEV_MAC_ENTRY,
+ WMI_TLV_TAG_STRUCT_PDEV_SET_MAC_CONFIG_RESPONSE_EVENT,
+ WMI_TLV_TAG_STRUCT_COEX_CONFIG_CMD,
+ WMI_TLV_TAG_STRUCT_CONFIG_ENHANCED_MCAST_FILTER,
+ WMI_TLV_TAG_STRUCT_CHAN_AVOID_RPT_ALLOW_CMD,
+ WMI_TLV_TAG_STRUCT_SET_PERIODIC_CHANNEL_STATS_CONFIG,
+ WMI_TLV_TAG_STRUCT_VDEV_SET_CUSTOM_AGGR_SIZE_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_WAL_POWER_DEBUG_CMD,
+ WMI_TLV_TAG_STRUCT_MAC_PHY_CAPABILITIES,
+ WMI_TLV_TAG_STRUCT_HW_MODE_CAPABILITIES,
+ WMI_TLV_TAG_STRUCT_SOC_MAC_PHY_HW_MODE_CAPS,
+ WMI_TLV_TAG_STRUCT_HAL_REG_CAPABILITIES_EXT,
+ WMI_TLV_TAG_STRUCT_SOC_HAL_REG_CAPABILITIES,
+ WMI_TLV_TAG_STRUCT_VDEV_WISA_CMD,
+ WMI_TLV_TAG_STRUCT_TX_POWER_LEVEL_STATS_EVT,
+ WMI_TLV_TAG_STRUCT_SCAN_ADAPTIVE_DWELL_PARAMETERS_TLV,
+ WMI_TLV_TAG_STRUCT_SCAN_ADAPTIVE_DWELL_CONFIG,
+ WMI_TLV_TAG_STRUCT_WOW_SET_ACTION_WAKE_UP_CMD,
+ WMI_TLV_TAG_STRUCT_NDP_END_RSP_PER_NDI,
+ WMI_TLV_TAG_STRUCT_PEER_BWF_REQUEST,
+ WMI_TLV_TAG_STRUCT_BWF_PEER_INFO,
+ WMI_TLV_TAG_STRUCT_DBGLOG_TIME_STAMP_SYNC_CMD,
+ WMI_TLV_TAG_STRUCT_RMC_SET_LEADER_CMD,
+ WMI_TLV_TAG_STRUCT_RMC_MANUAL_LEADER_EVENT,
+ WMI_TLV_TAG_STRUCT_PER_CHAIN_RSSI_STATS,
+ WMI_TLV_TAG_STRUCT_RSSI_STATS,
+ WMI_TLV_TAG_STRUCT_P2P_LO_START_CMD,
+ WMI_TLV_TAG_STRUCT_P2P_LO_STOP_CMD,
+ WMI_TLV_TAG_STRUCT_P2P_LO_STOPPED_EVENT,
+ WMI_TLV_TAG_STRUCT_PEER_REORDER_QUEUE_SETUP_CMD,
+ WMI_TLV_TAG_STRUCT_PEER_REORDER_QUEUE_REMOVE_CMD,
+ WMI_TLV_TAG_STRUCT_SET_MULTIPLE_MCAST_FILTER_CMD,
+ WMI_TLV_TAG_STRUCT_MGMT_TX_COMPL_BUNDLE_EVENT,
+ WMI_TLV_TAG_STRUCT_READ_DATA_FROM_FLASH_CMD,
+ WMI_TLV_TAG_STRUCT_READ_DATA_FROM_FLASH_EVENT,
+ WMI_TLV_TAG_STRUCT_PDEV_SET_REORDER_TIMEOUT_VAL_CMD,
+ WMI_TLV_TAG_STRUCT_PEER_SET_RX_BLOCKSIZE_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_SET_WAKEUP_CONFIG_CMDID,
+ WMI_TLV_TAG_STRUCT_TLV_BUF_LEN_PARAM,
+ WMI_TLV_TAG_STRUCT_SERVICE_AVAILABLE_EVENT,
+ WMI_TLV_TAG_STRUCT_PEER_ANTDIV_INFO_REQ_CMD,
+ WMI_TLV_TAG_STRUCT_PEER_ANTDIV_INFO_EVENT,
+ WMI_TLV_TAG_STRUCT_PEER_ANTDIV_INFO,
+ WMI_TLV_TAG_STRUCT_PDEV_GET_ANTDIV_STATUS_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_ANTDIV_STATUS_EVENT,
+ WMI_TLV_TAG_STRUCT_MNT_FILTER_CMD,
+ WMI_TLV_TAG_STRUCT_GET_CHIP_POWER_STATS_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_CHIP_POWER_STATS_EVENT,
+ WMI_TLV_TAG_STRUCT_COEX_GET_ANTENNA_ISOLATION_CMD,
+ WMI_TLV_TAG_STRUCT_COEX_REPORT_ISOLATION_EVENT,
+ WMI_TLV_TAG_STRUCT_CHAN_CCA_STATS,
+ WMI_TLV_TAG_STRUCT_PEER_SIGNAL_STATS,
+ WMI_TLV_TAG_STRUCT_TX_STATS,
+ WMI_TLV_TAG_STRUCT_PEER_AC_TX_STATS,
+ WMI_TLV_TAG_STRUCT_RX_STATS,
+ WMI_TLV_TAG_STRUCT_PEER_AC_RX_STATS,
+ WMI_TLV_TAG_STRUCT_REPORT_STATS_EVENT,
+ WMI_TLV_TAG_STRUCT_CHAN_CCA_STATS_THRESH,
+ WMI_TLV_TAG_STRUCT_PEER_SIGNAL_STATS_THRESH,
+ WMI_TLV_TAG_STRUCT_TX_STATS_THRESH,
+ WMI_TLV_TAG_STRUCT_RX_STATS_THRESH,
+ WMI_TLV_TAG_STRUCT_PDEV_SET_STATS_THRESHOLD_CMD,
+ WMI_TLV_TAG_STRUCT_REQUEST_WLAN_STATS_CMD,
+ WMI_TLV_TAG_STRUCT_RX_AGGR_FAILURE_EVENT,
+ WMI_TLV_TAG_STRUCT_RX_AGGR_FAILURE_INFO,
+ WMI_TLV_TAG_STRUCT_VDEV_ENCRYPT_DECRYPT_DATA_REQ_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_ENCRYPT_DECRYPT_DATA_RESP_EVENT,
+ WMI_TLV_TAG_STRUCT_PDEV_BAND_TO_MAC,
+ WMI_TLV_TAG_STRUCT_TBTT_OFFSET_INFO,
+ WMI_TLV_TAG_STRUCT_TBTT_OFFSET_EXT_EVENT,
+ WMI_TLV_TAG_STRUCT_SAR_LIMITS_CMD,
+ WMI_TLV_TAG_STRUCT_SAR_LIMIT_CMD_ROW,
+ WMI_TLV_TAG_STRUCT_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_DFS_PHYERR_OFFLOAD_DISABLE_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_ADFS_CH_CFG_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_ADFS_OCAC_ABORT_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_DFS_RADAR_DETECTION_EVENT,
+ WMI_TLV_TAG_STRUCT_VDEV_ADFS_OCAC_COMPLETE_EVENT,
+ WMI_TLV_TAG_STRUCT_VDEV_DFS_CAC_COMPLETE_EVENT,
+ WMI_TLV_TAG_STRUCT_VENDOR_OUI,
+ WMI_TLV_TAG_STRUCT_REQUEST_RCPI_CMD,
+ WMI_TLV_TAG_STRUCT_UPDATE_RCPI_EVENT,
+ WMI_TLV_TAG_STRUCT_REQUEST_PEER_STATS_INFO_CMD,
+ WMI_TLV_TAG_STRUCT_PEER_STATS_INFO,
+ WMI_TLV_TAG_STRUCT_PEER_STATS_INFO_EVENT,
+ WMI_TLV_TAG_STRUCT_PKGID_EVENT,
+ WMI_TLV_TAG_STRUCT_CONNECTED_NLO_RSSI_PARAMS,
+ WMI_TLV_TAG_STRUCT_SET_CURRENT_COUNTRY_CMD,
+ WMI_TLV_TAG_STRUCT_REGULATORY_RULE_STRUCT,
+ WMI_TLV_TAG_STRUCT_REG_CHAN_LIST_CC_EVENT,
+ WMI_TLV_TAG_STRUCT_11D_SCAN_START_CMD,
+ WMI_TLV_TAG_STRUCT_11D_SCAN_STOP_CMD,
+ WMI_TLV_TAG_STRUCT_11D_NEW_COUNTRY_EVENT,
+ WMI_TLV_TAG_STRUCT_REQUEST_RADIO_CHAN_STATS_CMD,
+ WMI_TLV_TAG_STRUCT_RADIO_CHAN_STATS,
+ WMI_TLV_TAG_STRUCT_RADIO_CHAN_STATS_EVENT,
+ WMI_TLV_TAG_STRUCT_ROAM_PER_CONFIG,
+ WMI_TLV_TAG_STRUCT_VDEV_ADD_MAC_ADDR_TO_RX_FILTER_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_ADD_MAC_ADDR_TO_RX_FILTER_STATUS_EVENT,
+ WMI_TLV_TAG_STRUCT_BPF_SET_VDEV_ACTIVE_MODE_CMD,
+ WMI_TLV_TAG_STRUCT_HW_DATA_FILTER_CMD,
+ WMI_TLV_TAG_STRUCT_CONNECTED_NLO_BSS_BAND_RSSI_PREF,
+ WMI_TLV_TAG_STRUCT_PEER_OPER_MODE_CHANGE_EVENT,
+ WMI_TLV_TAG_STRUCT_CHIP_POWER_SAVE_FAILURE_DETECTED,
+ WMI_TLV_TAG_STRUCT_PDEV_MULTIPLE_VDEV_RESTART_REQUEST_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_CSA_SWITCH_COUNT_STATUS_EVENT,
+ WMI_TLV_TAG_STRUCT_PDEV_UPDATE_PKT_ROUTING_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_CHECK_CAL_VERSION_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_CHECK_CAL_VERSION_EVENT,
+ WMI_TLV_TAG_STRUCT_PDEV_SET_DIVERSITY_GAIN_CMD,
+ WMI_TLV_TAG_STRUCT_MAC_PHY_CHAINMASK_COMBO,
+ WMI_TLV_TAG_STRUCT_MAC_PHY_CHAINMASK_CAPABILITY,
+ WMI_TLV_TAG_STRUCT_VDEV_SET_ARP_STATS_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_GET_ARP_STATS_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_GET_ARP_STATS_EVENT,
+ WMI_TLV_TAG_STRUCT_IFACE_OFFLOAD_STATS,
+ WMI_TLV_TAG_STRUCT_REQUEST_STATS_CMD_SUB_STRUCT_PARAM,
+ WMI_TLV_TAG_STRUCT_RSSI_CTL_EXT,
+ WMI_TLV_TAG_STRUCT_SINGLE_PHYERR_EXT_RX_HDR,
+ WMI_TLV_TAG_STRUCT_COEX_BT_ACTIVITY_EVENT,
+ WMI_TLV_TAG_STRUCT_VDEV_GET_TX_POWER_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_TX_POWER_EVENT,
+ WMI_TLV_TAG_STRUCT_OFFCHAN_DATA_TX_COMPL_EVENT,
+ WMI_TLV_TAG_STRUCT_OFFCHAN_DATA_TX_SEND_CMD,
+ WMI_TLV_TAG_STRUCT_TX_SEND_PARAMS,
+ WMI_TLV_TAG_STRUCT_HE_RATE_SET,
+ WMI_TLV_TAG_STRUCT_CONGESTION_STATS,
+ WMI_TLV_TAG_STRUCT_SET_INIT_COUNTRY_CMD,
+ WMI_TLV_TAG_STRUCT_SCAN_DBS_DUTY_CYCLE,
+ WMI_TLV_TAG_STRUCT_SCAN_DBS_DUTY_CYCLE_PARAM_TLV,
+ WMI_TLV_TAG_STRUCT_PDEV_DIV_GET_RSSI_ANTID,
+ WMI_TLV_TAG_STRUCT_THERM_THROT_CONFIG_REQUEST,
+ WMI_TLV_TAG_STRUCT_THERM_THROT_LEVEL_CONFIG_INFO,
+ WMI_TLV_TAG_STRUCT_THERM_THROT_STATS_EVENT,
+ WMI_TLV_TAG_STRUCT_THERM_THROT_LEVEL_STATS_INFO,
+ WMI_TLV_TAG_STRUCT_PDEV_DIV_RSSI_ANTID_EVENT,
+ WMI_TLV_TAG_STRUCT_OEM_DMA_RING_CAPABILITIES,
+ WMI_TLV_TAG_STRUCT_OEM_DMA_RING_CFG_REQ,
+ WMI_TLV_TAG_STRUCT_OEM_DMA_RING_CFG_RSP,
+ WMI_TLV_TAG_STRUCT_OEM_INDIRECT_DATA,
+ WMI_TLV_TAG_STRUCT_OEM_DMA_BUF_RELEASE,
+ WMI_TLV_TAG_STRUCT_OEM_DMA_BUF_RELEASE_ENTRY,
+ WMI_TLV_TAG_STRUCT_PDEV_BSS_CHAN_INFO_REQUEST,
+ WMI_TLV_TAG_STRUCT_PDEV_BSS_CHAN_INFO_EVENT,
+ WMI_TLV_TAG_STRUCT_ROAM_LCA_DISALLOW_CONFIG_TLV_PARAM,
+ WMI_TLV_TAG_STRUCT_VDEV_LIMIT_OFFCHAN_CMD,
+ WMI_TLV_TAG_STRUCT_ROAM_RSSI_REJECTION_OCE_CONFIG_PARAM,
+ WMI_TLV_TAG_STRUCT_UNIT_TEST_EVENT,
+ WMI_TLV_TAG_STRUCT_ROAM_FILS_OFFLOAD_TLV_PARAM,
+ WMI_TLV_TAG_STRUCT_PDEV_UPDATE_PMK_CACHE_CMD,
+ WMI_TLV_TAG_STRUCT_PMK_CACHE,
+ WMI_TLV_TAG_STRUCT_PDEV_UPDATE_FILS_HLP_PKT_CMD,
+ WMI_TLV_TAG_STRUCT_ROAM_FILS_SYNCH_TLV_PARAM,
+ WMI_TLV_TAG_STRUCT_GTK_OFFLOAD_EXTENDED_TLV_PARAM,
+ WMI_TLV_TAG_STRUCT_ROAM_BG_SCAN_ROAMING_PARAM,
+ WMI_TLV_TAG_STRUCT_OIC_PING_OFFLOAD_PARAMS_CMD,
+ WMI_TLV_TAG_STRUCT_OIC_PING_OFFLOAD_SET_ENABLE_CMD,
+ WMI_TLV_TAG_STRUCT_OIC_PING_HANDOFF_EVENT,
+ WMI_TLV_TAG_STRUCT_DHCP_LEASE_RENEW_OFFLOAD_CMD,
+ WMI_TLV_TAG_STRUCT_DHCP_LEASE_RENEW_EVENT,
+ WMI_TLV_TAG_STRUCT_BTM_CONFIG,
+ WMI_TLV_TAG_STRUCT_DEBUG_MESG_FW_DATA_STALL_PARAM,
+ WMI_TLV_TAG_STRUCT_WLM_CONFIG_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_UPDATE_CTLTABLE_REQUEST,
+ WMI_TLV_TAG_STRUCT_PDEV_UPDATE_CTLTABLE_EVENT,
+ WMI_TLV_TAG_STRUCT_ROAM_CND_SCORING_PARAM,
+ WMI_TLV_TAG_STRUCT_PDEV_CONFIG_VENDOR_OUI_ACTION,
+ WMI_TLV_TAG_STRUCT_VENDOR_OUI_EXT,
+ WMI_TLV_TAG_STRUCT_ROAM_SYNCH_FRAME_EVENT,
+ WMI_TLV_TAG_STRUCT_FD_SEND_FROM_HOST_CMD,
+ WMI_TLV_TAG_STRUCT_ENABLE_FILS_CMD,
+ WMI_TLV_TAG_STRUCT_HOST_SWFDA_EVENT,
+
+ WMI_TLV_TAG_MAX
+};
+
+enum wmi_tlv_service {
+ WMI_TLV_SERVICE_BEACON_OFFLOAD = 0,
+ WMI_TLV_SERVICE_SCAN_OFFLOAD,
+ WMI_TLV_SERVICE_ROAM_SCAN_OFFLOAD,
+ WMI_TLV_SERVICE_BCN_MISS_OFFLOAD,
+ WMI_TLV_SERVICE_STA_PWRSAVE,
+ WMI_TLV_SERVICE_STA_ADVANCED_PWRSAVE,
+ WMI_TLV_SERVICE_AP_UAPSD,
+ WMI_TLV_SERVICE_AP_DFS,
+ WMI_TLV_SERVICE_11AC,
+ WMI_TLV_SERVICE_BLOCKACK,
+ WMI_TLV_SERVICE_PHYERR,
+ WMI_TLV_SERVICE_BCN_FILTER,
+ WMI_TLV_SERVICE_RTT,
+ WMI_TLV_SERVICE_WOW,
+ WMI_TLV_SERVICE_RATECTRL_CACHE,
+ WMI_TLV_SERVICE_IRAM_TIDS,
+ WMI_TLV_SERVICE_ARPNS_OFFLOAD,
+ WMI_TLV_SERVICE_NLO,
+ WMI_TLV_SERVICE_GTK_OFFLOAD,
+ WMI_TLV_SERVICE_SCAN_SCH,
+ WMI_TLV_SERVICE_CSA_OFFLOAD,
+ WMI_TLV_SERVICE_CHATTER,
+ WMI_TLV_SERVICE_COEX_FREQAVOID,
+ WMI_TLV_SERVICE_PACKET_POWER_SAVE,
+ WMI_TLV_SERVICE_FORCE_FW_HANG,
+ WMI_TLV_SERVICE_GPIO,
+ WMI_TLV_SERVICE_STA_DTIM_PS_MODULATED_DTIM,
+ WMI_TLV_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG,
+ WMI_TLV_SERVICE_STA_UAPSD_VAR_AUTO_TRIG,
+ WMI_TLV_SERVICE_STA_KEEP_ALIVE,
+ WMI_TLV_SERVICE_TX_ENCAP,
+ WMI_TLV_SERVICE_AP_PS_DETECT_OUT_OF_SYNC,
+ WMI_TLV_SERVICE_EARLY_RX,
+ WMI_TLV_SERVICE_STA_SMPS,
+ WMI_TLV_SERVICE_FWTEST,
+ WMI_TLV_SERVICE_STA_WMMAC,
+ WMI_TLV_SERVICE_TDLS,
+ WMI_TLV_SERVICE_BURST,
+ WMI_TLV_SERVICE_MCC_BCN_INTERVAL_CHANGE,
+ WMI_TLV_SERVICE_ADAPTIVE_OCS,
+ WMI_TLV_SERVICE_BA_SSN_SUPPORT,
+ WMI_TLV_SERVICE_FILTER_IPSEC_NATKEEPALIVE,
+ WMI_TLV_SERVICE_WLAN_HB,
+ WMI_TLV_SERVICE_LTE_ANT_SHARE_SUPPORT,
+ WMI_TLV_SERVICE_BATCH_SCAN,
+ WMI_TLV_SERVICE_QPOWER,
+ WMI_TLV_SERVICE_PLMREQ,
+ WMI_TLV_SERVICE_THERMAL_MGMT,
+ WMI_TLV_SERVICE_RMC,
+ WMI_TLV_SERVICE_MHF_OFFLOAD,
+ WMI_TLV_SERVICE_COEX_SAR,
+ WMI_TLV_SERVICE_BCN_TXRATE_OVERRIDE,
+ WMI_TLV_SERVICE_NAN,
+ WMI_TLV_SERVICE_L1SS_STAT,
+ WMI_TLV_SERVICE_ESTIMATE_LINKSPEED,
+ WMI_TLV_SERVICE_OBSS_SCAN,
+ WMI_TLV_SERVICE_TDLS_OFFCHAN,
+ WMI_TLV_SERVICE_TDLS_UAPSD_BUFFER_STA,
+ WMI_TLV_SERVICE_TDLS_UAPSD_SLEEP_STA,
+ WMI_TLV_SERVICE_IBSS_PWRSAVE,
+ WMI_TLV_SERVICE_LPASS,
+ WMI_TLV_SERVICE_EXTSCAN,
+ WMI_TLV_SERVICE_D0WOW,
+ WMI_TLV_SERVICE_HSOFFLOAD,
+ WMI_TLV_SERVICE_ROAM_HO_OFFLOAD,
+ WMI_TLV_SERVICE_RX_FULL_REORDER,
+ WMI_TLV_SERVICE_DHCP_OFFLOAD,
+ WMI_TLV_SERVICE_STA_RX_IPA_OFFLOAD_SUPPORT,
+ WMI_TLV_SERVICE_MDNS_OFFLOAD,
+ WMI_TLV_SERVICE_SAP_AUTH_OFFLOAD,
+ WMI_TLV_SERVICE_DUAL_BAND_SIMULTANEOUS_SUPPORT,
+ WMI_TLV_SERVICE_OCB,
+ WMI_TLV_SERVICE_AP_ARPNS_OFFLOAD,
+ WMI_TLV_SERVICE_PER_BAND_CHAINMASK_SUPPORT,
+ WMI_TLV_SERVICE_PACKET_FILTER_OFFLOAD,
+ WMI_TLV_SERVICE_MGMT_TX_HTT,
+ WMI_TLV_SERVICE_MGMT_TX_WMI,
+ WMI_TLV_SERVICE_EXT_MSG,
+ WMI_TLV_SERVICE_MAWC,
+ WMI_TLV_SERVICE_PEER_ASSOC_CONF,
+ WMI_TLV_SERVICE_EGAP,
+ WMI_TLV_SERVICE_STA_PMF_OFFLOAD,
+ WMI_TLV_SERVICE_UNIFIED_WOW_CAPABILITY,
+ WMI_TLV_SERVICE_ENHANCED_PROXY_STA,
+ WMI_TLV_SERVICE_ATF,
+ WMI_TLV_SERVICE_COEX_GPIO,
+ WMI_TLV_SERVICE_AUX_SPECTRAL_INTF,
+ WMI_TLV_SERVICE_AUX_CHAN_LOAD_INTF,
+ WMI_TLV_SERVICE_BSS_CHANNEL_INFO_64,
+ WMI_TLV_SERVICE_ENTERPRISE_MESH,
+ WMI_TLV_SERVICE_RESTRT_CHNL_SUPPORT,
+ WMI_TLV_SERVICE_BPF_OFFLOAD,
+ WMI_TLV_SERVICE_SYNC_DELETE_CMDS,
+ WMI_TLV_SERVICE_SMART_ANTENNA_SW_SUPPORT,
+ WMI_TLV_SERVICE_SMART_ANTENNA_HW_SUPPORT,
+ WMI_TLV_SERVICE_RATECTRL_LIMIT_MAX_MIN_RATES,
+ WMI_TLV_SERVICE_NAN_DATA,
+ WMI_TLV_SERVICE_NAN_RTT,
+ WMI_TLV_SERVICE_11AX,
+ WMI_TLV_SERVICE_DEPRECATED_REPLACE,
+ WMI_TLV_SERVICE_TDLS_CONN_TRACKER_IN_HOST_MODE,
+ WMI_TLV_SERVICE_ENHANCED_MCAST_FILTER,
+ WMI_TLV_SERVICE_PERIODIC_CHAN_STAT_SUPPORT,
+ WMI_TLV_SERVICE_MESH_11S,
+ WMI_TLV_SERVICE_HALF_RATE_QUARTER_RATE_SUPPORT,
+ WMI_TLV_SERVICE_VDEV_RX_FILTER,
+ WMI_TLV_SERVICE_P2P_LISTEN_OFFLOAD_SUPPORT,
+ WMI_TLV_SERVICE_MARK_FIRST_WAKEUP_PACKET,
+ WMI_TLV_SERVICE_MULTIPLE_MCAST_FILTER_SET,
+ WMI_TLV_SERVICE_HOST_MANAGED_RX_REORDER,
+ WMI_TLV_SERVICE_FLASH_RDWR_SUPPORT,
+ WMI_TLV_SERVICE_WLAN_STATS_REPORT,
+ WMI_TLV_SERVICE_TX_MSDU_ID_NEW_PARTITION_SUPPORT,
+ WMI_TLV_SERVICE_DFS_PHYERR_OFFLOAD,
+ WMI_TLV_SERVICE_RCPI_SUPPORT,
+ WMI_TLV_SERVICE_FW_MEM_DUMP_SUPPORT,
+ WMI_TLV_SERVICE_PEER_STATS_INFO,
+ WMI_TLV_SERVICE_REGULATORY_DB,
+ WMI_TLV_SERVICE_11D_OFFLOAD,
+ WMI_TLV_SERVICE_HW_DATA_FILTERING,
+ WMI_TLV_SERVICE_MULTIPLE_VDEV_RESTART,
+ WMI_TLV_SERVICE_PKT_ROUTING,
+ WMI_TLV_SERVICE_CHECK_CAL_VERSION,
+ WMI_TLV_SERVICE_OFFCHAN_TX_WMI,
+ WMI_TLV_SERVICE_8SS_TX_BFEE,
+ WMI_TLV_SERVICE_EXTENDED_NSS_SUPPORT,
+ WMI_TLV_SERVICE_ACK_TIMEOUT,
+ WMI_TLV_SERVICE_PDEV_BSS_CHANNEL_INFO_64,
+ WMI_TLV_MAX_SERVICE = 128,
+
+/* NOTE:
+ * The above service flags are delivered in the wmi_service_bitmap field
+ * of the WMI_TLV_SERVICE_READY_EVENT message.
+ * The below service flags are delivered in a WMI_TLV_SERVICE_AVAILABLE_EVENT
+ * message rather than in the WMI_TLV_SERVICE_READY_EVENT message's
+ * wmi_service_bitmap field.
+ * The WMI_TLV_SERVICE_AVAILABLE_EVENT message immediately precedes the
+ * WMI_TLV_SERVICE_READY_EVENT message.
+ */
+
+ WMI_TLV_SERVICE_CHAN_LOAD_INFO = 128,
+ WMI_TLV_SERVICE_TX_PPDU_INFO_STATS_SUPPORT,
+ WMI_TLV_SERVICE_VDEV_LIMIT_OFFCHAN_SUPPORT,
+ WMI_TLV_SERVICE_FILS_SUPPORT,
+ WMI_TLV_SERVICE_WLAN_OIC_PING_OFFLOAD,
+ WMI_TLV_SERVICE_WLAN_DHCP_RENEW,
+ WMI_TLV_SERVICE_MAWC_SUPPORT,
+ WMI_TLV_SERVICE_VDEV_LATENCY_CONFIG,
+ WMI_TLV_SERVICE_PDEV_UPDATE_CTLTABLE_SUPPORT,
+ WMI_TLV_SERVICE_PKTLOG_SUPPORT_OVER_HTT,
+ WMI_TLV_SERVICE_VDEV_MULTI_GROUP_KEY_SUPPORT,
+ WMI_TLV_SERVICE_SCAN_PHYMODE_SUPPORT,
+ WMI_TLV_SERVICE_THERM_THROT,
+ WMI_TLV_SERVICE_BCN_OFFLOAD_START_STOP_SUPPORT,
+ WMI_TLV_SERVICE_WOW_WAKEUP_BY_TIMER_PATTERN,
+ WMI_TLV_SERVICE_PEER_MAP_UNMAP_V2_SUPPORT = 143,
+ WMI_TLV_SERVICE_OFFCHAN_DATA_TID_SUPPORT = 144,
+ WMI_TLV_SERVICE_RX_PROMISC_ENABLE_SUPPORT = 145,
+ WMI_TLV_SERVICE_SUPPORT_DIRECT_DMA = 146,
+ WMI_TLV_SERVICE_AP_OBSS_DETECTION_OFFLOAD = 147,
+ WMI_TLV_SERVICE_11K_NEIGHBOUR_REPORT_SUPPORT = 148,
+ WMI_TLV_SERVICE_LISTEN_INTERVAL_OFFLOAD_SUPPORT = 149,
+ WMI_TLV_SERVICE_BSS_COLOR_OFFLOAD = 150,
+ WMI_TLV_SERVICE_RUNTIME_DPD_RECAL = 151,
+ WMI_TLV_SERVICE_STA_TWT = 152,
+ WMI_TLV_SERVICE_AP_TWT = 153,
+ WMI_TLV_SERVICE_GMAC_OFFLOAD_SUPPORT = 154,
+ WMI_TLV_SERVICE_SPOOF_MAC_SUPPORT = 155,
+ WMI_TLV_SERVICE_PEER_TID_CONFIGS_SUPPORT = 156,
+ WMI_TLV_SERVICE_VDEV_SWRETRY_PER_AC_CONFIG_SUPPORT = 157,
+ WMI_TLV_SERVICE_DUAL_BEACON_ON_SINGLE_MAC_SCC_SUPPORT = 158,
+ WMI_TLV_SERVICE_DUAL_BEACON_ON_SINGLE_MAC_MCC_SUPPORT = 159,
+ WMI_TLV_SERVICE_MOTION_DET = 160,
+ WMI_TLV_SERVICE_INFRA_MBSSID = 161,
+ WMI_TLV_SERVICE_OBSS_SPATIAL_REUSE = 162,
+ WMI_TLV_SERVICE_VDEV_DIFFERENT_BEACON_INTERVAL_SUPPORT = 163,
+ WMI_TLV_SERVICE_NAN_DBS_SUPPORT = 164,
+ WMI_TLV_SERVICE_NDI_DBS_SUPPORT = 165,
+ WMI_TLV_SERVICE_NAN_SAP_SUPPORT = 166,
+ WMI_TLV_SERVICE_NDI_SAP_SUPPORT = 167,
+ WMI_TLV_SERVICE_CFR_CAPTURE_SUPPORT = 168,
+ WMI_TLV_SERVICE_CFR_CAPTURE_IND_MSG_TYPE_1 = 169,
+ WMI_TLV_SERVICE_ESP_SUPPORT = 170,
+ WMI_TLV_SERVICE_PEER_CHWIDTH_CHANGE = 171,
+ WMI_TLV_SERVICE_WLAN_HPCS_PULSE = 172,
+ WMI_TLV_SERVICE_PER_VDEV_CHAINMASK_CONFIG_SUPPORT = 173,
+ WMI_TLV_SERVICE_TX_DATA_MGMT_ACK_RSSI = 174,
+ WMI_TLV_SERVICE_NAN_DISABLE_SUPPORT = 175,
+ WMI_TLV_SERVICE_HTT_H2T_NO_HTC_HDR_LEN_IN_MSG_LEN = 176,
+ WMI_TLV_SERVICE_COEX_SUPPORT_UNEQUAL_ISOLATION = 177,
+ WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT = 178,
+ WMI_TLV_SERVICE_SUPPORT_EXTEND_ADDRESS = 179,
+
+ WMI_TLV_MAX_EXT_SERVICE = 256,
+};
+
+#define WMI_TLV_EXT_SERVICE_IS_ENABLED(wmi_svc_bmap, svc_id, len) \
+ ((svc_id) < (WMI_TLV_MAX_EXT_SERVICE) && \
+ (svc_id) >= (len) && \
+ __le32_to_cpu((wmi_svc_bmap)[((svc_id) - (len)) / 32]) & \
+ BIT(((((svc_id) - (len)) % 32) & 0x1f)))
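+
+/* Worked example (for illustration): with svc_id = WMI_TLV_SERVICE_THERM_THROT
+ * (140) and len = WMI_TLV_MAX_SERVICE (128), the relative id is 140 - 128 = 12,
+ * so the macro tests bit 12 % 32 = 12 of word 12 / 32 = 0 in the extended
+ * service bitmap delivered by the WMI_TLV_SERVICE_AVAILABLE_EVENT message.
+ */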
+
+#define SVCMAP(x, y, len) \
+ do { \
+ if ((WMI_SERVICE_IS_ENABLED((in), (x), (len))) || \
+ (WMI_TLV_EXT_SERVICE_IS_ENABLED((in), (x), (len)))) \
+ __set_bit(y, out); \
+ } while (0)
+
+static inline void
+wmi_tlv_svc_map(const __le32 *in, unsigned long *out, size_t len)
+{
+ SVCMAP(WMI_TLV_SERVICE_BEACON_OFFLOAD,
+ WMI_SERVICE_BEACON_OFFLOAD, len);
+ SVCMAP(WMI_TLV_SERVICE_SCAN_OFFLOAD,
+ WMI_SERVICE_SCAN_OFFLOAD, len);
+ SVCMAP(WMI_TLV_SERVICE_ROAM_SCAN_OFFLOAD,
+ WMI_SERVICE_ROAM_SCAN_OFFLOAD, len);
+ SVCMAP(WMI_TLV_SERVICE_BCN_MISS_OFFLOAD,
+ WMI_SERVICE_BCN_MISS_OFFLOAD, len);
+ SVCMAP(WMI_TLV_SERVICE_STA_PWRSAVE,
+ WMI_SERVICE_STA_PWRSAVE, len);
+ SVCMAP(WMI_TLV_SERVICE_STA_ADVANCED_PWRSAVE,
+ WMI_SERVICE_STA_ADVANCED_PWRSAVE, len);
+ SVCMAP(WMI_TLV_SERVICE_AP_UAPSD,
+ WMI_SERVICE_AP_UAPSD, len);
+ SVCMAP(WMI_TLV_SERVICE_AP_DFS,
+ WMI_SERVICE_AP_DFS, len);
+ SVCMAP(WMI_TLV_SERVICE_11AC,
+ WMI_SERVICE_11AC, len);
+ SVCMAP(WMI_TLV_SERVICE_BLOCKACK,
+ WMI_SERVICE_BLOCKACK, len);
+ SVCMAP(WMI_TLV_SERVICE_PHYERR,
+ WMI_SERVICE_PHYERR, len);
+ SVCMAP(WMI_TLV_SERVICE_BCN_FILTER,
+ WMI_SERVICE_BCN_FILTER, len);
+ SVCMAP(WMI_TLV_SERVICE_RTT,
+ WMI_SERVICE_RTT, len);
+ SVCMAP(WMI_TLV_SERVICE_WOW,
+ WMI_SERVICE_WOW, len);
+ SVCMAP(WMI_TLV_SERVICE_RATECTRL_CACHE,
+ WMI_SERVICE_RATECTRL_CACHE, len);
+ SVCMAP(WMI_TLV_SERVICE_IRAM_TIDS,
+ WMI_SERVICE_IRAM_TIDS, len);
+ SVCMAP(WMI_TLV_SERVICE_ARPNS_OFFLOAD,
+ WMI_SERVICE_ARPNS_OFFLOAD, len);
+ SVCMAP(WMI_TLV_SERVICE_NLO,
+ WMI_SERVICE_NLO, len);
+ SVCMAP(WMI_TLV_SERVICE_GTK_OFFLOAD,
+ WMI_SERVICE_GTK_OFFLOAD, len);
+ SVCMAP(WMI_TLV_SERVICE_SCAN_SCH,
+ WMI_SERVICE_SCAN_SCH, len);
+ SVCMAP(WMI_TLV_SERVICE_CSA_OFFLOAD,
+ WMI_SERVICE_CSA_OFFLOAD, len);
+ SVCMAP(WMI_TLV_SERVICE_CHATTER,
+ WMI_SERVICE_CHATTER, len);
+ SVCMAP(WMI_TLV_SERVICE_COEX_FREQAVOID,
+ WMI_SERVICE_COEX_FREQAVOID, len);
+ SVCMAP(WMI_TLV_SERVICE_PACKET_POWER_SAVE,
+ WMI_SERVICE_PACKET_POWER_SAVE, len);
+ SVCMAP(WMI_TLV_SERVICE_FORCE_FW_HANG,
+ WMI_SERVICE_FORCE_FW_HANG, len);
+ SVCMAP(WMI_TLV_SERVICE_GPIO,
+ WMI_SERVICE_GPIO, len);
+ SVCMAP(WMI_TLV_SERVICE_STA_DTIM_PS_MODULATED_DTIM,
+ WMI_SERVICE_STA_DTIM_PS_MODULATED_DTIM, len);
+ SVCMAP(WMI_TLV_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG,
+ WMI_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG, len);
+ SVCMAP(WMI_TLV_SERVICE_STA_UAPSD_VAR_AUTO_TRIG,
+ WMI_SERVICE_STA_UAPSD_VAR_AUTO_TRIG, len);
+ SVCMAP(WMI_TLV_SERVICE_STA_KEEP_ALIVE,
+ WMI_SERVICE_STA_KEEP_ALIVE, len);
+ SVCMAP(WMI_TLV_SERVICE_TX_ENCAP,
+ WMI_SERVICE_TX_ENCAP, len);
+ SVCMAP(WMI_TLV_SERVICE_AP_PS_DETECT_OUT_OF_SYNC,
+ WMI_SERVICE_AP_PS_DETECT_OUT_OF_SYNC, len);
+ SVCMAP(WMI_TLV_SERVICE_EARLY_RX,
+ WMI_SERVICE_EARLY_RX, len);
+ SVCMAP(WMI_TLV_SERVICE_STA_SMPS,
+ WMI_SERVICE_STA_SMPS, len);
+ SVCMAP(WMI_TLV_SERVICE_FWTEST,
+ WMI_SERVICE_FWTEST, len);
+ SVCMAP(WMI_TLV_SERVICE_STA_WMMAC,
+ WMI_SERVICE_STA_WMMAC, len);
+ SVCMAP(WMI_TLV_SERVICE_TDLS,
+ WMI_SERVICE_TDLS, len);
+ SVCMAP(WMI_TLV_SERVICE_BURST,
+ WMI_SERVICE_BURST, len);
+ SVCMAP(WMI_TLV_SERVICE_MCC_BCN_INTERVAL_CHANGE,
+ WMI_SERVICE_MCC_BCN_INTERVAL_CHANGE, len);
+ SVCMAP(WMI_TLV_SERVICE_ADAPTIVE_OCS,
+ WMI_SERVICE_ADAPTIVE_OCS, len);
+ SVCMAP(WMI_TLV_SERVICE_BA_SSN_SUPPORT,
+ WMI_SERVICE_BA_SSN_SUPPORT, len);
+ SVCMAP(WMI_TLV_SERVICE_FILTER_IPSEC_NATKEEPALIVE,
+ WMI_SERVICE_FILTER_IPSEC_NATKEEPALIVE, len);
+ SVCMAP(WMI_TLV_SERVICE_WLAN_HB,
+ WMI_SERVICE_WLAN_HB, len);
+ SVCMAP(WMI_TLV_SERVICE_LTE_ANT_SHARE_SUPPORT,
+ WMI_SERVICE_LTE_ANT_SHARE_SUPPORT, len);
+ SVCMAP(WMI_TLV_SERVICE_BATCH_SCAN,
+ WMI_SERVICE_BATCH_SCAN, len);
+ SVCMAP(WMI_TLV_SERVICE_QPOWER,
+ WMI_SERVICE_QPOWER, len);
+ SVCMAP(WMI_TLV_SERVICE_PLMREQ,
+ WMI_SERVICE_PLMREQ, len);
+ SVCMAP(WMI_TLV_SERVICE_THERMAL_MGMT,
+ WMI_SERVICE_THERMAL_MGMT, len);
+ SVCMAP(WMI_TLV_SERVICE_RMC,
+ WMI_SERVICE_RMC, len);
+ SVCMAP(WMI_TLV_SERVICE_MHF_OFFLOAD,
+ WMI_SERVICE_MHF_OFFLOAD, len);
+ SVCMAP(WMI_TLV_SERVICE_COEX_SAR,
+ WMI_SERVICE_COEX_SAR, len);
+ SVCMAP(WMI_TLV_SERVICE_BCN_TXRATE_OVERRIDE,
+ WMI_SERVICE_BCN_TXRATE_OVERRIDE, len);
+ SVCMAP(WMI_TLV_SERVICE_NAN,
+ WMI_SERVICE_NAN, len);
+ SVCMAP(WMI_TLV_SERVICE_L1SS_STAT,
+ WMI_SERVICE_L1SS_STAT, len);
+ SVCMAP(WMI_TLV_SERVICE_ESTIMATE_LINKSPEED,
+ WMI_SERVICE_ESTIMATE_LINKSPEED, len);
+ SVCMAP(WMI_TLV_SERVICE_OBSS_SCAN,
+ WMI_SERVICE_OBSS_SCAN, len);
+ SVCMAP(WMI_TLV_SERVICE_TDLS_OFFCHAN,
+ WMI_SERVICE_TDLS_OFFCHAN, len);
+ SVCMAP(WMI_TLV_SERVICE_TDLS_UAPSD_BUFFER_STA,
+ WMI_SERVICE_TDLS_UAPSD_BUFFER_STA, len);
+ SVCMAP(WMI_TLV_SERVICE_TDLS_UAPSD_SLEEP_STA,
+ WMI_SERVICE_TDLS_UAPSD_SLEEP_STA, len);
+ SVCMAP(WMI_TLV_SERVICE_IBSS_PWRSAVE,
+ WMI_SERVICE_IBSS_PWRSAVE, len);
+ SVCMAP(WMI_TLV_SERVICE_LPASS,
+ WMI_SERVICE_LPASS, len);
+ SVCMAP(WMI_TLV_SERVICE_EXTSCAN,
+ WMI_SERVICE_EXTSCAN, len);
+ SVCMAP(WMI_TLV_SERVICE_D0WOW,
+ WMI_SERVICE_D0WOW, len);
+ SVCMAP(WMI_TLV_SERVICE_HSOFFLOAD,
+ WMI_SERVICE_HSOFFLOAD, len);
+ SVCMAP(WMI_TLV_SERVICE_ROAM_HO_OFFLOAD,
+ WMI_SERVICE_ROAM_HO_OFFLOAD, len);
+ SVCMAP(WMI_TLV_SERVICE_RX_FULL_REORDER,
+ WMI_SERVICE_RX_FULL_REORDER, len);
+ SVCMAP(WMI_TLV_SERVICE_DHCP_OFFLOAD,
+ WMI_SERVICE_DHCP_OFFLOAD, len);
+ SVCMAP(WMI_TLV_SERVICE_STA_RX_IPA_OFFLOAD_SUPPORT,
+ WMI_SERVICE_STA_RX_IPA_OFFLOAD_SUPPORT, len);
+ SVCMAP(WMI_TLV_SERVICE_MDNS_OFFLOAD,
+ WMI_SERVICE_MDNS_OFFLOAD, len);
+ SVCMAP(WMI_TLV_SERVICE_SAP_AUTH_OFFLOAD,
+ WMI_SERVICE_SAP_AUTH_OFFLOAD, len);
+ SVCMAP(WMI_TLV_SERVICE_MGMT_TX_WMI,
+ WMI_SERVICE_MGMT_TX_WMI, len);
+ SVCMAP(WMI_TLV_SERVICE_MESH_11S,
+ WMI_SERVICE_MESH_11S, len);
+ SVCMAP(WMI_TLV_SERVICE_SYNC_DELETE_CMDS,
+ WMI_SERVICE_SYNC_DELETE_CMDS, len);
+ SVCMAP(WMI_TLV_SERVICE_PEER_STATS_INFO,
+ WMI_SERVICE_PEER_STATS, len);
+}
+
+static inline void
+wmi_tlv_svc_map_ext(const __le32 *in, unsigned long *out, size_t len)
+{
+ SVCMAP(WMI_TLV_SERVICE_SPOOF_MAC_SUPPORT,
+ WMI_SERVICE_SPOOF_MAC_SUPPORT,
+ WMI_TLV_MAX_SERVICE);
+ SVCMAP(WMI_TLV_SERVICE_THERM_THROT,
+ WMI_SERVICE_THERM_THROT,
+ WMI_TLV_MAX_SERVICE);
+ SVCMAP(WMI_TLV_SERVICE_TX_DATA_MGMT_ACK_RSSI,
+ WMI_SERVICE_TX_DATA_ACK_RSSI, WMI_TLV_MAX_SERVICE);
+ SVCMAP(WMI_TLV_SERVICE_SUPPORT_EXTEND_ADDRESS,
+ WMI_SERVICE_SUPPORT_EXTEND_ADDRESS,
+ WMI_TLV_MAX_SERVICE);
+}
+
+#undef SVCMAP
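+
+/* Usage sketch (illustrative only; "svc_bmap", "ext_bmap" and the length
+ * variables are hypothetical, and WMI_SERVICE_MAX is assumed to come from
+ * wmi.h): a caller holding both firmware bitmaps could populate the host
+ * service map roughly like this, mapping the regular bitmap first and the
+ * extended one on top:
+ *
+ *   unsigned long svc_map[BITS_TO_LONGS(WMI_SERVICE_MAX)] = {};
+ *
+ *   wmi_tlv_svc_map(svc_bmap, svc_map, svc_bmap_len);
+ *   wmi_tlv_svc_map_ext(ext_bmap, svc_map, ext_bmap_len);
+ */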
+
+struct wmi_tlv {
+ __le16 len;
+ __le16 tag;
+ u8 value[];
+} __packed;
+
+struct ath10k_mgmt_tx_pkt_addr {
+ void *vaddr;
+ dma_addr_t paddr;
+};
+
+struct chan_info_params {
+ u32 err_code;
+ u32 freq;
+ u32 cmd_flags;
+ u32 noise_floor;
+ u32 rx_clear_count;
+ u32 cycle_count;
+ u32 mac_clk_mhz;
+};
+
+#define WMI_TLV_FLAG_MGMT_BUNDLE_TX_COMPL BIT(9)
+
+struct wmi_tlv_chan_info_event {
+ __le32 err_code;
+ __le32 freq;
+ __le32 cmd_flags;
+ __le32 noise_floor;
+ __le32 rx_clear_count;
+ __le32 cycle_count;
+ __le32 chan_tx_pwr_range;
+ __le32 chan_tx_pwr_tp;
+ __le32 rx_frame_count;
+ __le32 my_bss_rx_cycle_count;
+ __le32 rx_11b_mode_data_duration;
+ __le32 tx_frame_cnt;
+ __le32 mac_clk_mhz;
+} __packed;
+
+struct wmi_tlv_mgmt_tx_compl_ev {
+ __le32 desc_id;
+ __le32 status;
+ __le32 pdev_id;
+ __le32 ppdu_id;
+ __le32 ack_rssi;
+};
+
+#define WMI_TLV_MGMT_RX_NUM_RSSI 4
+
+struct wmi_tlv_mgmt_rx_ev {
+ __le32 channel;
+ __le32 snr;
+ __le32 rate;
+ __le32 phy_mode;
+ __le32 buf_len;
+ __le32 status;
+ __le32 rssi[WMI_TLV_MGMT_RX_NUM_RSSI];
+} __packed;
+
+struct wmi_tlv_abi_version {
+ __le32 abi_ver0;
+ __le32 abi_ver1;
+ __le32 abi_ver_ns0;
+ __le32 abi_ver_ns1;
+ __le32 abi_ver_ns2;
+ __le32 abi_ver_ns3;
+} __packed;
+
+enum wmi_tlv_hw_bd_id {
+ WMI_TLV_HW_BD_LEGACY = 0,
+ WMI_TLV_HW_BD_QCA6174 = 1,
+ WMI_TLV_HW_BD_QCA2582 = 2,
+};
+
+struct wmi_tlv_hw_bd_info {
+ u8 rev;
+ u8 project_id;
+ u8 custom_id;
+ u8 reference_design_id;
+} __packed;
+
+struct wmi_tlv_svc_rdy_ev {
+ __le32 fw_build_vers;
+ struct wmi_tlv_abi_version abi;
+ __le32 phy_capability;
+ __le32 max_frag_entry;
+ __le32 num_rf_chains;
+ __le32 ht_cap_info;
+ __le32 vht_cap_info;
+ __le32 vht_supp_mcs;
+ __le32 hw_min_tx_power;
+ __le32 hw_max_tx_power;
+ __le32 sys_cap_info;
+ __le32 min_pkt_size_enable;
+ __le32 max_bcn_ie_size;
+ __le32 num_mem_reqs;
+ __le32 max_num_scan_chans;
+ __le32 hw_bd_id; /* 0 means hw_bd_info is invalid */
+ struct wmi_tlv_hw_bd_info hw_bd_info[5];
+} __packed;
+
+struct wmi_tlv_rdy_ev {
+ struct wmi_tlv_abi_version abi;
+ struct wmi_mac_addr mac_addr;
+ __le32 status;
+} __packed;
+
+struct wmi_tlv_resource_config {
+ __le32 num_vdevs;
+ __le32 num_peers;
+ __le32 num_offload_peers;
+ __le32 num_offload_reorder_bufs;
+ __le32 num_peer_keys;
+ __le32 num_tids;
+ __le32 ast_skid_limit;
+ __le32 tx_chain_mask;
+ __le32 rx_chain_mask;
+ __le32 rx_timeout_pri[4];
+ __le32 rx_decap_mode;
+ __le32 scan_max_pending_reqs;
+ __le32 bmiss_offload_max_vdev;
+ __le32 roam_offload_max_vdev;
+ __le32 roam_offload_max_ap_profiles;
+ __le32 num_mcast_groups;
+ __le32 num_mcast_table_elems;
+ __le32 mcast2ucast_mode;
+ __le32 tx_dbg_log_size;
+ __le32 num_wds_entries;
+ __le32 dma_burst_size;
+ __le32 mac_aggr_delim;
+ __le32 rx_skip_defrag_timeout_dup_detection_check;
+ __le32 vow_config;
+ __le32 gtk_offload_max_vdev;
+ __le32 num_msdu_desc;
+ __le32 max_frag_entries;
+ __le32 num_tdls_vdevs;
+ __le32 num_tdls_conn_table_entries;
+ __le32 beacon_tx_offload_max_vdev;
+ __le32 num_multicast_filter_entries;
+ __le32 num_wow_filters;
+ __le32 num_keep_alive_pattern;
+ __le32 keep_alive_pattern_size;
+ __le32 max_tdls_concurrent_sleep_sta;
+ __le32 max_tdls_concurrent_buffer_sta;
+ __le32 wmi_send_separate;
+ __le32 num_ocb_vdevs;
+ __le32 num_ocb_channels;
+ __le32 num_ocb_schedules;
+ __le32 host_capab;
+} __packed;
+
+/* structure describing host memory chunk. */
+struct host_memory_chunk_tlv {
+ /* id of the request that is passed up in service ready */
+ __le32 req_id;
+
+ /* the physical address of the memory chunk */
+ __le32 ptr;
+
+ /* size of the chunk */
+ __le32 size;
+
+ /* upper 32 bits of the address; valid only for targets with more than 32-bit addressing */
+ __le32 ptr_high;
+} __packed;
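+
+/* Sketch (illustrative only; "chunk" and "paddr" are hypothetical locals):
+ * a 64-bit DMA address is split across the ptr/ptr_high pair above, e.g.
+ *
+ *   chunk->ptr = __cpu_to_le32(lower_32_bits(paddr));
+ *   chunk->ptr_high = __cpu_to_le32(upper_32_bits(paddr));
+ */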
+
+struct wmi_tlv_init_cmd {
+ struct wmi_tlv_abi_version abi;
+ __le32 num_host_mem_chunks;
+} __packed;
+
+struct wmi_tlv_pdev_get_temp_cmd {
+ __le32 pdev_id; /* not used */
+} __packed;
+
+struct wmi_tlv_pdev_temperature_event {
+ __le32 tlv_hdr;
+ /* temperature value in degrees Celsius */
+ __le32 temperature;
+ __le32 pdev_id;
+} __packed;
+
+struct wmi_tlv_pdev_set_param_cmd {
+ __le32 pdev_id; /* not used yet */
+ __le32 param_id;
+ __le32 param_value;
+} __packed;
+
+struct wmi_tlv_pdev_set_rd_cmd {
+ __le32 pdev_id; /* not used yet */
+ __le32 regd;
+ __le32 regd_2ghz;
+ __le32 regd_5ghz;
+ __le32 conform_limit_2ghz;
+ __le32 conform_limit_5ghz;
+} __packed;
+
+struct wmi_tlv_scan_chan_list_cmd {
+ __le32 num_scan_chans;
+} __packed;
+
+struct wmi_scan_prob_req_oui_cmd {
+/* OUI to be used in the Probe Request frame when a random MAC address is
+ * requested as part of the scan parameters. This applies to both FW internal
+ * scans and host initiated scans. The host can request a random MAC address
+ * with the WMI_SCAN_ADD_SPOOFED_MAC_IN_PROBE_REQ flag.
+ */
+ __le32 prob_req_oui;
+} __packed;
+
+struct wmi_tlv_start_scan_cmd {
+ struct wmi_start_scan_common common;
+ __le32 burst_duration_ms;
+ __le32 num_channels;
+ __le32 num_bssids;
+ __le32 num_ssids;
+ __le32 ie_len;
+ __le32 num_probes;
+ struct wmi_mac_addr mac_addr;
+ struct wmi_mac_addr mac_mask;
+} __packed;
+
+enum wmi_tlv_vdev_subtype {
+ WMI_TLV_VDEV_SUBTYPE_NONE = 0,
+ WMI_TLV_VDEV_SUBTYPE_P2P_DEV = 1,
+ WMI_TLV_VDEV_SUBTYPE_P2P_CLI = 2,
+ WMI_TLV_VDEV_SUBTYPE_P2P_GO = 3,
+ WMI_TLV_VDEV_SUBTYPE_PROXY_STA = 4,
+ WMI_TLV_VDEV_SUBTYPE_MESH = 5,
+ WMI_TLV_VDEV_SUBTYPE_MESH_11S = 6,
+};
+
+struct wmi_tlv_vdev_start_cmd {
+ __le32 vdev_id;
+ __le32 requestor_id;
+ __le32 bcn_intval;
+ __le32 dtim_period;
+ __le32 flags;
+ struct wmi_ssid ssid;
+ __le32 bcn_tx_rate;
+ __le32 bcn_tx_power;
+ __le32 num_noa_descr;
+ __le32 disable_hw_ack;
+} __packed;
+
+enum {
+ WMI_TLV_PEER_TYPE_DEFAULT = 0, /* generic / non-BSS / self-peer */
+ WMI_TLV_PEER_TYPE_BSS = 1,
+ WMI_TLV_PEER_TYPE_TDLS = 2,
+ WMI_TLV_PEER_TYPE_HOST_MAX = 127,
+ WMI_TLV_PEER_TYPE_ROAMOFFLOAD_TMP = 128,
+};
+
+struct wmi_tlv_peer_create_cmd {
+ __le32 vdev_id;
+ struct wmi_mac_addr peer_addr;
+ __le32 peer_type;
+} __packed;
+
+struct wmi_tlv_peer_assoc_cmd {
+ struct wmi_mac_addr mac_addr;
+ __le32 vdev_id;
+ __le32 new_assoc;
+ __le32 assoc_id;
+ __le32 flags;
+ __le32 caps;
+ __le32 listen_intval;
+ __le32 ht_caps;
+ __le32 max_mpdu;
+ __le32 mpdu_density;
+ __le32 rate_caps;
+ __le32 nss;
+ __le32 vht_caps;
+ __le32 phy_mode;
+ __le32 ht_info[2];
+ __le32 num_legacy_rates;
+ __le32 num_ht_rates;
+} __packed;
+
+struct wmi_tlv_pdev_suspend {
+ __le32 pdev_id; /* not used yet */
+ __le32 opt;
+} __packed;
+
+struct wmi_tlv_pdev_set_wmm_cmd {
+ __le32 pdev_id; /* not used yet */
+ __le32 dg_type; /* no idea.. */
+} __packed;
+
+struct wmi_tlv_vdev_wmm_params {
+ __le32 dummy;
+ struct wmi_wmm_params params;
+} __packed;
+
+struct wmi_tlv_vdev_set_wmm_cmd {
+ __le32 vdev_id;
+ struct wmi_tlv_vdev_wmm_params vdev_wmm_params[4];
+} __packed;
+
+struct wmi_tlv_phyerr_ev {
+ __le32 num_phyerrs;
+ __le32 tsf_l32;
+ __le32 tsf_u32;
+ __le32 buf_len;
+} __packed;
+
+enum wmi_tlv_dbglog_param {
+ WMI_TLV_DBGLOG_PARAM_LOG_LEVEL = 1,
+ WMI_TLV_DBGLOG_PARAM_VDEV_ENABLE,
+ WMI_TLV_DBGLOG_PARAM_VDEV_DISABLE,
+ WMI_TLV_DBGLOG_PARAM_VDEV_ENABLE_BITMAP,
+ WMI_TLV_DBGLOG_PARAM_VDEV_DISABLE_BITMAP,
+};
+
+enum wmi_tlv_dbglog_log_level {
+ WMI_TLV_DBGLOG_LOG_LEVEL_VERBOSE = 0,
+ WMI_TLV_DBGLOG_LOG_LEVEL_INFO,
+ WMI_TLV_DBGLOG_LOG_LEVEL_INFO_LVL_1,
+ WMI_TLV_DBGLOG_LOG_LEVEL_INFO_LVL_2,
+ WMI_TLV_DBGLOG_LOG_LEVEL_WARN,
+ WMI_TLV_DBGLOG_LOG_LEVEL_ERR,
+};
+
+#define WMI_TLV_DBGLOG_BITMAP_MAX_IDS 512
+#define WMI_TLV_DBGLOG_BITMAP_MAX_WORDS (WMI_TLV_DBGLOG_BITMAP_MAX_IDS / \
+ sizeof(__le32))
+#define WMI_TLV_DBGLOG_ALL_MODULES 0xffff
+#define WMI_TLV_DBGLOG_LOG_LEVEL_VALUE(module_id, log_level) \
+ (((module_id << 16) & 0xffff0000) | \
+ ((log_level << 0) & 0x000000ff))
+
+struct wmi_tlv_dbglog_cmd {
+ __le32 param;
+ __le32 value;
+} __packed;
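+
+/* Example (illustrative; "cmd" is a hypothetical pointer to the struct
+ * above): enabling WARN level logging for all modules with the value macro:
+ *
+ *   cmd->param = __cpu_to_le32(WMI_TLV_DBGLOG_PARAM_LOG_LEVEL);
+ *   cmd->value = __cpu_to_le32(WMI_TLV_DBGLOG_LOG_LEVEL_VALUE(
+ *                              WMI_TLV_DBGLOG_ALL_MODULES,
+ *                              WMI_TLV_DBGLOG_LOG_LEVEL_WARN));
+ */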
+
+struct wmi_tlv_resume_cmd {
+ __le32 reserved;
+} __packed;
+
+struct wmi_tlv_req_stats_cmd {
+ __le32 stats_id; /* wmi_stats_id */
+ __le32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+} __packed;
+
+#define WMI_TLV_PEER_RX_DURATION_HIGH_VALID_BIT 31
+#define WMI_TLV_PEER_RX_DURATION_HIGH_MASK GENMASK(30, 0)
+#define WMI_TLV_PEER_RX_DURATION_SHIFT 32
+
+struct wmi_tlv_peer_stats_extd {
+ struct wmi_mac_addr peer_macaddr;
+ __le32 rx_duration;
+ __le32 peer_tx_bytes;
+ __le32 peer_rx_bytes;
+ __le32 last_tx_rate_code;
+ __le32 last_tx_power;
+ __le32 rx_mc_bc_cnt;
+ __le32 rx_duration_high;
+ __le32 reserved[2];
+} __packed;
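+
+/* Sketch (illustrative only; "ev" is a hypothetical pointer to the event
+ * structure above): the 64-bit rx duration is reconstructed from the split
+ * fields, where the high word is only meaningful when the valid bit is set:
+ *
+ *   u64 rx_dur = __le32_to_cpu(ev->rx_duration);
+ *   u32 high = __le32_to_cpu(ev->rx_duration_high);
+ *
+ *   if (high & BIT(WMI_TLV_PEER_RX_DURATION_HIGH_VALID_BIT))
+ *           rx_dur |= (u64)(high & WMI_TLV_PEER_RX_DURATION_HIGH_MASK) <<
+ *                     WMI_TLV_PEER_RX_DURATION_SHIFT;
+ */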
+
+struct wmi_tlv_vdev_stats {
+ __le32 vdev_id;
+ __le32 beacon_snr;
+ __le32 data_snr;
+ __le32 num_tx_frames[4]; /* per-AC */
+ __le32 num_rx_frames;
+ __le32 num_tx_frames_retries[4];
+ __le32 num_tx_frames_failures[4];
+ __le32 num_rts_fail;
+ __le32 num_rts_success;
+ __le32 num_rx_err;
+ __le32 num_rx_discard;
+ __le32 num_tx_not_acked;
+ __le32 tx_rate_history[10];
+ __le32 beacon_rssi_history[10];
+} __packed;
+
+struct wmi_tlv_pktlog_enable {
+ __le32 reserved;
+ __le32 filter;
+} __packed;
+
+struct wmi_tlv_pktlog_disable {
+ __le32 reserved;
+} __packed;
+
+enum wmi_tlv_bcn_tx_status {
+ WMI_TLV_BCN_TX_STATUS_OK,
+ WMI_TLV_BCN_TX_STATUS_XRETRY,
+ WMI_TLV_BCN_TX_STATUS_DROP,
+ WMI_TLV_BCN_TX_STATUS_FILTERED,
+};
+
+struct wmi_tlv_bcn_tx_status_ev {
+ __le32 vdev_id;
+ __le32 tx_status;
+} __packed;
+
+struct wmi_tlv_bcn_prb_info {
+ __le32 caps;
+ __le32 erp;
+ u8 ies[];
+} __packed;
+
+struct wmi_tlv_bcn_tmpl_cmd {
+ __le32 vdev_id;
+ __le32 tim_ie_offset;
+ __le32 buf_len;
+} __packed;
+
+struct wmi_tlv_prb_tmpl_cmd {
+ __le32 vdev_id;
+ __le32 buf_len;
+} __packed;
+
+struct wmi_tlv_p2p_go_bcn_ie {
+ __le32 vdev_id;
+ __le32 ie_len;
+} __packed;
+
+enum wmi_tlv_diag_item_type {
+ WMI_TLV_DIAG_ITEM_TYPE_FW_EVENT,
+ WMI_TLV_DIAG_ITEM_TYPE_FW_LOG,
+ WMI_TLV_DIAG_ITEM_TYPE_FW_DEBUG_MSG,
+};
+
+struct wmi_tlv_diag_item {
+ u8 type;
+ u8 reserved;
+ __le16 len;
+ __le32 timestamp;
+ __le32 code;
+ u8 payload[];
+} __packed;
+
+struct wmi_tlv_diag_data_ev {
+ __le32 num_items;
+} __packed;
+
+struct wmi_tlv_sta_keepalive_cmd {
+ __le32 vdev_id;
+ __le32 enabled;
+ __le32 method; /* WMI_STA_KEEPALIVE_METHOD_ */
+ __le32 interval; /* in seconds */
+} __packed;
+
+struct wmi_tlv_stats_ev {
+ __le32 stats_id; /* WMI_STAT_ */
+ __le32 num_pdev_stats;
+ __le32 num_vdev_stats;
+ __le32 num_peer_stats;
+ __le32 num_bcnflt_stats;
+ __le32 num_chan_stats;
+ __le32 num_mib_stats;
+ __le32 pdev_id;
+ __le32 num_bcn_stats;
+ __le32 num_peer_stats_extd;
+} __packed;
+
+struct wmi_tlv_peer_stats_info_ev {
+ __le32 vdev_id;
+ __le32 num_peers;
+ __le32 more_data;
+} __packed;
+
+#define WMI_TLV_MAX_CHAINS 8
+
+struct wmi_tlv_peer_stats_info {
+ struct wmi_mac_addr peer_macaddr;
+ struct {
+ /* lower 32 bits of the tx_bytes value */
+ __le32 low_32;
+ /* upper 32 bits of the tx_bytes value */
+ __le32 high_32;
+ } __packed tx_bytes;
+ struct {
+ /* lower 32 bits of the tx_packets value */
+ __le32 low_32;
+ /* upper 32 bits of the tx_packets value */
+ __le32 high_32;
+ } __packed tx_packets;
+ struct {
+ /* lower 32 bits of the rx_bytes value */
+ __le32 low_32;
+ /* upper 32 bits of the rx_bytes value */
+ __le32 high_32;
+ } __packed rx_bytes;
+ struct {
+ /* lower 32 bits of the rx_packets value */
+ __le32 low_32;
+ /* upper 32 bits of the rx_packets value */
+ __le32 high_32;
+ } __packed rx_packets;
+ __le32 tx_retries;
+ __le32 tx_failed;
+
+ /* Rate information; this is the output of WMI_ASSEMBLE_RATECODE_V1
+ * (in the format 0x1000RRRR).
+ * The rate-code is a 4-byte field which, for a given rate, NSS and
+ * preamble, is laid out as follows:
+ *
+ * b'31-b'29 unused / reserved
+ * b'28 indicate the version of rate-code (1 = RATECODE_V1)
+ * b'27-b'11 unused / reserved
+ * b'10-b'8 indicate the preamble (0 OFDM, 1 CCK, 2 HT, 3 VHT)
+ * b'7-b'5 indicate the NSS (0 - 1x1, 1 - 2x2, 2 - 3x3, 3 - 4x4)
+ * b'4-b'0 indicate the rate, which is indicated as follows:
+ * OFDM : 0: OFDM 48 Mbps
+ * 1: OFDM 24 Mbps
+ * 2: OFDM 12 Mbps
+ * 3: OFDM 6 Mbps
+ * 4: OFDM 54 Mbps
+ * 5: OFDM 36 Mbps
+ * 6: OFDM 18 Mbps
+ * 7: OFDM 9 Mbps
+ * CCK (pream == 1)
+ * 0: CCK 11 Mbps Long
+ * 1: CCK 5.5 Mbps Long
+ * 2: CCK 2 Mbps Long
+ * 3: CCK 1 Mbps Long
+ * 4: CCK 11 Mbps Short
+ * 5: CCK 5.5 Mbps Short
+ * 6: CCK 2 Mbps Short
+ * HT/VHT (pream == 2/3)
+ * 0..7: MCS0..MCS7 (HT)
+ * 0..9: MCS0..MCS9 (11AC VHT)
+ * 0..11: MCS0..MCS11 (11AX HE)
+ * rate-code of the last transmission
+ */
+ __le32 last_tx_rate_code;
+ __le32 last_rx_rate_code;
+ __le32 last_tx_bitrate_kbps;
+ __le32 last_rx_bitrate_kbps;
+ __le32 peer_rssi;
+ __le32 tx_succeed;
+ __le32 peer_rssi_per_chain[WMI_TLV_MAX_CHAINS];
+} __packed;
+
+#define HW_RATECODE_PREAM_V1_MASK GENMASK(10, 8)
+#define WMI_TLV_GET_HW_RC_PREAM_V1(rc) FIELD_GET(HW_RATECODE_PREAM_V1_MASK, rc)
+
+#define HW_RATECODE_NSS_V1_MASK GENMASK(7, 5)
+#define WMI_TLV_GET_HW_RC_NSS_V1(rc) FIELD_GET(HW_RATECODE_NSS_V1_MASK, rc)
+
+#define HW_RATECODE_RATE_V1_MASK GENMASK(4, 0)
+#define WMI_TLV_GET_HW_RC_RATE_V1(rc) FIELD_GET(HW_RATECODE_RATE_V1_MASK, rc)
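+
+/* Example (illustrative; "stats" is a hypothetical pointer to
+ * struct wmi_tlv_peer_stats_info): decoding a V1 rate code with the helpers
+ * above, per the preamble/NSS/rate bit layout documented in that struct:
+ *
+ *   u32 rc = __le32_to_cpu(stats->last_tx_rate_code);
+ *   u8 pream = WMI_TLV_GET_HW_RC_PREAM_V1(rc);
+ *   u8 nss = WMI_TLV_GET_HW_RC_NSS_V1(rc);
+ *   u8 rate = WMI_TLV_GET_HW_RC_RATE_V1(rc);
+ */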
+
+struct wmi_tlv_p2p_noa_ev {
+ __le32 vdev_id;
+} __packed;
+
+struct wmi_tlv_roam_ev {
+ __le32 vdev_id;
+ __le32 reason;
+ __le32 rssi;
+} __packed;
+
+struct wmi_tlv_wow_add_del_event_cmd {
+ __le32 vdev_id;
+ __le32 is_add;
+ __le32 event_bitmap;
+} __packed;
+
+struct wmi_tlv_request_peer_stats_info {
+ __le32 request_type;
+ __le32 vdev_id;
+ /* peer MAC address */
+ struct wmi_mac_addr peer_macaddr;
+ __le32 reset_after_request;
+} __packed;
+
+/* Command to set/unset chip in quiet mode */
+struct wmi_tlv_set_quiet_cmd {
+ __le32 vdev_id;
+
+ /* in TUs */
+ __le32 period;
+
+ /* in TUs */
+ __le32 duration;
+
+ /* offset in TUs */
+ __le32 next_start;
+ __le32 enabled;
+} __packed;
+
+enum wmi_tlv_wow_interface_cfg {
+ WOW_IFACE_PAUSE_ENABLED,
+ WOW_IFACE_PAUSE_DISABLED
+};
+
+struct wmi_tlv_wow_enable_cmd {
+ __le32 enable;
+ __le32 pause_iface_config;
+ __le32 flags;
+} __packed;
+
+struct wmi_tlv_wow_host_wakeup_ind {
+ __le32 reserved;
+} __packed;
+
+struct wmi_tlv_wow_event_info {
+ __le32 vdev_id;
+ __le32 flag;
+ __le32 wake_reason;
+ __le32 data_len;
+} __packed;
+
+enum wmi_tlv_pattern_type {
+ WOW_PATTERN_MIN = 0,
+ WOW_BITMAP_PATTERN = WOW_PATTERN_MIN,
+ WOW_IPV4_SYNC_PATTERN,
+ WOW_IPV6_SYNC_PATTERN,
+ WOW_WILD_CARD_PATTERN,
+ WOW_TIMER_PATTERN,
+ WOW_MAGIC_PATTERN,
+ WOW_IPV6_RA_PATTERN,
+ WOW_IOAC_PKT_PATTERN,
+ WOW_IOAC_TMR_PATTERN,
+ WOW_PATTERN_MAX
+};
+
+#define WOW_DEFAULT_BITMAP_PATTERN_SIZE 148
+#define WOW_DEFAULT_BITMASK_SIZE 148
+
+struct wmi_tlv_wow_bitmap_pattern {
+ u8 patternbuf[WOW_DEFAULT_BITMAP_PATTERN_SIZE];
+ u8 bitmaskbuf[WOW_DEFAULT_BITMASK_SIZE];
+ __le32 pattern_offset;
+ __le32 pattern_len;
+ __le32 bitmask_len;
+ __le32 pattern_id;
+} __packed;
+
+struct wmi_tlv_wow_add_pattern_cmd {
+ __le32 vdev_id;
+ __le32 pattern_id;
+ __le32 pattern_type;
+} __packed;
+
+struct wmi_tlv_wow_del_pattern_cmd {
+ __le32 vdev_id;
+ __le32 pattern_id;
+ __le32 pattern_type;
+} __packed;
+
+/* TDLS Options */
+enum wmi_tlv_tdls_options {
+ WMI_TLV_TDLS_OFFCHAN_EN = BIT(0),
+ WMI_TLV_TDLS_BUFFER_STA_EN = BIT(1),
+ WMI_TLV_TDLS_SLEEP_STA_EN = BIT(2),
+};
+
+struct wmi_tdls_set_state_cmd {
+ __le32 vdev_id;
+ __le32 state;
+ __le32 notification_interval_ms;
+ __le32 tx_discovery_threshold;
+ __le32 tx_teardown_threshold;
+ __le32 rssi_teardown_threshold;
+ __le32 rssi_delta;
+ __le32 tdls_options;
+ __le32 tdls_peer_traffic_ind_window;
+ __le32 tdls_peer_traffic_response_timeout_ms;
+ __le32 tdls_puapsd_mask;
+ __le32 tdls_puapsd_inactivity_time_ms;
+ __le32 tdls_puapsd_rx_frame_threshold;
+} __packed;
+
+struct wmi_tdls_peer_update_cmd {
+ __le32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ __le32 peer_state;
+} __packed;
+
+enum {
+ WMI_TLV_TDLS_PEER_QOS_AC_VO = BIT(0),
+ WMI_TLV_TDLS_PEER_QOS_AC_VI = BIT(1),
+ WMI_TLV_TDLS_PEER_QOS_AC_BK = BIT(2),
+ WMI_TLV_TDLS_PEER_QOS_AC_BE = BIT(3),
+};
+
+#define WMI_TLV_TDLS_PEER_SP_MASK 0x60
+#define WMI_TLV_TDLS_PEER_SP_LSB 5
+
+struct wmi_tdls_peer_capab {
+ __le32 peer_qos;
+ __le32 buff_sta_support;
+ __le32 off_chan_support;
+ __le32 peer_curr_operclass;
+ __le32 self_curr_operclass;
+ __le32 peer_chan_len;
+ __le32 peer_operclass_len;
+ u8 peer_operclass[WMI_TDLS_MAX_SUPP_OPER_CLASSES];
+ __le32 is_peer_responder;
+ __le32 pref_offchan_num;
+ __le32 pref_offchan_bw;
+} __packed;
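+
+/* Sketch (illustrative only; "cap" is a hypothetical pointer): the UAPSD
+ * service period is extracted from peer_qos with the mask/LSB pair above:
+ *
+ *   u32 qos = __le32_to_cpu(cap->peer_qos);
+ *   u8 sp = (qos & WMI_TLV_TDLS_PEER_SP_MASK) >> WMI_TLV_TDLS_PEER_SP_LSB;
+ */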
+
+struct wmi_tlv_adaptive_qcs {
+ __le32 enable;
+} __packed;
+
+/**
+ * enum wmi_tlv_tx_pause_id - firmware tx queue pause reason types
+ *
+ * @WMI_TLV_TX_PAUSE_ID_MCC: used by the multi-channel firmware scheduler.
+ * Only vdev_map is valid.
+ * @WMI_TLV_TX_PAUSE_ID_AP_PEER_PS: peer in AP mode is asleep.
+ * Only peer_id is valid.
+ * @WMI_TLV_TX_PAUSE_ID_AP_PEER_UAPSD: Only peer_id and tid_map are valid.
+ * @WMI_TLV_TX_PAUSE_ID_P2P_CLI_NOA: Only vdev_map is valid.
+ * @WMI_TLV_TX_PAUSE_ID_P2P_GO_PS: Only vdev_map is valid.
+ * @WMI_TLV_TX_PAUSE_ID_STA_ADD_BA: Only peer_id and tid_map are valid.
+ * @WMI_TLV_TX_PAUSE_ID_AP_PS: When all peers are asleep in AP mode. Only
+ * vdev_map is valid.
+ * @WMI_TLV_TX_PAUSE_ID_IBSS_PS: When all peers are asleep in IBSS mode. Only
+ * vdev_map is valid.
+ * @WMI_TLV_TX_PAUSE_ID_HOST: Host itself requested tx pause.
+ */
+enum wmi_tlv_tx_pause_id {
+ WMI_TLV_TX_PAUSE_ID_MCC = 1,
+ WMI_TLV_TX_PAUSE_ID_AP_PEER_PS = 2,
+ WMI_TLV_TX_PAUSE_ID_AP_PEER_UAPSD = 3,
+ WMI_TLV_TX_PAUSE_ID_P2P_CLI_NOA = 4,
+ WMI_TLV_TX_PAUSE_ID_P2P_GO_PS = 5,
+ WMI_TLV_TX_PAUSE_ID_STA_ADD_BA = 6,
+ WMI_TLV_TX_PAUSE_ID_AP_PS = 7,
+ WMI_TLV_TX_PAUSE_ID_IBSS_PS = 8,
+ WMI_TLV_TX_PAUSE_ID_HOST = 21,
+};
+
+enum wmi_tlv_tx_pause_action {
+ WMI_TLV_TX_PAUSE_ACTION_STOP,
+ WMI_TLV_TX_PAUSE_ACTION_WAKE,
+};
+
+struct wmi_tlv_tx_pause_ev {
+ __le32 pause_id;
+ __le32 action;
+ __le32 vdev_map;
+ __le32 peer_id;
+ __le32 tid_map;
+} __packed;
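+
+/* Sketch (illustrative only; "handle_vdev_pause" is a hypothetical helper):
+ * as the kernel-doc above notes, which fields are valid depends on pause_id,
+ * so a handler typically dispatches on it and walks vdev_map bit by bit for
+ * the vdev-scoped pause reasons:
+ *
+ *   unsigned long vdev_map = __le32_to_cpu(ev->vdev_map);
+ *   int vdev_id;
+ *
+ *   switch (__le32_to_cpu(ev->pause_id)) {
+ *   case WMI_TLV_TX_PAUSE_ID_MCC:
+ *   case WMI_TLV_TX_PAUSE_ID_P2P_CLI_NOA:
+ *   case WMI_TLV_TX_PAUSE_ID_P2P_GO_PS:
+ *           for_each_set_bit(vdev_id, &vdev_map, 32)
+ *                   handle_vdev_pause(vdev_id, ev->action);
+ *           break;
+ *   case WMI_TLV_TX_PAUSE_ID_AP_PEER_PS:
+ *           (only ev->peer_id is valid here)
+ *           break;
+ *   }
+ */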
+
+struct wmi_tlv_tdls_peer_event {
+ struct wmi_mac_addr peer_macaddr;
+ __le32 peer_status;
+ __le32 peer_reason;
+ __le32 vdev_id;
+} __packed;
+
+enum wmi_tlv_sys_cap_info_flags {
+ WMI_TLV_SYS_CAP_INFO_RXTX_LED = BIT(0),
+ WMI_TLV_SYS_CAP_INFO_RFKILL = BIT(1),
+};
+
+#define WMI_TLV_RFKILL_CFG_GPIO_PIN_NUM GENMASK(5, 0)
+#define WMI_TLV_RFKILL_CFG_RADIO_LEVEL BIT(6)
+#define WMI_TLV_RFKILL_CFG_PIN_AS_GPIO GENMASK(10, 7)
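+
+/* Example (illustrative; "cfg" is a hypothetical host-order config word):
+ * the RFKILL fields above are unpacked with FIELD_GET() from
+ * <linux/bitfield.h>:
+ *
+ *   u32 pin = FIELD_GET(WMI_TLV_RFKILL_CFG_GPIO_PIN_NUM, cfg);
+ *   u32 level = FIELD_GET(WMI_TLV_RFKILL_CFG_RADIO_LEVEL, cfg);
+ */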
+
+enum wmi_tlv_rfkill_enable_radio {
+ WMI_TLV_RFKILL_ENABLE_RADIO_ON = 0,
+ WMI_TLV_RFKILL_ENABLE_RADIO_OFF = 1,
+};
+
+enum wmi_tlv_rfkill_radio_state {
+ WMI_TLV_RFKILL_RADIO_STATE_OFF = 1,
+ WMI_TLV_RFKILL_RADIO_STATE_ON = 2,
+};
+
+struct wmi_tlv_rfkill_state_change_ev {
+ __le32 gpio_pin_num;
+ __le32 int_type;
+ __le32 radio_state;
+};
+
+void ath10k_wmi_tlv_attach(struct ath10k *ar);
+
+enum wmi_nlo_auth_algorithm {
+ WMI_NLO_AUTH_ALGO_80211_OPEN = 1,
+ WMI_NLO_AUTH_ALGO_80211_SHARED_KEY = 2,
+ WMI_NLO_AUTH_ALGO_WPA = 3,
+ WMI_NLO_AUTH_ALGO_WPA_PSK = 4,
+ WMI_NLO_AUTH_ALGO_WPA_NONE = 5,
+ WMI_NLO_AUTH_ALGO_RSNA = 6,
+ WMI_NLO_AUTH_ALGO_RSNA_PSK = 7,
+};
+
+enum wmi_nlo_cipher_algorithm {
+ WMI_NLO_CIPHER_ALGO_NONE = 0x00,
+ WMI_NLO_CIPHER_ALGO_WEP40 = 0x01,
+ WMI_NLO_CIPHER_ALGO_TKIP = 0x02,
+ WMI_NLO_CIPHER_ALGO_CCMP = 0x04,
+ WMI_NLO_CIPHER_ALGO_WEP104 = 0x05,
+ WMI_NLO_CIPHER_ALGO_BIP = 0x06,
+ WMI_NLO_CIPHER_ALGO_RSN_USE_GROUP = 0x100,
+ WMI_NLO_CIPHER_ALGO_WEP = 0x101,
+};
+
+/* SSID broadcast type passed in NLO params */
+enum wmi_nlo_ssid_bcastnwtype {
+ WMI_NLO_BCAST_UNKNOWN = 0,
+ WMI_NLO_BCAST_NORMAL = 1,
+ WMI_NLO_BCAST_HIDDEN = 2,
+};
+
+#define WMI_NLO_MAX_SSIDS 16
+#define WMI_NLO_MAX_CHAN 48
+
+#define WMI_NLO_CONFIG_STOP (0x1 << 0)
+#define WMI_NLO_CONFIG_START (0x1 << 1)
+#define WMI_NLO_CONFIG_RESET (0x1 << 2)
+#define WMI_NLO_CONFIG_SLOW_SCAN (0x1 << 4)
+#define WMI_NLO_CONFIG_FAST_SCAN (0x1 << 5)
+#define WMI_NLO_CONFIG_SSID_HIDE_EN (0x1 << 6)
+
+/* This bit is used to indicate if EPNO or supplicant PNO is enabled.
+ * Only one of them can be enabled at a given time.
+ */
+#define WMI_NLO_CONFIG_ENLO (0x1 << 7)
+#define WMI_NLO_CONFIG_SCAN_PASSIVE (0x1 << 8)
+#define WMI_NLO_CONFIG_ENLO_RESET (0x1 << 9)
+#define WMI_NLO_CONFIG_SPOOFED_MAC_IN_PROBE_REQ (0x1 << 10)
+#define WMI_NLO_CONFIG_RANDOM_SEQ_NO_IN_PROBE_REQ (0x1 << 11)
+#define WMI_NLO_CONFIG_ENABLE_IE_WHITELIST_IN_PROBE_REQ (0x1 << 12)
+#define WMI_NLO_CONFIG_ENABLE_CNLO_RSSI_CONFIG (0x1 << 13)
+
+/* Whether directed scan needs to be performed (for hidden SSIDs) */
+#define WMI_ENLO_FLAG_DIRECTED_SCAN 1
+
+/* Whether PNO event shall be triggered if the network is found on A band */
+#define WMI_ENLO_FLAG_A_BAND 2
+
+/* Whether PNO event shall be triggered if the network is found on G band */
+#define WMI_ENLO_FLAG_G_BAND 4
+
+/* Whether strict matching is required (i.e. the firmware shall only
+ * match on the entire SSID, not on a partial one)
+ */
+#define WMI_ENLO_FLAG_STRICT_MATCH 8
+
+/* Code for matching the beacon AUTH IE - additional codes TBD */
+/* open */
+#define WMI_ENLO_AUTH_CODE_OPEN 1
+
+/* WPA_PSK or WPA2PSK */
+#define WMI_ENLO_AUTH_CODE_PSK 2
+
+/* any EAPOL */
+#define WMI_ENLO_AUTH_CODE_EAPOL 4
+
+struct wmi_nlo_ssid_param {
+ __le32 valid;
+ struct wmi_ssid ssid;
+} __packed;
+
+struct wmi_nlo_enc_param {
+ __le32 valid;
+ __le32 enc_type;
+} __packed;
+
+struct wmi_nlo_auth_param {
+ __le32 valid;
+ __le32 auth_type;
+} __packed;
+
+struct wmi_nlo_bcast_nw_param {
+ __le32 valid;
+
+ /* If WMI_NLO_CONFIG_ENLO is not set, supplicant PNO is enabled and
+ * this value should be true/false. Otherwise EPNO is enabled and
+ * bcast_nw_type is used as a bit field containing WMI_ENLO_FLAG_XXX flags.
+ */
+ __le32 bcast_nw_type;
+} __packed;
+
+struct wmi_nlo_rssi_param {
+ __le32 valid;
+ __le32 rssi;
+} __packed;
+
+struct nlo_configured_parameters {
+ /* TLV tag and len;*/
+ __le32 tlv_header;
+ struct wmi_nlo_ssid_param ssid;
+ struct wmi_nlo_enc_param enc_type;
+ struct wmi_nlo_auth_param auth_type;
+ struct wmi_nlo_rssi_param rssi_cond;
+
+ /* indicates if the SSID is hidden or not */
+ struct wmi_nlo_bcast_nw_param bcast_nw_type;
+} __packed;
+
+/* Support channel prediction for PNO scan after scanning top_k_num channels
+ * if stationary_threshold is met.
+ */
+struct nlo_channel_prediction_cfg {
+ __le32 tlv_header;
+
+ /* Enable or disable this feature. */
+ __le32 enable;
+
+ /* The top K channels will be scanned before deciding whether to scan
+ * further or stop. Minimum value is 3 and maximum is 5.
+ */
+ __le32 top_k_num;
+
+ /* Preconfigured stationary threshold.
+ * A lower value is more conservative, a higher value more aggressive.
+ * Minimum is 0 and maximum is 100.
+ */
+ __le32 stationary_threshold;
+
+ /* Full channel scan period in milliseconds.
+ * Once full_scan_period_ms has elapsed since the last full scan,
+ * channel prediction is suppressed and a full scan is performed instead.
+ * This helps detect APs that suddenly power on or off. A value of 0 means
+ * no full scan at all (not recommended).
+ */
+ __le32 full_scan_period_ms;
+} __packed;
+
+struct enlo_candidate_score_params_t {
+ __le32 tlv_header; /* TLV tag and len; */
+
+ /* minimum 5GHz RSSI for a BSSID to be considered (units = dBm) */
+ __le32 min_5ghz_rssi;
+
+ /* minimum 2.4GHz RSSI for a BSSID to be considered (units = dBm) */
+ __le32 min_24ghz_rssi;
+
+ /* the maximum score that a network can have before bonuses */
+ __le32 initial_score_max;
+
+ /* current_connection_bonus:
+ * only report networks whose score exceeds that of the current
+ * connection by at least this much
+ */
+ __le32 current_connection_bonus;
+
+ /* score bonus for all networks with the same network flag */
+ __le32 same_network_bonus;
+
+ /* score bonus for networks that are not open */
+ __le32 secure_bonus;
+
+ /* 5GHz RSSI score bonus (applied to all 5GHz networks) */
+ __le32 band_5ghz_bonus;
+} __packed;
+
+struct connected_nlo_bss_band_rssi_pref_t {
+ __le32 tlv_header; /* TLV tag and len;*/
+
+ /* band which should be given preference over the other band
+ * - see the wmi_set_vdev_ie_band enum
+ */
+ __le32 band;
+
+ /* Amount of RSSI preference (in dB) that can be given to a band */
+ __le32 rssi_pref;
+} __packed;
+
+struct connected_nlo_rssi_params_t {
+ __le32 tlv_header; /* TLV tag and len;*/
+
+ /* Relative RSSI threshold (in dB) by which a new BSS must have a
+ * better RSSI than the currently connected BSS.
+ */
+ __le32 relative_rssi;
+
+ /* The amount of RSSI preference (in dB) that can be given
+ * to a 5 GHz BSS over a 2.4 GHz BSS.
+ */
+ __le32 relative_rssi_5g_pref;
+} __packed;
+
+struct wmi_tlv_wow_nlo_config_cmd {
+ __le32 flags;
+ __le32 vdev_id;
+ __le32 fast_scan_max_cycles;
+ __le32 active_dwell_time;
+ __le32 passive_dwell_time; /* PDT in msecs */
+ __le32 probe_bundle_size;
+
+ /* ART = IRT */
+ __le32 rest_time;
+
+ /* Max value that can be reached after SBM */
+ __le32 max_rest_time;
+
+ /* SBM */
+ __le32 scan_backoff_multiplier;
+
+ /* SCBM */
+ __le32 fast_scan_period;
+
+ /* specific to windows */
+ __le32 slow_scan_period;
+
+ __le32 no_of_ssids;
+
+ __le32 num_of_channels;
+
+ /* NLO scan start delay time in milliseconds */
+ __le32 delay_start_time;
+
+ /** MAC Address to use in Probe Req as SA **/
+ struct wmi_mac_addr mac_addr;
+
+ /** Mask on which MAC has to be randomized **/
+ struct wmi_mac_addr mac_mask;
+
+ /** IE bitmap to use in Probe Req **/
+ __le32 ie_bitmap[8];
+
+ /** Number of vendor OUIs carried in the trailing vendor_oui[] TLV **/
+ __le32 num_vendor_oui;
+
+ /** Number of connected NLO band preferences **/
+ __le32 num_cnlo_band_pref;
+
+ /* The TLVs will follow.
+ * nlo_configured_parameters nlo_list[];
+ * A_UINT32 channel_list[num_of_channels];
+ * nlo_channel_prediction_cfg ch_prediction_cfg;
+ * enlo_candidate_score_params candidate_score_params;
+ * wmi_vendor_oui vendor_oui[num_vendor_oui];
+ * connected_nlo_rssi_params cnlo_rssi_params;
+ * connected_nlo_bss_band_rssi_pref cnlo_bss_band_rssi_pref[num_cnlo_band_pref];
+ */
+} __packed;
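+
+/* Sketch (illustrative only; the counts are hypothetical locals): because
+ * the trailing TLVs listed above are variable length, the command buffer
+ * must be sized up front, roughly along these lines (per-TLV header
+ * overhead omitted for brevity):
+ *
+ *   len = sizeof(struct wmi_tlv_wow_nlo_config_cmd) +
+ *         num_ssids * sizeof(struct nlo_configured_parameters) +
+ *         num_channels * sizeof(__le32);
+ */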
+
+struct wmi_tlv_mgmt_tx_cmd {
+ __le32 vdev_id;
+ __le32 desc_id;
+ __le32 chanfreq;
+ __le64 paddr;
+ __le32 frame_len;
+ __le32 buf_len;
+} __packed;
+#endif
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
new file mode 100644
index 000000000000..b4aad6604d6d
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -0,0 +1,9641 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#include <linux/skbuff.h>
+#include <linux/ctype.h>
+
+#include "core.h"
+#include "htc.h"
+#include "debug.h"
+#include "wmi.h"
+#include "wmi-tlv.h"
+#include "mac.h"
+#include "testmode.h"
+#include "wmi-ops.h"
+#include "p2p.h"
+#include "hw.h"
+#include "hif.h"
+#include "txrx.h"
+
+#define ATH10K_WMI_BARRIER_ECHO_ID 0xBA991E9
+#define ATH10K_WMI_BARRIER_TIMEOUT_HZ (3 * HZ)
+#define ATH10K_WMI_DFS_CONF_TIMEOUT_HZ (HZ / 6)
+
+/* MAIN WMI cmd track */
+static struct wmi_cmd_map wmi_cmd_map = {
+ .init_cmdid = WMI_INIT_CMDID,
+ .start_scan_cmdid = WMI_START_SCAN_CMDID,
+ .stop_scan_cmdid = WMI_STOP_SCAN_CMDID,
+ .scan_chan_list_cmdid = WMI_SCAN_CHAN_LIST_CMDID,
+ .scan_sch_prio_tbl_cmdid = WMI_SCAN_SCH_PRIO_TBL_CMDID,
+ .scan_prob_req_oui_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_set_regdomain_cmdid = WMI_PDEV_SET_REGDOMAIN_CMDID,
+ .pdev_set_channel_cmdid = WMI_PDEV_SET_CHANNEL_CMDID,
+ .pdev_set_param_cmdid = WMI_PDEV_SET_PARAM_CMDID,
+ .pdev_pktlog_enable_cmdid = WMI_PDEV_PKTLOG_ENABLE_CMDID,
+ .pdev_pktlog_disable_cmdid = WMI_PDEV_PKTLOG_DISABLE_CMDID,
+ .pdev_set_wmm_params_cmdid = WMI_PDEV_SET_WMM_PARAMS_CMDID,
+ .pdev_set_ht_cap_ie_cmdid = WMI_PDEV_SET_HT_CAP_IE_CMDID,
+ .pdev_set_vht_cap_ie_cmdid = WMI_PDEV_SET_VHT_CAP_IE_CMDID,
+ .pdev_set_dscp_tid_map_cmdid = WMI_PDEV_SET_DSCP_TID_MAP_CMDID,
+ .pdev_set_quiet_mode_cmdid = WMI_PDEV_SET_QUIET_MODE_CMDID,
+ .pdev_green_ap_ps_enable_cmdid = WMI_PDEV_GREEN_AP_PS_ENABLE_CMDID,
+ .pdev_get_tpc_config_cmdid = WMI_PDEV_GET_TPC_CONFIG_CMDID,
+ .pdev_set_base_macaddr_cmdid = WMI_PDEV_SET_BASE_MACADDR_CMDID,
+ .vdev_create_cmdid = WMI_VDEV_CREATE_CMDID,
+ .vdev_delete_cmdid = WMI_VDEV_DELETE_CMDID,
+ .vdev_start_request_cmdid = WMI_VDEV_START_REQUEST_CMDID,
+ .vdev_restart_request_cmdid = WMI_VDEV_RESTART_REQUEST_CMDID,
+ .vdev_up_cmdid = WMI_VDEV_UP_CMDID,
+ .vdev_stop_cmdid = WMI_VDEV_STOP_CMDID,
+ .vdev_down_cmdid = WMI_VDEV_DOWN_CMDID,
+ .vdev_set_param_cmdid = WMI_VDEV_SET_PARAM_CMDID,
+ .vdev_install_key_cmdid = WMI_VDEV_INSTALL_KEY_CMDID,
+ .peer_create_cmdid = WMI_PEER_CREATE_CMDID,
+ .peer_delete_cmdid = WMI_PEER_DELETE_CMDID,
+ .peer_flush_tids_cmdid = WMI_PEER_FLUSH_TIDS_CMDID,
+ .peer_set_param_cmdid = WMI_PEER_SET_PARAM_CMDID,
+ .peer_assoc_cmdid = WMI_PEER_ASSOC_CMDID,
+ .peer_add_wds_entry_cmdid = WMI_PEER_ADD_WDS_ENTRY_CMDID,
+ .peer_remove_wds_entry_cmdid = WMI_PEER_REMOVE_WDS_ENTRY_CMDID,
+ .peer_mcast_group_cmdid = WMI_PEER_MCAST_GROUP_CMDID,
+ .bcn_tx_cmdid = WMI_BCN_TX_CMDID,
+ .pdev_send_bcn_cmdid = WMI_PDEV_SEND_BCN_CMDID,
+ .bcn_tmpl_cmdid = WMI_BCN_TMPL_CMDID,
+ .bcn_filter_rx_cmdid = WMI_BCN_FILTER_RX_CMDID,
+ .prb_req_filter_rx_cmdid = WMI_PRB_REQ_FILTER_RX_CMDID,
+ .mgmt_tx_cmdid = WMI_MGMT_TX_CMDID,
+ .prb_tmpl_cmdid = WMI_PRB_TMPL_CMDID,
+ .addba_clear_resp_cmdid = WMI_ADDBA_CLEAR_RESP_CMDID,
+ .addba_send_cmdid = WMI_ADDBA_SEND_CMDID,
+ .addba_status_cmdid = WMI_ADDBA_STATUS_CMDID,
+ .delba_send_cmdid = WMI_DELBA_SEND_CMDID,
+ .addba_set_resp_cmdid = WMI_ADDBA_SET_RESP_CMDID,
+ .send_singleamsdu_cmdid = WMI_SEND_SINGLEAMSDU_CMDID,
+ .sta_powersave_mode_cmdid = WMI_STA_POWERSAVE_MODE_CMDID,
+ .sta_powersave_param_cmdid = WMI_STA_POWERSAVE_PARAM_CMDID,
+ .sta_mimo_ps_mode_cmdid = WMI_STA_MIMO_PS_MODE_CMDID,
+ .pdev_dfs_enable_cmdid = WMI_PDEV_DFS_ENABLE_CMDID,
+ .pdev_dfs_disable_cmdid = WMI_PDEV_DFS_DISABLE_CMDID,
+ .roam_scan_mode = WMI_ROAM_SCAN_MODE,
+ .roam_scan_rssi_threshold = WMI_ROAM_SCAN_RSSI_THRESHOLD,
+ .roam_scan_period = WMI_ROAM_SCAN_PERIOD,
+ .roam_scan_rssi_change_threshold = WMI_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
+ .roam_ap_profile = WMI_ROAM_AP_PROFILE,
+ .ofl_scan_add_ap_profile = WMI_OFL_SCAN_ADD_AP_PROFILE,
+ .ofl_scan_remove_ap_profile = WMI_OFL_SCAN_REMOVE_AP_PROFILE,
+ .ofl_scan_period = WMI_OFL_SCAN_PERIOD,
+ .p2p_dev_set_device_info = WMI_P2P_DEV_SET_DEVICE_INFO,
+ .p2p_dev_set_discoverability = WMI_P2P_DEV_SET_DISCOVERABILITY,
+ .p2p_go_set_beacon_ie = WMI_P2P_GO_SET_BEACON_IE,
+ .p2p_go_set_probe_resp_ie = WMI_P2P_GO_SET_PROBE_RESP_IE,
+ .p2p_set_vendor_ie_data_cmdid = WMI_P2P_SET_VENDOR_IE_DATA_CMDID,
+ .ap_ps_peer_param_cmdid = WMI_AP_PS_PEER_PARAM_CMDID,
+ .ap_ps_peer_uapsd_coex_cmdid = WMI_AP_PS_PEER_UAPSD_COEX_CMDID,
+ .peer_rate_retry_sched_cmdid = WMI_PEER_RATE_RETRY_SCHED_CMDID,
+ .wlan_profile_trigger_cmdid = WMI_WLAN_PROFILE_TRIGGER_CMDID,
+ .wlan_profile_set_hist_intvl_cmdid =
+ WMI_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
+ .wlan_profile_get_profile_data_cmdid =
+ WMI_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
+ .wlan_profile_enable_profile_id_cmdid =
+ WMI_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
+ .wlan_profile_list_profile_id_cmdid =
+ WMI_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
+ .pdev_suspend_cmdid = WMI_PDEV_SUSPEND_CMDID,
+ .pdev_resume_cmdid = WMI_PDEV_RESUME_CMDID,
+ .add_bcn_filter_cmdid = WMI_ADD_BCN_FILTER_CMDID,
+ .rmv_bcn_filter_cmdid = WMI_RMV_BCN_FILTER_CMDID,
+ .wow_add_wake_pattern_cmdid = WMI_WOW_ADD_WAKE_PATTERN_CMDID,
+ .wow_del_wake_pattern_cmdid = WMI_WOW_DEL_WAKE_PATTERN_CMDID,
+ .wow_enable_disable_wake_event_cmdid =
+ WMI_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
+ .wow_enable_cmdid = WMI_WOW_ENABLE_CMDID,
+ .wow_hostwakeup_from_sleep_cmdid = WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
+ .rtt_measreq_cmdid = WMI_RTT_MEASREQ_CMDID,
+ .rtt_tsf_cmdid = WMI_RTT_TSF_CMDID,
+ .vdev_spectral_scan_configure_cmdid =
+ WMI_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
+ .vdev_spectral_scan_enable_cmdid = WMI_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
+ .request_stats_cmdid = WMI_REQUEST_STATS_CMDID,
+ .set_arp_ns_offload_cmdid = WMI_SET_ARP_NS_OFFLOAD_CMDID,
+ .network_list_offload_config_cmdid =
+ WMI_NETWORK_LIST_OFFLOAD_CONFIG_CMDID,
+ .gtk_offload_cmdid = WMI_GTK_OFFLOAD_CMDID,
+ .csa_offload_enable_cmdid = WMI_CSA_OFFLOAD_ENABLE_CMDID,
+ .csa_offload_chanswitch_cmdid = WMI_CSA_OFFLOAD_CHANSWITCH_CMDID,
+ .chatter_set_mode_cmdid = WMI_CHATTER_SET_MODE_CMDID,
+ .peer_tid_addba_cmdid = WMI_PEER_TID_ADDBA_CMDID,
+ .peer_tid_delba_cmdid = WMI_PEER_TID_DELBA_CMDID,
+ .sta_dtim_ps_method_cmdid = WMI_STA_DTIM_PS_METHOD_CMDID,
+ .sta_uapsd_auto_trig_cmdid = WMI_STA_UAPSD_AUTO_TRIG_CMDID,
+ .sta_keepalive_cmd = WMI_STA_KEEPALIVE_CMD,
+ .echo_cmdid = WMI_ECHO_CMDID,
+ .pdev_utf_cmdid = WMI_PDEV_UTF_CMDID,
+ .dbglog_cfg_cmdid = WMI_DBGLOG_CFG_CMDID,
+ .pdev_qvit_cmdid = WMI_PDEV_QVIT_CMDID,
+ .pdev_ftm_intg_cmdid = WMI_PDEV_FTM_INTG_CMDID,
+ .vdev_set_keepalive_cmdid = WMI_VDEV_SET_KEEPALIVE_CMDID,
+ .vdev_get_keepalive_cmdid = WMI_VDEV_GET_KEEPALIVE_CMDID,
+ .force_fw_hang_cmdid = WMI_FORCE_FW_HANG_CMDID,
+ .gpio_config_cmdid = WMI_GPIO_CONFIG_CMDID,
+ .gpio_output_cmdid = WMI_GPIO_OUTPUT_CMDID,
+ .pdev_get_temperature_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_enable_adaptive_cca_cmdid = WMI_CMD_UNSUPPORTED,
+ .scan_update_request_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED,
+ .wlan_peer_caching_add_peer_cmdid = WMI_CMD_UNSUPPORTED,
+ .wlan_peer_caching_evict_peer_cmdid = WMI_CMD_UNSUPPORTED,
+ .wlan_peer_caching_restore_peer_cmdid = WMI_CMD_UNSUPPORTED,
+ .wlan_peer_caching_print_all_peers_info_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_update_wds_entry_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_add_proxy_sta_entry_cmdid = WMI_CMD_UNSUPPORTED,
+ .rtt_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
+ .oem_req_cmdid = WMI_CMD_UNSUPPORTED,
+ .nan_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_ratemask_cmdid = WMI_CMD_UNSUPPORTED,
+ .qboost_cfg_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_smart_ant_enable_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_smart_ant_set_rx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_smart_ant_set_tx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_smart_ant_set_train_info_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_smart_ant_set_node_config_ops_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_set_antenna_switch_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_set_ctl_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_set_mimogain_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_ratepwr_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_ratepwr_chainmsk_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_fips_cmdid = WMI_CMD_UNSUPPORTED,
+ .tt_set_conf_cmdid = WMI_CMD_UNSUPPORTED,
+ .fwtest_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_get_ani_cck_config_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_get_nfcal_power_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_get_tpc_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_get_ast_info_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_set_dscp_tid_map_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_get_info_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_get_info_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_filter_neighbor_rx_packets_cmdid = WMI_CMD_UNSUPPORTED,
+ .mu_cal_start_cmdid = WMI_CMD_UNSUPPORTED,
+ .set_cca_params_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_bss_chan_info_request_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_get_tpc_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .radar_found_cmdid = WMI_CMD_UNSUPPORTED,
+};
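+
+/* Note (illustrative): entries set to WMI_CMD_UNSUPPORTED allow shared code
+ * to reject commands this ABI lacks before building a buffer, along the
+ * lines of:
+ *
+ *   if (cmd_id == WMI_CMD_UNSUPPORTED) {
+ *           ath10k_warn(ar, "wmi command %d is not supported\n", cmd_id);
+ *           return -EOPNOTSUPP;
+ *   }
+ */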
+
+/* 10.X WMI cmd track */
+static struct wmi_cmd_map wmi_10x_cmd_map = {
+ .init_cmdid = WMI_10X_INIT_CMDID,
+ .start_scan_cmdid = WMI_10X_START_SCAN_CMDID,
+ .stop_scan_cmdid = WMI_10X_STOP_SCAN_CMDID,
+ .scan_chan_list_cmdid = WMI_10X_SCAN_CHAN_LIST_CMDID,
+ .scan_sch_prio_tbl_cmdid = WMI_CMD_UNSUPPORTED,
+ .scan_prob_req_oui_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_set_regdomain_cmdid = WMI_10X_PDEV_SET_REGDOMAIN_CMDID,
+ .pdev_set_channel_cmdid = WMI_10X_PDEV_SET_CHANNEL_CMDID,
+ .pdev_set_param_cmdid = WMI_10X_PDEV_SET_PARAM_CMDID,
+ .pdev_pktlog_enable_cmdid = WMI_10X_PDEV_PKTLOG_ENABLE_CMDID,
+ .pdev_pktlog_disable_cmdid = WMI_10X_PDEV_PKTLOG_DISABLE_CMDID,
+ .pdev_set_wmm_params_cmdid = WMI_10X_PDEV_SET_WMM_PARAMS_CMDID,
+ .pdev_set_ht_cap_ie_cmdid = WMI_10X_PDEV_SET_HT_CAP_IE_CMDID,
+ .pdev_set_vht_cap_ie_cmdid = WMI_10X_PDEV_SET_VHT_CAP_IE_CMDID,
+ .pdev_set_dscp_tid_map_cmdid = WMI_10X_PDEV_SET_DSCP_TID_MAP_CMDID,
+ .pdev_set_quiet_mode_cmdid = WMI_10X_PDEV_SET_QUIET_MODE_CMDID,
+ .pdev_green_ap_ps_enable_cmdid = WMI_10X_PDEV_GREEN_AP_PS_ENABLE_CMDID,
+ .pdev_get_tpc_config_cmdid = WMI_10X_PDEV_GET_TPC_CONFIG_CMDID,
+ .pdev_set_base_macaddr_cmdid = WMI_10X_PDEV_SET_BASE_MACADDR_CMDID,
+ .vdev_create_cmdid = WMI_10X_VDEV_CREATE_CMDID,
+ .vdev_delete_cmdid = WMI_10X_VDEV_DELETE_CMDID,
+ .vdev_start_request_cmdid = WMI_10X_VDEV_START_REQUEST_CMDID,
+ .vdev_restart_request_cmdid = WMI_10X_VDEV_RESTART_REQUEST_CMDID,
+ .vdev_up_cmdid = WMI_10X_VDEV_UP_CMDID,
+ .vdev_stop_cmdid = WMI_10X_VDEV_STOP_CMDID,
+ .vdev_down_cmdid = WMI_10X_VDEV_DOWN_CMDID,
+ .vdev_set_param_cmdid = WMI_10X_VDEV_SET_PARAM_CMDID,
+ .vdev_install_key_cmdid = WMI_10X_VDEV_INSTALL_KEY_CMDID,
+ .peer_create_cmdid = WMI_10X_PEER_CREATE_CMDID,
+ .peer_delete_cmdid = WMI_10X_PEER_DELETE_CMDID,
+ .peer_flush_tids_cmdid = WMI_10X_PEER_FLUSH_TIDS_CMDID,
+ .peer_set_param_cmdid = WMI_10X_PEER_SET_PARAM_CMDID,
+ .peer_assoc_cmdid = WMI_10X_PEER_ASSOC_CMDID,
+ .peer_add_wds_entry_cmdid = WMI_10X_PEER_ADD_WDS_ENTRY_CMDID,
+ .peer_remove_wds_entry_cmdid = WMI_10X_PEER_REMOVE_WDS_ENTRY_CMDID,
+ .peer_mcast_group_cmdid = WMI_10X_PEER_MCAST_GROUP_CMDID,
+ .bcn_tx_cmdid = WMI_10X_BCN_TX_CMDID,
+ .pdev_send_bcn_cmdid = WMI_10X_PDEV_SEND_BCN_CMDID,
+ .bcn_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
+ .bcn_filter_rx_cmdid = WMI_10X_BCN_FILTER_RX_CMDID,
+ .prb_req_filter_rx_cmdid = WMI_10X_PRB_REQ_FILTER_RX_CMDID,
+ .mgmt_tx_cmdid = WMI_10X_MGMT_TX_CMDID,
+ .prb_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
+ .addba_clear_resp_cmdid = WMI_10X_ADDBA_CLEAR_RESP_CMDID,
+ .addba_send_cmdid = WMI_10X_ADDBA_SEND_CMDID,
+ .addba_status_cmdid = WMI_10X_ADDBA_STATUS_CMDID,
+ .delba_send_cmdid = WMI_10X_DELBA_SEND_CMDID,
+ .addba_set_resp_cmdid = WMI_10X_ADDBA_SET_RESP_CMDID,
+ .send_singleamsdu_cmdid = WMI_10X_SEND_SINGLEAMSDU_CMDID,
+ .sta_powersave_mode_cmdid = WMI_10X_STA_POWERSAVE_MODE_CMDID,
+ .sta_powersave_param_cmdid = WMI_10X_STA_POWERSAVE_PARAM_CMDID,
+ .sta_mimo_ps_mode_cmdid = WMI_10X_STA_MIMO_PS_MODE_CMDID,
+ .pdev_dfs_enable_cmdid = WMI_10X_PDEV_DFS_ENABLE_CMDID,
+ .pdev_dfs_disable_cmdid = WMI_10X_PDEV_DFS_DISABLE_CMDID,
+ .roam_scan_mode = WMI_10X_ROAM_SCAN_MODE,
+ .roam_scan_rssi_threshold = WMI_10X_ROAM_SCAN_RSSI_THRESHOLD,
+ .roam_scan_period = WMI_10X_ROAM_SCAN_PERIOD,
+ .roam_scan_rssi_change_threshold =
+ WMI_10X_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
+ .roam_ap_profile = WMI_10X_ROAM_AP_PROFILE,
+ .ofl_scan_add_ap_profile = WMI_10X_OFL_SCAN_ADD_AP_PROFILE,
+ .ofl_scan_remove_ap_profile = WMI_10X_OFL_SCAN_REMOVE_AP_PROFILE,
+ .ofl_scan_period = WMI_10X_OFL_SCAN_PERIOD,
+ .p2p_dev_set_device_info = WMI_10X_P2P_DEV_SET_DEVICE_INFO,
+ .p2p_dev_set_discoverability = WMI_10X_P2P_DEV_SET_DISCOVERABILITY,
+ .p2p_go_set_beacon_ie = WMI_10X_P2P_GO_SET_BEACON_IE,
+ .p2p_go_set_probe_resp_ie = WMI_10X_P2P_GO_SET_PROBE_RESP_IE,
+ .p2p_set_vendor_ie_data_cmdid = WMI_CMD_UNSUPPORTED,
+ .ap_ps_peer_param_cmdid = WMI_10X_AP_PS_PEER_PARAM_CMDID,
+ .ap_ps_peer_uapsd_coex_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_rate_retry_sched_cmdid = WMI_10X_PEER_RATE_RETRY_SCHED_CMDID,
+ .wlan_profile_trigger_cmdid = WMI_10X_WLAN_PROFILE_TRIGGER_CMDID,
+ .wlan_profile_set_hist_intvl_cmdid =
+ WMI_10X_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
+ .wlan_profile_get_profile_data_cmdid =
+ WMI_10X_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
+ .wlan_profile_enable_profile_id_cmdid =
+ WMI_10X_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
+ .wlan_profile_list_profile_id_cmdid =
+ WMI_10X_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
+ .pdev_suspend_cmdid = WMI_10X_PDEV_SUSPEND_CMDID,
+ .pdev_resume_cmdid = WMI_10X_PDEV_RESUME_CMDID,
+ .add_bcn_filter_cmdid = WMI_10X_ADD_BCN_FILTER_CMDID,
+ .rmv_bcn_filter_cmdid = WMI_10X_RMV_BCN_FILTER_CMDID,
+ .wow_add_wake_pattern_cmdid = WMI_10X_WOW_ADD_WAKE_PATTERN_CMDID,
+ .wow_del_wake_pattern_cmdid = WMI_10X_WOW_DEL_WAKE_PATTERN_CMDID,
+ .wow_enable_disable_wake_event_cmdid =
+ WMI_10X_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
+ .wow_enable_cmdid = WMI_10X_WOW_ENABLE_CMDID,
+ .wow_hostwakeup_from_sleep_cmdid =
+ WMI_10X_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
+ .rtt_measreq_cmdid = WMI_10X_RTT_MEASREQ_CMDID,
+ .rtt_tsf_cmdid = WMI_10X_RTT_TSF_CMDID,
+ .vdev_spectral_scan_configure_cmdid =
+ WMI_10X_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
+ .vdev_spectral_scan_enable_cmdid =
+ WMI_10X_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
+ .request_stats_cmdid = WMI_10X_REQUEST_STATS_CMDID,
+ .set_arp_ns_offload_cmdid = WMI_CMD_UNSUPPORTED,
+ .network_list_offload_config_cmdid = WMI_CMD_UNSUPPORTED,
+ .gtk_offload_cmdid = WMI_CMD_UNSUPPORTED,
+ .csa_offload_enable_cmdid = WMI_CMD_UNSUPPORTED,
+ .csa_offload_chanswitch_cmdid = WMI_CMD_UNSUPPORTED,
+ .chatter_set_mode_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_tid_addba_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_tid_delba_cmdid = WMI_CMD_UNSUPPORTED,
+ .sta_dtim_ps_method_cmdid = WMI_CMD_UNSUPPORTED,
+ .sta_uapsd_auto_trig_cmdid = WMI_CMD_UNSUPPORTED,
+ .sta_keepalive_cmd = WMI_CMD_UNSUPPORTED,
+ .echo_cmdid = WMI_10X_ECHO_CMDID,
+ .pdev_utf_cmdid = WMI_10X_PDEV_UTF_CMDID,
+ .dbglog_cfg_cmdid = WMI_10X_DBGLOG_CFG_CMDID,
+ .pdev_qvit_cmdid = WMI_10X_PDEV_QVIT_CMDID,
+ .pdev_ftm_intg_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_set_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_get_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
+ .force_fw_hang_cmdid = WMI_CMD_UNSUPPORTED,
+ .gpio_config_cmdid = WMI_10X_GPIO_CONFIG_CMDID,
+ .gpio_output_cmdid = WMI_10X_GPIO_OUTPUT_CMDID,
+ .pdev_get_temperature_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_enable_adaptive_cca_cmdid = WMI_CMD_UNSUPPORTED,
+ .scan_update_request_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED,
+ .wlan_peer_caching_add_peer_cmdid = WMI_CMD_UNSUPPORTED,
+ .wlan_peer_caching_evict_peer_cmdid = WMI_CMD_UNSUPPORTED,
+ .wlan_peer_caching_restore_peer_cmdid = WMI_CMD_UNSUPPORTED,
+ .wlan_peer_caching_print_all_peers_info_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_update_wds_entry_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_add_proxy_sta_entry_cmdid = WMI_CMD_UNSUPPORTED,
+ .rtt_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
+ .oem_req_cmdid = WMI_CMD_UNSUPPORTED,
+ .nan_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_ratemask_cmdid = WMI_CMD_UNSUPPORTED,
+ .qboost_cfg_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_smart_ant_enable_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_smart_ant_set_rx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_smart_ant_set_tx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_smart_ant_set_train_info_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_smart_ant_set_node_config_ops_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_set_antenna_switch_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_set_ctl_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_set_mimogain_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_ratepwr_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_ratepwr_chainmsk_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_fips_cmdid = WMI_CMD_UNSUPPORTED,
+ .tt_set_conf_cmdid = WMI_CMD_UNSUPPORTED,
+ .fwtest_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_get_ani_cck_config_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_get_nfcal_power_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_get_tpc_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_get_ast_info_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_set_dscp_tid_map_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_get_info_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_get_info_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_filter_neighbor_rx_packets_cmdid = WMI_CMD_UNSUPPORTED,
+ .mu_cal_start_cmdid = WMI_CMD_UNSUPPORTED,
+ .set_cca_params_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_bss_chan_info_request_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_get_tpc_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .radar_found_cmdid = WMI_CMD_UNSUPPORTED,
+};
+
+/* 10.2.4 WMI cmd track */
+static struct wmi_cmd_map wmi_10_2_4_cmd_map = {
+ .init_cmdid = WMI_10_2_INIT_CMDID,
+ .start_scan_cmdid = WMI_10_2_START_SCAN_CMDID,
+ .stop_scan_cmdid = WMI_10_2_STOP_SCAN_CMDID,
+ .scan_chan_list_cmdid = WMI_10_2_SCAN_CHAN_LIST_CMDID,
+ .scan_sch_prio_tbl_cmdid = WMI_CMD_UNSUPPORTED,
+ .scan_prob_req_oui_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_set_regdomain_cmdid = WMI_10_2_PDEV_SET_REGDOMAIN_CMDID,
+ .pdev_set_channel_cmdid = WMI_10_2_PDEV_SET_CHANNEL_CMDID,
+ .pdev_set_param_cmdid = WMI_10_2_PDEV_SET_PARAM_CMDID,
+ .pdev_pktlog_enable_cmdid = WMI_10_2_PDEV_PKTLOG_ENABLE_CMDID,
+ .pdev_pktlog_disable_cmdid = WMI_10_2_PDEV_PKTLOG_DISABLE_CMDID,
+ .pdev_set_wmm_params_cmdid = WMI_10_2_PDEV_SET_WMM_PARAMS_CMDID,
+ .pdev_set_ht_cap_ie_cmdid = WMI_10_2_PDEV_SET_HT_CAP_IE_CMDID,
+ .pdev_set_vht_cap_ie_cmdid = WMI_10_2_PDEV_SET_VHT_CAP_IE_CMDID,
+ .pdev_set_quiet_mode_cmdid = WMI_10_2_PDEV_SET_QUIET_MODE_CMDID,
+ .pdev_green_ap_ps_enable_cmdid = WMI_10_2_PDEV_GREEN_AP_PS_ENABLE_CMDID,
+ .pdev_get_tpc_config_cmdid = WMI_10_2_PDEV_GET_TPC_CONFIG_CMDID,
+ .pdev_set_base_macaddr_cmdid = WMI_10_2_PDEV_SET_BASE_MACADDR_CMDID,
+ .vdev_create_cmdid = WMI_10_2_VDEV_CREATE_CMDID,
+ .vdev_delete_cmdid = WMI_10_2_VDEV_DELETE_CMDID,
+ .vdev_start_request_cmdid = WMI_10_2_VDEV_START_REQUEST_CMDID,
+ .vdev_restart_request_cmdid = WMI_10_2_VDEV_RESTART_REQUEST_CMDID,
+ .vdev_up_cmdid = WMI_10_2_VDEV_UP_CMDID,
+ .vdev_stop_cmdid = WMI_10_2_VDEV_STOP_CMDID,
+ .vdev_down_cmdid = WMI_10_2_VDEV_DOWN_CMDID,
+ .vdev_set_param_cmdid = WMI_10_2_VDEV_SET_PARAM_CMDID,
+ .vdev_install_key_cmdid = WMI_10_2_VDEV_INSTALL_KEY_CMDID,
+ .peer_create_cmdid = WMI_10_2_PEER_CREATE_CMDID,
+ .peer_delete_cmdid = WMI_10_2_PEER_DELETE_CMDID,
+ .peer_flush_tids_cmdid = WMI_10_2_PEER_FLUSH_TIDS_CMDID,
+ .peer_set_param_cmdid = WMI_10_2_PEER_SET_PARAM_CMDID,
+ .peer_assoc_cmdid = WMI_10_2_PEER_ASSOC_CMDID,
+ .peer_add_wds_entry_cmdid = WMI_10_2_PEER_ADD_WDS_ENTRY_CMDID,
+ .peer_remove_wds_entry_cmdid = WMI_10_2_PEER_REMOVE_WDS_ENTRY_CMDID,
+ .peer_mcast_group_cmdid = WMI_10_2_PEER_MCAST_GROUP_CMDID,
+ .bcn_tx_cmdid = WMI_10_2_BCN_TX_CMDID,
+ .pdev_send_bcn_cmdid = WMI_10_2_PDEV_SEND_BCN_CMDID,
+ .bcn_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
+ .bcn_filter_rx_cmdid = WMI_10_2_BCN_FILTER_RX_CMDID,
+ .prb_req_filter_rx_cmdid = WMI_10_2_PRB_REQ_FILTER_RX_CMDID,
+ .mgmt_tx_cmdid = WMI_10_2_MGMT_TX_CMDID,
+ .prb_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
+ .addba_clear_resp_cmdid = WMI_10_2_ADDBA_CLEAR_RESP_CMDID,
+ .addba_send_cmdid = WMI_10_2_ADDBA_SEND_CMDID,
+ .addba_status_cmdid = WMI_10_2_ADDBA_STATUS_CMDID,
+ .delba_send_cmdid = WMI_10_2_DELBA_SEND_CMDID,
+ .addba_set_resp_cmdid = WMI_10_2_ADDBA_SET_RESP_CMDID,
+ .send_singleamsdu_cmdid = WMI_10_2_SEND_SINGLEAMSDU_CMDID,
+ .sta_powersave_mode_cmdid = WMI_10_2_STA_POWERSAVE_MODE_CMDID,
+ .sta_powersave_param_cmdid = WMI_10_2_STA_POWERSAVE_PARAM_CMDID,
+ .sta_mimo_ps_mode_cmdid = WMI_10_2_STA_MIMO_PS_MODE_CMDID,
+ .pdev_dfs_enable_cmdid = WMI_10_2_PDEV_DFS_ENABLE_CMDID,
+ .pdev_dfs_disable_cmdid = WMI_10_2_PDEV_DFS_DISABLE_CMDID,
+ .roam_scan_mode = WMI_10_2_ROAM_SCAN_MODE,
+ .roam_scan_rssi_threshold = WMI_10_2_ROAM_SCAN_RSSI_THRESHOLD,
+ .roam_scan_period = WMI_10_2_ROAM_SCAN_PERIOD,
+ .roam_scan_rssi_change_threshold =
+ WMI_10_2_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
+ .roam_ap_profile = WMI_10_2_ROAM_AP_PROFILE,
+ .ofl_scan_add_ap_profile = WMI_10_2_OFL_SCAN_ADD_AP_PROFILE,
+ .ofl_scan_remove_ap_profile = WMI_10_2_OFL_SCAN_REMOVE_AP_PROFILE,
+ .ofl_scan_period = WMI_10_2_OFL_SCAN_PERIOD,
+ .p2p_dev_set_device_info = WMI_10_2_P2P_DEV_SET_DEVICE_INFO,
+ .p2p_dev_set_discoverability = WMI_10_2_P2P_DEV_SET_DISCOVERABILITY,
+ .p2p_go_set_beacon_ie = WMI_10_2_P2P_GO_SET_BEACON_IE,
+ .p2p_go_set_probe_resp_ie = WMI_10_2_P2P_GO_SET_PROBE_RESP_IE,
+ .p2p_set_vendor_ie_data_cmdid = WMI_CMD_UNSUPPORTED,
+ .ap_ps_peer_param_cmdid = WMI_10_2_AP_PS_PEER_PARAM_CMDID,
+ .ap_ps_peer_uapsd_coex_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_rate_retry_sched_cmdid = WMI_10_2_PEER_RATE_RETRY_SCHED_CMDID,
+ .wlan_profile_trigger_cmdid = WMI_10_2_WLAN_PROFILE_TRIGGER_CMDID,
+ .wlan_profile_set_hist_intvl_cmdid =
+ WMI_10_2_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
+ .wlan_profile_get_profile_data_cmdid =
+ WMI_10_2_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
+ .wlan_profile_enable_profile_id_cmdid =
+ WMI_10_2_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
+ .wlan_profile_list_profile_id_cmdid =
+ WMI_10_2_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
+ .pdev_suspend_cmdid = WMI_10_2_PDEV_SUSPEND_CMDID,
+ .pdev_resume_cmdid = WMI_10_2_PDEV_RESUME_CMDID,
+ .add_bcn_filter_cmdid = WMI_10_2_ADD_BCN_FILTER_CMDID,
+ .rmv_bcn_filter_cmdid = WMI_10_2_RMV_BCN_FILTER_CMDID,
+ .wow_add_wake_pattern_cmdid = WMI_10_2_WOW_ADD_WAKE_PATTERN_CMDID,
+ .wow_del_wake_pattern_cmdid = WMI_10_2_WOW_DEL_WAKE_PATTERN_CMDID,
+ .wow_enable_disable_wake_event_cmdid =
+ WMI_10_2_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
+ .wow_enable_cmdid = WMI_10_2_WOW_ENABLE_CMDID,
+ .wow_hostwakeup_from_sleep_cmdid =
+ WMI_10_2_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
+ .rtt_measreq_cmdid = WMI_10_2_RTT_MEASREQ_CMDID,
+ .rtt_tsf_cmdid = WMI_10_2_RTT_TSF_CMDID,
+ .vdev_spectral_scan_configure_cmdid =
+ WMI_10_2_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
+ .vdev_spectral_scan_enable_cmdid =
+ WMI_10_2_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
+ .request_stats_cmdid = WMI_10_2_REQUEST_STATS_CMDID,
+ .set_arp_ns_offload_cmdid = WMI_CMD_UNSUPPORTED,
+ .network_list_offload_config_cmdid = WMI_CMD_UNSUPPORTED,
+ .gtk_offload_cmdid = WMI_CMD_UNSUPPORTED,
+ .csa_offload_enable_cmdid = WMI_CMD_UNSUPPORTED,
+ .csa_offload_chanswitch_cmdid = WMI_CMD_UNSUPPORTED,
+ .chatter_set_mode_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_tid_addba_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_tid_delba_cmdid = WMI_CMD_UNSUPPORTED,
+ .sta_dtim_ps_method_cmdid = WMI_CMD_UNSUPPORTED,
+ .sta_uapsd_auto_trig_cmdid = WMI_CMD_UNSUPPORTED,
+ .sta_keepalive_cmd = WMI_CMD_UNSUPPORTED,
+ .echo_cmdid = WMI_10_2_ECHO_CMDID,
+ .pdev_utf_cmdid = WMI_10_2_PDEV_UTF_CMDID,
+ .dbglog_cfg_cmdid = WMI_10_2_DBGLOG_CFG_CMDID,
+ .pdev_qvit_cmdid = WMI_10_2_PDEV_QVIT_CMDID,
+ .pdev_ftm_intg_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_set_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_get_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
+ .force_fw_hang_cmdid = WMI_CMD_UNSUPPORTED,
+ .gpio_config_cmdid = WMI_10_2_GPIO_CONFIG_CMDID,
+ .gpio_output_cmdid = WMI_10_2_GPIO_OUTPUT_CMDID,
+ .pdev_get_temperature_cmdid = WMI_10_2_PDEV_GET_TEMPERATURE_CMDID,
+ .pdev_enable_adaptive_cca_cmdid = WMI_10_2_SET_CCA_PARAMS,
+ .scan_update_request_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED,
+ .wlan_peer_caching_add_peer_cmdid = WMI_CMD_UNSUPPORTED,
+ .wlan_peer_caching_evict_peer_cmdid = WMI_CMD_UNSUPPORTED,
+ .wlan_peer_caching_restore_peer_cmdid = WMI_CMD_UNSUPPORTED,
+ .wlan_peer_caching_print_all_peers_info_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_update_wds_entry_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_add_proxy_sta_entry_cmdid = WMI_CMD_UNSUPPORTED,
+ .rtt_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
+ .oem_req_cmdid = WMI_CMD_UNSUPPORTED,
+ .nan_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_ratemask_cmdid = WMI_CMD_UNSUPPORTED,
+ .qboost_cfg_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_smart_ant_enable_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_smart_ant_set_rx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_smart_ant_set_tx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_smart_ant_set_train_info_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_smart_ant_set_node_config_ops_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_set_antenna_switch_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_set_ctl_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_set_mimogain_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_ratepwr_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_ratepwr_chainmsk_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_fips_cmdid = WMI_CMD_UNSUPPORTED,
+ .tt_set_conf_cmdid = WMI_CMD_UNSUPPORTED,
+ .fwtest_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_get_ani_cck_config_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_get_nfcal_power_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_get_tpc_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_get_ast_info_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_set_dscp_tid_map_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_get_info_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_get_info_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_filter_neighbor_rx_packets_cmdid = WMI_CMD_UNSUPPORTED,
+ .mu_cal_start_cmdid = WMI_CMD_UNSUPPORTED,
+ .set_cca_params_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_bss_chan_info_request_cmdid =
+ WMI_10_2_PDEV_BSS_CHAN_INFO_REQUEST_CMDID,
+ .pdev_get_tpc_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .radar_found_cmdid = WMI_CMD_UNSUPPORTED,
+ .set_bb_timing_cmdid = WMI_10_2_PDEV_SET_BB_TIMING_CONFIG_CMDID,
+};
+
+/* 10.4 WMI cmd map */
+static struct wmi_cmd_map wmi_10_4_cmd_map = {
+ .init_cmdid = WMI_10_4_INIT_CMDID,
+ .start_scan_cmdid = WMI_10_4_START_SCAN_CMDID,
+ .stop_scan_cmdid = WMI_10_4_STOP_SCAN_CMDID,
+ .scan_chan_list_cmdid = WMI_10_4_SCAN_CHAN_LIST_CMDID,
+ .scan_sch_prio_tbl_cmdid = WMI_10_4_SCAN_SCH_PRIO_TBL_CMDID,
+ .scan_prob_req_oui_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_set_regdomain_cmdid = WMI_10_4_PDEV_SET_REGDOMAIN_CMDID,
+ .pdev_set_channel_cmdid = WMI_10_4_PDEV_SET_CHANNEL_CMDID,
+ .pdev_set_param_cmdid = WMI_10_4_PDEV_SET_PARAM_CMDID,
+ .pdev_pktlog_enable_cmdid = WMI_10_4_PDEV_PKTLOG_ENABLE_CMDID,
+ .pdev_pktlog_disable_cmdid = WMI_10_4_PDEV_PKTLOG_DISABLE_CMDID,
+ .pdev_set_wmm_params_cmdid = WMI_10_4_PDEV_SET_WMM_PARAMS_CMDID,
+ .pdev_set_ht_cap_ie_cmdid = WMI_10_4_PDEV_SET_HT_CAP_IE_CMDID,
+ .pdev_set_vht_cap_ie_cmdid = WMI_10_4_PDEV_SET_VHT_CAP_IE_CMDID,
+ .pdev_set_dscp_tid_map_cmdid = WMI_10_4_PDEV_SET_DSCP_TID_MAP_CMDID,
+ .pdev_set_quiet_mode_cmdid = WMI_10_4_PDEV_SET_QUIET_MODE_CMDID,
+ .pdev_green_ap_ps_enable_cmdid = WMI_10_4_PDEV_GREEN_AP_PS_ENABLE_CMDID,
+ .pdev_get_tpc_config_cmdid = WMI_10_4_PDEV_GET_TPC_CONFIG_CMDID,
+ .pdev_set_base_macaddr_cmdid = WMI_10_4_PDEV_SET_BASE_MACADDR_CMDID,
+ .vdev_create_cmdid = WMI_10_4_VDEV_CREATE_CMDID,
+ .vdev_delete_cmdid = WMI_10_4_VDEV_DELETE_CMDID,
+ .vdev_start_request_cmdid = WMI_10_4_VDEV_START_REQUEST_CMDID,
+ .vdev_restart_request_cmdid = WMI_10_4_VDEV_RESTART_REQUEST_CMDID,
+ .vdev_up_cmdid = WMI_10_4_VDEV_UP_CMDID,
+ .vdev_stop_cmdid = WMI_10_4_VDEV_STOP_CMDID,
+ .vdev_down_cmdid = WMI_10_4_VDEV_DOWN_CMDID,
+ .vdev_set_param_cmdid = WMI_10_4_VDEV_SET_PARAM_CMDID,
+ .vdev_install_key_cmdid = WMI_10_4_VDEV_INSTALL_KEY_CMDID,
+ .peer_create_cmdid = WMI_10_4_PEER_CREATE_CMDID,
+ .peer_delete_cmdid = WMI_10_4_PEER_DELETE_CMDID,
+ .peer_flush_tids_cmdid = WMI_10_4_PEER_FLUSH_TIDS_CMDID,
+ .peer_set_param_cmdid = WMI_10_4_PEER_SET_PARAM_CMDID,
+ .peer_assoc_cmdid = WMI_10_4_PEER_ASSOC_CMDID,
+ .peer_add_wds_entry_cmdid = WMI_10_4_PEER_ADD_WDS_ENTRY_CMDID,
+ .peer_remove_wds_entry_cmdid = WMI_10_4_PEER_REMOVE_WDS_ENTRY_CMDID,
+ .peer_mcast_group_cmdid = WMI_10_4_PEER_MCAST_GROUP_CMDID,
+ .bcn_tx_cmdid = WMI_10_4_BCN_TX_CMDID,
+ .pdev_send_bcn_cmdid = WMI_10_4_PDEV_SEND_BCN_CMDID,
+ .bcn_tmpl_cmdid = WMI_10_4_BCN_PRB_TMPL_CMDID,
+ .bcn_filter_rx_cmdid = WMI_10_4_BCN_FILTER_RX_CMDID,
+ .prb_req_filter_rx_cmdid = WMI_10_4_PRB_REQ_FILTER_RX_CMDID,
+ .mgmt_tx_cmdid = WMI_10_4_MGMT_TX_CMDID,
+ .prb_tmpl_cmdid = WMI_10_4_PRB_TMPL_CMDID,
+ .addba_clear_resp_cmdid = WMI_10_4_ADDBA_CLEAR_RESP_CMDID,
+ .addba_send_cmdid = WMI_10_4_ADDBA_SEND_CMDID,
+ .addba_status_cmdid = WMI_10_4_ADDBA_STATUS_CMDID,
+ .delba_send_cmdid = WMI_10_4_DELBA_SEND_CMDID,
+ .addba_set_resp_cmdid = WMI_10_4_ADDBA_SET_RESP_CMDID,
+ .send_singleamsdu_cmdid = WMI_10_4_SEND_SINGLEAMSDU_CMDID,
+ .sta_powersave_mode_cmdid = WMI_10_4_STA_POWERSAVE_MODE_CMDID,
+ .sta_powersave_param_cmdid = WMI_10_4_STA_POWERSAVE_PARAM_CMDID,
+ .sta_mimo_ps_mode_cmdid = WMI_10_4_STA_MIMO_PS_MODE_CMDID,
+ .pdev_dfs_enable_cmdid = WMI_10_4_PDEV_DFS_ENABLE_CMDID,
+ .pdev_dfs_disable_cmdid = WMI_10_4_PDEV_DFS_DISABLE_CMDID,
+ .roam_scan_mode = WMI_10_4_ROAM_SCAN_MODE,
+ .roam_scan_rssi_threshold = WMI_10_4_ROAM_SCAN_RSSI_THRESHOLD,
+ .roam_scan_period = WMI_10_4_ROAM_SCAN_PERIOD,
+ .roam_scan_rssi_change_threshold =
+ WMI_10_4_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
+ .roam_ap_profile = WMI_10_4_ROAM_AP_PROFILE,
+ .ofl_scan_add_ap_profile = WMI_10_4_OFL_SCAN_ADD_AP_PROFILE,
+ .ofl_scan_remove_ap_profile = WMI_10_4_OFL_SCAN_REMOVE_AP_PROFILE,
+ .ofl_scan_period = WMI_10_4_OFL_SCAN_PERIOD,
+ .p2p_dev_set_device_info = WMI_10_4_P2P_DEV_SET_DEVICE_INFO,
+ .p2p_dev_set_discoverability = WMI_10_4_P2P_DEV_SET_DISCOVERABILITY,
+ .p2p_go_set_beacon_ie = WMI_10_4_P2P_GO_SET_BEACON_IE,
+ .p2p_go_set_probe_resp_ie = WMI_10_4_P2P_GO_SET_PROBE_RESP_IE,
+ .p2p_set_vendor_ie_data_cmdid = WMI_10_4_P2P_SET_VENDOR_IE_DATA_CMDID,
+ .ap_ps_peer_param_cmdid = WMI_10_4_AP_PS_PEER_PARAM_CMDID,
+ .ap_ps_peer_uapsd_coex_cmdid = WMI_10_4_AP_PS_PEER_UAPSD_COEX_CMDID,
+ .peer_rate_retry_sched_cmdid = WMI_10_4_PEER_RATE_RETRY_SCHED_CMDID,
+ .wlan_profile_trigger_cmdid = WMI_10_4_WLAN_PROFILE_TRIGGER_CMDID,
+ .wlan_profile_set_hist_intvl_cmdid =
+ WMI_10_4_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
+ .wlan_profile_get_profile_data_cmdid =
+ WMI_10_4_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
+ .wlan_profile_enable_profile_id_cmdid =
+ WMI_10_4_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
+ .wlan_profile_list_profile_id_cmdid =
+ WMI_10_4_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
+ .pdev_suspend_cmdid = WMI_10_4_PDEV_SUSPEND_CMDID,
+ .pdev_resume_cmdid = WMI_10_4_PDEV_RESUME_CMDID,
+ .add_bcn_filter_cmdid = WMI_10_4_ADD_BCN_FILTER_CMDID,
+ .rmv_bcn_filter_cmdid = WMI_10_4_RMV_BCN_FILTER_CMDID,
+ .wow_add_wake_pattern_cmdid = WMI_10_4_WOW_ADD_WAKE_PATTERN_CMDID,
+ .wow_del_wake_pattern_cmdid = WMI_10_4_WOW_DEL_WAKE_PATTERN_CMDID,
+ .wow_enable_disable_wake_event_cmdid =
+ WMI_10_4_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
+ .wow_enable_cmdid = WMI_10_4_WOW_ENABLE_CMDID,
+ .wow_hostwakeup_from_sleep_cmdid =
+ WMI_10_4_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
+ .rtt_measreq_cmdid = WMI_10_4_RTT_MEASREQ_CMDID,
+ .rtt_tsf_cmdid = WMI_10_4_RTT_TSF_CMDID,
+ .vdev_spectral_scan_configure_cmdid =
+ WMI_10_4_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
+ .vdev_spectral_scan_enable_cmdid =
+ WMI_10_4_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
+ .request_stats_cmdid = WMI_10_4_REQUEST_STATS_CMDID,
+ .set_arp_ns_offload_cmdid = WMI_CMD_UNSUPPORTED,
+ .network_list_offload_config_cmdid = WMI_CMD_UNSUPPORTED,
+ .gtk_offload_cmdid = WMI_10_4_GTK_OFFLOAD_CMDID,
+ .csa_offload_enable_cmdid = WMI_10_4_CSA_OFFLOAD_ENABLE_CMDID,
+ .csa_offload_chanswitch_cmdid = WMI_10_4_CSA_OFFLOAD_CHANSWITCH_CMDID,
+ .chatter_set_mode_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_tid_addba_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_tid_delba_cmdid = WMI_CMD_UNSUPPORTED,
+ .sta_dtim_ps_method_cmdid = WMI_CMD_UNSUPPORTED,
+ .sta_uapsd_auto_trig_cmdid = WMI_CMD_UNSUPPORTED,
+ .sta_keepalive_cmd = WMI_CMD_UNSUPPORTED,
+ .echo_cmdid = WMI_10_4_ECHO_CMDID,
+ .pdev_utf_cmdid = WMI_10_4_PDEV_UTF_CMDID,
+ .dbglog_cfg_cmdid = WMI_10_4_DBGLOG_CFG_CMDID,
+ .pdev_qvit_cmdid = WMI_10_4_PDEV_QVIT_CMDID,
+ .pdev_ftm_intg_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_set_keepalive_cmdid = WMI_10_4_VDEV_SET_KEEPALIVE_CMDID,
+ .vdev_get_keepalive_cmdid = WMI_10_4_VDEV_GET_KEEPALIVE_CMDID,
+ .force_fw_hang_cmdid = WMI_10_4_FORCE_FW_HANG_CMDID,
+ .gpio_config_cmdid = WMI_10_4_GPIO_CONFIG_CMDID,
+ .gpio_output_cmdid = WMI_10_4_GPIO_OUTPUT_CMDID,
+ .pdev_get_temperature_cmdid = WMI_10_4_PDEV_GET_TEMPERATURE_CMDID,
+ .vdev_set_wmm_params_cmdid = WMI_CMD_UNSUPPORTED,
+ .adaptive_qcs_cmdid = WMI_CMD_UNSUPPORTED,
+ .scan_update_request_cmdid = WMI_10_4_SCAN_UPDATE_REQUEST_CMDID,
+ .vdev_standby_response_cmdid = WMI_10_4_VDEV_STANDBY_RESPONSE_CMDID,
+ .vdev_resume_response_cmdid = WMI_10_4_VDEV_RESUME_RESPONSE_CMDID,
+ .wlan_peer_caching_add_peer_cmdid =
+ WMI_10_4_WLAN_PEER_CACHING_ADD_PEER_CMDID,
+ .wlan_peer_caching_evict_peer_cmdid =
+ WMI_10_4_WLAN_PEER_CACHING_EVICT_PEER_CMDID,
+ .wlan_peer_caching_restore_peer_cmdid =
+ WMI_10_4_WLAN_PEER_CACHING_RESTORE_PEER_CMDID,
+ .wlan_peer_caching_print_all_peers_info_cmdid =
+ WMI_10_4_WLAN_PEER_CACHING_PRINT_ALL_PEERS_INFO_CMDID,
+ .peer_update_wds_entry_cmdid = WMI_10_4_PEER_UPDATE_WDS_ENTRY_CMDID,
+ .peer_add_proxy_sta_entry_cmdid =
+ WMI_10_4_PEER_ADD_PROXY_STA_ENTRY_CMDID,
+ .rtt_keepalive_cmdid = WMI_10_4_RTT_KEEPALIVE_CMDID,
+ .oem_req_cmdid = WMI_10_4_OEM_REQ_CMDID,
+ .nan_cmdid = WMI_10_4_NAN_CMDID,
+ .vdev_ratemask_cmdid = WMI_10_4_VDEV_RATEMASK_CMDID,
+ .qboost_cfg_cmdid = WMI_10_4_QBOOST_CFG_CMDID,
+ .pdev_smart_ant_enable_cmdid = WMI_10_4_PDEV_SMART_ANT_ENABLE_CMDID,
+ .pdev_smart_ant_set_rx_antenna_cmdid =
+ WMI_10_4_PDEV_SMART_ANT_SET_RX_ANTENNA_CMDID,
+ .peer_smart_ant_set_tx_antenna_cmdid =
+ WMI_10_4_PEER_SMART_ANT_SET_TX_ANTENNA_CMDID,
+ .peer_smart_ant_set_train_info_cmdid =
+ WMI_10_4_PEER_SMART_ANT_SET_TRAIN_INFO_CMDID,
+ .peer_smart_ant_set_node_config_ops_cmdid =
+ WMI_10_4_PEER_SMART_ANT_SET_NODE_CONFIG_OPS_CMDID,
+ .pdev_set_antenna_switch_table_cmdid =
+ WMI_10_4_PDEV_SET_ANTENNA_SWITCH_TABLE_CMDID,
+ .pdev_set_ctl_table_cmdid = WMI_10_4_PDEV_SET_CTL_TABLE_CMDID,
+ .pdev_set_mimogain_table_cmdid = WMI_10_4_PDEV_SET_MIMOGAIN_TABLE_CMDID,
+ .pdev_ratepwr_table_cmdid = WMI_10_4_PDEV_RATEPWR_TABLE_CMDID,
+ .pdev_ratepwr_chainmsk_table_cmdid =
+ WMI_10_4_PDEV_RATEPWR_CHAINMSK_TABLE_CMDID,
+ .pdev_fips_cmdid = WMI_10_4_PDEV_FIPS_CMDID,
+ .tt_set_conf_cmdid = WMI_10_4_TT_SET_CONF_CMDID,
+ .fwtest_cmdid = WMI_10_4_FWTEST_CMDID,
+ .vdev_atf_request_cmdid = WMI_10_4_VDEV_ATF_REQUEST_CMDID,
+ .peer_atf_request_cmdid = WMI_10_4_PEER_ATF_REQUEST_CMDID,
+ .pdev_get_ani_cck_config_cmdid = WMI_10_4_PDEV_GET_ANI_CCK_CONFIG_CMDID,
+ .pdev_get_ani_ofdm_config_cmdid =
+ WMI_10_4_PDEV_GET_ANI_OFDM_CONFIG_CMDID,
+ .pdev_reserve_ast_entry_cmdid = WMI_10_4_PDEV_RESERVE_AST_ENTRY_CMDID,
+ .pdev_get_nfcal_power_cmdid = WMI_10_4_PDEV_GET_NFCAL_POWER_CMDID,
+ .pdev_get_tpc_cmdid = WMI_10_4_PDEV_GET_TPC_CMDID,
+ .pdev_get_ast_info_cmdid = WMI_10_4_PDEV_GET_AST_INFO_CMDID,
+ .vdev_set_dscp_tid_map_cmdid = WMI_10_4_VDEV_SET_DSCP_TID_MAP_CMDID,
+ .pdev_get_info_cmdid = WMI_10_4_PDEV_GET_INFO_CMDID,
+ .vdev_get_info_cmdid = WMI_10_4_VDEV_GET_INFO_CMDID,
+ .vdev_filter_neighbor_rx_packets_cmdid =
+ WMI_10_4_VDEV_FILTER_NEIGHBOR_RX_PACKETS_CMDID,
+ .mu_cal_start_cmdid = WMI_10_4_MU_CAL_START_CMDID,
+ .set_cca_params_cmdid = WMI_10_4_SET_CCA_PARAMS_CMDID,
+ .pdev_bss_chan_info_request_cmdid =
+ WMI_10_4_PDEV_BSS_CHAN_INFO_REQUEST_CMDID,
+ .ext_resource_cfg_cmdid = WMI_10_4_EXT_RESOURCE_CFG_CMDID,
+ .vdev_set_ie_cmdid = WMI_10_4_VDEV_SET_IE_CMDID,
+ .set_lteu_config_cmdid = WMI_10_4_SET_LTEU_CONFIG_CMDID,
+ .atf_ssid_grouping_request_cmdid =
+ WMI_10_4_ATF_SSID_GROUPING_REQUEST_CMDID,
+ .peer_atf_ext_request_cmdid = WMI_10_4_PEER_ATF_EXT_REQUEST_CMDID,
+ .set_periodic_channel_stats_cfg_cmdid =
+ WMI_10_4_SET_PERIODIC_CHANNEL_STATS_CONFIG,
+ .peer_bwf_request_cmdid = WMI_10_4_PEER_BWF_REQUEST_CMDID,
+ .btcoex_cfg_cmdid = WMI_10_4_BTCOEX_CFG_CMDID,
+ .peer_tx_mu_txmit_count_cmdid = WMI_10_4_PEER_TX_MU_TXMIT_COUNT_CMDID,
+ .peer_tx_mu_txmit_rstcnt_cmdid = WMI_10_4_PEER_TX_MU_TXMIT_RSTCNT_CMDID,
+ .peer_gid_userpos_list_cmdid = WMI_10_4_PEER_GID_USERPOS_LIST_CMDID,
+ .pdev_check_cal_version_cmdid = WMI_10_4_PDEV_CHECK_CAL_VERSION_CMDID,
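+ /* 'cmid' (not 'cmdid') follows this field's declared name */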
+ .coex_version_cfg_cmid = WMI_10_4_COEX_VERSION_CFG_CMID,
+ .pdev_get_rx_filter_cmdid = WMI_10_4_PDEV_GET_RX_FILTER_CMDID,
+ .pdev_extended_nss_cfg_cmdid = WMI_10_4_PDEV_EXTENDED_NSS_CFG_CMDID,
+ .vdev_set_scan_nac_rssi_cmdid = WMI_10_4_VDEV_SET_SCAN_NAC_RSSI_CMDID,
+ .prog_gpio_band_select_cmdid = WMI_10_4_PROG_GPIO_BAND_SELECT_CMDID,
+ .config_smart_logging_cmdid = WMI_10_4_CONFIG_SMART_LOGGING_CMDID,
+ .debug_fatal_condition_cmdid = WMI_10_4_DEBUG_FATAL_CONDITION_CMDID,
+ .get_tsf_timer_cmdid = WMI_10_4_GET_TSF_TIMER_CMDID,
+ .pdev_get_tpc_table_cmdid = WMI_10_4_PDEV_GET_TPC_TABLE_CMDID,
+ .vdev_sifs_trigger_time_cmdid = WMI_10_4_VDEV_SIFS_TRIGGER_TIME_CMDID,
+ .pdev_wds_entry_list_cmdid = WMI_10_4_PDEV_WDS_ENTRY_LIST_CMDID,
+ .tdls_set_state_cmdid = WMI_10_4_TDLS_SET_STATE_CMDID,
+ .tdls_peer_update_cmdid = WMI_10_4_TDLS_PEER_UPDATE_CMDID,
+ .tdls_set_offchan_mode_cmdid = WMI_10_4_TDLS_SET_OFFCHAN_MODE_CMDID,
+ .radar_found_cmdid = WMI_10_4_RADAR_FOUND_CMDID,
+ .per_peer_per_tid_config_cmdid = WMI_10_4_PER_PEER_PER_TID_CONFIG_CMDID,
+};
+
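+/* WMI peer param map */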
+static struct wmi_peer_param_map wmi_peer_param_map = {
+ .smps_state = WMI_PEER_SMPS_STATE,
+ .ampdu = WMI_PEER_AMPDU,
+ .authorize = WMI_PEER_AUTHORIZE,
+ .chan_width = WMI_PEER_CHAN_WIDTH,
+ .nss = WMI_PEER_NSS,
+ .use_4addr = WMI_PEER_USE_4ADDR,
+ .use_fixed_power = WMI_PEER_USE_FIXED_PWR,
+ .debug = WMI_PEER_DEBUG,
+ .phymode = WMI_PEER_PHYMODE,
+ .dummy_var = WMI_PEER_DUMMY_VAR,
+};
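+
+/*
+ * Editorial sketch, not part of the original patch: callers are expected
+ * to resolve the abstract field through a per-firmware pointer to this
+ * map instead of using raw WMI_PEER_* values; the helper and pointer
+ * names below are assumptions:
+ *
+ *	ath10k_wmi_peer_set_param(ar, vdev_id, addr,
+ *				  ar->wmi.peer_param->authorize, 1);
+ */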
+
+/* MAIN WMI VDEV param map */
+static struct wmi_vdev_param_map wmi_vdev_param_map = {
+ .rts_threshold = WMI_VDEV_PARAM_RTS_THRESHOLD,
+ .fragmentation_threshold = WMI_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
+ .beacon_interval = WMI_VDEV_PARAM_BEACON_INTERVAL,
+ .listen_interval = WMI_VDEV_PARAM_LISTEN_INTERVAL,
+ .multicast_rate = WMI_VDEV_PARAM_MULTICAST_RATE,
+ .mgmt_tx_rate = WMI_VDEV_PARAM_MGMT_TX_RATE,
+ .slot_time = WMI_VDEV_PARAM_SLOT_TIME,
+ .preamble = WMI_VDEV_PARAM_PREAMBLE,
+ .swba_time = WMI_VDEV_PARAM_SWBA_TIME,
+ .wmi_vdev_stats_update_period = WMI_VDEV_STATS_UPDATE_PERIOD,
+ .wmi_vdev_pwrsave_ageout_time = WMI_VDEV_PWRSAVE_AGEOUT_TIME,
+ .wmi_vdev_host_swba_interval = WMI_VDEV_HOST_SWBA_INTERVAL,
+ .dtim_period = WMI_VDEV_PARAM_DTIM_PERIOD,
+ .wmi_vdev_oc_scheduler_air_time_limit =
+ WMI_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
+ .wds = WMI_VDEV_PARAM_WDS,
+ .atim_window = WMI_VDEV_PARAM_ATIM_WINDOW,
+ .bmiss_count_max = WMI_VDEV_PARAM_BMISS_COUNT_MAX,
+ .bmiss_first_bcnt = WMI_VDEV_PARAM_BMISS_FIRST_BCNT,
+ .bmiss_final_bcnt = WMI_VDEV_PARAM_BMISS_FINAL_BCNT,
+ .feature_wmm = WMI_VDEV_PARAM_FEATURE_WMM,
+ .chwidth = WMI_VDEV_PARAM_CHWIDTH,
+ .chextoffset = WMI_VDEV_PARAM_CHEXTOFFSET,
+ .disable_htprotection = WMI_VDEV_PARAM_DISABLE_HTPROTECTION,
+ .sta_quickkickout = WMI_VDEV_PARAM_STA_QUICKKICKOUT,
+ .mgmt_rate = WMI_VDEV_PARAM_MGMT_RATE,
+ .protection_mode = WMI_VDEV_PARAM_PROTECTION_MODE,
+ .fixed_rate = WMI_VDEV_PARAM_FIXED_RATE,
+ .sgi = WMI_VDEV_PARAM_SGI,
+ .ldpc = WMI_VDEV_PARAM_LDPC,
+ .tx_stbc = WMI_VDEV_PARAM_TX_STBC,
+ .rx_stbc = WMI_VDEV_PARAM_RX_STBC,
+ .intra_bss_fwd = WMI_VDEV_PARAM_INTRA_BSS_FWD,
+ .def_keyid = WMI_VDEV_PARAM_DEF_KEYID,
+ .nss = WMI_VDEV_PARAM_NSS,
+ .bcast_data_rate = WMI_VDEV_PARAM_BCAST_DATA_RATE,
+ .mcast_data_rate = WMI_VDEV_PARAM_MCAST_DATA_RATE,
+ .mcast_indicate = WMI_VDEV_PARAM_MCAST_INDICATE,
+ .dhcp_indicate = WMI_VDEV_PARAM_DHCP_INDICATE,
+ .unknown_dest_indicate = WMI_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
+ .ap_keepalive_min_idle_inactive_time_secs =
+ WMI_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
+ .ap_keepalive_max_idle_inactive_time_secs =
+ WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
+ .ap_keepalive_max_unresponsive_time_secs =
+ WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
+ .ap_enable_nawds = WMI_VDEV_PARAM_AP_ENABLE_NAWDS,
+ .mcast2ucast_set = WMI_VDEV_PARAM_UNSUPPORTED,
+ .enable_rtscts = WMI_VDEV_PARAM_ENABLE_RTSCTS,
+ .txbf = WMI_VDEV_PARAM_TXBF,
+ .packet_powersave = WMI_VDEV_PARAM_PACKET_POWERSAVE,
+ .drop_unencry = WMI_VDEV_PARAM_DROP_UNENCRY,
+ .tx_encap_type = WMI_VDEV_PARAM_TX_ENCAP_TYPE,
+ .ap_detect_out_of_sync_sleeping_sta_time_secs =
+ WMI_VDEV_PARAM_UNSUPPORTED,
+ .rc_num_retries = WMI_VDEV_PARAM_UNSUPPORTED,
+ .cabq_maxdur = WMI_VDEV_PARAM_UNSUPPORTED,
+ .mfptest_set = WMI_VDEV_PARAM_UNSUPPORTED,
+ .rts_fixed_rate = WMI_VDEV_PARAM_UNSUPPORTED,
+ .vht_sgimask = WMI_VDEV_PARAM_UNSUPPORTED,
+ .vht80_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
+ .early_rx_adjust_enable = WMI_VDEV_PARAM_UNSUPPORTED,
+ .early_rx_tgt_bmiss_num = WMI_VDEV_PARAM_UNSUPPORTED,
+ .early_rx_bmiss_sample_cycle = WMI_VDEV_PARAM_UNSUPPORTED,
+ .early_rx_slop_step = WMI_VDEV_PARAM_UNSUPPORTED,
+ .early_rx_init_slop = WMI_VDEV_PARAM_UNSUPPORTED,
+ .early_rx_adjust_pause = WMI_VDEV_PARAM_UNSUPPORTED,
+ .proxy_sta = WMI_VDEV_PARAM_UNSUPPORTED,
+ .meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
+ .rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
+ .bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
+ .disable_4addr_src_lrn = WMI_VDEV_PARAM_UNSUPPORTED,
+ .rtt_responder_role = WMI_VDEV_PARAM_UNSUPPORTED,
+};
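+
+/*
+ * Editorial sketch, not part of the original patch: a vdev param that a
+ * map leaves at WMI_VDEV_PARAM_UNSUPPORTED is meant to be rejected before
+ * the command is built, along these lines (diagnostics illustrative):
+ *
+ *	if (param_id == WMI_VDEV_PARAM_UNSUPPORTED)
+ *		return -EOPNOTSUPP;
+ */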
+
+/* 10.X WMI VDEV param map */
+static struct wmi_vdev_param_map wmi_10x_vdev_param_map = {
+ .rts_threshold = WMI_10X_VDEV_PARAM_RTS_THRESHOLD,
+ .fragmentation_threshold = WMI_10X_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
+ .beacon_interval = WMI_10X_VDEV_PARAM_BEACON_INTERVAL,
+ .listen_interval = WMI_10X_VDEV_PARAM_LISTEN_INTERVAL,
+ .multicast_rate = WMI_10X_VDEV_PARAM_MULTICAST_RATE,
+ .mgmt_tx_rate = WMI_10X_VDEV_PARAM_MGMT_TX_RATE,
+ .slot_time = WMI_10X_VDEV_PARAM_SLOT_TIME,
+ .preamble = WMI_10X_VDEV_PARAM_PREAMBLE,
+ .swba_time = WMI_10X_VDEV_PARAM_SWBA_TIME,
+ .wmi_vdev_stats_update_period = WMI_10X_VDEV_STATS_UPDATE_PERIOD,
+ .wmi_vdev_pwrsave_ageout_time = WMI_10X_VDEV_PWRSAVE_AGEOUT_TIME,
+ .wmi_vdev_host_swba_interval = WMI_10X_VDEV_HOST_SWBA_INTERVAL,
+ .dtim_period = WMI_10X_VDEV_PARAM_DTIM_PERIOD,
+ .wmi_vdev_oc_scheduler_air_time_limit =
+ WMI_10X_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
+ .wds = WMI_10X_VDEV_PARAM_WDS,
+ .atim_window = WMI_10X_VDEV_PARAM_ATIM_WINDOW,
+ .bmiss_count_max = WMI_10X_VDEV_PARAM_BMISS_COUNT_MAX,
+ .bmiss_first_bcnt = WMI_VDEV_PARAM_UNSUPPORTED,
+ .bmiss_final_bcnt = WMI_VDEV_PARAM_UNSUPPORTED,
+ .feature_wmm = WMI_10X_VDEV_PARAM_FEATURE_WMM,
+ .chwidth = WMI_10X_VDEV_PARAM_CHWIDTH,
+ .chextoffset = WMI_10X_VDEV_PARAM_CHEXTOFFSET,
+ .disable_htprotection = WMI_10X_VDEV_PARAM_DISABLE_HTPROTECTION,
+ .sta_quickkickout = WMI_10X_VDEV_PARAM_STA_QUICKKICKOUT,
+ .mgmt_rate = WMI_10X_VDEV_PARAM_MGMT_RATE,
+ .protection_mode = WMI_10X_VDEV_PARAM_PROTECTION_MODE,
+ .fixed_rate = WMI_10X_VDEV_PARAM_FIXED_RATE,
+ .sgi = WMI_10X_VDEV_PARAM_SGI,
+ .ldpc = WMI_10X_VDEV_PARAM_LDPC,
+ .tx_stbc = WMI_10X_VDEV_PARAM_TX_STBC,
+ .rx_stbc = WMI_10X_VDEV_PARAM_RX_STBC,
+ .intra_bss_fwd = WMI_10X_VDEV_PARAM_INTRA_BSS_FWD,
+ .def_keyid = WMI_10X_VDEV_PARAM_DEF_KEYID,
+ .nss = WMI_10X_VDEV_PARAM_NSS,
+ .bcast_data_rate = WMI_10X_VDEV_PARAM_BCAST_DATA_RATE,
+ .mcast_data_rate = WMI_10X_VDEV_PARAM_MCAST_DATA_RATE,
+ .mcast_indicate = WMI_10X_VDEV_PARAM_MCAST_INDICATE,
+ .dhcp_indicate = WMI_10X_VDEV_PARAM_DHCP_INDICATE,
+ .unknown_dest_indicate = WMI_10X_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
+ .ap_keepalive_min_idle_inactive_time_secs =
+ WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
+ .ap_keepalive_max_idle_inactive_time_secs =
+ WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
+ .ap_keepalive_max_unresponsive_time_secs =
+ WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
+ .ap_enable_nawds = WMI_10X_VDEV_PARAM_AP_ENABLE_NAWDS,
+ .mcast2ucast_set = WMI_10X_VDEV_PARAM_MCAST2UCAST_SET,
+ .enable_rtscts = WMI_10X_VDEV_PARAM_ENABLE_RTSCTS,
+ .txbf = WMI_VDEV_PARAM_UNSUPPORTED,
+ .packet_powersave = WMI_VDEV_PARAM_UNSUPPORTED,
+ .drop_unencry = WMI_VDEV_PARAM_UNSUPPORTED,
+ .tx_encap_type = WMI_VDEV_PARAM_UNSUPPORTED,
+ .ap_detect_out_of_sync_sleeping_sta_time_secs =
+ WMI_10X_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
+ .rc_num_retries = WMI_VDEV_PARAM_UNSUPPORTED,
+ .cabq_maxdur = WMI_VDEV_PARAM_UNSUPPORTED,
+ .mfptest_set = WMI_VDEV_PARAM_UNSUPPORTED,
+ .rts_fixed_rate = WMI_VDEV_PARAM_UNSUPPORTED,
+ .vht_sgimask = WMI_VDEV_PARAM_UNSUPPORTED,
+ .vht80_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
+ .early_rx_adjust_enable = WMI_VDEV_PARAM_UNSUPPORTED,
+ .early_rx_tgt_bmiss_num = WMI_VDEV_PARAM_UNSUPPORTED,
+ .early_rx_bmiss_sample_cycle = WMI_VDEV_PARAM_UNSUPPORTED,
+ .early_rx_slop_step = WMI_VDEV_PARAM_UNSUPPORTED,
+ .early_rx_init_slop = WMI_VDEV_PARAM_UNSUPPORTED,
+ .early_rx_adjust_pause = WMI_VDEV_PARAM_UNSUPPORTED,
+ .proxy_sta = WMI_VDEV_PARAM_UNSUPPORTED,
+ .meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
+ .rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
+ .bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
+ .disable_4addr_src_lrn = WMI_VDEV_PARAM_UNSUPPORTED,
+ .rtt_responder_role = WMI_VDEV_PARAM_UNSUPPORTED,
+};
+
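+/* 10.2.4 WMI VDEV param map */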
+static struct wmi_vdev_param_map wmi_10_2_4_vdev_param_map = {
+ .rts_threshold = WMI_10X_VDEV_PARAM_RTS_THRESHOLD,
+ .fragmentation_threshold = WMI_10X_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
+ .beacon_interval = WMI_10X_VDEV_PARAM_BEACON_INTERVAL,
+ .listen_interval = WMI_10X_VDEV_PARAM_LISTEN_INTERVAL,
+ .multicast_rate = WMI_10X_VDEV_PARAM_MULTICAST_RATE,
+ .mgmt_tx_rate = WMI_10X_VDEV_PARAM_MGMT_TX_RATE,
+ .slot_time = WMI_10X_VDEV_PARAM_SLOT_TIME,
+ .preamble = WMI_10X_VDEV_PARAM_PREAMBLE,
+ .swba_time = WMI_10X_VDEV_PARAM_SWBA_TIME,
+ .wmi_vdev_stats_update_period = WMI_10X_VDEV_STATS_UPDATE_PERIOD,
+ .wmi_vdev_pwrsave_ageout_time = WMI_10X_VDEV_PWRSAVE_AGEOUT_TIME,
+ .wmi_vdev_host_swba_interval = WMI_10X_VDEV_HOST_SWBA_INTERVAL,
+ .dtim_period = WMI_10X_VDEV_PARAM_DTIM_PERIOD,
+ .wmi_vdev_oc_scheduler_air_time_limit =
+ WMI_10X_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
+ .wds = WMI_10X_VDEV_PARAM_WDS,
+ .atim_window = WMI_10X_VDEV_PARAM_ATIM_WINDOW,
+ .bmiss_count_max = WMI_10X_VDEV_PARAM_BMISS_COUNT_MAX,
+ .bmiss_first_bcnt = WMI_VDEV_PARAM_UNSUPPORTED,
+ .bmiss_final_bcnt = WMI_VDEV_PARAM_UNSUPPORTED,
+ .feature_wmm = WMI_10X_VDEV_PARAM_FEATURE_WMM,
+ .chwidth = WMI_10X_VDEV_PARAM_CHWIDTH,
+ .chextoffset = WMI_10X_VDEV_PARAM_CHEXTOFFSET,
+ .disable_htprotection = WMI_10X_VDEV_PARAM_DISABLE_HTPROTECTION,
+ .sta_quickkickout = WMI_10X_VDEV_PARAM_STA_QUICKKICKOUT,
+ .mgmt_rate = WMI_10X_VDEV_PARAM_MGMT_RATE,
+ .protection_mode = WMI_10X_VDEV_PARAM_PROTECTION_MODE,
+ .fixed_rate = WMI_10X_VDEV_PARAM_FIXED_RATE,
+ .sgi = WMI_10X_VDEV_PARAM_SGI,
+ .ldpc = WMI_10X_VDEV_PARAM_LDPC,
+ .tx_stbc = WMI_10X_VDEV_PARAM_TX_STBC,
+ .rx_stbc = WMI_10X_VDEV_PARAM_RX_STBC,
+ .intra_bss_fwd = WMI_10X_VDEV_PARAM_INTRA_BSS_FWD,
+ .def_keyid = WMI_10X_VDEV_PARAM_DEF_KEYID,
+ .nss = WMI_10X_VDEV_PARAM_NSS,
+ .bcast_data_rate = WMI_10X_VDEV_PARAM_BCAST_DATA_RATE,
+ .mcast_data_rate = WMI_10X_VDEV_PARAM_MCAST_DATA_RATE,
+ .mcast_indicate = WMI_10X_VDEV_PARAM_MCAST_INDICATE,
+ .dhcp_indicate = WMI_10X_VDEV_PARAM_DHCP_INDICATE,
+ .unknown_dest_indicate = WMI_10X_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
+ .ap_keepalive_min_idle_inactive_time_secs =
+ WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
+ .ap_keepalive_max_idle_inactive_time_secs =
+ WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
+ .ap_keepalive_max_unresponsive_time_secs =
+ WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
+ .ap_enable_nawds = WMI_10X_VDEV_PARAM_AP_ENABLE_NAWDS,
+ .mcast2ucast_set = WMI_10X_VDEV_PARAM_MCAST2UCAST_SET,
+ .enable_rtscts = WMI_10X_VDEV_PARAM_ENABLE_RTSCTS,
+ .txbf = WMI_VDEV_PARAM_UNSUPPORTED,
+ .packet_powersave = WMI_VDEV_PARAM_UNSUPPORTED,
+ .drop_unencry = WMI_VDEV_PARAM_UNSUPPORTED,
+ .tx_encap_type = WMI_VDEV_PARAM_UNSUPPORTED,
+ .ap_detect_out_of_sync_sleeping_sta_time_secs =
+ WMI_10X_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
+ .rc_num_retries = WMI_VDEV_PARAM_UNSUPPORTED,
+ .cabq_maxdur = WMI_VDEV_PARAM_UNSUPPORTED,
+ .mfptest_set = WMI_VDEV_PARAM_UNSUPPORTED,
+ .rts_fixed_rate = WMI_VDEV_PARAM_UNSUPPORTED,
+ .vht_sgimask = WMI_VDEV_PARAM_UNSUPPORTED,
+ .vht80_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
+ .early_rx_adjust_enable = WMI_VDEV_PARAM_UNSUPPORTED,
+ .early_rx_tgt_bmiss_num = WMI_VDEV_PARAM_UNSUPPORTED,
+ .early_rx_bmiss_sample_cycle = WMI_VDEV_PARAM_UNSUPPORTED,
+ .early_rx_slop_step = WMI_VDEV_PARAM_UNSUPPORTED,
+ .early_rx_init_slop = WMI_VDEV_PARAM_UNSUPPORTED,
+ .early_rx_adjust_pause = WMI_VDEV_PARAM_UNSUPPORTED,
+ .proxy_sta = WMI_VDEV_PARAM_UNSUPPORTED,
+ .meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
+ .rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
+ .bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
+ .disable_4addr_src_lrn = WMI_VDEV_PARAM_UNSUPPORTED,
+ .rtt_responder_role = WMI_VDEV_PARAM_UNSUPPORTED,
+};
+
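+/* 10.4 WMI VDEV param map */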
+static struct wmi_vdev_param_map wmi_10_4_vdev_param_map = {
+ .rts_threshold = WMI_10_4_VDEV_PARAM_RTS_THRESHOLD,
+ .fragmentation_threshold = WMI_10_4_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
+ .beacon_interval = WMI_10_4_VDEV_PARAM_BEACON_INTERVAL,
+ .listen_interval = WMI_10_4_VDEV_PARAM_LISTEN_INTERVAL,
+ .multicast_rate = WMI_10_4_VDEV_PARAM_MULTICAST_RATE,
+ .mgmt_tx_rate = WMI_10_4_VDEV_PARAM_MGMT_TX_RATE,
+ .slot_time = WMI_10_4_VDEV_PARAM_SLOT_TIME,
+ .preamble = WMI_10_4_VDEV_PARAM_PREAMBLE,
+ .swba_time = WMI_10_4_VDEV_PARAM_SWBA_TIME,
+ .wmi_vdev_stats_update_period = WMI_10_4_VDEV_STATS_UPDATE_PERIOD,
+ .wmi_vdev_pwrsave_ageout_time = WMI_10_4_VDEV_PWRSAVE_AGEOUT_TIME,
+ .wmi_vdev_host_swba_interval = WMI_10_4_VDEV_HOST_SWBA_INTERVAL,
+ .dtim_period = WMI_10_4_VDEV_PARAM_DTIM_PERIOD,
+ .wmi_vdev_oc_scheduler_air_time_limit =
+ WMI_10_4_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
+ .wds = WMI_10_4_VDEV_PARAM_WDS,
+ .atim_window = WMI_10_4_VDEV_PARAM_ATIM_WINDOW,
+ .bmiss_count_max = WMI_10_4_VDEV_PARAM_BMISS_COUNT_MAX,
+ .bmiss_first_bcnt = WMI_10_4_VDEV_PARAM_BMISS_FIRST_BCNT,
+ .bmiss_final_bcnt = WMI_10_4_VDEV_PARAM_BMISS_FINAL_BCNT,
+ .feature_wmm = WMI_10_4_VDEV_PARAM_FEATURE_WMM,
+ .chwidth = WMI_10_4_VDEV_PARAM_CHWIDTH,
+ .chextoffset = WMI_10_4_VDEV_PARAM_CHEXTOFFSET,
+ .disable_htprotection = WMI_10_4_VDEV_PARAM_DISABLE_HTPROTECTION,
+ .sta_quickkickout = WMI_10_4_VDEV_PARAM_STA_QUICKKICKOUT,
+ .mgmt_rate = WMI_10_4_VDEV_PARAM_MGMT_RATE,
+ .protection_mode = WMI_10_4_VDEV_PARAM_PROTECTION_MODE,
+ .fixed_rate = WMI_10_4_VDEV_PARAM_FIXED_RATE,
+ .sgi = WMI_10_4_VDEV_PARAM_SGI,
+ .ldpc = WMI_10_4_VDEV_PARAM_LDPC,
+ .tx_stbc = WMI_10_4_VDEV_PARAM_TX_STBC,
+ .rx_stbc = WMI_10_4_VDEV_PARAM_RX_STBC,
+ .intra_bss_fwd = WMI_10_4_VDEV_PARAM_INTRA_BSS_FWD,
+ .def_keyid = WMI_10_4_VDEV_PARAM_DEF_KEYID,
+ .nss = WMI_10_4_VDEV_PARAM_NSS,
+ .bcast_data_rate = WMI_10_4_VDEV_PARAM_BCAST_DATA_RATE,
+ .mcast_data_rate = WMI_10_4_VDEV_PARAM_MCAST_DATA_RATE,
+ .mcast_indicate = WMI_10_4_VDEV_PARAM_MCAST_INDICATE,
+ .dhcp_indicate = WMI_10_4_VDEV_PARAM_DHCP_INDICATE,
+ .unknown_dest_indicate = WMI_10_4_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
+ .ap_keepalive_min_idle_inactive_time_secs =
+ WMI_10_4_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
+ .ap_keepalive_max_idle_inactive_time_secs =
+ WMI_10_4_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
+ .ap_keepalive_max_unresponsive_time_secs =
+ WMI_10_4_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
+ .ap_enable_nawds = WMI_10_4_VDEV_PARAM_AP_ENABLE_NAWDS,
+ .mcast2ucast_set = WMI_10_4_VDEV_PARAM_MCAST2UCAST_SET,
+ .enable_rtscts = WMI_10_4_VDEV_PARAM_ENABLE_RTSCTS,
+ .txbf = WMI_10_4_VDEV_PARAM_TXBF,
+ .packet_powersave = WMI_10_4_VDEV_PARAM_PACKET_POWERSAVE,
+ .drop_unencry = WMI_10_4_VDEV_PARAM_DROP_UNENCRY,
+ .tx_encap_type = WMI_10_4_VDEV_PARAM_TX_ENCAP_TYPE,
+ .ap_detect_out_of_sync_sleeping_sta_time_secs =
+ WMI_10_4_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
+ .rc_num_retries = WMI_10_4_VDEV_PARAM_RC_NUM_RETRIES,
+ .cabq_maxdur = WMI_10_4_VDEV_PARAM_CABQ_MAXDUR,
+ .mfptest_set = WMI_10_4_VDEV_PARAM_MFPTEST_SET,
+ .rts_fixed_rate = WMI_10_4_VDEV_PARAM_RTS_FIXED_RATE,
+ .vht_sgimask = WMI_10_4_VDEV_PARAM_VHT_SGIMASK,
+ .vht80_ratemask = WMI_10_4_VDEV_PARAM_VHT80_RATEMASK,
+ .early_rx_adjust_enable = WMI_10_4_VDEV_PARAM_EARLY_RX_ADJUST_ENABLE,
+ .early_rx_tgt_bmiss_num = WMI_10_4_VDEV_PARAM_EARLY_RX_TGT_BMISS_NUM,
+ .early_rx_bmiss_sample_cycle =
+ WMI_10_4_VDEV_PARAM_EARLY_RX_BMISS_SAMPLE_CYCLE,
+ .early_rx_slop_step = WMI_10_4_VDEV_PARAM_EARLY_RX_SLOP_STEP,
+ .early_rx_init_slop = WMI_10_4_VDEV_PARAM_EARLY_RX_INIT_SLOP,
+ .early_rx_adjust_pause = WMI_10_4_VDEV_PARAM_EARLY_RX_ADJUST_PAUSE,
+ .proxy_sta = WMI_10_4_VDEV_PARAM_PROXY_STA,
+ .meru_vc = WMI_10_4_VDEV_PARAM_MERU_VC,
+ .rx_decap_type = WMI_10_4_VDEV_PARAM_RX_DECAP_TYPE,
+ .bw_nss_ratemask = WMI_10_4_VDEV_PARAM_BW_NSS_RATEMASK,
+ .inc_tsf = WMI_10_4_VDEV_PARAM_TSF_INCREMENT,
+ .dec_tsf = WMI_10_4_VDEV_PARAM_TSF_DECREMENT,
+ .disable_4addr_src_lrn = WMI_10_4_VDEV_PARAM_DISABLE_4_ADDR_SRC_LRN,
+ .rtt_responder_role =
+ WMI_10_4_VDEV_PARAM_ENABLE_DISABLE_RTT_RESPONDER_ROLE,
+};
+
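+/* MAIN WMI PDEV param map */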
+static struct wmi_pdev_param_map wmi_pdev_param_map = {
+ .tx_chain_mask = WMI_PDEV_PARAM_TX_CHAIN_MASK,
+ .rx_chain_mask = WMI_PDEV_PARAM_RX_CHAIN_MASK,
+ .txpower_limit2g = WMI_PDEV_PARAM_TXPOWER_LIMIT2G,
+ .txpower_limit5g = WMI_PDEV_PARAM_TXPOWER_LIMIT5G,
+ .txpower_scale = WMI_PDEV_PARAM_TXPOWER_SCALE,
+ .beacon_gen_mode = WMI_PDEV_PARAM_BEACON_GEN_MODE,
+ .beacon_tx_mode = WMI_PDEV_PARAM_BEACON_TX_MODE,
+ .resmgr_offchan_mode = WMI_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
+ .protection_mode = WMI_PDEV_PARAM_PROTECTION_MODE,
+ .dynamic_bw = WMI_PDEV_PARAM_DYNAMIC_BW,
+ .non_agg_sw_retry_th = WMI_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
+ .agg_sw_retry_th = WMI_PDEV_PARAM_AGG_SW_RETRY_TH,
+ .sta_kickout_th = WMI_PDEV_PARAM_STA_KICKOUT_TH,
+ .ac_aggrsize_scaling = WMI_PDEV_PARAM_AC_AGGRSIZE_SCALING,
+ .ltr_enable = WMI_PDEV_PARAM_LTR_ENABLE,
+ .ltr_ac_latency_be = WMI_PDEV_PARAM_LTR_AC_LATENCY_BE,
+ .ltr_ac_latency_bk = WMI_PDEV_PARAM_LTR_AC_LATENCY_BK,
+ .ltr_ac_latency_vi = WMI_PDEV_PARAM_LTR_AC_LATENCY_VI,
+ .ltr_ac_latency_vo = WMI_PDEV_PARAM_LTR_AC_LATENCY_VO,
+ .ltr_ac_latency_timeout = WMI_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
+ .ltr_sleep_override = WMI_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
+ .ltr_rx_override = WMI_PDEV_PARAM_LTR_RX_OVERRIDE,
+ .ltr_tx_activity_timeout = WMI_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
+ .l1ss_enable = WMI_PDEV_PARAM_L1SS_ENABLE,
+ .dsleep_enable = WMI_PDEV_PARAM_DSLEEP_ENABLE,
+ .pcielp_txbuf_flush = WMI_PDEV_PARAM_PCIELP_TXBUF_FLUSH,
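+ /* note: watermark maps to the TMO_EN id; no WATERMARK id is used here */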
+ .pcielp_txbuf_watermark = WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
+ .pcielp_txbuf_tmo_en = WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
+ .pcielp_txbuf_tmo_value = WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE,
+ .pdev_stats_update_period = WMI_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
+ .vdev_stats_update_period = WMI_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
+ .peer_stats_update_period = WMI_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
+ .bcnflt_stats_update_period = WMI_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
+ .pmf_qos = WMI_PDEV_PARAM_PMF_QOS,
+ .arp_ac_override = WMI_PDEV_PARAM_ARP_AC_OVERRIDE,
+ .dcs = WMI_PDEV_PARAM_DCS,
+ .ani_enable = WMI_PDEV_PARAM_ANI_ENABLE,
+ .ani_poll_period = WMI_PDEV_PARAM_ANI_POLL_PERIOD,
+ .ani_listen_period = WMI_PDEV_PARAM_ANI_LISTEN_PERIOD,
+ .ani_ofdm_level = WMI_PDEV_PARAM_ANI_OFDM_LEVEL,
+ .ani_cck_level = WMI_PDEV_PARAM_ANI_CCK_LEVEL,
+ .dyntxchain = WMI_PDEV_PARAM_DYNTXCHAIN,
+ .proxy_sta = WMI_PDEV_PARAM_PROXY_STA,
+ .idle_ps_config = WMI_PDEV_PARAM_IDLE_PS_CONFIG,
+ .power_gating_sleep = WMI_PDEV_PARAM_POWER_GATING_SLEEP,
+ .fast_channel_reset = WMI_PDEV_PARAM_UNSUPPORTED,
+ .burst_dur = WMI_PDEV_PARAM_UNSUPPORTED,
+ .burst_enable = WMI_PDEV_PARAM_UNSUPPORTED,
+ .cal_period = WMI_PDEV_PARAM_UNSUPPORTED,
+ .aggr_burst = WMI_PDEV_PARAM_UNSUPPORTED,
+ .rx_decap_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+ .smart_antenna_default_antenna = WMI_PDEV_PARAM_UNSUPPORTED,
+ .igmpmld_override = WMI_PDEV_PARAM_UNSUPPORTED,
+ .igmpmld_tid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .antenna_gain = WMI_PDEV_PARAM_UNSUPPORTED,
+ .rx_filter = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_mcast_to_ucast_tid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .proxy_sta_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_mcast2ucast_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
+ .remove_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
+ .peer_sta_ps_statechg_enable = WMI_PDEV_PARAM_UNSUPPORTED,
+ .igmpmld_ac_override = WMI_PDEV_PARAM_UNSUPPORTED,
+ .block_interbss = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_disable_reset_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_msdu_ttl_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_ppdu_duration_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .txbf_sound_period_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_promisc_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_burst_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .en_stats = WMI_PDEV_PARAM_UNSUPPORTED,
+ .mu_group_policy = WMI_PDEV_PARAM_UNSUPPORTED,
+ .noise_detection = WMI_PDEV_PARAM_UNSUPPORTED,
+ .noise_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
+ .dpd_enable = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_mcast_bcast_echo = WMI_PDEV_PARAM_UNSUPPORTED,
+ .atf_strict_sch = WMI_PDEV_PARAM_UNSUPPORTED,
+ .atf_sched_duration = WMI_PDEV_PARAM_UNSUPPORTED,
+ .ant_plzn = WMI_PDEV_PARAM_UNSUPPORTED,
+ .mgmt_retry_limit = WMI_PDEV_PARAM_UNSUPPORTED,
+ .sensitivity_level = WMI_PDEV_PARAM_UNSUPPORTED,
+ .signed_txpower_2g = WMI_PDEV_PARAM_UNSUPPORTED,
+ .signed_txpower_5g = WMI_PDEV_PARAM_UNSUPPORTED,
+ .enable_per_tid_amsdu = WMI_PDEV_PARAM_UNSUPPORTED,
+ .enable_per_tid_ampdu = WMI_PDEV_PARAM_UNSUPPORTED,
+ .cca_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
+ .rts_fixed_rate = WMI_PDEV_PARAM_UNSUPPORTED,
+ .pdev_reset = WMI_PDEV_PARAM_UNSUPPORTED,
+ .wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED,
+ .arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED,
+ .arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED,
+ .enable_btcoex = WMI_PDEV_PARAM_UNSUPPORTED,
+};
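+
+/*
+ * Editorial note, not part of the original patch: as with the vdev maps
+ * above, a pdev param mapped to WMI_PDEV_PARAM_UNSUPPORTED is expected to
+ * make the setter bail out with -EOPNOTSUPP rather than send a bogus id.
+ */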
+
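+/* 10.X WMI PDEV param map */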
+static struct wmi_pdev_param_map wmi_10x_pdev_param_map = {
+ .tx_chain_mask = WMI_10X_PDEV_PARAM_TX_CHAIN_MASK,
+ .rx_chain_mask = WMI_10X_PDEV_PARAM_RX_CHAIN_MASK,
+ .txpower_limit2g = WMI_10X_PDEV_PARAM_TXPOWER_LIMIT2G,
+ .txpower_limit5g = WMI_10X_PDEV_PARAM_TXPOWER_LIMIT5G,
+ .txpower_scale = WMI_10X_PDEV_PARAM_TXPOWER_SCALE,
+ .beacon_gen_mode = WMI_10X_PDEV_PARAM_BEACON_GEN_MODE,
+ .beacon_tx_mode = WMI_10X_PDEV_PARAM_BEACON_TX_MODE,
+ .resmgr_offchan_mode = WMI_10X_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
+ .protection_mode = WMI_10X_PDEV_PARAM_PROTECTION_MODE,
+ .dynamic_bw = WMI_10X_PDEV_PARAM_DYNAMIC_BW,
+ .non_agg_sw_retry_th = WMI_10X_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
+ .agg_sw_retry_th = WMI_10X_PDEV_PARAM_AGG_SW_RETRY_TH,
+ .sta_kickout_th = WMI_10X_PDEV_PARAM_STA_KICKOUT_TH,
+ .ac_aggrsize_scaling = WMI_10X_PDEV_PARAM_AC_AGGRSIZE_SCALING,
+ .ltr_enable = WMI_10X_PDEV_PARAM_LTR_ENABLE,
+ .ltr_ac_latency_be = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BE,
+ .ltr_ac_latency_bk = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BK,
+ .ltr_ac_latency_vi = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VI,
+ .ltr_ac_latency_vo = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VO,
+ .ltr_ac_latency_timeout = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
+ .ltr_sleep_override = WMI_10X_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
+ .ltr_rx_override = WMI_10X_PDEV_PARAM_LTR_RX_OVERRIDE,
+ .ltr_tx_activity_timeout = WMI_10X_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
+ .l1ss_enable = WMI_10X_PDEV_PARAM_L1SS_ENABLE,
+ .dsleep_enable = WMI_10X_PDEV_PARAM_DSLEEP_ENABLE,
+ .pcielp_txbuf_flush = WMI_PDEV_PARAM_UNSUPPORTED,
+ .pcielp_txbuf_watermark = WMI_PDEV_PARAM_UNSUPPORTED,
+ .pcielp_txbuf_tmo_en = WMI_PDEV_PARAM_UNSUPPORTED,
+ .pcielp_txbuf_tmo_value = WMI_PDEV_PARAM_UNSUPPORTED,
+ .pdev_stats_update_period = WMI_10X_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
+ .vdev_stats_update_period = WMI_10X_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
+ .peer_stats_update_period = WMI_10X_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
+ .bcnflt_stats_update_period =
+ WMI_10X_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
+ .pmf_qos = WMI_10X_PDEV_PARAM_PMF_QOS,
+ .arp_ac_override = WMI_10X_PDEV_PARAM_ARPDHCP_AC_OVERRIDE,
+ .dcs = WMI_10X_PDEV_PARAM_DCS,
+ .ani_enable = WMI_10X_PDEV_PARAM_ANI_ENABLE,
+ .ani_poll_period = WMI_10X_PDEV_PARAM_ANI_POLL_PERIOD,
+ .ani_listen_period = WMI_10X_PDEV_PARAM_ANI_LISTEN_PERIOD,
+ .ani_ofdm_level = WMI_10X_PDEV_PARAM_ANI_OFDM_LEVEL,
+ .ani_cck_level = WMI_10X_PDEV_PARAM_ANI_CCK_LEVEL,
+ .dyntxchain = WMI_10X_PDEV_PARAM_DYNTXCHAIN,
+ .proxy_sta = WMI_PDEV_PARAM_UNSUPPORTED,
+ .idle_ps_config = WMI_PDEV_PARAM_UNSUPPORTED,
+ .power_gating_sleep = WMI_PDEV_PARAM_UNSUPPORTED,
+ .fast_channel_reset = WMI_10X_PDEV_PARAM_FAST_CHANNEL_RESET,
+ .burst_dur = WMI_10X_PDEV_PARAM_BURST_DUR,
+ .burst_enable = WMI_10X_PDEV_PARAM_BURST_ENABLE,
+ .cal_period = WMI_10X_PDEV_PARAM_CAL_PERIOD,
+ .aggr_burst = WMI_PDEV_PARAM_UNSUPPORTED,
+ .rx_decap_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+ .smart_antenna_default_antenna = WMI_PDEV_PARAM_UNSUPPORTED,
+ .igmpmld_override = WMI_PDEV_PARAM_UNSUPPORTED,
+ .igmpmld_tid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .antenna_gain = WMI_PDEV_PARAM_UNSUPPORTED,
+ .rx_filter = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_mcast_to_ucast_tid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .proxy_sta_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_mcast2ucast_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
+ .remove_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
+ .peer_sta_ps_statechg_enable = WMI_PDEV_PARAM_UNSUPPORTED,
+ .igmpmld_ac_override = WMI_PDEV_PARAM_UNSUPPORTED,
+ .block_interbss = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_disable_reset_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_msdu_ttl_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_ppdu_duration_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .txbf_sound_period_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_promisc_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_burst_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .en_stats = WMI_PDEV_PARAM_UNSUPPORTED,
+ .mu_group_policy = WMI_PDEV_PARAM_UNSUPPORTED,
+ .noise_detection = WMI_PDEV_PARAM_UNSUPPORTED,
+ .noise_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
+ .dpd_enable = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_mcast_bcast_echo = WMI_PDEV_PARAM_UNSUPPORTED,
+ .atf_strict_sch = WMI_PDEV_PARAM_UNSUPPORTED,
+ .atf_sched_duration = WMI_PDEV_PARAM_UNSUPPORTED,
+ .ant_plzn = WMI_PDEV_PARAM_UNSUPPORTED,
+ .mgmt_retry_limit = WMI_PDEV_PARAM_UNSUPPORTED,
+ .sensitivity_level = WMI_PDEV_PARAM_UNSUPPORTED,
+ .signed_txpower_2g = WMI_PDEV_PARAM_UNSUPPORTED,
+ .signed_txpower_5g = WMI_PDEV_PARAM_UNSUPPORTED,
+ .enable_per_tid_amsdu = WMI_PDEV_PARAM_UNSUPPORTED,
+ .enable_per_tid_ampdu = WMI_PDEV_PARAM_UNSUPPORTED,
+ .cca_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
+ .rts_fixed_rate = WMI_PDEV_PARAM_UNSUPPORTED,
+ .pdev_reset = WMI_PDEV_PARAM_UNSUPPORTED,
+ .wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED,
+ .arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED,
+ .arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED,
+ .enable_btcoex = WMI_PDEV_PARAM_UNSUPPORTED,
+};
+
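+/* 10.2.4 WMI PDEV param map */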
+static struct wmi_pdev_param_map wmi_10_2_4_pdev_param_map = {
+ .tx_chain_mask = WMI_10X_PDEV_PARAM_TX_CHAIN_MASK,
+ .rx_chain_mask = WMI_10X_PDEV_PARAM_RX_CHAIN_MASK,
+ .txpower_limit2g = WMI_10X_PDEV_PARAM_TXPOWER_LIMIT2G,
+ .txpower_limit5g = WMI_10X_PDEV_PARAM_TXPOWER_LIMIT5G,
+ .txpower_scale = WMI_10X_PDEV_PARAM_TXPOWER_SCALE,
+ .beacon_gen_mode = WMI_10X_PDEV_PARAM_BEACON_GEN_MODE,
+ .beacon_tx_mode = WMI_10X_PDEV_PARAM_BEACON_TX_MODE,
+ .resmgr_offchan_mode = WMI_10X_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
+ .protection_mode = WMI_10X_PDEV_PARAM_PROTECTION_MODE,
+ .dynamic_bw = WMI_10X_PDEV_PARAM_DYNAMIC_BW,
+ .non_agg_sw_retry_th = WMI_10X_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
+ .agg_sw_retry_th = WMI_10X_PDEV_PARAM_AGG_SW_RETRY_TH,
+ .sta_kickout_th = WMI_10X_PDEV_PARAM_STA_KICKOUT_TH,
+ .ac_aggrsize_scaling = WMI_10X_PDEV_PARAM_AC_AGGRSIZE_SCALING,
+ .ltr_enable = WMI_10X_PDEV_PARAM_LTR_ENABLE,
+ .ltr_ac_latency_be = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BE,
+ .ltr_ac_latency_bk = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BK,
+ .ltr_ac_latency_vi = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VI,
+ .ltr_ac_latency_vo = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VO,
+ .ltr_ac_latency_timeout = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
+ .ltr_sleep_override = WMI_10X_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
+ .ltr_rx_override = WMI_10X_PDEV_PARAM_LTR_RX_OVERRIDE,
+ .ltr_tx_activity_timeout = WMI_10X_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
+ .l1ss_enable = WMI_10X_PDEV_PARAM_L1SS_ENABLE,
+ .dsleep_enable = WMI_10X_PDEV_PARAM_DSLEEP_ENABLE,
+ .pcielp_txbuf_flush = WMI_PDEV_PARAM_UNSUPPORTED,
+ .pcielp_txbuf_watermark = WMI_PDEV_PARAM_UNSUPPORTED,
+ .pcielp_txbuf_tmo_en = WMI_PDEV_PARAM_UNSUPPORTED,
+ .pcielp_txbuf_tmo_value = WMI_PDEV_PARAM_UNSUPPORTED,
+ .pdev_stats_update_period = WMI_10X_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
+ .vdev_stats_update_period = WMI_10X_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
+ .peer_stats_update_period = WMI_10X_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
+ .bcnflt_stats_update_period =
+ WMI_10X_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
+ .pmf_qos = WMI_10X_PDEV_PARAM_PMF_QOS,
+ .arp_ac_override = WMI_10X_PDEV_PARAM_ARPDHCP_AC_OVERRIDE,
+ .dcs = WMI_10X_PDEV_PARAM_DCS,
+ .ani_enable = WMI_10X_PDEV_PARAM_ANI_ENABLE,
+ .ani_poll_period = WMI_10X_PDEV_PARAM_ANI_POLL_PERIOD,
+ .ani_listen_period = WMI_10X_PDEV_PARAM_ANI_LISTEN_PERIOD,
+ .ani_ofdm_level = WMI_10X_PDEV_PARAM_ANI_OFDM_LEVEL,
+ .ani_cck_level = WMI_10X_PDEV_PARAM_ANI_CCK_LEVEL,
+ .dyntxchain = WMI_10X_PDEV_PARAM_DYNTXCHAIN,
+ .proxy_sta = WMI_PDEV_PARAM_UNSUPPORTED,
+ .idle_ps_config = WMI_PDEV_PARAM_UNSUPPORTED,
+ .power_gating_sleep = WMI_PDEV_PARAM_UNSUPPORTED,
+ .fast_channel_reset = WMI_10X_PDEV_PARAM_FAST_CHANNEL_RESET,
+ .burst_dur = WMI_10X_PDEV_PARAM_BURST_DUR,
+ .burst_enable = WMI_10X_PDEV_PARAM_BURST_ENABLE,
+ .cal_period = WMI_10X_PDEV_PARAM_CAL_PERIOD,
+ .aggr_burst = WMI_PDEV_PARAM_UNSUPPORTED,
+ .rx_decap_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+ .smart_antenna_default_antenna = WMI_PDEV_PARAM_UNSUPPORTED,
+ .igmpmld_override = WMI_PDEV_PARAM_UNSUPPORTED,
+ .igmpmld_tid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .antenna_gain = WMI_PDEV_PARAM_UNSUPPORTED,
+ .rx_filter = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_mcast_to_ucast_tid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .proxy_sta_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_mcast2ucast_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
+ .remove_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
+ .peer_sta_ps_statechg_enable =
+ WMI_10X_PDEV_PARAM_PEER_STA_PS_STATECHG_ENABLE,
+ .igmpmld_ac_override = WMI_PDEV_PARAM_UNSUPPORTED,
+ .block_interbss = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_disable_reset_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_msdu_ttl_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_ppdu_duration_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .txbf_sound_period_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_promisc_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_burst_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .en_stats = WMI_PDEV_PARAM_UNSUPPORTED,
+ .mu_group_policy = WMI_PDEV_PARAM_UNSUPPORTED,
+ .noise_detection = WMI_PDEV_PARAM_UNSUPPORTED,
+ .noise_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
+ .dpd_enable = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_mcast_bcast_echo = WMI_PDEV_PARAM_UNSUPPORTED,
+ .atf_strict_sch = WMI_PDEV_PARAM_UNSUPPORTED,
+ .atf_sched_duration = WMI_PDEV_PARAM_UNSUPPORTED,
+ .ant_plzn = WMI_PDEV_PARAM_UNSUPPORTED,
+ .mgmt_retry_limit = WMI_PDEV_PARAM_UNSUPPORTED,
+ .sensitivity_level = WMI_PDEV_PARAM_UNSUPPORTED,
+ .signed_txpower_2g = WMI_PDEV_PARAM_UNSUPPORTED,
+ .signed_txpower_5g = WMI_PDEV_PARAM_UNSUPPORTED,
+ .enable_per_tid_amsdu = WMI_PDEV_PARAM_UNSUPPORTED,
+ .enable_per_tid_ampdu = WMI_PDEV_PARAM_UNSUPPORTED,
+ .cca_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
+ .rts_fixed_rate = WMI_PDEV_PARAM_UNSUPPORTED,
+ .pdev_reset = WMI_10X_PDEV_PARAM_PDEV_RESET,
+ .wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED,
+ .arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED,
+ .arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED,
+ .enable_btcoex = WMI_PDEV_PARAM_UNSUPPORTED,
+};
+
+/* firmware 10.2 specific WMI cmd map */
+static struct wmi_cmd_map wmi_10_2_cmd_map = {
+ .init_cmdid = WMI_10_2_INIT_CMDID,
+ .start_scan_cmdid = WMI_10_2_START_SCAN_CMDID,
+ .stop_scan_cmdid = WMI_10_2_STOP_SCAN_CMDID,
+ .scan_chan_list_cmdid = WMI_10_2_SCAN_CHAN_LIST_CMDID,
+ .scan_sch_prio_tbl_cmdid = WMI_CMD_UNSUPPORTED,
+ .scan_prob_req_oui_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_set_regdomain_cmdid = WMI_10_2_PDEV_SET_REGDOMAIN_CMDID,
+ .pdev_set_channel_cmdid = WMI_10_2_PDEV_SET_CHANNEL_CMDID,
+ .pdev_set_param_cmdid = WMI_10_2_PDEV_SET_PARAM_CMDID,
+ .pdev_pktlog_enable_cmdid = WMI_10_2_PDEV_PKTLOG_ENABLE_CMDID,
+ .pdev_pktlog_disable_cmdid = WMI_10_2_PDEV_PKTLOG_DISABLE_CMDID,
+ .pdev_set_wmm_params_cmdid = WMI_10_2_PDEV_SET_WMM_PARAMS_CMDID,
+ .pdev_set_ht_cap_ie_cmdid = WMI_10_2_PDEV_SET_HT_CAP_IE_CMDID,
+ .pdev_set_vht_cap_ie_cmdid = WMI_10_2_PDEV_SET_VHT_CAP_IE_CMDID,
+ .pdev_set_quiet_mode_cmdid = WMI_10_2_PDEV_SET_QUIET_MODE_CMDID,
+ .pdev_green_ap_ps_enable_cmdid = WMI_10_2_PDEV_GREEN_AP_PS_ENABLE_CMDID,
+ .pdev_get_tpc_config_cmdid = WMI_10_2_PDEV_GET_TPC_CONFIG_CMDID,
+ .pdev_set_base_macaddr_cmdid = WMI_10_2_PDEV_SET_BASE_MACADDR_CMDID,
+ .vdev_create_cmdid = WMI_10_2_VDEV_CREATE_CMDID,
+ .vdev_delete_cmdid = WMI_10_2_VDEV_DELETE_CMDID,
+ .vdev_start_request_cmdid = WMI_10_2_VDEV_START_REQUEST_CMDID,
+ .vdev_restart_request_cmdid = WMI_10_2_VDEV_RESTART_REQUEST_CMDID,
+ .vdev_up_cmdid = WMI_10_2_VDEV_UP_CMDID,
+ .vdev_stop_cmdid = WMI_10_2_VDEV_STOP_CMDID,
+ .vdev_down_cmdid = WMI_10_2_VDEV_DOWN_CMDID,
+ .vdev_set_param_cmdid = WMI_10_2_VDEV_SET_PARAM_CMDID,
+ .vdev_install_key_cmdid = WMI_10_2_VDEV_INSTALL_KEY_CMDID,
+ .peer_create_cmdid = WMI_10_2_PEER_CREATE_CMDID,
+ .peer_delete_cmdid = WMI_10_2_PEER_DELETE_CMDID,
+ .peer_flush_tids_cmdid = WMI_10_2_PEER_FLUSH_TIDS_CMDID,
+ .peer_set_param_cmdid = WMI_10_2_PEER_SET_PARAM_CMDID,
+ .peer_assoc_cmdid = WMI_10_2_PEER_ASSOC_CMDID,
+ .peer_add_wds_entry_cmdid = WMI_10_2_PEER_ADD_WDS_ENTRY_CMDID,
+ .peer_remove_wds_entry_cmdid = WMI_10_2_PEER_REMOVE_WDS_ENTRY_CMDID,
+ .peer_mcast_group_cmdid = WMI_10_2_PEER_MCAST_GROUP_CMDID,
+ .bcn_tx_cmdid = WMI_10_2_BCN_TX_CMDID,
+ .pdev_send_bcn_cmdid = WMI_10_2_PDEV_SEND_BCN_CMDID,
+ .bcn_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
+ .bcn_filter_rx_cmdid = WMI_10_2_BCN_FILTER_RX_CMDID,
+ .prb_req_filter_rx_cmdid = WMI_10_2_PRB_REQ_FILTER_RX_CMDID,
+ .mgmt_tx_cmdid = WMI_10_2_MGMT_TX_CMDID,
+ .prb_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
+ .addba_clear_resp_cmdid = WMI_10_2_ADDBA_CLEAR_RESP_CMDID,
+ .addba_send_cmdid = WMI_10_2_ADDBA_SEND_CMDID,
+ .addba_status_cmdid = WMI_10_2_ADDBA_STATUS_CMDID,
+ .delba_send_cmdid = WMI_10_2_DELBA_SEND_CMDID,
+ .addba_set_resp_cmdid = WMI_10_2_ADDBA_SET_RESP_CMDID,
+ .send_singleamsdu_cmdid = WMI_10_2_SEND_SINGLEAMSDU_CMDID,
+ .sta_powersave_mode_cmdid = WMI_10_2_STA_POWERSAVE_MODE_CMDID,
+ .sta_powersave_param_cmdid = WMI_10_2_STA_POWERSAVE_PARAM_CMDID,
+ .sta_mimo_ps_mode_cmdid = WMI_10_2_STA_MIMO_PS_MODE_CMDID,
+ .pdev_dfs_enable_cmdid = WMI_10_2_PDEV_DFS_ENABLE_CMDID,
+ .pdev_dfs_disable_cmdid = WMI_10_2_PDEV_DFS_DISABLE_CMDID,
+ .roam_scan_mode = WMI_10_2_ROAM_SCAN_MODE,
+ .roam_scan_rssi_threshold = WMI_10_2_ROAM_SCAN_RSSI_THRESHOLD,
+ .roam_scan_period = WMI_10_2_ROAM_SCAN_PERIOD,
+ .roam_scan_rssi_change_threshold =
+ WMI_10_2_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
+ .roam_ap_profile = WMI_10_2_ROAM_AP_PROFILE,
+ .ofl_scan_add_ap_profile = WMI_10_2_OFL_SCAN_ADD_AP_PROFILE,
+ .ofl_scan_remove_ap_profile = WMI_10_2_OFL_SCAN_REMOVE_AP_PROFILE,
+ .ofl_scan_period = WMI_10_2_OFL_SCAN_PERIOD,
+ .p2p_dev_set_device_info = WMI_10_2_P2P_DEV_SET_DEVICE_INFO,
+ .p2p_dev_set_discoverability = WMI_10_2_P2P_DEV_SET_DISCOVERABILITY,
+ .p2p_go_set_beacon_ie = WMI_10_2_P2P_GO_SET_BEACON_IE,
+ .p2p_go_set_probe_resp_ie = WMI_10_2_P2P_GO_SET_PROBE_RESP_IE,
+ .p2p_set_vendor_ie_data_cmdid = WMI_CMD_UNSUPPORTED,
+ .ap_ps_peer_param_cmdid = WMI_10_2_AP_PS_PEER_PARAM_CMDID,
+ .ap_ps_peer_uapsd_coex_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_rate_retry_sched_cmdid = WMI_10_2_PEER_RATE_RETRY_SCHED_CMDID,
+ .wlan_profile_trigger_cmdid = WMI_10_2_WLAN_PROFILE_TRIGGER_CMDID,
+ .wlan_profile_set_hist_intvl_cmdid =
+ WMI_10_2_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
+ .wlan_profile_get_profile_data_cmdid =
+ WMI_10_2_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
+ .wlan_profile_enable_profile_id_cmdid =
+ WMI_10_2_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
+ .wlan_profile_list_profile_id_cmdid =
+ WMI_10_2_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
+ .pdev_suspend_cmdid = WMI_10_2_PDEV_SUSPEND_CMDID,
+ .pdev_resume_cmdid = WMI_10_2_PDEV_RESUME_CMDID,
+ .add_bcn_filter_cmdid = WMI_10_2_ADD_BCN_FILTER_CMDID,
+ .rmv_bcn_filter_cmdid = WMI_10_2_RMV_BCN_FILTER_CMDID,
+ .wow_add_wake_pattern_cmdid = WMI_10_2_WOW_ADD_WAKE_PATTERN_CMDID,
+ .wow_del_wake_pattern_cmdid = WMI_10_2_WOW_DEL_WAKE_PATTERN_CMDID,
+ .wow_enable_disable_wake_event_cmdid =
+ WMI_10_2_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
+ .wow_enable_cmdid = WMI_10_2_WOW_ENABLE_CMDID,
+ .wow_hostwakeup_from_sleep_cmdid =
+ WMI_10_2_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
+ .rtt_measreq_cmdid = WMI_10_2_RTT_MEASREQ_CMDID,
+ .rtt_tsf_cmdid = WMI_10_2_RTT_TSF_CMDID,
+ .vdev_spectral_scan_configure_cmdid =
+ WMI_10_2_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
+ .vdev_spectral_scan_enable_cmdid =
+ WMI_10_2_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
+ .request_stats_cmdid = WMI_10_2_REQUEST_STATS_CMDID,
+ .set_arp_ns_offload_cmdid = WMI_CMD_UNSUPPORTED,
+ .network_list_offload_config_cmdid = WMI_CMD_UNSUPPORTED,
+ .gtk_offload_cmdid = WMI_CMD_UNSUPPORTED,
+ .csa_offload_enable_cmdid = WMI_CMD_UNSUPPORTED,
+ .csa_offload_chanswitch_cmdid = WMI_CMD_UNSUPPORTED,
+ .chatter_set_mode_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_tid_addba_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_tid_delba_cmdid = WMI_CMD_UNSUPPORTED,
+ .sta_dtim_ps_method_cmdid = WMI_CMD_UNSUPPORTED,
+ .sta_uapsd_auto_trig_cmdid = WMI_CMD_UNSUPPORTED,
+ .sta_keepalive_cmd = WMI_CMD_UNSUPPORTED,
+ .echo_cmdid = WMI_10_2_ECHO_CMDID,
+ .pdev_utf_cmdid = WMI_10_2_PDEV_UTF_CMDID,
+ .dbglog_cfg_cmdid = WMI_10_2_DBGLOG_CFG_CMDID,
+ .pdev_qvit_cmdid = WMI_10_2_PDEV_QVIT_CMDID,
+ .pdev_ftm_intg_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_set_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_get_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
+ .force_fw_hang_cmdid = WMI_CMD_UNSUPPORTED,
+ .gpio_config_cmdid = WMI_10_2_GPIO_CONFIG_CMDID,
+ .gpio_output_cmdid = WMI_10_2_GPIO_OUTPUT_CMDID,
+ .pdev_get_temperature_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_enable_adaptive_cca_cmdid = WMI_CMD_UNSUPPORTED,
+ .scan_update_request_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED,
+ .wlan_peer_caching_add_peer_cmdid = WMI_CMD_UNSUPPORTED,
+ .wlan_peer_caching_evict_peer_cmdid = WMI_CMD_UNSUPPORTED,
+ .wlan_peer_caching_restore_peer_cmdid = WMI_CMD_UNSUPPORTED,
+ .wlan_peer_caching_print_all_peers_info_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_update_wds_entry_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_add_proxy_sta_entry_cmdid = WMI_CMD_UNSUPPORTED,
+ .rtt_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
+ .oem_req_cmdid = WMI_CMD_UNSUPPORTED,
+ .nan_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_ratemask_cmdid = WMI_CMD_UNSUPPORTED,
+ .qboost_cfg_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_smart_ant_enable_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_smart_ant_set_rx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_smart_ant_set_tx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_smart_ant_set_train_info_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_smart_ant_set_node_config_ops_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_set_antenna_switch_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_set_ctl_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_set_mimogain_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_ratepwr_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_ratepwr_chainmsk_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_fips_cmdid = WMI_CMD_UNSUPPORTED,
+ .tt_set_conf_cmdid = WMI_CMD_UNSUPPORTED,
+ .fwtest_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_get_ani_cck_config_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_get_tpc_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .radar_found_cmdid = WMI_CMD_UNSUPPORTED,
+};
+
+static struct wmi_pdev_param_map wmi_10_4_pdev_param_map = {
+ .tx_chain_mask = WMI_10_4_PDEV_PARAM_TX_CHAIN_MASK,
+ .rx_chain_mask = WMI_10_4_PDEV_PARAM_RX_CHAIN_MASK,
+ .txpower_limit2g = WMI_10_4_PDEV_PARAM_TXPOWER_LIMIT2G,
+ .txpower_limit5g = WMI_10_4_PDEV_PARAM_TXPOWER_LIMIT5G,
+ .txpower_scale = WMI_10_4_PDEV_PARAM_TXPOWER_SCALE,
+ .beacon_gen_mode = WMI_10_4_PDEV_PARAM_BEACON_GEN_MODE,
+ .beacon_tx_mode = WMI_10_4_PDEV_PARAM_BEACON_TX_MODE,
+ .resmgr_offchan_mode = WMI_10_4_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
+ .protection_mode = WMI_10_4_PDEV_PARAM_PROTECTION_MODE,
+ .dynamic_bw = WMI_10_4_PDEV_PARAM_DYNAMIC_BW,
+ .non_agg_sw_retry_th = WMI_10_4_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
+ .agg_sw_retry_th = WMI_10_4_PDEV_PARAM_AGG_SW_RETRY_TH,
+ .sta_kickout_th = WMI_10_4_PDEV_PARAM_STA_KICKOUT_TH,
+ .ac_aggrsize_scaling = WMI_10_4_PDEV_PARAM_AC_AGGRSIZE_SCALING,
+ .ltr_enable = WMI_10_4_PDEV_PARAM_LTR_ENABLE,
+ .ltr_ac_latency_be = WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_BE,
+ .ltr_ac_latency_bk = WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_BK,
+ .ltr_ac_latency_vi = WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_VI,
+ .ltr_ac_latency_vo = WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_VO,
+ .ltr_ac_latency_timeout = WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
+ .ltr_sleep_override = WMI_10_4_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
+ .ltr_rx_override = WMI_10_4_PDEV_PARAM_LTR_RX_OVERRIDE,
+ .ltr_tx_activity_timeout = WMI_10_4_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
+ .l1ss_enable = WMI_10_4_PDEV_PARAM_L1SS_ENABLE,
+ .dsleep_enable = WMI_10_4_PDEV_PARAM_DSLEEP_ENABLE,
+ .pcielp_txbuf_flush = WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_FLUSH,
+ .pcielp_txbuf_watermark = WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_WATERMARK,
+ .pcielp_txbuf_tmo_en = WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
+ .pcielp_txbuf_tmo_value = WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE,
+ .pdev_stats_update_period =
+ WMI_10_4_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
+ .vdev_stats_update_period =
+ WMI_10_4_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
+ .peer_stats_update_period =
+ WMI_10_4_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
+ .bcnflt_stats_update_period =
+ WMI_10_4_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
+ .pmf_qos = WMI_10_4_PDEV_PARAM_PMF_QOS,
+ .arp_ac_override = WMI_10_4_PDEV_PARAM_ARP_AC_OVERRIDE,
+ .dcs = WMI_10_4_PDEV_PARAM_DCS,
+ .ani_enable = WMI_10_4_PDEV_PARAM_ANI_ENABLE,
+ .ani_poll_period = WMI_10_4_PDEV_PARAM_ANI_POLL_PERIOD,
+ .ani_listen_period = WMI_10_4_PDEV_PARAM_ANI_LISTEN_PERIOD,
+ .ani_ofdm_level = WMI_10_4_PDEV_PARAM_ANI_OFDM_LEVEL,
+ .ani_cck_level = WMI_10_4_PDEV_PARAM_ANI_CCK_LEVEL,
+ .dyntxchain = WMI_10_4_PDEV_PARAM_DYNTXCHAIN,
+ .proxy_sta = WMI_10_4_PDEV_PARAM_PROXY_STA,
+ .idle_ps_config = WMI_10_4_PDEV_PARAM_IDLE_PS_CONFIG,
+ .power_gating_sleep = WMI_10_4_PDEV_PARAM_POWER_GATING_SLEEP,
+ .fast_channel_reset = WMI_10_4_PDEV_PARAM_FAST_CHANNEL_RESET,
+ .burst_dur = WMI_10_4_PDEV_PARAM_BURST_DUR,
+ .burst_enable = WMI_10_4_PDEV_PARAM_BURST_ENABLE,
+ .cal_period = WMI_10_4_PDEV_PARAM_CAL_PERIOD,
+ .aggr_burst = WMI_10_4_PDEV_PARAM_AGGR_BURST,
+ .rx_decap_mode = WMI_10_4_PDEV_PARAM_RX_DECAP_MODE,
+ .smart_antenna_default_antenna =
+ WMI_10_4_PDEV_PARAM_SMART_ANTENNA_DEFAULT_ANTENNA,
+ .igmpmld_override = WMI_10_4_PDEV_PARAM_IGMPMLD_OVERRIDE,
+ .igmpmld_tid = WMI_10_4_PDEV_PARAM_IGMPMLD_TID,
+ .antenna_gain = WMI_10_4_PDEV_PARAM_ANTENNA_GAIN,
+ .rx_filter = WMI_10_4_PDEV_PARAM_RX_FILTER,
+ .set_mcast_to_ucast_tid = WMI_10_4_PDEV_SET_MCAST_TO_UCAST_TID,
+ .proxy_sta_mode = WMI_10_4_PDEV_PARAM_PROXY_STA_MODE,
+ .set_mcast2ucast_mode = WMI_10_4_PDEV_PARAM_SET_MCAST2UCAST_MODE,
+ .set_mcast2ucast_buffer = WMI_10_4_PDEV_PARAM_SET_MCAST2UCAST_BUFFER,
+ .remove_mcast2ucast_buffer =
+ WMI_10_4_PDEV_PARAM_REMOVE_MCAST2UCAST_BUFFER,
+ .peer_sta_ps_statechg_enable =
+ WMI_10_4_PDEV_PEER_STA_PS_STATECHG_ENABLE,
+ .igmpmld_ac_override = WMI_10_4_PDEV_PARAM_IGMPMLD_AC_OVERRIDE,
+ .block_interbss = WMI_10_4_PDEV_PARAM_BLOCK_INTERBSS,
+ .set_disable_reset_cmdid = WMI_10_4_PDEV_PARAM_SET_DISABLE_RESET_CMDID,
+ .set_msdu_ttl_cmdid = WMI_10_4_PDEV_PARAM_SET_MSDU_TTL_CMDID,
+ .set_ppdu_duration_cmdid = WMI_10_4_PDEV_PARAM_SET_PPDU_DURATION_CMDID,
+ .txbf_sound_period_cmdid = WMI_10_4_PDEV_PARAM_TXBF_SOUND_PERIOD_CMDID,
+ .set_promisc_mode_cmdid = WMI_10_4_PDEV_PARAM_SET_PROMISC_MODE_CMDID,
+ .set_burst_mode_cmdid = WMI_10_4_PDEV_PARAM_SET_BURST_MODE_CMDID,
+ .en_stats = WMI_10_4_PDEV_PARAM_EN_STATS,
+ .mu_group_policy = WMI_10_4_PDEV_PARAM_MU_GROUP_POLICY,
+ .noise_detection = WMI_10_4_PDEV_PARAM_NOISE_DETECTION,
+ .noise_threshold = WMI_10_4_PDEV_PARAM_NOISE_THRESHOLD,
+ .dpd_enable = WMI_10_4_PDEV_PARAM_DPD_ENABLE,
+ .set_mcast_bcast_echo = WMI_10_4_PDEV_PARAM_SET_MCAST_BCAST_ECHO,
+ .atf_strict_sch = WMI_10_4_PDEV_PARAM_ATF_STRICT_SCH,
+ .atf_sched_duration = WMI_10_4_PDEV_PARAM_ATF_SCHED_DURATION,
+ .ant_plzn = WMI_10_4_PDEV_PARAM_ANT_PLZN,
+ .mgmt_retry_limit = WMI_10_4_PDEV_PARAM_MGMT_RETRY_LIMIT,
+ .sensitivity_level = WMI_10_4_PDEV_PARAM_SENSITIVITY_LEVEL,
+ .signed_txpower_2g = WMI_10_4_PDEV_PARAM_SIGNED_TXPOWER_2G,
+ .signed_txpower_5g = WMI_10_4_PDEV_PARAM_SIGNED_TXPOWER_5G,
+ .enable_per_tid_amsdu = WMI_10_4_PDEV_PARAM_ENABLE_PER_TID_AMSDU,
+ .enable_per_tid_ampdu = WMI_10_4_PDEV_PARAM_ENABLE_PER_TID_AMPDU,
+ .cca_threshold = WMI_10_4_PDEV_PARAM_CCA_THRESHOLD,
+ .rts_fixed_rate = WMI_10_4_PDEV_PARAM_RTS_FIXED_RATE,
+ .pdev_reset = WMI_10_4_PDEV_PARAM_PDEV_RESET,
+ .wapi_mbssid_offset = WMI_10_4_PDEV_PARAM_WAPI_MBSSID_OFFSET,
+ .arp_srcaddr = WMI_10_4_PDEV_PARAM_ARP_SRCADDR,
+ .arp_dstaddr = WMI_10_4_PDEV_PARAM_ARP_DSTADDR,
+ .enable_btcoex = WMI_10_4_PDEV_PARAM_ENABLE_BTCOEX,
+};
+
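+/* These tables translate the host's WMI_CIPHER_* values into what a given
+ * firmware expects: the map below is an identity mapping for non-TLV
+ * firmware, while TLV firmware numbers the ciphers differently (see the
+ * table that follows).
+ */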
+static const u8 wmi_key_cipher_suites[] = {
+ [WMI_CIPHER_NONE] = WMI_CIPHER_NONE,
+ [WMI_CIPHER_WEP] = WMI_CIPHER_WEP,
+ [WMI_CIPHER_TKIP] = WMI_CIPHER_TKIP,
+ [WMI_CIPHER_AES_OCB] = WMI_CIPHER_AES_OCB,
+ [WMI_CIPHER_AES_CCM] = WMI_CIPHER_AES_CCM,
+ [WMI_CIPHER_WAPI] = WMI_CIPHER_WAPI,
+ [WMI_CIPHER_CKIP] = WMI_CIPHER_CKIP,
+ [WMI_CIPHER_AES_CMAC] = WMI_CIPHER_AES_CMAC,
+ [WMI_CIPHER_AES_GCM] = WMI_CIPHER_AES_GCM,
+};
+
+static const u8 wmi_tlv_key_cipher_suites[] = {
+ [WMI_CIPHER_NONE] = WMI_TLV_CIPHER_NONE,
+ [WMI_CIPHER_WEP] = WMI_TLV_CIPHER_WEP,
+ [WMI_CIPHER_TKIP] = WMI_TLV_CIPHER_TKIP,
+ [WMI_CIPHER_AES_OCB] = WMI_TLV_CIPHER_AES_OCB,
+ [WMI_CIPHER_AES_CCM] = WMI_TLV_CIPHER_AES_CCM,
+ [WMI_CIPHER_WAPI] = WMI_TLV_CIPHER_WAPI,
+ [WMI_CIPHER_CKIP] = WMI_TLV_CIPHER_CKIP,
+ [WMI_CIPHER_AES_CMAC] = WMI_TLV_CIPHER_AES_CMAC,
+ [WMI_CIPHER_AES_GCM] = WMI_TLV_CIPHER_AES_GCM,
+};
+
+static const struct wmi_peer_flags_map wmi_peer_flags_map = {
+ .auth = WMI_PEER_AUTH,
+ .qos = WMI_PEER_QOS,
+ .need_ptk_4_way = WMI_PEER_NEED_PTK_4_WAY,
+ .need_gtk_2_way = WMI_PEER_NEED_GTK_2_WAY,
+ .apsd = WMI_PEER_APSD,
+ .ht = WMI_PEER_HT,
+ .bw40 = WMI_PEER_40MHZ,
+ .stbc = WMI_PEER_STBC,
+ .ldbc = WMI_PEER_LDPC,
+ .dyn_mimops = WMI_PEER_DYN_MIMOPS,
+ .static_mimops = WMI_PEER_STATIC_MIMOPS,
+ .spatial_mux = WMI_PEER_SPATIAL_MUX,
+ .vht = WMI_PEER_VHT,
+ .bw80 = WMI_PEER_80MHZ,
+ .vht_2g = WMI_PEER_VHT_2G,
+ .pmf = WMI_PEER_PMF,
+ .bw160 = WMI_PEER_160MHZ,
+};
+
+static const struct wmi_peer_flags_map wmi_10x_peer_flags_map = {
+ .auth = WMI_10X_PEER_AUTH,
+ .qos = WMI_10X_PEER_QOS,
+ .need_ptk_4_way = WMI_10X_PEER_NEED_PTK_4_WAY,
+ .need_gtk_2_way = WMI_10X_PEER_NEED_GTK_2_WAY,
+ .apsd = WMI_10X_PEER_APSD,
+ .ht = WMI_10X_PEER_HT,
+ .bw40 = WMI_10X_PEER_40MHZ,
+ .stbc = WMI_10X_PEER_STBC,
+ .ldbc = WMI_10X_PEER_LDPC,
+ .dyn_mimops = WMI_10X_PEER_DYN_MIMOPS,
+ .static_mimops = WMI_10X_PEER_STATIC_MIMOPS,
+ .spatial_mux = WMI_10X_PEER_SPATIAL_MUX,
+ .vht = WMI_10X_PEER_VHT,
+ .bw80 = WMI_10X_PEER_80MHZ,
+ .bw160 = WMI_10X_PEER_160MHZ,
+};
+
+static const struct wmi_peer_flags_map wmi_10_2_peer_flags_map = {
+ .auth = WMI_10_2_PEER_AUTH,
+ .qos = WMI_10_2_PEER_QOS,
+ .need_ptk_4_way = WMI_10_2_PEER_NEED_PTK_4_WAY,
+ .need_gtk_2_way = WMI_10_2_PEER_NEED_GTK_2_WAY,
+ .apsd = WMI_10_2_PEER_APSD,
+ .ht = WMI_10_2_PEER_HT,
+ .bw40 = WMI_10_2_PEER_40MHZ,
+ .stbc = WMI_10_2_PEER_STBC,
+ .ldbc = WMI_10_2_PEER_LDPC,
+ .dyn_mimops = WMI_10_2_PEER_DYN_MIMOPS,
+ .static_mimops = WMI_10_2_PEER_STATIC_MIMOPS,
+ .spatial_mux = WMI_10_2_PEER_SPATIAL_MUX,
+ .vht = WMI_10_2_PEER_VHT,
+ .bw80 = WMI_10_2_PEER_80MHZ,
+ .vht_2g = WMI_10_2_PEER_VHT_2G,
+ .pmf = WMI_10_2_PEER_PMF,
+ .bw160 = WMI_10_2_PEER_160MHZ,
+};
+
+void ath10k_wmi_put_wmi_channel(struct ath10k *ar, struct wmi_channel *ch,
+ const struct wmi_channel_arg *arg)
+{
+ u32 flags = 0;
+ struct ieee80211_channel *chan = NULL;
+
+ memset(ch, 0, sizeof(*ch));
+
+ if (arg->passive)
+ flags |= WMI_CHAN_FLAG_PASSIVE;
+ if (arg->allow_ibss)
+ flags |= WMI_CHAN_FLAG_ADHOC_ALLOWED;
+ if (arg->allow_ht)
+ flags |= WMI_CHAN_FLAG_ALLOW_HT;
+ if (arg->allow_vht)
+ flags |= WMI_CHAN_FLAG_ALLOW_VHT;
+ if (arg->ht40plus)
+ flags |= WMI_CHAN_FLAG_HT40_PLUS;
+ if (arg->chan_radar)
+ flags |= WMI_CHAN_FLAG_DFS;
+
+ ch->band_center_freq2 = 0;
+ ch->mhz = __cpu_to_le32(arg->freq);
+ ch->band_center_freq1 = __cpu_to_le32(arg->band_center_freq1);
+ if (arg->mode == MODE_11AC_VHT80_80) {
+ ch->band_center_freq2 = __cpu_to_le32(arg->band_center_freq2);
+ chan = ieee80211_get_channel(ar->hw->wiphy,
+ arg->band_center_freq2 - 10);
+ }
+
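+ /* For VHT160 the firmware expects an 80+80 style encoding:
+ * band_center_freq1 is moved to the centre of the 80 MHz segment
+ * that contains the primary channel, while band_center_freq2
+ * carries the centre of the full 160 MHz channel.
+ */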
+ if (arg->mode == MODE_11AC_VHT160) {
+ u32 band_center_freq1;
+ u32 band_center_freq2;
+
+ if (arg->freq > arg->band_center_freq1) {
+ band_center_freq1 = arg->band_center_freq1 + 40;
+ band_center_freq2 = arg->band_center_freq1 - 40;
+ } else {
+ band_center_freq1 = arg->band_center_freq1 - 40;
+ band_center_freq2 = arg->band_center_freq1 + 40;
+ }
+
+ ch->band_center_freq1 =
+ __cpu_to_le32(band_center_freq1);
+ /* Minus 10 to get a defined 5G channel frequency */
+ chan = ieee80211_get_channel(ar->hw->wiphy,
+ band_center_freq2 - 10);
+ /* The center frequency of the entire VHT160 */
+ ch->band_center_freq2 = __cpu_to_le32(arg->band_center_freq1);
+ }
+
+ if (chan && chan->flags & IEEE80211_CHAN_RADAR)
+ flags |= WMI_CHAN_FLAG_DFS_CFREQ2;
+
+ ch->min_power = arg->min_power;
+ ch->max_power = arg->max_power;
+ ch->reg_power = arg->max_reg_power;
+ ch->antenna_max = arg->max_antenna_gain;
+ ch->max_tx_power = arg->max_power;
+
+ /* mode & flags share storage */
+ ch->mode = arg->mode;
+ ch->flags |= __cpu_to_le32(flags);
+}
+
+int ath10k_wmi_wait_for_service_ready(struct ath10k *ar)
+{
+ unsigned long time_left, i;
+
+ time_left = wait_for_completion_timeout(&ar->wmi.service_ready,
+ WMI_SERVICE_READY_TIMEOUT_HZ);
+ if (!time_left) {
+ /* Sometimes the PCI HIF doesn't receive an interrupt
+ * for the service ready message even if the buffer
+ * was completed. A PCIe sniffer shows that it's
+ * because the corresponding CE ring doesn't fire it.
+ * Work around this here by polling the CE rings once.
+ */
+ ath10k_warn(ar, "failed to receive service ready completion, polling..\n");
+
+ for (i = 0; i < CE_COUNT; i++)
+ ath10k_hif_send_complete_check(ar, i, 1);
+
+ time_left = wait_for_completion_timeout(&ar->wmi.service_ready,
+ WMI_SERVICE_READY_TIMEOUT_HZ);
+ if (!time_left) {
+ ath10k_warn(ar, "polling timed out\n");
+ return -ETIMEDOUT;
+ }
+
+ ath10k_warn(ar, "service ready completion received, continuing normally\n");
+ }
+
+ return 0;
+}
+
+int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar)
+{
+ unsigned long time_left;
+
+ time_left = wait_for_completion_timeout(&ar->wmi.unified_ready,
+ WMI_UNIFIED_READY_TIMEOUT_HZ);
+ if (!time_left)
+ return -ETIMEDOUT;
+ return 0;
+}
+
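+/* WMI command buffers are rounded up to a 4-byte boundary and the whole
+ * rounded-up area is zeroed so the padding never carries stale data.
+ */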
+struct sk_buff *ath10k_wmi_alloc_skb(struct ath10k *ar, u32 len)
+{
+ struct sk_buff *skb;
+ u32 round_len = roundup(len, 4);
+
+ skb = ath10k_htc_alloc_skb(ar, WMI_SKB_HEADROOM + round_len);
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, WMI_SKB_HEADROOM);
+ if (!IS_ALIGNED((unsigned long)skb->data, 4))
+ ath10k_warn(ar, "Unaligned WMI skb\n");
+
+ skb_put(skb, round_len);
+ memset(skb->data, 0, round_len);
+
+ return skb;
+}
+
+static void ath10k_wmi_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
+{
+ dev_kfree_skb(skb);
+}
+
+int ath10k_wmi_cmd_send_nowait(struct ath10k *ar, struct sk_buff *skb,
+ u32 cmd_id)
+{
+ struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
+ struct wmi_cmd_hdr *cmd_hdr;
+ int ret;
+ u32 cmd = 0;
+
+ if (skb_push(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
+ return -ENOMEM;
+
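+ /* pack the command id into the cmd_id field of the WMI header */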
+ cmd |= SM(cmd_id, WMI_CMD_HDR_CMD_ID);
+
+ cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
+ cmd_hdr->cmd_id = __cpu_to_le32(cmd);
+
+ memset(skb_cb, 0, sizeof(*skb_cb));
+ trace_ath10k_wmi_cmd(ar, cmd_id, skb->data, skb->len);
+ ret = ath10k_htc_send(&ar->htc, ar->wmi.eid, skb);
+
+ if (ret)
+ goto err_pull;
+
+ return 0;
+
+err_pull:
+ skb_pull(skb, sizeof(struct wmi_cmd_hdr));
+ return ret;
+}
+
+static void ath10k_wmi_tx_beacon_nowait(struct ath10k_vif *arvif)
+{
+ struct ath10k *ar = arvif->ar;
+ struct ath10k_skb_cb *cb;
+ struct sk_buff *bcn;
+ bool dtim_zero;
+ bool deliver_cab;
+ int ret;
+
+ spin_lock_bh(&ar->data_lock);
+
+ bcn = arvif->beacon;
+
+ if (!bcn)
+ goto unlock;
+
+ cb = ATH10K_SKB_CB(bcn);
+
+ switch (arvif->beacon_state) {
+ case ATH10K_BEACON_SENDING:
+ case ATH10K_BEACON_SENT:
+ break;
+ case ATH10K_BEACON_SCHEDULED:
+ arvif->beacon_state = ATH10K_BEACON_SENDING;
+ spin_unlock_bh(&ar->data_lock);
+
+ dtim_zero = !!(cb->flags & ATH10K_SKB_F_DTIM_ZERO);
+ deliver_cab = !!(cb->flags & ATH10K_SKB_F_DELIVER_CAB);
+ ret = ath10k_wmi_beacon_send_ref_nowait(arvif->ar,
+ arvif->vdev_id,
+ bcn->data, bcn->len,
+ cb->paddr,
+ dtim_zero,
+ deliver_cab);
+
+ spin_lock_bh(&ar->data_lock);
+
+ if (ret == 0)
+ arvif->beacon_state = ATH10K_BEACON_SENT;
+ else
+ arvif->beacon_state = ATH10K_BEACON_SCHEDULED;
+ }
+
+unlock:
+ spin_unlock_bh(&ar->data_lock);
+}
+
+static void ath10k_wmi_tx_beacons_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
+
+ ath10k_wmi_tx_beacon_nowait(arvif);
+}
+
+static void ath10k_wmi_tx_beacons_nowait(struct ath10k *ar)
+{
+ ieee80211_iterate_active_interfaces_atomic(ar->hw,
+ ATH10K_ITER_NORMAL_FLAGS,
+ ath10k_wmi_tx_beacons_iter,
+ NULL);
+}
+
+static void ath10k_wmi_op_ep_tx_credits(struct ath10k *ar)
+{
+ /* try to send pending beacons first. they take priority */
+ ath10k_wmi_tx_beacons_nowait(ar);
+
+ wake_up(&ar->wmi.tx_credits_wq);
+}
+
+int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id)
+{
+ int ret = -EOPNOTSUPP;
+
+ might_sleep();
+
+ if (cmd_id == WMI_CMD_UNSUPPORTED) {
+ ath10k_warn(ar, "wmi command %d is not supported by firmware\n",
+ cmd_id);
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+
+ wait_event_timeout(ar->wmi.tx_credits_wq, ({
+ if (ar->state == ATH10K_STATE_WEDGED) {
+ /* don't bother sending; the hardware needs a restart */
+ ret = -ESHUTDOWN;
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "drop wmi command %d, hardware is wedged\n", cmd_id);
+ } else {
+ /* try to send pending beacons first. they take priority */
+ ath10k_wmi_tx_beacons_nowait(ar);
+
+ ret = ath10k_wmi_cmd_send_nowait(ar, skb, cmd_id);
+
+ if (ret && test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags))
+ ret = -ESHUTDOWN;
+ }
+ (ret != -EAGAIN);
+ }), 3 * HZ);
+
+ if (ret)
+ dev_kfree_skb_any(skb);
+
+ if (ret == -EAGAIN) {
+ ath10k_warn(ar, "wmi command %d timeout, restarting hardware\n",
+ cmd_id);
+ ath10k_core_start_recovery(ar);
+ }
+
+ return ret;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
+{
+ struct ath10k_skb_cb *cb = ATH10K_SKB_CB(msdu);
+ struct ath10k_vif *arvif;
+ struct wmi_mgmt_tx_cmd *cmd;
+ struct ieee80211_hdr *hdr;
+ struct sk_buff *skb;
+ int len;
+ u32 vdev_id;
+ u32 buf_len = msdu->len;
+ u16 fc;
+ const u8 *peer_addr;
+
+ hdr = (struct ieee80211_hdr *)msdu->data;
+ fc = le16_to_cpu(hdr->frame_control);
+
+ if (cb->vif) {
+ arvif = (void *)cb->vif->drv_priv;
+ vdev_id = arvif->vdev_id;
+ } else {
+ vdev_id = 0;
+ }
+
+ if (WARN_ON_ONCE(!ieee80211_is_mgmt(hdr->frame_control)))
+ return ERR_PTR(-EINVAL);
+
+ len = sizeof(cmd->hdr) + msdu->len;
+
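+ /* Protected robust management frames (action, deauth, disassoc)
+ * need extra room for the MIC: an MMIE for group-addressed frames,
+ * otherwise a GCMP or CCMP MIC depending on the unicast cipher.
+ */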
+ if ((ieee80211_is_action(hdr->frame_control) ||
+ ieee80211_is_deauth(hdr->frame_control) ||
+ ieee80211_is_disassoc(hdr->frame_control)) &&
+ ieee80211_has_protected(hdr->frame_control)) {
+ peer_addr = hdr->addr1;
+ if (is_multicast_ether_addr(peer_addr)) {
+ len += sizeof(struct ieee80211_mmie_16);
+ buf_len += sizeof(struct ieee80211_mmie_16);
+ } else {
+ if (cb->ucast_cipher == WLAN_CIPHER_SUITE_GCMP ||
+ cb->ucast_cipher == WLAN_CIPHER_SUITE_GCMP_256) {
+ len += IEEE80211_GCMP_MIC_LEN;
+ buf_len += IEEE80211_GCMP_MIC_LEN;
+ } else {
+ len += IEEE80211_CCMP_MIC_LEN;
+ buf_len += IEEE80211_CCMP_MIC_LEN;
+ }
+ }
+ }
+
+ len = round_up(len, 4);
+
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_mgmt_tx_cmd *)skb->data;
+
+ cmd->hdr.vdev_id = __cpu_to_le32(vdev_id);
+ cmd->hdr.tx_rate = 0;
+ cmd->hdr.tx_power = 0;
+ cmd->hdr.buf_len = __cpu_to_le32(buf_len);
+
+ ether_addr_copy(cmd->hdr.peer_macaddr.addr, ieee80211_get_DA(hdr));
+ memcpy(cmd->buf, msdu->data, msdu->len);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi mgmt tx skb %p len %d ftype %02x stype %02x\n",
+ msdu, skb->len, fc & IEEE80211_FCTL_FTYPE,
+ fc & IEEE80211_FCTL_STYPE);
+ trace_ath10k_tx_hdr(ar, skb->data, skb->len);
+ trace_ath10k_tx_payload(ar, skb->data, skb->len);
+
+ return skb;
+}
+
+static void ath10k_wmi_event_scan_started(struct ath10k *ar)
+{
+ lockdep_assert_held(&ar->data_lock);
+
+ switch (ar->scan.state) {
+ case ATH10K_SCAN_IDLE:
+ case ATH10K_SCAN_RUNNING:
+ case ATH10K_SCAN_ABORTING:
+ ath10k_warn(ar, "received scan started event in an invalid scan state: %s (%d)\n",
+ ath10k_scan_state_str(ar->scan.state),
+ ar->scan.state);
+ break;
+ case ATH10K_SCAN_STARTING:
+ ar->scan.state = ATH10K_SCAN_RUNNING;
+
+ if (ar->scan.is_roc)
+ ieee80211_ready_on_channel(ar->hw);
+
+ complete(&ar->scan.started);
+ break;
+ }
+}
+
+static void ath10k_wmi_event_scan_start_failed(struct ath10k *ar)
+{
+ lockdep_assert_held(&ar->data_lock);
+
+ switch (ar->scan.state) {
+ case ATH10K_SCAN_IDLE:
+ case ATH10K_SCAN_RUNNING:
+ case ATH10K_SCAN_ABORTING:
+ ath10k_warn(ar, "received scan start failed event in an invalid scan state: %s (%d)\n",
+ ath10k_scan_state_str(ar->scan.state),
+ ar->scan.state);
+ break;
+ case ATH10K_SCAN_STARTING:
+ complete(&ar->scan.started);
+ __ath10k_scan_finish(ar);
+ break;
+ }
+}
+
+static void ath10k_wmi_event_scan_completed(struct ath10k *ar)
+{
+ lockdep_assert_held(&ar->data_lock);
+
+ switch (ar->scan.state) {
+ case ATH10K_SCAN_IDLE:
+ case ATH10K_SCAN_STARTING:
+ /* One suspected reason scan can be completed while starting is
+ * if firmware fails to deliver all scan events to the host,
+ * e.g. when transport pipe is full. This has been observed
+ * with spectral scan phyerr events starving wmi transport
+ * pipe. In such case the "scan completed" event should be (and
+ * is) ignored by the host as it may be just firmware's scan
+ * state machine recovering.
+ */
+ ath10k_warn(ar, "received scan completed event in an invalid scan state: %s (%d)\n",
+ ath10k_scan_state_str(ar->scan.state),
+ ar->scan.state);
+ break;
+ case ATH10K_SCAN_RUNNING:
+ case ATH10K_SCAN_ABORTING:
+ __ath10k_scan_finish(ar);
+ break;
+ }
+}
+
+static void ath10k_wmi_event_scan_bss_chan(struct ath10k *ar)
+{
+ lockdep_assert_held(&ar->data_lock);
+
+ switch (ar->scan.state) {
+ case ATH10K_SCAN_IDLE:
+ case ATH10K_SCAN_STARTING:
+ ath10k_warn(ar, "received scan bss chan event in an invalid scan state: %s (%d)\n",
+ ath10k_scan_state_str(ar->scan.state),
+ ar->scan.state);
+ break;
+ case ATH10K_SCAN_RUNNING:
+ case ATH10K_SCAN_ABORTING:
+ ar->scan_channel = NULL;
+ break;
+ }
+}
+
+static void ath10k_wmi_event_scan_foreign_chan(struct ath10k *ar, u32 freq)
+{
+ lockdep_assert_held(&ar->data_lock);
+
+ switch (ar->scan.state) {
+ case ATH10K_SCAN_IDLE:
+ case ATH10K_SCAN_STARTING:
+ ath10k_warn(ar, "received scan foreign chan event in an invalid scan state: %s (%d)\n",
+ ath10k_scan_state_str(ar->scan.state),
+ ar->scan.state);
+ break;
+ case ATH10K_SCAN_RUNNING:
+ case ATH10K_SCAN_ABORTING:
+ ar->scan_channel = ieee80211_get_channel(ar->hw->wiphy, freq);
+
+ if (ar->scan.is_roc && ar->scan.roc_freq == freq)
+ complete(&ar->scan.on_channel);
+ break;
+ }
+}
+
+static const char *
+ath10k_wmi_event_scan_type_str(enum wmi_scan_event_type type,
+ enum wmi_scan_completion_reason reason)
+{
+ switch (type) {
+ case WMI_SCAN_EVENT_STARTED:
+ return "started";
+ case WMI_SCAN_EVENT_COMPLETED:
+ switch (reason) {
+ case WMI_SCAN_REASON_COMPLETED:
+ return "completed";
+ case WMI_SCAN_REASON_CANCELLED:
+ return "completed [cancelled]";
+ case WMI_SCAN_REASON_PREEMPTED:
+ return "completed [preempted]";
+ case WMI_SCAN_REASON_TIMEDOUT:
+ return "completed [timedout]";
+ case WMI_SCAN_REASON_INTERNAL_FAILURE:
+ return "completed [internal err]";
+ case WMI_SCAN_REASON_MAX:
+ break;
+ }
+ return "completed [unknown]";
+ case WMI_SCAN_EVENT_BSS_CHANNEL:
+ return "bss channel";
+ case WMI_SCAN_EVENT_FOREIGN_CHANNEL:
+ return "foreign channel";
+ case WMI_SCAN_EVENT_DEQUEUED:
+ return "dequeued";
+ case WMI_SCAN_EVENT_PREEMPTED:
+ return "preempted";
+ case WMI_SCAN_EVENT_START_FAILED:
+ return "start failed";
+ case WMI_SCAN_EVENT_RESTARTED:
+ return "restarted";
+ case WMI_SCAN_EVENT_FOREIGN_CHANNEL_EXIT:
+ return "foreign channel exit";
+ default:
+ return "unknown";
+ }
+}
+
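+/* The event fields are copied out verbatim and stay little-endian;
+ * the caller is expected to byte-swap them.
+ */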
+static int ath10k_wmi_op_pull_scan_ev(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_scan_ev_arg *arg)
+{
+ struct wmi_scan_event *ev = (void *)skb->data;
+
+ if (skb->len < sizeof(*ev))
+ return -EPROTO;
+
+ skb_pull(skb, sizeof(*ev));
+ arg->event_type = ev->event_type;
+ arg->reason = ev->reason;
+ arg->channel_freq = ev->channel_freq;
+ arg->scan_req_id = ev->scan_req_id;
+ arg->scan_id = ev->scan_id;
+ arg->vdev_id = ev->vdev_id;
+
+ return 0;
+}
+
+int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct wmi_scan_ev_arg arg = {};
+ enum wmi_scan_event_type event_type;
+ enum wmi_scan_completion_reason reason;
+ u32 freq;
+ u32 req_id;
+ u32 scan_id;
+ u32 vdev_id;
+ int ret;
+
+ ret = ath10k_wmi_pull_scan(ar, skb, &arg);
+ if (ret) {
+ ath10k_warn(ar, "failed to parse scan event: %d\n", ret);
+ return ret;
+ }
+
+ event_type = __le32_to_cpu(arg.event_type);
+ reason = __le32_to_cpu(arg.reason);
+ freq = __le32_to_cpu(arg.channel_freq);
+ req_id = __le32_to_cpu(arg.scan_req_id);
+ scan_id = __le32_to_cpu(arg.scan_id);
+ vdev_id = __le32_to_cpu(arg.vdev_id);
+
+ spin_lock_bh(&ar->data_lock);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "scan event %s type %d reason %d freq %d req_id %d scan_id %d vdev_id %d state %s (%d)\n",
+ ath10k_wmi_event_scan_type_str(event_type, reason),
+ event_type, reason, freq, req_id, scan_id, vdev_id,
+ ath10k_scan_state_str(ar->scan.state), ar->scan.state);
+
+ switch (event_type) {
+ case WMI_SCAN_EVENT_STARTED:
+ ath10k_wmi_event_scan_started(ar);
+ break;
+ case WMI_SCAN_EVENT_COMPLETED:
+ ath10k_wmi_event_scan_completed(ar);
+ break;
+ case WMI_SCAN_EVENT_BSS_CHANNEL:
+ ath10k_wmi_event_scan_bss_chan(ar);
+ break;
+ case WMI_SCAN_EVENT_FOREIGN_CHANNEL:
+ ath10k_wmi_event_scan_foreign_chan(ar, freq);
+ break;
+ case WMI_SCAN_EVENT_START_FAILED:
+ ath10k_warn(ar, "received scan start failure event\n");
+ ath10k_wmi_event_scan_start_failed(ar);
+ break;
+ case WMI_SCAN_EVENT_DEQUEUED:
+ case WMI_SCAN_EVENT_PREEMPTED:
+ case WMI_SCAN_EVENT_RESTARTED:
+ case WMI_SCAN_EVENT_FOREIGN_CHANNEL_EXIT:
+ default:
+ break;
+ }
+
+ spin_unlock_bh(&ar->data_lock);
+ return 0;
+}
+
+/* If keys are configured, HW decrypts all frames
+ * with protected bit set. Mark such frames as decrypted.
+ */
+static void ath10k_wmi_handle_wep_reauth(struct ath10k *ar,
+ struct sk_buff *skb,
+ struct ieee80211_rx_status *status)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ unsigned int hdrlen;
+ bool peer_key;
+ u8 *addr, keyidx;
+
+ if (!ieee80211_is_auth(hdr->frame_control) ||
+ !ieee80211_has_protected(hdr->frame_control))
+ return;
+
+ hdrlen = ieee80211_hdrlen(hdr->frame_control);
+ if (skb->len < (hdrlen + IEEE80211_WEP_IV_LEN))
+ return;
+
+ keyidx = skb->data[hdrlen + (IEEE80211_WEP_IV_LEN - 1)] >> WEP_KEYID_SHIFT;
+ addr = ieee80211_get_SA(hdr);
+
+ spin_lock_bh(&ar->data_lock);
+ peer_key = ath10k_mac_is_peer_wep_key_set(ar, addr, keyidx);
+ spin_unlock_bh(&ar->data_lock);
+
+ if (peer_key) {
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac wep key present for peer %pM\n", addr);
+ status->flag |= RX_FLAG_DECRYPTED;
+ }
+}
+
+static int ath10k_wmi_op_pull_mgmt_rx_ev(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_mgmt_rx_ev_arg *arg)
+{
+ struct wmi_mgmt_rx_event_v1 *ev_v1;
+ struct wmi_mgmt_rx_event_v2 *ev_v2;
+ struct wmi_mgmt_rx_hdr_v1 *ev_hdr;
+ struct wmi_mgmt_rx_ext_info *ext_info;
+ size_t pull_len;
+ u32 msdu_len;
+ u32 len;
+
+ if (test_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX,
+ ar->running_fw->fw_file.fw_features)) {
+ ev_v2 = (struct wmi_mgmt_rx_event_v2 *)skb->data;
+ ev_hdr = &ev_v2->hdr.v1;
+ pull_len = sizeof(*ev_v2);
+ } else {
+ ev_v1 = (struct wmi_mgmt_rx_event_v1 *)skb->data;
+ ev_hdr = &ev_v1->hdr;
+ pull_len = sizeof(*ev_v1);
+ }
+
+ if (skb->len < pull_len)
+ return -EPROTO;
+
+ skb_pull(skb, pull_len);
+ arg->channel = ev_hdr->channel;
+ arg->buf_len = ev_hdr->buf_len;
+ arg->status = ev_hdr->status;
+ arg->snr = ev_hdr->snr;
+ arg->phy_mode = ev_hdr->phy_mode;
+ arg->rate = ev_hdr->rate;
+
+ msdu_len = __le32_to_cpu(arg->buf_len);
+ if (skb->len < msdu_len)
+ return -EPROTO;
+
+ if (le32_to_cpu(arg->status) & WMI_RX_STATUS_EXT_INFO) {
+ len = ALIGN(le32_to_cpu(arg->buf_len), 4);
+ ext_info = (struct wmi_mgmt_rx_ext_info *)(skb->data + len);
+ memcpy(&arg->ext_info, ext_info,
+ sizeof(struct wmi_mgmt_rx_ext_info));
+ }
+ /* the WMI buffer might've ended up being padded to 4 bytes due to HTC
+ * trailer with credit update. Trim the excess garbage.
+ */
+ skb_trim(skb, msdu_len);
+
+ return 0;
+}
+
+static int ath10k_wmi_10_4_op_pull_mgmt_rx_ev(struct ath10k *ar,
+ struct sk_buff *skb,
+ struct wmi_mgmt_rx_ev_arg *arg)
+{
+ struct wmi_10_4_mgmt_rx_event *ev;
+ struct wmi_10_4_mgmt_rx_hdr *ev_hdr;
+ size_t pull_len;
+ u32 msdu_len;
+ struct wmi_mgmt_rx_ext_info *ext_info;
+ u32 len;
+
+ ev = (struct wmi_10_4_mgmt_rx_event *)skb->data;
+ ev_hdr = &ev->hdr;
+ pull_len = sizeof(*ev);
+
+ if (skb->len < pull_len)
+ return -EPROTO;
+
+ skb_pull(skb, pull_len);
+ arg->channel = ev_hdr->channel;
+ arg->buf_len = ev_hdr->buf_len;
+ arg->status = ev_hdr->status;
+ arg->snr = ev_hdr->snr;
+ arg->phy_mode = ev_hdr->phy_mode;
+ arg->rate = ev_hdr->rate;
+
+ msdu_len = __le32_to_cpu(arg->buf_len);
+ if (skb->len < msdu_len)
+ return -EPROTO;
+
+ if (le32_to_cpu(arg->status) & WMI_RX_STATUS_EXT_INFO) {
+ len = ALIGN(le32_to_cpu(arg->buf_len), 4);
+ ext_info = (struct wmi_mgmt_rx_ext_info *)(skb->data + len);
+ memcpy(&arg->ext_info, ext_info,
+ sizeof(struct wmi_mgmt_rx_ext_info));
+ }
+
+ /* Make sure bytes added for padding are removed. */
+ skb_trim(skb, msdu_len);
+
+ return 0;
+}
+
+static bool ath10k_wmi_rx_is_decrypted(struct ath10k *ar,
+ struct ieee80211_hdr *hdr)
+{
+ if (!ieee80211_has_protected(hdr->frame_control))
+ return false;
+
+ /* FW delivers WEP Shared Auth frame with Protected Bit set and
+ * encrypted payload. However, in case of PMF it delivers decrypted
+ * frames with Protected Bit set.
+ */
+ if (ieee80211_is_auth(hdr->frame_control))
+ return false;
+
+ /* qca99x0 based FW delivers broadcast or multicast management frames
+ * (ex: group privacy action frames in mesh) as encrypted payload.
+ */
+ if (is_multicast_ether_addr(ieee80211_get_DA(hdr)) &&
+ ar->hw_params.sw_decrypt_mcast_mgmt)
+ return false;
+
+ return true;
+}
+
+static int
+wmi_process_mgmt_tx_comp(struct ath10k *ar, struct mgmt_tx_compl_params *param)
+{
+ struct ath10k_mgmt_tx_pkt_addr *pkt_addr;
+ struct ath10k_wmi *wmi = &ar->wmi;
+ struct ieee80211_tx_info *info;
+ struct sk_buff *msdu;
+ int ret;
+
+ spin_lock_bh(&ar->data_lock);
+
+ pkt_addr = idr_find(&wmi->mgmt_pending_tx, param->desc_id);
+ if (!pkt_addr) {
+ ath10k_warn(ar, "received mgmt tx completion for invalid msdu_id: %d\n",
+ param->desc_id);
+ ret = -ENOENT;
+ goto out;
+ }
+
+ msdu = pkt_addr->vaddr;
+ dma_unmap_single(ar->dev, pkt_addr->paddr,
+ msdu->len, DMA_TO_DEVICE);
+ info = IEEE80211_SKB_CB(msdu);
+ kfree(pkt_addr);
+
+ if (param->status) {
+ info->flags &= ~IEEE80211_TX_STAT_ACK;
+ } else {
+ info->flags |= IEEE80211_TX_STAT_ACK;
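+ /* the reported ack RSSI is relative to the noise floor */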
+ info->status.ack_signal = ATH10K_DEFAULT_NOISE_FLOOR +
+ param->ack_rssi;
+ info->status.flags |= IEEE80211_TX_STATUS_ACK_SIGNAL_VALID;
+ }
+
+ ieee80211_tx_status_irqsafe(ar->hw, msdu);
+
+ ret = 0;
+
+out:
+ idr_remove(&wmi->mgmt_pending_tx, param->desc_id);
+ spin_unlock_bh(&ar->data_lock);
+ return ret;
+}
+
+int ath10k_wmi_event_mgmt_tx_compl(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct wmi_tlv_mgmt_tx_compl_ev_arg arg;
+ struct mgmt_tx_compl_params param;
+ int ret;
+
+ ret = ath10k_wmi_pull_mgmt_tx_compl(ar, skb, &arg);
+ if (ret) {
+ ath10k_warn(ar, "failed to parse mgmt comp event: %d\n", ret);
+ return ret;
+ }
+
+ memset(&param, 0, sizeof(struct mgmt_tx_compl_params));
+ param.desc_id = __le32_to_cpu(arg.desc_id);
+ param.status = __le32_to_cpu(arg.status);
+
+ if (test_bit(WMI_SERVICE_TX_DATA_ACK_RSSI, ar->wmi.svc_map))
+ param.ack_rssi = __le32_to_cpu(arg.ack_rssi);
+
+ wmi_process_mgmt_tx_comp(ar, &param);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv evnt mgmt tx completion\n");
+
+ return 0;
+}
+
+int ath10k_wmi_event_mgmt_tx_bundle_compl(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct wmi_tlv_mgmt_tx_bundle_compl_ev_arg arg;
+ struct mgmt_tx_compl_params param;
+ u32 num_reports;
+ int i, ret;
+
+ ret = ath10k_wmi_pull_mgmt_tx_bundle_compl(ar, skb, &arg);
+ if (ret) {
+ ath10k_warn(ar, "failed to parse bundle mgmt compl event: %d\n", ret);
+ return ret;
+ }
+
+ num_reports = __le32_to_cpu(arg.num_reports);
+
+ for (i = 0; i < num_reports; i++) {
+ memset(&param, 0, sizeof(struct mgmt_tx_compl_params));
+ param.desc_id = __le32_to_cpu(arg.desc_ids[i]);
+ param.status = __le32_to_cpu(arg.status[i]);
+
+ if (test_bit(WMI_SERVICE_TX_DATA_ACK_RSSI, ar->wmi.svc_map))
+ param.ack_rssi = __le32_to_cpu(arg.ack_rssi[i]);
+ wmi_process_mgmt_tx_comp(ar, &param);
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv event bundle mgmt tx completion\n");
+
+ return 0;
+}
+
+int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct wmi_mgmt_rx_ev_arg arg = {};
+ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
+ struct ieee80211_hdr *hdr;
+ struct ieee80211_supported_band *sband;
+ u32 rx_status;
+ u32 channel;
+ u32 phy_mode;
+ u32 snr, rssi;
+ u32 rate;
+ u16 fc;
+ int ret, i;
+
+ ret = ath10k_wmi_pull_mgmt_rx(ar, skb, &arg);
+ if (ret) {
+ ath10k_warn(ar, "failed to parse mgmt rx event: %d\n", ret);
+ dev_kfree_skb(skb);
+ return ret;
+ }
+
+ channel = __le32_to_cpu(arg.channel);
+ rx_status = __le32_to_cpu(arg.status);
+ snr = __le32_to_cpu(arg.snr);
+ phy_mode = __le32_to_cpu(arg.phy_mode);
+ rate = __le32_to_cpu(arg.rate);
+
+ memset(status, 0, sizeof(*status));
+
+ ath10k_dbg(ar, ATH10K_DBG_MGMT,
+ "event mgmt rx status %08x\n", rx_status);
+
+ if ((test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) ||
+ (rx_status & (WMI_RX_STATUS_ERR_DECRYPT |
+ WMI_RX_STATUS_ERR_KEY_CACHE_MISS | WMI_RX_STATUS_ERR_CRC))) {
+ dev_kfree_skb(skb);
+ return 0;
+ }
+
+ if (rx_status & WMI_RX_STATUS_ERR_MIC)
+ status->flag |= RX_FLAG_MMIC_ERROR;
+
+ if (rx_status & WMI_RX_STATUS_EXT_INFO) {
+ status->mactime =
+ __le64_to_cpu(arg.ext_info.rx_mac_timestamp);
+ status->flag |= RX_FLAG_MACTIME_END;
+ }
+ /* Hardware can Rx CCK rates on 5GHz. In that case phy_mode is set to
+ * MODE_11B. This means phy_mode is not a reliable source for the band
+ * of mgmt rx.
+ */
+ if (channel >= 1 && channel <= 14) {
+ status->band = NL80211_BAND_2GHZ;
+ } else if (channel >= 36 && channel <= ATH10K_MAX_5G_CHAN) {
+ status->band = NL80211_BAND_5GHZ;
+ } else {
+ /* Shouldn't happen unless list of advertised channels to
+ * mac80211 has been changed.
+ */
+ WARN_ON_ONCE(1);
+ dev_kfree_skb(skb);
+ return 0;
+ }
+
+ if (phy_mode == MODE_11B && status->band == NL80211_BAND_5GHZ)
+ ath10k_dbg(ar, ATH10K_DBG_MGMT, "wmi mgmt rx 11b (CCK) on 5GHz\n");
+
+ sband = &ar->mac.sbands[status->band];
+
+ status->freq = ieee80211_channel_to_frequency(channel, status->band);
+ status->signal = snr + ATH10K_DEFAULT_NOISE_FLOOR;
+
+ BUILD_BUG_ON(ARRAY_SIZE(status->chain_signal) != ARRAY_SIZE(arg.rssi));
+
+ for (i = 0; i < ARRAY_SIZE(status->chain_signal); i++) {
+ status->chains &= ~BIT(i);
+ rssi = __le32_to_cpu(arg.rssi[i]);
+ ath10k_dbg(ar, ATH10K_DBG_MGMT, "mgmt rssi[%d]:%d\n", i, arg.rssi[i]);
+
+ if (rssi != ATH10K_INVALID_RSSI && rssi != 0) {
+ status->chain_signal[i] = ATH10K_DEFAULT_NOISE_FLOOR + rssi;
+ status->chains |= BIT(i);
+ }
+ }
+
+ status->rate_idx = ath10k_mac_bitrate_to_idx(sband, rate / 100);
+
+ hdr = (struct ieee80211_hdr *)skb->data;
+ fc = le16_to_cpu(hdr->frame_control);
+
+ /* Firmware is guaranteed to report all essential management frames via
+ * WMI while it can deliver some extra via HTT. Since there can be
+ * duplicates, split the reporting wrt monitor/sniffing.
+ */
+ status->flag |= RX_FLAG_SKIP_MONITOR;
+
+ ath10k_wmi_handle_wep_reauth(ar, skb, status);
+
+ if (ath10k_wmi_rx_is_decrypted(ar, hdr)) {
+ status->flag |= RX_FLAG_DECRYPTED;
+
+ if (!ieee80211_is_action(hdr->frame_control) &&
+ !ieee80211_is_deauth(hdr->frame_control) &&
+ !ieee80211_is_disassoc(hdr->frame_control)) {
+ status->flag |= RX_FLAG_IV_STRIPPED |
+ RX_FLAG_MMIC_STRIPPED;
+ hdr->frame_control = __cpu_to_le16(fc &
+ ~IEEE80211_FCTL_PROTECTED);
+ }
+ }
+
+ if (ieee80211_is_beacon(hdr->frame_control))
+ ath10k_mac_handle_beacon(ar, skb);
+
+ if (ieee80211_is_beacon(hdr->frame_control) ||
+ ieee80211_is_probe_resp(hdr->frame_control))
+ status->boottime_ns = ktime_get_boottime_ns();
+
+ ath10k_dbg(ar, ATH10K_DBG_MGMT,
+ "event mgmt rx skb %p len %d ftype %02x stype %02x\n",
+ skb, skb->len,
+ fc & IEEE80211_FCTL_FTYPE, fc & IEEE80211_FCTL_STYPE);
+
+ ath10k_dbg(ar, ATH10K_DBG_MGMT,
+ "event mgmt rx freq %d band %d snr %d, rate_idx %d\n",
+ status->freq, status->band, status->signal,
+ status->rate_idx);
+
+ ieee80211_rx_ni(ar->hw, skb);
+
+ return 0;
+}
+
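+/* Map a channel center frequency to its index in ar->survey[], which is
+ * laid out as the channels of each advertised band in order.
+ */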
+static int freq_to_idx(struct ath10k *ar, int freq)
+{
+ struct ieee80211_supported_band *sband;
+ int band, ch, idx = 0;
+
+ for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) {
+ sband = ar->hw->wiphy->bands[band];
+ if (!sband)
+ continue;
+
+ for (ch = 0; ch < sband->n_channels; ch++, idx++)
+ if (sband->channels[ch].center_freq == freq)
+ goto exit;
+ }
+
+exit:
+ return idx;
+}
+
+static int ath10k_wmi_op_pull_ch_info_ev(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_ch_info_ev_arg *arg)
+{
+ struct wmi_chan_info_event *ev = (void *)skb->data;
+
+ if (skb->len < sizeof(*ev))
+ return -EPROTO;
+
+ skb_pull(skb, sizeof(*ev));
+ arg->err_code = ev->err_code;
+ arg->freq = ev->freq;
+ arg->cmd_flags = ev->cmd_flags;
+ arg->noise_floor = ev->noise_floor;
+ arg->rx_clear_count = ev->rx_clear_count;
+ arg->cycle_count = ev->cycle_count;
+
+ return 0;
+}
+
+static int ath10k_wmi_10_4_op_pull_ch_info_ev(struct ath10k *ar,
+ struct sk_buff *skb,
+ struct wmi_ch_info_ev_arg *arg)
+{
+ struct wmi_10_4_chan_info_event *ev = (void *)skb->data;
+
+ if (skb->len < sizeof(*ev))
+ return -EPROTO;
+
+ skb_pull(skb, sizeof(*ev));
+ arg->err_code = ev->err_code;
+ arg->freq = ev->freq;
+ arg->cmd_flags = ev->cmd_flags;
+ arg->noise_floor = ev->noise_floor;
+ arg->rx_clear_count = ev->rx_clear_count;
+ arg->cycle_count = ev->cycle_count;
+ arg->chan_tx_pwr_range = ev->chan_tx_pwr_range;
+ arg->chan_tx_pwr_tp = ev->chan_tx_pwr_tp;
+ arg->rx_frame_count = ev->rx_frame_count;
+
+ return 0;
+}
+
+/*
+ * Handle the channel info event for firmware which only sends one
+ * chan_info event per scanned channel.
+ */
+static void ath10k_wmi_event_chan_info_unpaired(struct ath10k *ar,
+ struct chan_info_params *params)
+{
+ struct survey_info *survey;
+ int idx;
+
+ if (params->cmd_flags & WMI_CHAN_INFO_FLAG_COMPLETE) {
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "chan info report completed\n");
+ return;
+ }
+
+ idx = freq_to_idx(ar, params->freq);
+ if (idx >= ARRAY_SIZE(ar->survey)) {
+ ath10k_warn(ar, "chan info: invalid frequency %d (idx %d out of bounds)\n",
+ params->freq, idx);
+ return;
+ }
+
+ survey = &ar->survey[idx];
+
+ if (!params->mac_clk_mhz)
+ return;
+
+ memset(survey, 0, sizeof(*survey));
+
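+ /* The cycle counters tick at the MAC clock rate (MHz), so dividing
+ * by mac_clk_mhz gives microseconds and a further division by 1000
+ * gives the milliseconds cfg80211 expects.
+ */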
+ survey->noise = params->noise_floor;
+ survey->time = (params->cycle_count / params->mac_clk_mhz) / 1000;
+ survey->time_busy = (params->rx_clear_count / params->mac_clk_mhz) / 1000;
+ survey->filled |= SURVEY_INFO_NOISE_DBM | SURVEY_INFO_TIME |
+ SURVEY_INFO_TIME_BUSY;
+}
+
+/*
+ * Handle the channel info event for firmware which sends chan_info
+ * events in pairs (start and stop) for every scanned channel.
+ */
+static void ath10k_wmi_event_chan_info_paired(struct ath10k *ar,
+ struct chan_info_params *params)
+{
+ struct survey_info *survey;
+ int idx;
+
+ idx = freq_to_idx(ar, params->freq);
+ if (idx >= ARRAY_SIZE(ar->survey)) {
+ ath10k_warn(ar, "chan info: invalid frequency %d (idx %d out of bounds)\n",
+ params->freq, idx);
+ return;
+ }
+
+ if (params->cmd_flags & WMI_CHAN_INFO_FLAG_COMPLETE) {
+ if (ar->ch_info_can_report_survey) {
+ survey = &ar->survey[idx];
+ survey->noise = params->noise_floor;
+ survey->filled = SURVEY_INFO_NOISE_DBM;
+
+ ath10k_hw_fill_survey_time(ar,
+ survey,
+ params->cycle_count,
+ params->rx_clear_count,
+ ar->survey_last_cycle_count,
+ ar->survey_last_rx_clear_count);
+ }
+
+ ar->ch_info_can_report_survey = false;
+ } else {
+ ar->ch_info_can_report_survey = true;
+ }
+
+ if (!(params->cmd_flags & WMI_CHAN_INFO_FLAG_PRE_COMPLETE)) {
+ ar->survey_last_rx_clear_count = params->rx_clear_count;
+ ar->survey_last_cycle_count = params->cycle_count;
+ }
+}
+
+void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct chan_info_params ch_info_param;
+ struct wmi_ch_info_ev_arg arg = {};
+ int ret;
+
+ ret = ath10k_wmi_pull_ch_info(ar, skb, &arg);
+ if (ret) {
+ ath10k_warn(ar, "failed to parse chan info event: %d\n", ret);
+ return;
+ }
+
+ ch_info_param.err_code = __le32_to_cpu(arg.err_code);
+ ch_info_param.freq = __le32_to_cpu(arg.freq);
+ ch_info_param.cmd_flags = __le32_to_cpu(arg.cmd_flags);
+ ch_info_param.noise_floor = __le32_to_cpu(arg.noise_floor);
+ ch_info_param.rx_clear_count = __le32_to_cpu(arg.rx_clear_count);
+ ch_info_param.cycle_count = __le32_to_cpu(arg.cycle_count);
+ ch_info_param.mac_clk_mhz = __le32_to_cpu(arg.mac_clk_mhz);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "chan info err_code %d freq %d cmd_flags %d noise_floor %d rx_clear_count %d cycle_count %d\n",
+ ch_info_param.err_code, ch_info_param.freq, ch_info_param.cmd_flags,
+ ch_info_param.noise_floor, ch_info_param.rx_clear_count,
+ ch_info_param.cycle_count);
+
+ spin_lock_bh(&ar->data_lock);
+
+ switch (ar->scan.state) {
+ case ATH10K_SCAN_IDLE:
+ case ATH10K_SCAN_STARTING:
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "received chan info event without a scan request, ignoring\n");
+ goto exit;
+ case ATH10K_SCAN_RUNNING:
+ case ATH10K_SCAN_ABORTING:
+ break;
+ }
+
+ if (test_bit(ATH10K_FW_FEATURE_SINGLE_CHAN_INFO_PER_CHANNEL,
+ ar->running_fw->fw_file.fw_features))
+ ath10k_wmi_event_chan_info_unpaired(ar, &ch_info_param);
+ else
+ ath10k_wmi_event_chan_info_paired(ar, &ch_info_param);
+
+exit:
+ spin_unlock_bh(&ar->data_lock);
+}
+
+void ath10k_wmi_event_echo(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct wmi_echo_ev_arg arg = {};
+ int ret;
+
+ ret = ath10k_wmi_pull_echo_ev(ar, skb, &arg);
+ if (ret) {
+ ath10k_warn(ar, "failed to parse echo: %d\n", ret);
+ return;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi event echo value 0x%08x\n",
+ le32_to_cpu(arg.value));
+
+ if (le32_to_cpu(arg.value) == ATH10K_WMI_BARRIER_ECHO_ID)
+ complete(&ar->wmi.barrier);
+}
+
+int ath10k_wmi_event_debug_mesg(struct ath10k *ar, struct sk_buff *skb)
+{
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi event debug mesg len %d\n",
+ skb->len);
+
+ trace_ath10k_wmi_dbglog(ar, skb->data, skb->len);
+
+ return 0;
+}
+
+void ath10k_wmi_pull_pdev_stats_base(const struct wmi_pdev_stats_base *src,
+ struct ath10k_fw_stats_pdev *dst)
+{
+ dst->ch_noise_floor = __le32_to_cpu(src->chan_nf);
+ dst->tx_frame_count = __le32_to_cpu(src->tx_frame_count);
+ dst->rx_frame_count = __le32_to_cpu(src->rx_frame_count);
+ dst->rx_clear_count = __le32_to_cpu(src->rx_clear_count);
+ dst->cycle_count = __le32_to_cpu(src->cycle_count);
+ dst->phy_err_count = __le32_to_cpu(src->phy_err_count);
+ dst->chan_tx_power = __le32_to_cpu(src->chan_tx_pwr);
+}
+
+void ath10k_wmi_pull_pdev_stats_tx(const struct wmi_pdev_stats_tx *src,
+ struct ath10k_fw_stats_pdev *dst)
+{
+ dst->comp_queued = __le32_to_cpu(src->comp_queued);
+ dst->comp_delivered = __le32_to_cpu(src->comp_delivered);
+ dst->msdu_enqued = __le32_to_cpu(src->msdu_enqued);
+ dst->mpdu_enqued = __le32_to_cpu(src->mpdu_enqued);
+ dst->wmm_drop = __le32_to_cpu(src->wmm_drop);
+ dst->local_enqued = __le32_to_cpu(src->local_enqued);
+ dst->local_freed = __le32_to_cpu(src->local_freed);
+ dst->hw_queued = __le32_to_cpu(src->hw_queued);
+ dst->hw_reaped = __le32_to_cpu(src->hw_reaped);
+ dst->underrun = __le32_to_cpu(src->underrun);
+ dst->tx_abort = __le32_to_cpu(src->tx_abort);
+ dst->mpdus_requeued = __le32_to_cpu(src->mpdus_requeued);
+ dst->tx_ko = __le32_to_cpu(src->tx_ko);
+ dst->data_rc = __le32_to_cpu(src->data_rc);
+ dst->self_triggers = __le32_to_cpu(src->self_triggers);
+ dst->sw_retry_failure = __le32_to_cpu(src->sw_retry_failure);
+ dst->illgl_rate_phy_err = __le32_to_cpu(src->illgl_rate_phy_err);
+ dst->pdev_cont_xretry = __le32_to_cpu(src->pdev_cont_xretry);
+ dst->pdev_tx_timeout = __le32_to_cpu(src->pdev_tx_timeout);
+ dst->pdev_resets = __le32_to_cpu(src->pdev_resets);
+ dst->phy_underrun = __le32_to_cpu(src->phy_underrun);
+ dst->txop_ovf = __le32_to_cpu(src->txop_ovf);
+}
+
+static void
+ath10k_wmi_10_4_pull_pdev_stats_tx(const struct wmi_10_4_pdev_stats_tx *src,
+ struct ath10k_fw_stats_pdev *dst)
+{
+ dst->comp_queued = __le32_to_cpu(src->comp_queued);
+ dst->comp_delivered = __le32_to_cpu(src->comp_delivered);
+ dst->msdu_enqued = __le32_to_cpu(src->msdu_enqued);
+ dst->mpdu_enqued = __le32_to_cpu(src->mpdu_enqued);
+ dst->wmm_drop = __le32_to_cpu(src->wmm_drop);
+ dst->local_enqued = __le32_to_cpu(src->local_enqued);
+ dst->local_freed = __le32_to_cpu(src->local_freed);
+ dst->hw_queued = __le32_to_cpu(src->hw_queued);
+ dst->hw_reaped = __le32_to_cpu(src->hw_reaped);
+ dst->underrun = __le32_to_cpu(src->underrun);
+ dst->tx_abort = __le32_to_cpu(src->tx_abort);
+ dst->mpdus_requeued = __le32_to_cpu(src->mpdus_requeued);
+ dst->tx_ko = __le32_to_cpu(src->tx_ko);
+ dst->data_rc = __le32_to_cpu(src->data_rc);
+ dst->self_triggers = __le32_to_cpu(src->self_triggers);
+ dst->sw_retry_failure = __le32_to_cpu(src->sw_retry_failure);
+ dst->illgl_rate_phy_err = __le32_to_cpu(src->illgl_rate_phy_err);
+ dst->pdev_cont_xretry = __le32_to_cpu(src->pdev_cont_xretry);
+ dst->pdev_tx_timeout = __le32_to_cpu(src->pdev_tx_timeout);
+ dst->pdev_resets = __le32_to_cpu(src->pdev_resets);
+ dst->phy_underrun = __le32_to_cpu(src->phy_underrun);
+ dst->txop_ovf = __le32_to_cpu(src->txop_ovf);
+ dst->hw_paused = __le32_to_cpu(src->hw_paused);
+ dst->seq_posted = __le32_to_cpu(src->seq_posted);
+ dst->seq_failed_queueing =
+ __le32_to_cpu(src->seq_failed_queueing);
+ dst->seq_completed = __le32_to_cpu(src->seq_completed);
+ dst->seq_restarted = __le32_to_cpu(src->seq_restarted);
+ dst->mu_seq_posted = __le32_to_cpu(src->mu_seq_posted);
+ dst->mpdus_sw_flush = __le32_to_cpu(src->mpdus_sw_flush);
+ dst->mpdus_hw_filter = __le32_to_cpu(src->mpdus_hw_filter);
+ dst->mpdus_truncated = __le32_to_cpu(src->mpdus_truncated);
+ dst->mpdus_ack_failed = __le32_to_cpu(src->mpdus_ack_failed);
+ dst->mpdus_expired = __le32_to_cpu(src->mpdus_expired);
+}
+
+void ath10k_wmi_pull_pdev_stats_rx(const struct wmi_pdev_stats_rx *src,
+ struct ath10k_fw_stats_pdev *dst)
+{
+ dst->mid_ppdu_route_change = __le32_to_cpu(src->mid_ppdu_route_change);
+ dst->status_rcvd = __le32_to_cpu(src->status_rcvd);
+ dst->r0_frags = __le32_to_cpu(src->r0_frags);
+ dst->r1_frags = __le32_to_cpu(src->r1_frags);
+ dst->r2_frags = __le32_to_cpu(src->r2_frags);
+ dst->r3_frags = __le32_to_cpu(src->r3_frags);
+ dst->htt_msdus = __le32_to_cpu(src->htt_msdus);
+ dst->htt_mpdus = __le32_to_cpu(src->htt_mpdus);
+ dst->loc_msdus = __le32_to_cpu(src->loc_msdus);
+ dst->loc_mpdus = __le32_to_cpu(src->loc_mpdus);
+ dst->oversize_amsdu = __le32_to_cpu(src->oversize_amsdu);
+ dst->phy_errs = __le32_to_cpu(src->phy_errs);
+ dst->phy_err_drop = __le32_to_cpu(src->phy_err_drop);
+ dst->mpdu_errs = __le32_to_cpu(src->mpdu_errs);
+}
+
+void ath10k_wmi_pull_pdev_stats_extra(const struct wmi_pdev_stats_extra *src,
+ struct ath10k_fw_stats_pdev *dst)
+{
+ dst->ack_rx_bad = __le32_to_cpu(src->ack_rx_bad);
+ dst->rts_bad = __le32_to_cpu(src->rts_bad);
+ dst->rts_good = __le32_to_cpu(src->rts_good);
+ dst->fcs_bad = __le32_to_cpu(src->fcs_bad);
+ dst->no_beacons = __le32_to_cpu(src->no_beacons);
+ dst->mib_int_count = __le32_to_cpu(src->mib_int_count);
+}
+
+void ath10k_wmi_pull_peer_stats(const struct wmi_peer_stats *src,
+ struct ath10k_fw_stats_peer *dst)
+{
+ ether_addr_copy(dst->peer_macaddr, src->peer_macaddr.addr);
+ dst->peer_rssi = __le32_to_cpu(src->peer_rssi);
+ dst->peer_tx_rate = __le32_to_cpu(src->peer_tx_rate);
+}
+
+static void
+ath10k_wmi_10_4_pull_peer_stats(const struct wmi_10_4_peer_stats *src,
+ struct ath10k_fw_stats_peer *dst)
+{
+ ether_addr_copy(dst->peer_macaddr, src->peer_macaddr.addr);
+ dst->peer_rssi = __le32_to_cpu(src->peer_rssi);
+ dst->peer_tx_rate = __le32_to_cpu(src->peer_tx_rate);
+ dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate);
+}
+
+static void
+ath10k_wmi_10_4_pull_vdev_stats(const struct wmi_vdev_stats_extd *src,
+ struct ath10k_fw_stats_vdev_extd *dst)
+{
+ dst->vdev_id = __le32_to_cpu(src->vdev_id);
+ dst->ppdu_aggr_cnt = __le32_to_cpu(src->ppdu_aggr_cnt);
+ dst->ppdu_noack = __le32_to_cpu(src->ppdu_noack);
+ dst->mpdu_queued = __le32_to_cpu(src->mpdu_queued);
+ dst->ppdu_nonaggr_cnt = __le32_to_cpu(src->ppdu_nonaggr_cnt);
+ dst->mpdu_sw_requeued = __le32_to_cpu(src->mpdu_sw_requeued);
+ dst->mpdu_suc_retry = __le32_to_cpu(src->mpdu_suc_retry);
+ dst->mpdu_suc_multitry = __le32_to_cpu(src->mpdu_suc_multitry);
+ dst->mpdu_fail_retry = __le32_to_cpu(src->mpdu_fail_retry);
+ dst->tx_ftm_suc = __le32_to_cpu(src->tx_ftm_suc);
+ dst->tx_ftm_suc_retry = __le32_to_cpu(src->tx_ftm_suc_retry);
+ dst->tx_ftm_fail = __le32_to_cpu(src->tx_ftm_fail);
+ dst->rx_ftmr_cnt = __le32_to_cpu(src->rx_ftmr_cnt);
+ dst->rx_ftmr_dup_cnt = __le32_to_cpu(src->rx_ftmr_dup_cnt);
+ dst->rx_iftmr_cnt = __le32_to_cpu(src->rx_iftmr_cnt);
+ dst->rx_iftmr_dup_cnt = __le32_to_cpu(src->rx_iftmr_dup_cnt);
+}
+
+static int ath10k_wmi_main_op_pull_fw_stats(struct ath10k *ar,
+ struct sk_buff *skb,
+ struct ath10k_fw_stats *stats)
+{
+ const struct wmi_stats_event *ev = (void *)skb->data;
+ u32 num_pdev_stats, num_peer_stats;
+ int i;
+
+ if (!skb_pull(skb, sizeof(*ev)))
+ return -EPROTO;
+
+ num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
+ num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
+
+ for (i = 0; i < num_pdev_stats; i++) {
+ const struct wmi_pdev_stats *src;
+ struct ath10k_fw_stats_pdev *dst;
+
+ src = (void *)skb->data;
+ if (!skb_pull(skb, sizeof(*src)))
+ return -EPROTO;
+
+ dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+ if (!dst)
+ continue;
+
+ ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
+ ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst);
+ ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
+
+ list_add_tail(&dst->list, &stats->pdevs);
+ }
+
+ /* fw doesn't implement vdev stats */
+
+ for (i = 0; i < num_peer_stats; i++) {
+ const struct wmi_peer_stats *src;
+ struct ath10k_fw_stats_peer *dst;
+
+ src = (void *)skb->data;
+ if (!skb_pull(skb, sizeof(*src)))
+ return -EPROTO;
+
+ dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+ if (!dst)
+ continue;
+
+ ath10k_wmi_pull_peer_stats(src, dst);
+ list_add_tail(&dst->list, &stats->peers);
+ }
+
+ return 0;
+}
+
+static int ath10k_wmi_10x_op_pull_fw_stats(struct ath10k *ar,
+ struct sk_buff *skb,
+ struct ath10k_fw_stats *stats)
+{
+ const struct wmi_stats_event *ev = (void *)skb->data;
+ u32 num_pdev_stats, num_peer_stats;
+ int i;
+
+ if (!skb_pull(skb, sizeof(*ev)))
+ return -EPROTO;
+
+ num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
+ num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
+
+ for (i = 0; i < num_pdev_stats; i++) {
+ const struct wmi_10x_pdev_stats *src;
+ struct ath10k_fw_stats_pdev *dst;
+
+ src = (void *)skb->data;
+ if (!skb_pull(skb, sizeof(*src)))
+ return -EPROTO;
+
+ dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+ if (!dst)
+ continue;
+
+ ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
+ ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst);
+ ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
+ ath10k_wmi_pull_pdev_stats_extra(&src->extra, dst);
+
+ list_add_tail(&dst->list, &stats->pdevs);
+ }
+
+ /* fw doesn't implement vdev stats */
+
+ for (i = 0; i < num_peer_stats; i++) {
+ const struct wmi_10x_peer_stats *src;
+ struct ath10k_fw_stats_peer *dst;
+
+ src = (void *)skb->data;
+ if (!skb_pull(skb, sizeof(*src)))
+ return -EPROTO;
+
+ dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+ if (!dst)
+ continue;
+
+ ath10k_wmi_pull_peer_stats(&src->old, dst);
+
+ dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate);
+
+ list_add_tail(&dst->list, &stats->peers);
+ }
+
+ return 0;
+}
+
+static int ath10k_wmi_10_2_op_pull_fw_stats(struct ath10k *ar,
+ struct sk_buff *skb,
+ struct ath10k_fw_stats *stats)
+{
+ const struct wmi_10_2_stats_event *ev = (void *)skb->data;
+ u32 num_pdev_stats;
+ u32 num_pdev_ext_stats;
+ u32 num_peer_stats;
+ int i;
+
+ if (!skb_pull(skb, sizeof(*ev)))
+ return -EPROTO;
+
+ num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
+ num_pdev_ext_stats = __le32_to_cpu(ev->num_pdev_ext_stats);
+ num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
+
+ for (i = 0; i < num_pdev_stats; i++) {
+ const struct wmi_10_2_pdev_stats *src;
+ struct ath10k_fw_stats_pdev *dst;
+
+ src = (void *)skb->data;
+ if (!skb_pull(skb, sizeof(*src)))
+ return -EPROTO;
+
+ dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+ if (!dst)
+ continue;
+
+ ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
+ ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst);
+ ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
+ ath10k_wmi_pull_pdev_stats_extra(&src->extra, dst);
+ /* FIXME: expose 10.2 specific values */
+
+ list_add_tail(&dst->list, &stats->pdevs);
+ }
+
+ for (i = 0; i < num_pdev_ext_stats; i++) {
+ const struct wmi_10_2_pdev_ext_stats *src;
+
+ src = (void *)skb->data;
+ if (!skb_pull(skb, sizeof(*src)))
+ return -EPROTO;
+
+ /* FIXME: expose values to userspace
+ *
+ * Note: Even though this loop seems to do nothing, it is
+ * required to parse the following sub-structures properly.
+ */
+ }
+
+ /* fw doesn't implement vdev stats */
+
+ for (i = 0; i < num_peer_stats; i++) {
+ const struct wmi_10_2_peer_stats *src;
+ struct ath10k_fw_stats_peer *dst;
+
+ src = (void *)skb->data;
+ if (!skb_pull(skb, sizeof(*src)))
+ return -EPROTO;
+
+ dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+ if (!dst)
+ continue;
+
+ ath10k_wmi_pull_peer_stats(&src->old, dst);
+
+ dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate);
+ /* FIXME: expose 10.2 specific values */
+
+ list_add_tail(&dst->list, &stats->peers);
+ }
+
+ return 0;
+}
+
+static int ath10k_wmi_10_2_4_op_pull_fw_stats(struct ath10k *ar,
+ struct sk_buff *skb,
+ struct ath10k_fw_stats *stats)
+{
+ const struct wmi_10_2_stats_event *ev = (void *)skb->data;
+ u32 num_pdev_stats;
+ u32 num_pdev_ext_stats;
+ u32 num_peer_stats;
+ int i;
+
+ if (!skb_pull(skb, sizeof(*ev)))
+ return -EPROTO;
+
+ num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
+ num_pdev_ext_stats = __le32_to_cpu(ev->num_pdev_ext_stats);
+ num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
+
+ for (i = 0; i < num_pdev_stats; i++) {
+ const struct wmi_10_2_pdev_stats *src;
+ struct ath10k_fw_stats_pdev *dst;
+
+ src = (void *)skb->data;
+ if (!skb_pull(skb, sizeof(*src)))
+ return -EPROTO;
+
+ dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+ if (!dst)
+ continue;
+
+ ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
+ ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst);
+ ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
+ ath10k_wmi_pull_pdev_stats_extra(&src->extra, dst);
+ /* FIXME: expose 10.2 specific values */
+
+ list_add_tail(&dst->list, &stats->pdevs);
+ }
+
+ for (i = 0; i < num_pdev_ext_stats; i++) {
+ const struct wmi_10_2_pdev_ext_stats *src;
+
+ src = (void *)skb->data;
+ if (!skb_pull(skb, sizeof(*src)))
+ return -EPROTO;
+
+ /* FIXME: expose values to userspace
+ *
+ * Note: Even though this loop seems to do nothing, it is
+ * required to parse the following sub-structures properly.
+ */
+ }
+
+ /* fw doesn't implement vdev stats */
+
+ for (i = 0; i < num_peer_stats; i++) {
+ const struct wmi_10_2_4_ext_peer_stats *src;
+ struct ath10k_fw_stats_peer *dst;
+ int stats_len;
+
+ if (test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map))
+ stats_len = sizeof(struct wmi_10_2_4_ext_peer_stats);
+ else
+ stats_len = sizeof(struct wmi_10_2_4_peer_stats);
+
+ src = (void *)skb->data;
+ if (!skb_pull(skb, stats_len))
+ return -EPROTO;
+
+ dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+ if (!dst)
+ continue;
+
+ ath10k_wmi_pull_peer_stats(&src->common.old, dst);
+
+ dst->peer_rx_rate = __le32_to_cpu(src->common.peer_rx_rate);
+
+ if (ath10k_peer_stats_enabled(ar))
+ dst->rx_duration = __le32_to_cpu(src->rx_duration);
+ /* FIXME: expose 10.2 specific values */
+
+ list_add_tail(&dst->list, &stats->peers);
+ }
+
+ return 0;
+}
+
+static int ath10k_wmi_10_4_op_pull_fw_stats(struct ath10k *ar,
+ struct sk_buff *skb,
+ struct ath10k_fw_stats *stats)
+{
+ const struct wmi_10_2_stats_event *ev = (void *)skb->data;
+ u32 num_pdev_stats;
+ u32 num_pdev_ext_stats;
+ u32 num_vdev_stats;
+ u32 num_peer_stats;
+ u32 num_bcnflt_stats;
+ u32 stats_id;
+ int i;
+
+ if (!skb_pull(skb, sizeof(*ev)))
+ return -EPROTO;
+
+ num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
+ num_pdev_ext_stats = __le32_to_cpu(ev->num_pdev_ext_stats);
+ num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats);
+ num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
+ num_bcnflt_stats = __le32_to_cpu(ev->num_bcnflt_stats);
+ stats_id = __le32_to_cpu(ev->stats_id);
+
+ for (i = 0; i < num_pdev_stats; i++) {
+ const struct wmi_10_4_pdev_stats *src;
+ struct ath10k_fw_stats_pdev *dst;
+
+ src = (void *)skb->data;
+ if (!skb_pull(skb, sizeof(*src)))
+ return -EPROTO;
+
+ dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+ if (!dst)
+ continue;
+
+ ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
+ ath10k_wmi_10_4_pull_pdev_stats_tx(&src->tx, dst);
+ ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
+ dst->rx_ovfl_errs = __le32_to_cpu(src->rx_ovfl_errs);
+ ath10k_wmi_pull_pdev_stats_extra(&src->extra, dst);
+
+ list_add_tail(&dst->list, &stats->pdevs);
+ }
+
+ for (i = 0; i < num_pdev_ext_stats; i++) {
+ const struct wmi_10_2_pdev_ext_stats *src;
+
+ src = (void *)skb->data;
+ if (!skb_pull(skb, sizeof(*src)))
+ return -EPROTO;
+
+ /* FIXME: expose values to userspace
+ *
+ * Note: Even though this loop seems to do nothing, it is
+ * required to parse the following sub-structures properly.
+ */
+ }
+
+ for (i = 0; i < num_vdev_stats; i++) {
+ const struct wmi_vdev_stats *src;
+
+ /* Ignore vdev stats here as they carry only the vdev id. Actual
+ * vdev stats will be retrieved from the vdev extended stats.
+ */
+ src = (void *)skb->data;
+ if (!skb_pull(skb, sizeof(*src)))
+ return -EPROTO;
+ }
+
+ for (i = 0; i < num_peer_stats; i++) {
+ const struct wmi_10_4_peer_stats *src;
+ struct ath10k_fw_stats_peer *dst;
+
+ src = (void *)skb->data;
+ if (!skb_pull(skb, sizeof(*src)))
+ return -EPROTO;
+
+ dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+ if (!dst)
+ continue;
+
+ ath10k_wmi_10_4_pull_peer_stats(src, dst);
+ list_add_tail(&dst->list, &stats->peers);
+ }
+
+ for (i = 0; i < num_bcnflt_stats; i++) {
+ const struct wmi_10_4_bss_bcn_filter_stats *src;
+
+ src = (void *)skb->data;
+ if (!skb_pull(skb, sizeof(*src)))
+ return -EPROTO;
+
+ /* FIXME: expose values to userspace
+ *
+ * Note: Even though this loop seems to do nothing, it is
+ * required to parse the following sub-structures properly.
+ */
+ }
+
+ if (stats_id & WMI_10_4_STAT_PEER_EXTD) {
+ stats->extended = true;
+
+ for (i = 0; i < num_peer_stats; i++) {
+ const struct wmi_10_4_peer_extd_stats *src;
+ struct ath10k_fw_extd_stats_peer *dst;
+
+ src = (void *)skb->data;
+ if (!skb_pull(skb, sizeof(*src)))
+ return -EPROTO;
+
+ dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+ if (!dst)
+ continue;
+
+ ether_addr_copy(dst->peer_macaddr,
+ src->peer_macaddr.addr);
+ dst->rx_duration = __le32_to_cpu(src->rx_duration);
+ list_add_tail(&dst->list, &stats->peers_extd);
+ }
+ }
+
+ if (stats_id & WMI_10_4_STAT_VDEV_EXTD) {
+ for (i = 0; i < num_vdev_stats; i++) {
+ const struct wmi_vdev_stats_extd *src;
+ struct ath10k_fw_stats_vdev_extd *dst;
+
+ src = (void *)skb->data;
+ if (!skb_pull(skb, sizeof(*src)))
+ return -EPROTO;
+
+ dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+ if (!dst)
+ continue;
+ ath10k_wmi_10_4_pull_vdev_stats(src, dst);
+ list_add_tail(&dst->list, &stats->vdevs);
+ }
+ }
+
+ return 0;
+}
+
+void ath10k_wmi_event_update_stats(struct ath10k *ar, struct sk_buff *skb)
+{
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_UPDATE_STATS_EVENTID\n");
+ ath10k_debug_fw_stats_process(ar, skb);
+}
+
+static int
+ath10k_wmi_op_pull_vdev_start_ev(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_vdev_start_ev_arg *arg)
+{
+ struct wmi_vdev_start_response_event *ev = (void *)skb->data;
+
+ if (skb->len < sizeof(*ev))
+ return -EPROTO;
+
+ skb_pull(skb, sizeof(*ev));
+ arg->vdev_id = ev->vdev_id;
+ arg->req_id = ev->req_id;
+ arg->resp_type = ev->resp_type;
+ arg->status = ev->status;
+
+ return 0;
+}
+
+void ath10k_wmi_event_vdev_start_resp(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct wmi_vdev_start_ev_arg arg = {};
+ int ret;
+ u32 status;
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_START_RESP_EVENTID\n");
+
+ ar->last_wmi_vdev_start_status = 0;
+
+ ret = ath10k_wmi_pull_vdev_start(ar, skb, &arg);
+ if (ret) {
+ ath10k_warn(ar, "failed to parse vdev start event: %d\n", ret);
+ ar->last_wmi_vdev_start_status = ret;
+ goto out;
+ }
+
+ status = __le32_to_cpu(arg.status);
+ if (WARN_ON_ONCE(status)) {
+ ath10k_warn(ar, "vdev-start-response reports status error: %d (%s)\n",
+ status, (status == WMI_VDEV_START_CHAN_INVALID) ?
+ "chan-invalid" : "unknown");
+ /* Setup is done one way or another though, so we should still
+ * signal the completion; don't return here.
+ */
+ ar->last_wmi_vdev_start_status = -EINVAL;
+ }
+
+out:
+ complete(&ar->vdev_setup_done);
+}
+
+void ath10k_wmi_event_vdev_stopped(struct ath10k *ar, struct sk_buff *skb)
+{
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_STOPPED_EVENTID\n");
+ complete(&ar->vdev_setup_done);
+}
+
+static int
+ath10k_wmi_op_pull_peer_kick_ev(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_peer_kick_ev_arg *arg)
+{
+ struct wmi_peer_sta_kickout_event *ev = (void *)skb->data;
+
+ if (skb->len < sizeof(*ev))
+ return -EPROTO;
+
+ skb_pull(skb, sizeof(*ev));
+ arg->mac_addr = ev->peer_macaddr.addr;
+
+ return 0;
+}
+
+void ath10k_wmi_event_peer_sta_kickout(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct wmi_peer_kick_ev_arg arg = {};
+ struct ieee80211_sta *sta;
+ int ret;
+
+ ret = ath10k_wmi_pull_peer_kick(ar, skb, &arg);
+ if (ret) {
+ ath10k_warn(ar, "failed to parse peer kickout event: %d\n",
+ ret);
+ return;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_STA, "wmi event peer sta kickout %pM\n",
+ arg.mac_addr);
+
+ rcu_read_lock();
+
+ sta = ieee80211_find_sta_by_ifaddr(ar->hw, arg.mac_addr, NULL);
+ if (!sta) {
+ ath10k_warn(ar, "Spurious quick kickout for STA %pM\n",
+ arg.mac_addr);
+ goto exit;
+ }
+
+ ieee80211_report_low_ack(sta, 10);
+
+exit:
+ rcu_read_unlock();
+}
+
+/*
+ * FIXME
+ *
+ * We don't report to mac80211 sleep state of connected
+ * stations. Due to this mac80211 can't fill in TIM IE
+ * correctly.
+ *
+ * I know of no way of getting the nullfunc frames that signal
+ * a sleep transition from connected stations - these do not
+ * seem to be sent from the target to the host. There also
+ * doesn't seem to be a dedicated event for that. So the
+ * only way left to do this would be to read tim_bitmap
+ * during SWBA.
+ *
+ * We could probably try using tim_bitmap from SWBA to tell
+ * mac80211 which stations are asleep and which are not. The
+ * problem here is calling mac80211 functions so many times
+ * could take too long and make us miss the time to submit
+ * the beacon to the target.
+ *
+ * So as a workaround we try to extend the TIM IE if there is
+ * buffered unicast traffic for stations with AID > 7 and fill it
+ * in ourselves.
+ */
+static void ath10k_wmi_update_tim(struct ath10k *ar,
+ struct ath10k_vif *arvif,
+ struct sk_buff *bcn,
+ const struct wmi_tim_info_arg *tim_info)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)bcn->data;
+ struct ieee80211_tim_ie *tim;
+ u8 *ies, *ie;
+ u8 ie_len, pvm_len;
+ __le32 t;
+ u32 v, tim_len;
+
+ /* When FW reports 0 in tim_len, ensure at least the first byte
+ * of tim_bitmap is considered for the pvm calculation.
+ */
+ tim_len = tim_info->tim_len ? __le32_to_cpu(tim_info->tim_len) : 1;
+
+ /* If the next SWBA has tim_changed cleared, the tim_bitmap is
+ * garbage, so we must copy the bitmap upon change and reuse it later.
+ */
+ if (__le32_to_cpu(tim_info->tim_changed)) {
+ int i;
+
+ if (sizeof(arvif->u.ap.tim_bitmap) < tim_len) {
+ ath10k_warn(ar, "SWBA TIM field is too big (%u), truncated it to %zu",
+ tim_len, sizeof(arvif->u.ap.tim_bitmap));
+ tim_len = sizeof(arvif->u.ap.tim_bitmap);
+ }
+
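+ /* Unpack the little-endian 32-bit words of tim_bitmap into a
+ * plain byte array, one octet per iteration.
+ */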
+ for (i = 0; i < tim_len; i++) {
+ t = tim_info->tim_bitmap[i / 4];
+ v = __le32_to_cpu(t);
+ arvif->u.ap.tim_bitmap[i] = (v >> ((i % 4) * 8)) & 0xFF;
+ }
+
+ /* FW reports either length 0 or a length based on the maximum
+ * number of supported stations, so we calculate the actual TIM
+ * length on our own.
+ */
+ arvif->u.ap.tim_len = 0;
+ for (i = 0; i < tim_len; i++)
+ if (arvif->u.ap.tim_bitmap[i])
+ arvif->u.ap.tim_len = i;
+
+ arvif->u.ap.tim_len++;
+ }
+
+ ies = bcn->data;
+ ies += ieee80211_hdrlen(hdr->frame_control);
+ ies += 12; /* fixed parameters */
+
+ ie = (u8 *)cfg80211_find_ie(WLAN_EID_TIM, ies,
+ (u8 *)skb_tail_pointer(bcn) - ies);
+ if (!ie) {
+ if (arvif->vdev_type != WMI_VDEV_TYPE_IBSS)
+ ath10k_warn(ar, "no tim ie found;\n");
+ return;
+ }
+
+ tim = (void *)ie + 2;
+ ie_len = ie[1];
+ pvm_len = ie_len - 3; /* exclude dtim count, dtim period, bmap ctl */
+
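+ /* If the beacon's partial virtual bitmap is too small, grow the
+ * skb and shift the trailing IEs to make room for our bitmap.
+ */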
+ if (pvm_len < arvif->u.ap.tim_len) {
+ int expand_size = tim_len - pvm_len;
+ int move_size = skb_tail_pointer(bcn) - (ie + 2 + ie_len);
+ void *next_ie = ie + 2 + ie_len;
+
+ if (skb_put(bcn, expand_size)) {
+ memmove(next_ie + expand_size, next_ie, move_size);
+
+ ie[1] += expand_size;
+ ie_len += expand_size;
+ pvm_len += expand_size;
+ } else {
+ ath10k_warn(ar, "tim expansion failed\n");
+ }
+ }
+
+ if (pvm_len > tim_len) {
+ ath10k_warn(ar, "tim pvm length is too great (%d)\n", pvm_len);
+ return;
+ }
+
+ tim->bitmap_ctrl = !!__le32_to_cpu(tim_info->tim_mcast);
+ memcpy(tim->virtual_map, arvif->u.ap.tim_bitmap, pvm_len);
+
+ if (tim->dtim_count == 0) {
+ ATH10K_SKB_CB(bcn)->flags |= ATH10K_SKB_F_DTIM_ZERO;
+
+ if (__le32_to_cpu(tim_info->tim_mcast) == 1)
+ ATH10K_SKB_CB(bcn)->flags |= ATH10K_SKB_F_DELIVER_CAB;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_MGMT, "dtim %d/%d mcast %d pvmlen %d\n",
+ tim->dtim_count, tim->dtim_period,
+ tim->bitmap_ctrl, pvm_len);
+}
+
+static void ath10k_wmi_update_noa(struct ath10k *ar, struct ath10k_vif *arvif,
+ struct sk_buff *bcn,
+ const struct wmi_p2p_noa_info *noa)
+{
+ if (!arvif->vif->p2p)
+ return;
+
+ ath10k_dbg(ar, ATH10K_DBG_MGMT, "noa changed: %d\n", noa->changed);
+
+ if (noa->changed & WMI_P2P_NOA_CHANGED_BIT)
+ ath10k_p2p_noa_update(arvif, noa);
+
+ if (arvif->u.ap.noa_data)
+ if (!pskb_expand_head(bcn, 0, arvif->u.ap.noa_len, GFP_ATOMIC))
+ skb_put_data(bcn, arvif->u.ap.noa_data,
+ arvif->u.ap.noa_len);
+}
+
+static int ath10k_wmi_op_pull_swba_ev(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_swba_ev_arg *arg)
+{
+ struct wmi_host_swba_event *ev = (void *)skb->data;
+ u32 map;
+ size_t i;
+
+ if (skb->len < sizeof(*ev))
+ return -EPROTO;
+
+ skb_pull(skb, sizeof(*ev));
+ arg->vdev_map = ev->vdev_map;
+
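+ /* Walk the vdev bitmap; i indexes tim_info entries and only
+ * advances for vdevs that are present in the map.
+ */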
+ for (i = 0, map = __le32_to_cpu(ev->vdev_map); map; map >>= 1) {
+ if (!(map & BIT(0)))
+ continue;
+
+ /* If this happens, the firmware has changed and ath10k should
+ * update the max size of the tim_info array.
+ */
+ if (WARN_ON_ONCE(i == ARRAY_SIZE(arg->tim_info)))
+ break;
+
+ if (__le32_to_cpu(ev->bcn_info[i].tim_info.tim_len) >
+ sizeof(ev->bcn_info[i].tim_info.tim_bitmap)) {
+ ath10k_warn(ar, "refusing to parse invalid swba structure\n");
+ return -EPROTO;
+ }
+
+ arg->tim_info[i].tim_len = ev->bcn_info[i].tim_info.tim_len;
+ arg->tim_info[i].tim_mcast = ev->bcn_info[i].tim_info.tim_mcast;
+ arg->tim_info[i].tim_bitmap =
+ ev->bcn_info[i].tim_info.tim_bitmap;
+ arg->tim_info[i].tim_changed =
+ ev->bcn_info[i].tim_info.tim_changed;
+ arg->tim_info[i].tim_num_ps_pending =
+ ev->bcn_info[i].tim_info.tim_num_ps_pending;
+
+ arg->noa_info[i] = &ev->bcn_info[i].p2p_noa_info;
+ i++;
+ }
+
+ return 0;
+}
+
+static int ath10k_wmi_10_2_4_op_pull_swba_ev(struct ath10k *ar,
+ struct sk_buff *skb,
+ struct wmi_swba_ev_arg *arg)
+{
+ struct wmi_10_2_4_host_swba_event *ev = (void *)skb->data;
+ u32 map;
+ size_t i;
+
+ if (skb->len < sizeof(*ev))
+ return -EPROTO;
+
+ skb_pull(skb, sizeof(*ev));
+ arg->vdev_map = ev->vdev_map;
+
+ for (i = 0, map = __le32_to_cpu(ev->vdev_map); map; map >>= 1) {
+ if (!(map & BIT(0)))
+ continue;
+
+ /* If this happens, the firmware has changed and ath10k should
+ * update the max size of the tim_info array.
+ */
+ if (WARN_ON_ONCE(i == ARRAY_SIZE(arg->tim_info)))
+ break;
+
+ if (__le32_to_cpu(ev->bcn_info[i].tim_info.tim_len) >
+ sizeof(ev->bcn_info[i].tim_info.tim_bitmap)) {
+ ath10k_warn(ar, "refusing to parse invalid swba structure\n");
+ return -EPROTO;
+ }
+
+ arg->tim_info[i].tim_len = ev->bcn_info[i].tim_info.tim_len;
+ arg->tim_info[i].tim_mcast = ev->bcn_info[i].tim_info.tim_mcast;
+ arg->tim_info[i].tim_bitmap =
+ ev->bcn_info[i].tim_info.tim_bitmap;
+ arg->tim_info[i].tim_changed =
+ ev->bcn_info[i].tim_info.tim_changed;
+ arg->tim_info[i].tim_num_ps_pending =
+ ev->bcn_info[i].tim_info.tim_num_ps_pending;
+ i++;
+ }
+
+ return 0;
+}
+
+static int ath10k_wmi_10_4_op_pull_swba_ev(struct ath10k *ar,
+ struct sk_buff *skb,
+ struct wmi_swba_ev_arg *arg)
+{
+ struct wmi_10_4_host_swba_event *ev = (void *)skb->data;
+ u32 map, tim_len;
+ size_t i;
+
+ if (skb->len < sizeof(*ev))
+ return -EPROTO;
+
+ skb_pull(skb, sizeof(*ev));
+ arg->vdev_map = ev->vdev_map;
+
+ for (i = 0, map = __le32_to_cpu(ev->vdev_map); map; map >>= 1) {
+ if (!(map & BIT(0)))
+ continue;
+
+ /* If this happens, the firmware has changed and ath10k should
+ * update the max size of the tim_info array.
+ */
+ if (WARN_ON_ONCE(i == ARRAY_SIZE(arg->tim_info)))
+ break;
+
+ if (__le32_to_cpu(ev->bcn_info[i].tim_info.tim_len) >
+ sizeof(ev->bcn_info[i].tim_info.tim_bitmap)) {
+ ath10k_warn(ar, "refusing to parse invalid swba structure\n");
+ return -EPROTO;
+ }
+
+ tim_len = __le32_to_cpu(ev->bcn_info[i].tim_info.tim_len);
+ if (tim_len) {
+ /* Exclude 4 byte guard length */
+ tim_len -= 4;
+ arg->tim_info[i].tim_len = __cpu_to_le32(tim_len);
+ } else {
+ arg->tim_info[i].tim_len = 0;
+ }
+
+ arg->tim_info[i].tim_mcast = ev->bcn_info[i].tim_info.tim_mcast;
+ arg->tim_info[i].tim_bitmap =
+ ev->bcn_info[i].tim_info.tim_bitmap;
+ arg->tim_info[i].tim_changed =
+ ev->bcn_info[i].tim_info.tim_changed;
+ arg->tim_info[i].tim_num_ps_pending =
+ ev->bcn_info[i].tim_info.tim_num_ps_pending;
+
+ /* 10.4 firmware doesn't have P2P support, so the notice of
+ * absence info can be ignored for now.
+ */
+
+ i++;
+ }
+
+ return 0;
+}
+
+static enum wmi_txbf_conf ath10k_wmi_10_4_txbf_conf_scheme(struct ath10k *ar)
+{
+ return WMI_TXBF_CONF_BEFORE_ASSOC;
+}
+
+void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct wmi_swba_ev_arg arg = {};
+ u32 map;
+ int i = -1;
+ const struct wmi_tim_info_arg *tim_info;
+ const struct wmi_p2p_noa_info *noa_info;
+ struct ath10k_vif *arvif;
+ struct sk_buff *bcn;
+ dma_addr_t paddr;
+ int ret, vdev_id = 0;
+
+ ret = ath10k_wmi_pull_swba(ar, skb, &arg);
+ if (ret) {
+ ath10k_warn(ar, "failed to parse swba event: %d\n", ret);
+ return;
+ }
+
+ map = __le32_to_cpu(arg.vdev_map);
+
+ ath10k_dbg(ar, ATH10K_DBG_MGMT, "mgmt swba vdev_map 0x%x\n",
+ map);
+
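+ /* vdev_id tracks the bit position in the map while i counts
+ * only the set bits, i.e. the beacon info entries.
+ */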
+ for (; map; map >>= 1, vdev_id++) {
+ if (!(map & 0x1))
+ continue;
+
+ i++;
+
+ if (i >= WMI_MAX_AP_VDEV) {
+ ath10k_warn(ar, "swba has corrupted vdev map\n");
+ break;
+ }
+
+ tim_info = &arg.tim_info[i];
+ noa_info = arg.noa_info[i];
+
+ ath10k_dbg(ar, ATH10K_DBG_MGMT,
+ "mgmt event bcn_info %d tim_len %d mcast %d changed %d num_ps_pending %d bitmap 0x%08x%08x%08x%08x\n",
+ i,
+ __le32_to_cpu(tim_info->tim_len),
+ __le32_to_cpu(tim_info->tim_mcast),
+ __le32_to_cpu(tim_info->tim_changed),
+ __le32_to_cpu(tim_info->tim_num_ps_pending),
+ __le32_to_cpu(tim_info->tim_bitmap[3]),
+ __le32_to_cpu(tim_info->tim_bitmap[2]),
+ __le32_to_cpu(tim_info->tim_bitmap[1]),
+ __le32_to_cpu(tim_info->tim_bitmap[0]));
+
+ /* TODO: Only the first 4 words of tim_bitmap are dumped.
+ * Extend the debug code to dump the full tim_bitmap.
+ */
+
+ arvif = ath10k_get_arvif(ar, vdev_id);
+ if (!arvif) {
+ ath10k_warn(ar, "no vif for vdev_id %d found\n",
+ vdev_id);
+ continue;
+ }
+
+ /* mac80211 would have already asked us to stop beaconing and
+ * bring the vdev down, so continue in that case
+ */
+ if (!arvif->is_up)
+ continue;
+
+ /* There are no completions for beacons so wait for next SWBA
+ * before telling mac80211 to decrement CSA counter
+ *
+ * Once CSA counter is completed stop sending beacons until
+ * actual channel switch is done
+ */
+ if (arvif->vif->bss_conf.csa_active &&
+ ieee80211_beacon_cntdwn_is_complete(arvif->vif, 0)) {
+ ieee80211_csa_finish(arvif->vif, 0);
+ continue;
+ }
+
+ bcn = ieee80211_beacon_get(ar->hw, arvif->vif, 0);
+ if (!bcn) {
+ ath10k_warn(ar, "could not get mac80211 beacon\n");
+ continue;
+ }
+
+ ath10k_tx_h_seq_no(arvif->vif, bcn);
+ ath10k_wmi_update_tim(ar, arvif, bcn, tim_info);
+ ath10k_wmi_update_noa(ar, arvif, bcn, noa_info);
+
+ spin_lock_bh(&ar->data_lock);
+
+ if (arvif->beacon) {
+ switch (arvif->beacon_state) {
+ case ATH10K_BEACON_SENT:
+ break;
+ case ATH10K_BEACON_SCHEDULED:
+ ath10k_warn(ar, "SWBA overrun on vdev %d, skipped old beacon\n",
+ arvif->vdev_id);
+ break;
+ case ATH10K_BEACON_SENDING:
+ ath10k_warn(ar, "SWBA overrun on vdev %d, skipped new beacon\n",
+ arvif->vdev_id);
+ dev_kfree_skb(bcn);
+ goto skip;
+ }
+
+ ath10k_mac_vif_beacon_free(arvif);
+ }
+
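+ /* Without a preallocated beacon buffer the beacon is DMA-mapped
+ * directly; otherwise it is copied into the vdev's fixed beacon
+ * buffer, which already has a DMA address.
+ */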
+ if (!arvif->beacon_buf) {
+ paddr = dma_map_single(arvif->ar->dev, bcn->data,
+ bcn->len, DMA_TO_DEVICE);
+ ret = dma_mapping_error(arvif->ar->dev, paddr);
+ if (ret) {
+ ath10k_warn(ar, "failed to map beacon: %d\n",
+ ret);
+ dev_kfree_skb_any(bcn);
+ goto skip;
+ }
+
+ ATH10K_SKB_CB(bcn)->paddr = paddr;
+ } else {
+ if (bcn->len > IEEE80211_MAX_FRAME_LEN) {
+ ath10k_warn(ar, "trimming beacon %d -> %d bytes!\n",
+ bcn->len, IEEE80211_MAX_FRAME_LEN);
+ skb_trim(bcn, IEEE80211_MAX_FRAME_LEN);
+ }
+ memcpy(arvif->beacon_buf, bcn->data, bcn->len);
+ ATH10K_SKB_CB(bcn)->paddr = arvif->beacon_paddr;
+ }
+
+ arvif->beacon = bcn;
+ arvif->beacon_state = ATH10K_BEACON_SCHEDULED;
+
+ trace_ath10k_tx_hdr(ar, bcn->data, bcn->len);
+ trace_ath10k_tx_payload(ar, bcn->data, bcn->len);
+
+skip:
+ spin_unlock_bh(&ar->data_lock);
+ }
+
+ ath10k_wmi_tx_beacons_nowait(ar);
+}
+
+void ath10k_wmi_event_tbttoffset_update(struct ath10k *ar, struct sk_buff *skb)
+{
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TBTTOFFSET_UPDATE_EVENTID\n");
+}
+
+static void ath10k_radar_detected(struct ath10k *ar)
+{
+ ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs radar detected\n");
+ ATH10K_DFS_STAT_INC(ar, radar_detected);
+
+ /* Control radar events reporting in debugfs file
+ * dfs_block_radar_events
+ */
+ if (ar->dfs_block_radar_events)
+ ath10k_info(ar, "DFS Radar detected, but ignored as requested\n");
+ else
+ ieee80211_radar_detected(ar->hw, NULL);
+}
+
+static void ath10k_radar_confirmation_work(struct work_struct *work)
+{
+ struct ath10k *ar = container_of(work, struct ath10k,
+ radar_confirmation_work);
+ struct ath10k_radar_found_info radar_info;
+ int ret, time_left;
+
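+ /* Snapshot the last radar report under the data lock, send it to
+ * the firmware and wait for the confirmation event; a timeout is
+ * treated as a genuine radar.
+ */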
+ reinit_completion(&ar->wmi.radar_confirm);
+
+ spin_lock_bh(&ar->data_lock);
+ memcpy(&radar_info, &ar->last_radar_info, sizeof(radar_info));
+ spin_unlock_bh(&ar->data_lock);
+
+ ret = ath10k_wmi_report_radar_found(ar, &radar_info);
+ if (ret) {
+ ath10k_warn(ar, "failed to send radar found %d\n", ret);
+ goto wait_complete;
+ }
+
+ time_left = wait_for_completion_timeout(&ar->wmi.radar_confirm,
+ ATH10K_WMI_DFS_CONF_TIMEOUT_HZ);
+ if (time_left) {
+ /* DFS Confirmation status event received and
+ * necessary action completed.
+ */
+ goto wait_complete;
+ } else {
+ /* DFS confirmation event not received from FW. Considering
+ * this as a real radar.
+ */
+ ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
+ "dfs confirmation not received from fw, considering as radar\n");
+ goto radar_detected;
+ }
+
+radar_detected:
+ ath10k_radar_detected(ar);
+
+ /* Reset state to allow sending confirmation on consecutive radar
+ * detections, unless radar confirmation is disabled/stopped.
+ */
+wait_complete:
+ spin_lock_bh(&ar->data_lock);
+ if (ar->radar_conf_state != ATH10K_RADAR_CONFIRMATION_STOPPED)
+ ar->radar_conf_state = ATH10K_RADAR_CONFIRMATION_IDLE;
+ spin_unlock_bh(&ar->data_lock);
+}
+
+static void ath10k_dfs_radar_report(struct ath10k *ar,
+ struct wmi_phyerr_ev_arg *phyerr,
+ const struct phyerr_radar_report *rr,
+ u64 tsf)
+{
+ u32 reg0, reg1, tsf32l;
+ struct ieee80211_channel *ch;
+ struct pulse_event pe;
+ struct radar_detector_specs rs;
+ u64 tsf64;
+ u8 rssi, width;
+ struct ath10k_radar_found_info *radar_info;
+
+ reg0 = __le32_to_cpu(rr->reg0);
+ reg1 = __le32_to_cpu(rr->reg1);
+
+ ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
+ "wmi phyerr radar report chirp %d max_width %d agc_total_gain %d pulse_delta_diff %d\n",
+ MS(reg0, RADAR_REPORT_REG0_PULSE_IS_CHIRP),
+ MS(reg0, RADAR_REPORT_REG0_PULSE_IS_MAX_WIDTH),
+ MS(reg0, RADAR_REPORT_REG0_AGC_TOTAL_GAIN),
+ MS(reg0, RADAR_REPORT_REG0_PULSE_DELTA_DIFF));
+ ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
+ "wmi phyerr radar report pulse_delta_pean %d pulse_sidx %d fft_valid %d agc_mb_gain %d subchan_mask %d\n",
+ MS(reg0, RADAR_REPORT_REG0_PULSE_DELTA_PEAK),
+ MS(reg0, RADAR_REPORT_REG0_PULSE_SIDX),
+ MS(reg1, RADAR_REPORT_REG1_PULSE_SRCH_FFT_VALID),
+ MS(reg1, RADAR_REPORT_REG1_PULSE_AGC_MB_GAIN),
+ MS(reg1, RADAR_REPORT_REG1_PULSE_SUBCHAN_MASK));
+ ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
+ "wmi phyerr radar report pulse_tsf_offset 0x%X pulse_dur: %d\n",
+ MS(reg1, RADAR_REPORT_REG1_PULSE_TSF_OFFSET),
+ MS(reg1, RADAR_REPORT_REG1_PULSE_DUR));
+
+ if (!ar->dfs_detector)
+ return;
+
+ spin_lock_bh(&ar->data_lock);
+ ch = ar->rx_channel;
+
+ /* fetch target operating channel during channel change */
+ if (!ch)
+ ch = ar->tgt_oper_chan;
+
+ spin_unlock_bh(&ar->data_lock);
+
+ if (!ch) {
+ ath10k_warn(ar, "failed to derive channel for radar pulse, treating as radar\n");
+ goto radar_detected;
+ }
+
+ /* report event to DFS pattern detector */
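+ /* The pulse timestamp is only 32 bits wide; splice it into the
+ * lower half of the 64-bit event TSF.
+ */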
+ tsf32l = phyerr->tsf_timestamp;
+ tsf64 = tsf & (~0xFFFFFFFFULL);
+ tsf64 |= tsf32l;
+
+ width = MS(reg1, RADAR_REPORT_REG1_PULSE_DUR);
+ rssi = phyerr->rssi_combined;
+
+ /* Hardware stores this as an 8-bit signed value;
+ * set it to zero if negative.
+ */
+ if (rssi & 0x80)
+ rssi = 0;
+
+ pe.ts = tsf64;
+ pe.freq = ch->center_freq;
+ pe.width = width;
+ pe.rssi = rssi;
+ pe.chirp = (MS(reg0, RADAR_REPORT_REG0_PULSE_IS_CHIRP) != 0);
+ ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
+ "dfs add pulse freq: %d, width: %d, rssi %d, tsf: %llX\n",
+ pe.freq, pe.width, pe.rssi, pe.ts);
+
+ ATH10K_DFS_STAT_INC(ar, pulses_detected);
+
+ if (!ar->dfs_detector->add_pulse(ar->dfs_detector, &pe, &rs)) {
+ ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
+ "dfs no pulse pattern detected, yet\n");
+ return;
+ }
+
+ if ((test_bit(WMI_SERVICE_HOST_DFS_CHECK_SUPPORT, ar->wmi.svc_map)) &&
+ ar->dfs_detector->region == NL80211_DFS_FCC) {
+ /* Consecutive radar indications need not be
+ * sent to the firmware until we get confirmation
+ * for the previously detected radar.
+ */
+ spin_lock_bh(&ar->data_lock);
+ if (ar->radar_conf_state != ATH10K_RADAR_CONFIRMATION_IDLE) {
+ spin_unlock_bh(&ar->data_lock);
+ return;
+ }
+ ar->radar_conf_state = ATH10K_RADAR_CONFIRMATION_INPROGRESS;
+ radar_info = &ar->last_radar_info;
+
+ radar_info->pri_min = rs.pri_min;
+ radar_info->pri_max = rs.pri_max;
+ radar_info->width_min = rs.width_min;
+ radar_info->width_max = rs.width_max;
+ /*TODO Find sidx_min and sidx_max */
+ radar_info->sidx_min = MS(reg0, RADAR_REPORT_REG0_PULSE_SIDX);
+ radar_info->sidx_max = MS(reg0, RADAR_REPORT_REG0_PULSE_SIDX);
+
+ ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
+ "sending wmi radar found cmd pri_min %d pri_max %d width_min %d width_max %d sidx_min %d sidx_max %d\n",
+ radar_info->pri_min, radar_info->pri_max,
+ radar_info->width_min, radar_info->width_max,
+ radar_info->sidx_min, radar_info->sidx_max);
+ ieee80211_queue_work(ar->hw, &ar->radar_confirmation_work);
+ spin_unlock_bh(&ar->data_lock);
+ return;
+ }
+
+radar_detected:
+ ath10k_radar_detected(ar);
+}
+
+static int ath10k_dfs_fft_report(struct ath10k *ar,
+ struct wmi_phyerr_ev_arg *phyerr,
+ const struct phyerr_fft_report *fftr,
+ u64 tsf)
+{
+ u32 reg0, reg1;
+ u8 rssi, peak_mag;
+
+ reg0 = __le32_to_cpu(fftr->reg0);
+ reg1 = __le32_to_cpu(fftr->reg1);
+ rssi = phyerr->rssi_combined;
+
+ ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
+ "wmi phyerr fft report total_gain_db %d base_pwr_db %d fft_chn_idx %d peak_sidx %d\n",
+ MS(reg0, SEARCH_FFT_REPORT_REG0_TOTAL_GAIN_DB),
+ MS(reg0, SEARCH_FFT_REPORT_REG0_BASE_PWR_DB),
+ MS(reg0, SEARCH_FFT_REPORT_REG0_FFT_CHN_IDX),
+ MS(reg0, SEARCH_FFT_REPORT_REG0_PEAK_SIDX));
+ ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
+ "wmi phyerr fft report rel_pwr_db %d avgpwr_db %d peak_mag %d num_store_bin %d\n",
+ MS(reg1, SEARCH_FFT_REPORT_REG1_RELPWR_DB),
+ MS(reg1, SEARCH_FFT_REPORT_REG1_AVGPWR_DB),
+ MS(reg1, SEARCH_FFT_REPORT_REG1_PEAK_MAG),
+ MS(reg1, SEARCH_FFT_REPORT_REG1_NUM_STR_BINS_IB));
+
+ peak_mag = MS(reg1, SEARCH_FFT_REPORT_REG1_PEAK_MAG);
+
+ /* false event detection */
+ if (rssi == DFS_RSSI_POSSIBLY_FALSE &&
+ peak_mag < 2 * DFS_PEAK_MAG_THOLD_POSSIBLY_FALSE) {
+ ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs false pulse detected\n");
+ ATH10K_DFS_STAT_INC(ar, pulses_discarded);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void ath10k_wmi_event_dfs(struct ath10k *ar,
+ struct wmi_phyerr_ev_arg *phyerr,
+ u64 tsf)
+{
+ int buf_len, tlv_len, res, i = 0;
+ const struct phyerr_tlv *tlv;
+ const struct phyerr_radar_report *rr;
+ const struct phyerr_fft_report *fftr;
+ const u8 *tlv_buf;
+
+ buf_len = phyerr->buf_len;
+ ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
+ "wmi event dfs err_code %d rssi %d tsfl 0x%X tsf64 0x%llX len %d\n",
+ phyerr->phy_err_code, phyerr->rssi_combined,
+ phyerr->tsf_timestamp, tsf, buf_len);
+
+ /* Skip event if DFS disabled */
+ if (!IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED))
+ return;
+
+ ATH10K_DFS_STAT_INC(ar, pulses_total);
+
+ while (i < buf_len) {
+ if (i + sizeof(*tlv) > buf_len) {
+ ath10k_warn(ar, "too short buf for tlv header (%d)\n",
+ i);
+ return;
+ }
+
+ tlv = (struct phyerr_tlv *)&phyerr->buf[i];
+ tlv_len = __le16_to_cpu(tlv->len);
+ tlv_buf = &phyerr->buf[i + sizeof(*tlv)];
+ ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
+ "wmi event dfs tlv_len %d tlv_tag 0x%02X tlv_sig 0x%02X\n",
+ tlv_len, tlv->tag, tlv->sig);
+
+ switch (tlv->tag) {
+ case PHYERR_TLV_TAG_RADAR_PULSE_SUMMARY:
+ if (i + sizeof(*tlv) + sizeof(*rr) > buf_len) {
+ ath10k_warn(ar, "too short radar pulse summary (%d)\n",
+ i);
+ return;
+ }
+
+ rr = (struct phyerr_radar_report *)tlv_buf;
+ ath10k_dfs_radar_report(ar, phyerr, rr, tsf);
+ break;
+ case PHYERR_TLV_TAG_SEARCH_FFT_REPORT:
+ if (i + sizeof(*tlv) + sizeof(*fftr) > buf_len) {
+ ath10k_warn(ar, "too short fft report (%d)\n",
+ i);
+ return;
+ }
+
+ fftr = (struct phyerr_fft_report *)tlv_buf;
+ res = ath10k_dfs_fft_report(ar, phyerr, fftr, tsf);
+ if (res)
+ return;
+ break;
+ }
+
+ i += sizeof(*tlv) + tlv_len;
+ }
+}
+
+void ath10k_wmi_event_spectral_scan(struct ath10k *ar,
+ struct wmi_phyerr_ev_arg *phyerr,
+ u64 tsf)
+{
+ int buf_len, tlv_len, res, i = 0;
+ struct phyerr_tlv *tlv;
+ const void *tlv_buf;
+ const struct phyerr_fft_report *fftr;
+ size_t fftr_len;
+
+ buf_len = phyerr->buf_len;
+
+ while (i < buf_len) {
+ if (i + sizeof(*tlv) > buf_len) {
+ ath10k_warn(ar, "failed to parse phyerr tlv header at byte %d\n",
+ i);
+ return;
+ }
+
+ tlv = (struct phyerr_tlv *)&phyerr->buf[i];
+ tlv_len = __le16_to_cpu(tlv->len);
+ tlv_buf = &phyerr->buf[i + sizeof(*tlv)];
+
+ if (i + sizeof(*tlv) + tlv_len > buf_len) {
+ ath10k_warn(ar, "failed to parse phyerr tlv payload at byte %d\n",
+ i);
+ return;
+ }
+
+ switch (tlv->tag) {
+ case PHYERR_TLV_TAG_SEARCH_FFT_REPORT:
+ if (sizeof(*fftr) > tlv_len) {
+ ath10k_warn(ar, "failed to parse fft report at byte %d\n",
+ i);
+ return;
+ }
+
+ fftr_len = tlv_len - sizeof(*fftr);
+ fftr = tlv_buf;
+ res = ath10k_spectral_process_fft(ar, phyerr,
+ fftr, fftr_len,
+ tsf);
+ if (res < 0) {
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "failed to process fft report: %d\n",
+ res);
+ return;
+ }
+ break;
+ }
+
+ i += sizeof(*tlv) + tlv_len;
+ }
+}
+
+static int ath10k_wmi_op_pull_phyerr_ev_hdr(struct ath10k *ar,
+ struct sk_buff *skb,
+ struct wmi_phyerr_hdr_arg *arg)
+{
+ struct wmi_phyerr_event *ev = (void *)skb->data;
+
+ if (skb->len < sizeof(*ev))
+ return -EPROTO;
+
+ arg->num_phyerrs = __le32_to_cpu(ev->num_phyerrs);
+ arg->tsf_l32 = __le32_to_cpu(ev->tsf_l32);
+ arg->tsf_u32 = __le32_to_cpu(ev->tsf_u32);
+ arg->buf_len = skb->len - sizeof(*ev);
+ arg->phyerrs = ev->phyerrs;
+
+ return 0;
+}
+
+static int ath10k_wmi_10_4_op_pull_phyerr_ev_hdr(struct ath10k *ar,
+ struct sk_buff *skb,
+ struct wmi_phyerr_hdr_arg *arg)
+{
+ struct wmi_10_4_phyerr_event *ev = (void *)skb->data;
+
+ if (skb->len < sizeof(*ev))
+ return -EPROTO;
+
+ /* 10.4 firmware always reports only one phyerr */
+ arg->num_phyerrs = 1;
+
+ arg->tsf_l32 = __le32_to_cpu(ev->tsf_l32);
+ arg->tsf_u32 = __le32_to_cpu(ev->tsf_u32);
+ arg->buf_len = skb->len;
+ arg->phyerrs = skb->data;
+
+ return 0;
+}
+
+int ath10k_wmi_op_pull_phyerr_ev(struct ath10k *ar,
+ const void *phyerr_buf,
+ int left_len,
+ struct wmi_phyerr_ev_arg *arg)
+{
+ const struct wmi_phyerr *phyerr = phyerr_buf;
+ int i;
+
+ if (left_len < sizeof(*phyerr)) {
+ ath10k_warn(ar, "wrong phyerr event head len %d (need: >=%zd)\n",
+ left_len, sizeof(*phyerr));
+ return -EINVAL;
+ }
+
+ arg->tsf_timestamp = __le32_to_cpu(phyerr->tsf_timestamp);
+ arg->freq1 = __le16_to_cpu(phyerr->freq1);
+ arg->freq2 = __le16_to_cpu(phyerr->freq2);
+ arg->rssi_combined = phyerr->rssi_combined;
+ arg->chan_width_mhz = phyerr->chan_width_mhz;
+ arg->buf_len = __le32_to_cpu(phyerr->buf_len);
+ arg->buf = phyerr->buf;
+ arg->hdr_len = sizeof(*phyerr);
+
+ for (i = 0; i < 4; i++)
+ arg->nf_chains[i] = __le16_to_cpu(phyerr->nf_chains[i]);
+
+ switch (phyerr->phy_err_code) {
+ case PHY_ERROR_GEN_SPECTRAL_SCAN:
+ arg->phy_err_code = PHY_ERROR_SPECTRAL_SCAN;
+ break;
+ case PHY_ERROR_GEN_FALSE_RADAR_EXT:
+ arg->phy_err_code = PHY_ERROR_FALSE_RADAR_EXT;
+ break;
+ case PHY_ERROR_GEN_RADAR:
+ arg->phy_err_code = PHY_ERROR_RADAR;
+ break;
+ default:
+ arg->phy_err_code = PHY_ERROR_UNKNOWN;
+ break;
+ }
+
+ return 0;
+}
+
+static int ath10k_wmi_10_4_op_pull_phyerr_ev(struct ath10k *ar,
+ const void *phyerr_buf,
+ int left_len,
+ struct wmi_phyerr_ev_arg *arg)
+{
+ const struct wmi_10_4_phyerr_event *phyerr = phyerr_buf;
+ u32 phy_err_mask;
+ int i;
+
+ if (left_len < sizeof(*phyerr)) {
+ ath10k_warn(ar, "wrong phyerr event head len %d (need: >=%zd)\n",
+ left_len, sizeof(*phyerr));
+ return -EINVAL;
+ }
+
+ arg->tsf_timestamp = __le32_to_cpu(phyerr->tsf_timestamp);
+ arg->freq1 = __le16_to_cpu(phyerr->freq1);
+ arg->freq2 = __le16_to_cpu(phyerr->freq2);
+ arg->rssi_combined = phyerr->rssi_combined;
+ arg->chan_width_mhz = phyerr->chan_width_mhz;
+ arg->buf_len = __le32_to_cpu(phyerr->buf_len);
+ arg->buf = phyerr->buf;
+ arg->hdr_len = sizeof(*phyerr);
+
+ for (i = 0; i < 4; i++)
+ arg->nf_chains[i] = __le16_to_cpu(phyerr->nf_chains[i]);
+
+ phy_err_mask = __le32_to_cpu(phyerr->phy_err_mask[0]);
+
+ if (phy_err_mask & PHY_ERROR_10_4_SPECTRAL_SCAN_MASK)
+ arg->phy_err_code = PHY_ERROR_SPECTRAL_SCAN;
+ else if (phy_err_mask & PHY_ERROR_10_4_RADAR_MASK)
+ arg->phy_err_code = PHY_ERROR_RADAR;
+ else
+ arg->phy_err_code = PHY_ERROR_UNKNOWN;
+
+ return 0;
+}
+
+void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct wmi_phyerr_hdr_arg hdr_arg = {};
+ struct wmi_phyerr_ev_arg phyerr_arg = {};
+ const void *phyerr;
+ u32 count, i, buf_len, phy_err_code;
+ u64 tsf;
+ int left_len, ret;
+
+ ATH10K_DFS_STAT_INC(ar, phy_errors);
+
+ ret = ath10k_wmi_pull_phyerr_hdr(ar, skb, &hdr_arg);
+ if (ret) {
+ ath10k_warn(ar, "failed to parse phyerr event hdr: %d\n", ret);
+ return;
+ }
+
+ /* Check number of included events */
+ count = hdr_arg.num_phyerrs;
+
+ left_len = hdr_arg.buf_len;
+
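+ /* Reassemble the 64-bit TSF from the two 32-bit halves. */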
+ tsf = hdr_arg.tsf_u32;
+ tsf <<= 32;
+ tsf |= hdr_arg.tsf_l32;
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi event phyerr count %d tsf64 0x%llX\n",
+ count, tsf);
+
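+ /* Each phyerr record carries its own header plus a variable
+ * length buffer; track left_len so a malformed record cannot
+ * run past the end of the event.
+ */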
+ phyerr = hdr_arg.phyerrs;
+ for (i = 0; i < count; i++) {
+ ret = ath10k_wmi_pull_phyerr(ar, phyerr, left_len, &phyerr_arg);
+ if (ret) {
+ ath10k_warn(ar, "failed to parse phyerr event (%d)\n",
+ i);
+ return;
+ }
+
+ left_len -= phyerr_arg.hdr_len;
+ buf_len = phyerr_arg.buf_len;
+ phy_err_code = phyerr_arg.phy_err_code;
+
+ if (left_len < buf_len) {
+ ath10k_warn(ar, "single event (%d) wrong buf len\n", i);
+ return;
+ }
+
+ left_len -= buf_len;
+
+ switch (phy_err_code) {
+ case PHY_ERROR_RADAR:
+ ath10k_wmi_event_dfs(ar, &phyerr_arg, tsf);
+ break;
+ case PHY_ERROR_SPECTRAL_SCAN:
+ ath10k_wmi_event_spectral_scan(ar, &phyerr_arg, tsf);
+ break;
+ case PHY_ERROR_FALSE_RADAR_EXT:
+ ath10k_wmi_event_dfs(ar, &phyerr_arg, tsf);
+ ath10k_wmi_event_spectral_scan(ar, &phyerr_arg, tsf);
+ break;
+ default:
+ break;
+ }
+
+ phyerr = phyerr + phyerr_arg.hdr_len + buf_len;
+ }
+}
+
+static int
+ath10k_wmi_10_4_op_pull_dfs_status_ev(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_dfs_status_ev_arg *arg)
+{
+ struct wmi_dfs_status_ev_arg *ev = (void *)skb->data;
+
+ if (skb->len < sizeof(*ev))
+ return -EPROTO;
+
+ arg->status = ev->status;
+
+ return 0;
+}
+
+static void
+ath10k_wmi_event_dfs_status_check(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct wmi_dfs_status_ev_arg status_arg = {};
+ int ret;
+
+ ret = ath10k_wmi_pull_dfs_status(ar, skb, &status_arg);
+
+ if (ret) {
+ ath10k_warn(ar, "failed to parse dfs status event: %d\n", ret);
+ return;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
+ "dfs status event received from fw: %d\n",
+ status_arg.status);
+
+ /* Even in the case of radar detection failure we follow the
+ * same behaviour as if a radar were detected, i.e. switch to a
+ * different channel.
+ */
+ if (status_arg.status == WMI_HW_RADAR_DETECTED ||
+ status_arg.status == WMI_RADAR_DETECTION_FAIL)
+ ath10k_radar_detected(ar);
+ complete(&ar->wmi.radar_confirm);
+}
+
+void ath10k_wmi_event_roam(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct wmi_roam_ev_arg arg = {};
+ int ret;
+ u32 vdev_id;
+ u32 reason;
+ s32 rssi;
+
+ ret = ath10k_wmi_pull_roam_ev(ar, skb, &arg);
+ if (ret) {
+ ath10k_warn(ar, "failed to parse roam event: %d\n", ret);
+ return;
+ }
+
+ vdev_id = __le32_to_cpu(arg.vdev_id);
+ reason = __le32_to_cpu(arg.reason);
+ rssi = __le32_to_cpu(arg.rssi);
+ rssi += WMI_SPECTRAL_NOISE_FLOOR_REF_DEFAULT;
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi roam event vdev %u reason 0x%08x rssi %d\n",
+ vdev_id, reason, rssi);
+
+ if (reason >= WMI_ROAM_REASON_MAX)
+ ath10k_warn(ar, "ignoring unknown roam event reason %d on vdev %i\n",
+ reason, vdev_id);
+
+ switch (reason) {
+ case WMI_ROAM_REASON_BEACON_MISS:
+ ath10k_mac_handle_beacon_miss(ar, vdev_id);
+ break;
+ case WMI_ROAM_REASON_BETTER_AP:
+ case WMI_ROAM_REASON_LOW_RSSI:
+ case WMI_ROAM_REASON_SUITABLE_AP_FOUND:
+ case WMI_ROAM_REASON_HO_FAILED:
+ ath10k_warn(ar, "ignoring not implemented roam event reason %d on vdev %i\n",
+ reason, vdev_id);
+ break;
+ }
+}
+
+void ath10k_wmi_event_profile_match(struct ath10k *ar, struct sk_buff *skb)
+{
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PROFILE_MATCH\n");
+}
+
+void ath10k_wmi_event_debug_print(struct ath10k *ar, struct sk_buff *skb)
+{
+ char buf[101], c;
+ int i;
+
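+ /* Copy at most sizeof(buf) - 1 characters, stopping at the end
+ * of the payload or a NUL and replacing non-printable bytes
+ * with '.'.
+ */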
+ for (i = 0; i < sizeof(buf) - 1; i++) {
+ if (i >= skb->len)
+ break;
+
+ c = skb->data[i];
+
+ if (c == '\0')
+ break;
+
+ if (isascii(c) && isprint(c))
+ buf[i] = c;
+ else
+ buf[i] = '.';
+ }
+
+ if (i == sizeof(buf) - 1)
+ ath10k_warn(ar, "wmi debug print truncated: %d\n", skb->len);
+
+ /* for some reason the debug prints end with \n, remove that */
+ if (i && skb->data[i - 1] == '\n')
+ i--;
+
+ /* the last byte is always reserved for the null character */
+ buf[i] = '\0';
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI_PRINT, "wmi print '%s'\n", buf);
+}
+
+void ath10k_wmi_event_pdev_qvit(struct ath10k *ar, struct sk_buff *skb)
+{
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PDEV_QVIT_EVENTID\n");
+}
+
+void ath10k_wmi_event_wlan_profile_data(struct ath10k *ar, struct sk_buff *skb)
+{
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_WLAN_PROFILE_DATA_EVENTID\n");
+}
+
+void ath10k_wmi_event_rtt_measurement_report(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_RTT_MEASUREMENT_REPORT_EVENTID\n");
+}
+
+void ath10k_wmi_event_tsf_measurement_report(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TSF_MEASUREMENT_REPORT_EVENTID\n");
+}
+
+void ath10k_wmi_event_rtt_error_report(struct ath10k *ar, struct sk_buff *skb)
+{
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_RTT_ERROR_REPORT_EVENTID\n");
+}
+
+void ath10k_wmi_event_wow_wakeup_host(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct wmi_wow_ev_arg ev = {};
+ int ret;
+
+ complete(&ar->wow.wakeup_completed);
+
+ ret = ath10k_wmi_pull_wow_event(ar, skb, &ev);
+ if (ret) {
+ ath10k_warn(ar, "failed to parse wow wakeup event: %d\n", ret);
+ return;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wow wakeup host reason %s\n",
+ wow_reason(ev.wake_reason));
+}
+
+void ath10k_wmi_event_dcs_interference(struct ath10k *ar, struct sk_buff *skb)
+{
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_DCS_INTERFERENCE_EVENTID\n");
+}
+
+static u8 ath10k_tpc_config_get_rate(struct ath10k *ar,
+ struct wmi_pdev_tpc_config_event *ev,
+ u32 rate_idx, u32 num_chains,
+ u32 rate_code, u8 type)
+{
+ u8 tpc, num_streams, preamble, ch, stm_idx;
+
+ num_streams = ATH10K_HW_NSS(rate_code);
+ preamble = ATH10K_HW_PREAMBLE(rate_code);
+ ch = num_chains - 1;
+
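+ /* Start from the per-rate target power, capped by the
+ * regulatory limit for this chain count.
+ */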
+ tpc = min_t(u8, ev->rates_array[rate_idx], ev->max_reg_allow_pow[ch]);
+
+ if (__le32_to_cpu(ev->num_tx_chain) <= 1)
+ goto out;
+
+ if (preamble == WMI_RATE_PREAMBLE_CCK)
+ goto out;
+
+ stm_idx = num_streams - 1;
+ if (num_chains <= num_streams)
+ goto out;
+
+ switch (type) {
+ case WMI_TPC_TABLE_TYPE_STBC:
+ tpc = min_t(u8, tpc,
+ ev->max_reg_allow_pow_agstbc[ch - 1][stm_idx]);
+ break;
+ case WMI_TPC_TABLE_TYPE_TXBF:
+ tpc = min_t(u8, tpc,
+ ev->max_reg_allow_pow_agtxbf[ch - 1][stm_idx]);
+ break;
+ case WMI_TPC_TABLE_TYPE_CDD:
+ tpc = min_t(u8, tpc,
+ ev->max_reg_allow_pow_agcdd[ch - 1][stm_idx]);
+ break;
+ default:
+ ath10k_warn(ar, "unknown wmi tpc table type: %d\n", type);
+ tpc = 0;
+ break;
+ }
+
+out:
+ return tpc;
+}
+
+static void ath10k_tpc_config_disp_tables(struct ath10k *ar,
+ struct wmi_pdev_tpc_config_event *ev,
+ struct ath10k_tpc_stats *tpc_stats,
+ u8 *rate_code, u16 *pream_table, u8 type)
+{
+ u32 i, j, pream_idx, flags;
+ u8 tpc[WMI_TPC_TX_N_CHAIN];
+ char tpc_value[WMI_TPC_TX_N_CHAIN * WMI_TPC_BUF_SIZE];
+ char buff[WMI_TPC_BUF_SIZE];
+
+ flags = __le32_to_cpu(ev->flags);
+
+ switch (type) {
+ case WMI_TPC_TABLE_TYPE_CDD:
+ if (!(flags & WMI_TPC_CONFIG_EVENT_FLAG_TABLE_CDD)) {
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "CDD not supported\n");
+ tpc_stats->flag[type] = ATH10K_TPC_TABLE_TYPE_FLAG;
+ return;
+ }
+ break;
+ case WMI_TPC_TABLE_TYPE_STBC:
+ if (!(flags & WMI_TPC_CONFIG_EVENT_FLAG_TABLE_STBC)) {
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "STBC not supported\n");
+ tpc_stats->flag[type] = ATH10K_TPC_TABLE_TYPE_FLAG;
+ return;
+ }
+ break;
+ case WMI_TPC_TABLE_TYPE_TXBF:
+ if (!(flags & WMI_TPC_CONFIG_EVENT_FLAG_TABLE_TXBF)) {
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "TXBF not supported\n");
+ tpc_stats->flag[type] = ATH10K_TPC_TABLE_TYPE_FLAG;
+ return;
+ }
+ break;
+ default:
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "invalid table type in wmi tpc event: %d\n", type);
+ return;
+ }
+
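+ /* Format one row per rate; pream_idx advances whenever a
+ * preamble boundary recorded in pream_table is crossed.
+ */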
+ pream_idx = 0;
+ for (i = 0; i < tpc_stats->rate_max; i++) {
+ memset(tpc_value, 0, sizeof(tpc_value));
+ memset(buff, 0, sizeof(buff));
+ if (i == pream_table[pream_idx])
+ pream_idx++;
+
+ for (j = 0; j < tpc_stats->num_tx_chain; j++) {
+ tpc[j] = ath10k_tpc_config_get_rate(ar, ev, i, j + 1,
+ rate_code[i],
+ type);
+ snprintf(buff, sizeof(buff), "%8d ", tpc[j]);
+ strlcat(tpc_value, buff, sizeof(tpc_value));
+ }
+ tpc_stats->tpc_table[type].pream_idx[i] = pream_idx;
+ tpc_stats->tpc_table[type].rate_code[i] = rate_code[i];
+ memcpy(tpc_stats->tpc_table[type].tpc_value[i],
+ tpc_value, sizeof(tpc_value));
+ }
+}
+
+void ath10k_wmi_tpc_config_get_rate_code(u8 *rate_code, u16 *pream_table,
+ u32 num_tx_chain)
+{
+ u32 i, j, pream_idx;
+ u8 rate_idx;
+
+ /* Create the rate code table based on the chains supported */
+ rate_idx = 0;
+ pream_idx = 0;
+
+ /* Fill CCK rate code */
+ for (i = 0; i < 4; i++) {
+ rate_code[rate_idx] =
+ ATH10K_HW_RATECODE(i, 0, WMI_RATE_PREAMBLE_CCK);
+ rate_idx++;
+ }
+ pream_table[pream_idx] = rate_idx;
+ pream_idx++;
+
+ /* Fill OFDM rate code */
+ for (i = 0; i < 8; i++) {
+ rate_code[rate_idx] =
+ ATH10K_HW_RATECODE(i, 0, WMI_RATE_PREAMBLE_OFDM);
+ rate_idx++;
+ }
+ pream_table[pream_idx] = rate_idx;
+ pream_idx++;
+
+ /* Fill HT20 rate code */
+ for (i = 0; i < num_tx_chain; i++) {
+ for (j = 0; j < 8; j++) {
+ rate_code[rate_idx] =
+ ATH10K_HW_RATECODE(j, i, WMI_RATE_PREAMBLE_HT);
+ rate_idx++;
+ }
+ }
+ pream_table[pream_idx] = rate_idx;
+ pream_idx++;
+
+ /* Fill HT40 rate code */
+ for (i = 0; i < num_tx_chain; i++) {
+ for (j = 0; j < 8; j++) {
+ rate_code[rate_idx] =
+ ATH10K_HW_RATECODE(j, i, WMI_RATE_PREAMBLE_HT);
+ rate_idx++;
+ }
+ }
+ pream_table[pream_idx] = rate_idx;
+ pream_idx++;
+
+ /* Fill VHT20 rate code */
+ for (i = 0; i < num_tx_chain; i++) {
+ for (j = 0; j < 10; j++) {
+ rate_code[rate_idx] =
+ ATH10K_HW_RATECODE(j, i, WMI_RATE_PREAMBLE_VHT);
+ rate_idx++;
+ }
+ }
+ pream_table[pream_idx] = rate_idx;
+ pream_idx++;
+
+ /* Fill VHT40 rate code */
+ for (i = 0; i < num_tx_chain; i++) {
+ for (j = 0; j < 10; j++) {
+ rate_code[rate_idx] =
+ ATH10K_HW_RATECODE(j, i, WMI_RATE_PREAMBLE_VHT);
+ rate_idx++;
+ }
+ }
+ pream_table[pream_idx] = rate_idx;
+ pream_idx++;
+
+ /* Fill VHT80 rate code */
+ for (i = 0; i < num_tx_chain; i++) {
+ for (j = 0; j < 10; j++) {
+ rate_code[rate_idx] =
+ ATH10K_HW_RATECODE(j, i, WMI_RATE_PREAMBLE_VHT);
+ rate_idx++;
+ }
+ }
+ pream_table[pream_idx] = rate_idx;
+ pream_idx++;
+
+ rate_code[rate_idx++] =
+ ATH10K_HW_RATECODE(0, 0, WMI_RATE_PREAMBLE_CCK);
+ rate_code[rate_idx++] =
+ ATH10K_HW_RATECODE(0, 0, WMI_RATE_PREAMBLE_OFDM);
+ rate_code[rate_idx++] =
+ ATH10K_HW_RATECODE(0, 0, WMI_RATE_PREAMBLE_CCK);
+ rate_code[rate_idx++] =
+ ATH10K_HW_RATECODE(0, 0, WMI_RATE_PREAMBLE_OFDM);
+ rate_code[rate_idx++] =
+ ATH10K_HW_RATECODE(0, 0, WMI_RATE_PREAMBLE_OFDM);
+
+ pream_table[pream_idx] = ATH10K_TPC_PREAM_TABLE_END;
+}
+
+void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, struct sk_buff *skb)
+{
+ u32 num_tx_chain, rate_max;
+ u8 rate_code[WMI_TPC_RATE_MAX];
+ u16 pream_table[WMI_TPC_PREAM_TABLE_MAX];
+ struct wmi_pdev_tpc_config_event *ev;
+ struct ath10k_tpc_stats *tpc_stats;
+
+ ev = (struct wmi_pdev_tpc_config_event *)skb->data;
+
+ num_tx_chain = __le32_to_cpu(ev->num_tx_chain);
+
+ if (num_tx_chain > WMI_TPC_TX_N_CHAIN) {
+ ath10k_warn(ar, "number of tx chain is %d greater than TPC configured tx chain %d\n",
+ num_tx_chain, WMI_TPC_TX_N_CHAIN);
+ return;
+ }
+
+ rate_max = __le32_to_cpu(ev->rate_max);
+ if (rate_max > WMI_TPC_RATE_MAX) {
+ ath10k_warn(ar, "number of rate is %d greater than TPC configured rate %d\n",
+ rate_max, WMI_TPC_RATE_MAX);
+ rate_max = WMI_TPC_RATE_MAX;
+ }
+
+ tpc_stats = kzalloc(sizeof(*tpc_stats), GFP_ATOMIC);
+ if (!tpc_stats)
+ return;
+
+ ath10k_wmi_tpc_config_get_rate_code(rate_code, pream_table,
+ num_tx_chain);
+
+ tpc_stats->chan_freq = __le32_to_cpu(ev->chan_freq);
+ tpc_stats->phy_mode = __le32_to_cpu(ev->phy_mode);
+ tpc_stats->ctl = __le32_to_cpu(ev->ctl);
+ tpc_stats->reg_domain = __le32_to_cpu(ev->reg_domain);
+ tpc_stats->twice_antenna_gain = a_sle32_to_cpu(ev->twice_antenna_gain);
+ tpc_stats->twice_antenna_reduction =
+ __le32_to_cpu(ev->twice_antenna_reduction);
+ tpc_stats->power_limit = __le32_to_cpu(ev->power_limit);
+ tpc_stats->twice_max_rd_power = __le32_to_cpu(ev->twice_max_rd_power);
+ tpc_stats->num_tx_chain = num_tx_chain;
+ tpc_stats->rate_max = rate_max;
+
+ ath10k_tpc_config_disp_tables(ar, ev, tpc_stats,
+ rate_code, pream_table,
+ WMI_TPC_TABLE_TYPE_CDD);
+ ath10k_tpc_config_disp_tables(ar, ev, tpc_stats,
+ rate_code, pream_table,
+ WMI_TPC_TABLE_TYPE_STBC);
+ ath10k_tpc_config_disp_tables(ar, ev, tpc_stats,
+ rate_code, pream_table,
+ WMI_TPC_TABLE_TYPE_TXBF);
+
+ ath10k_debug_tpc_stats_process(ar, tpc_stats);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi event tpc config channel %d mode %d ctl %d regd %d gain %d %d limit %d max_power %d tx_chanins %d rates %d\n",
+ __le32_to_cpu(ev->chan_freq),
+ __le32_to_cpu(ev->phy_mode),
+ __le32_to_cpu(ev->ctl),
+ __le32_to_cpu(ev->reg_domain),
+ a_sle32_to_cpu(ev->twice_antenna_gain),
+ __le32_to_cpu(ev->twice_antenna_reduction),
+ __le32_to_cpu(ev->power_limit),
+ __le32_to_cpu(ev->twice_max_rd_power) / 2,
+ __le32_to_cpu(ev->num_tx_chain),
+ __le32_to_cpu(ev->rate_max));
+}
+
+static u8
+ath10k_wmi_tpc_final_get_rate(struct ath10k *ar,
+ struct wmi_pdev_tpc_final_table_event *ev,
+ u32 rate_idx, u32 num_chains,
+ u32 rate_code, u8 type, u32 pream_idx)
+{
+ u8 tpc, num_streams, preamble, ch, stm_idx;
+ s8 pow_agcdd, pow_agstbc, pow_agtxbf;
+ int pream;
+
+ num_streams = ATH10K_HW_NSS(rate_code);
+ preamble = ATH10K_HW_PREAMBLE(rate_code);
+ ch = num_chains - 1;
+ stm_idx = num_streams - 1;
+ pream = -1;
+
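+ /* Map the preamble table index onto a CTL power table row for
+ * the current band (2.4 GHz up to 2483 MHz, 5 GHz from 5180 MHz).
+ */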
+ if (__le32_to_cpu(ev->chan_freq) <= 2483) {
+ switch (pream_idx) {
+ case WMI_TPC_PREAM_2GHZ_CCK:
+ pream = 0;
+ break;
+ case WMI_TPC_PREAM_2GHZ_OFDM:
+ pream = 1;
+ break;
+ case WMI_TPC_PREAM_2GHZ_HT20:
+ case WMI_TPC_PREAM_2GHZ_VHT20:
+ pream = 2;
+ break;
+ case WMI_TPC_PREAM_2GHZ_HT40:
+ case WMI_TPC_PREAM_2GHZ_VHT40:
+ pream = 3;
+ break;
+ case WMI_TPC_PREAM_2GHZ_VHT80:
+ pream = 4;
+ break;
+ default:
+ pream = -1;
+ break;
+ }
+ }
+
+ if (__le32_to_cpu(ev->chan_freq) >= 5180) {
+ switch (pream_idx) {
+ case WMI_TPC_PREAM_5GHZ_OFDM:
+ pream = 0;
+ break;
+ case WMI_TPC_PREAM_5GHZ_HT20:
+ case WMI_TPC_PREAM_5GHZ_VHT20:
+ pream = 1;
+ break;
+ case WMI_TPC_PREAM_5GHZ_HT40:
+ case WMI_TPC_PREAM_5GHZ_VHT40:
+ pream = 2;
+ break;
+ case WMI_TPC_PREAM_5GHZ_VHT80:
+ pream = 3;
+ break;
+ case WMI_TPC_PREAM_5GHZ_HTCUP:
+ pream = 4;
+ break;
+ default:
+ pream = -1;
+ break;
+ }
+ }
+
+ if (pream == -1) {
+ ath10k_warn(ar, "unknown wmi tpc final index and frequency: %u, %u\n",
+ pream_idx, __le32_to_cpu(ev->chan_freq));
+ tpc = 0;
+ goto out;
+ }
+
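+ /* Row 4 is not covered by the CTL power table, so only the
+ * per-rate and regulatory caps apply there.
+ */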
+ if (pream == 4)
+ tpc = min_t(u8, ev->rates_array[rate_idx],
+ ev->max_reg_allow_pow[ch]);
+ else
+ tpc = min_t(u8, min_t(u8, ev->rates_array[rate_idx],
+ ev->max_reg_allow_pow[ch]),
+ ev->ctl_power_table[0][pream][stm_idx]);
+
+ if (__le32_to_cpu(ev->num_tx_chain) <= 1)
+ goto out;
+
+ if (preamble == WMI_RATE_PREAMBLE_CCK)
+ goto out;
+
+ if (num_chains <= num_streams)
+ goto out;
+
+ switch (type) {
+ case WMI_TPC_TABLE_TYPE_STBC:
+ pow_agstbc = ev->max_reg_allow_pow_agstbc[ch - 1][stm_idx];
+ if (pream == 4)
+ tpc = min_t(u8, tpc, pow_agstbc);
+ else
+ tpc = min_t(u8, min_t(u8, tpc, pow_agstbc),
+ ev->ctl_power_table[0][pream][stm_idx]);
+ break;
+ case WMI_TPC_TABLE_TYPE_TXBF:
+ pow_agtxbf = ev->max_reg_allow_pow_agtxbf[ch - 1][stm_idx];
+ if (pream == 4)
+ tpc = min_t(u8, tpc, pow_agtxbf);
+ else
+ tpc = min_t(u8, min_t(u8, tpc, pow_agtxbf),
+ ev->ctl_power_table[1][pream][stm_idx]);
+ break;
+ case WMI_TPC_TABLE_TYPE_CDD:
+ pow_agcdd = ev->max_reg_allow_pow_agcdd[ch - 1][stm_idx];
+ if (pream == 4)
+ tpc = min_t(u8, tpc, pow_agcdd);
+ else
+ tpc = min_t(u8, min_t(u8, tpc, pow_agcdd),
+ ev->ctl_power_table[0][pream][stm_idx]);
+ break;
+ default:
+ ath10k_warn(ar, "unknown wmi tpc final table type: %d\n", type);
+ tpc = 0;
+ break;
+ }
+
+out:
+ return tpc;
+}
+
+static void
+ath10k_wmi_tpc_stats_final_disp_tables(struct ath10k *ar,
+ struct wmi_pdev_tpc_final_table_event *ev,
+ struct ath10k_tpc_stats_final *tpc_stats,
+ u8 *rate_code, u16 *pream_table, u8 type)
+{
+ u32 i, j, pream_idx, flags;
+ u8 tpc[WMI_TPC_TX_N_CHAIN];
+ char tpc_value[WMI_TPC_TX_N_CHAIN * WMI_TPC_BUF_SIZE];
+ char buff[WMI_TPC_BUF_SIZE];
+
+ flags = __le32_to_cpu(ev->flags);
+
+ switch (type) {
+ case WMI_TPC_TABLE_TYPE_CDD:
+ if (!(flags & WMI_TPC_CONFIG_EVENT_FLAG_TABLE_CDD)) {
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "CDD not supported\n");
+ tpc_stats->flag[type] = ATH10K_TPC_TABLE_TYPE_FLAG;
+ return;
+ }
+ break;
+ case WMI_TPC_TABLE_TYPE_STBC:
+ if (!(flags & WMI_TPC_CONFIG_EVENT_FLAG_TABLE_STBC)) {
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "STBC not supported\n");
+ tpc_stats->flag[type] = ATH10K_TPC_TABLE_TYPE_FLAG;
+ return;
+ }
+ break;
+ case WMI_TPC_TABLE_TYPE_TXBF:
+ if (!(flags & WMI_TPC_CONFIG_EVENT_FLAG_TABLE_TXBF)) {
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "TXBF not supported\n");
+ tpc_stats->flag[type] = ATH10K_TPC_TABLE_TYPE_FLAG;
+ return;
+ }
+ break;
+ default:
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "invalid table type in wmi tpc event: %d\n", type);
+ return;
+ }
+
+ pream_idx = 0;
+ for (i = 0; i < tpc_stats->rate_max; i++) {
+ memset(tpc_value, 0, sizeof(tpc_value));
+ memset(buff, 0, sizeof(buff));
+ if (i == pream_table[pream_idx])
+ pream_idx++;
+
+ for (j = 0; j < tpc_stats->num_tx_chain; j++) {
+ tpc[j] = ath10k_wmi_tpc_final_get_rate(ar, ev, i, j + 1,
+ rate_code[i],
+ type, pream_idx);
+ snprintf(buff, sizeof(buff), "%8d ", tpc[j]);
+ strlcat(tpc_value, buff, sizeof(tpc_value));
+ }
+ tpc_stats->tpc_table_final[type].pream_idx[i] = pream_idx;
+ tpc_stats->tpc_table_final[type].rate_code[i] = rate_code[i];
+ memcpy(tpc_stats->tpc_table_final[type].tpc_value[i],
+ tpc_value, sizeof(tpc_value));
+ }
+}
+
+void ath10k_wmi_event_tpc_final_table(struct ath10k *ar, struct sk_buff *skb)
+{
+ u32 num_tx_chain, rate_max;
+ u8 rate_code[WMI_TPC_FINAL_RATE_MAX];
+ u16 pream_table[WMI_TPC_PREAM_TABLE_MAX];
+ struct wmi_pdev_tpc_final_table_event *ev;
+ struct ath10k_tpc_stats_final *tpc_stats;
+
+ ev = (struct wmi_pdev_tpc_final_table_event *)skb->data;
+
+ num_tx_chain = __le32_to_cpu(ev->num_tx_chain);
+ if (num_tx_chain > WMI_TPC_TX_N_CHAIN) {
+ ath10k_warn(ar, "number of tx chain is %d greater than TPC final configured tx chain %d\n",
+ num_tx_chain, WMI_TPC_TX_N_CHAIN);
+ return;
+ }
+
+ rate_max = __le32_to_cpu(ev->rate_max);
+ if (rate_max > WMI_TPC_FINAL_RATE_MAX) {
+ ath10k_warn(ar, "number of rate is %d greater than TPC final configured rate %d\n",
+ rate_max, WMI_TPC_FINAL_RATE_MAX);
+ rate_max = WMI_TPC_FINAL_RATE_MAX;
+ }
+
+ tpc_stats = kzalloc(sizeof(*tpc_stats), GFP_ATOMIC);
+ if (!tpc_stats)
+ return;
+
+ ath10k_wmi_tpc_config_get_rate_code(rate_code, pream_table,
+ num_tx_chain);
+
+ tpc_stats->chan_freq = __le32_to_cpu(ev->chan_freq);
+ tpc_stats->phy_mode = __le32_to_cpu(ev->phy_mode);
+ tpc_stats->ctl = __le32_to_cpu(ev->ctl);
+ tpc_stats->reg_domain = __le32_to_cpu(ev->reg_domain);
+ tpc_stats->twice_antenna_gain = a_sle32_to_cpu(ev->twice_antenna_gain);
+ tpc_stats->twice_antenna_reduction =
+ __le32_to_cpu(ev->twice_antenna_reduction);
+ tpc_stats->power_limit = __le32_to_cpu(ev->power_limit);
+ tpc_stats->twice_max_rd_power = __le32_to_cpu(ev->twice_max_rd_power);
+ tpc_stats->num_tx_chain = num_tx_chain;
+ tpc_stats->rate_max = rate_max;
+
+ ath10k_wmi_tpc_stats_final_disp_tables(ar, ev, tpc_stats,
+ rate_code, pream_table,
+ WMI_TPC_TABLE_TYPE_CDD);
+ ath10k_wmi_tpc_stats_final_disp_tables(ar, ev, tpc_stats,
+ rate_code, pream_table,
+ WMI_TPC_TABLE_TYPE_STBC);
+ ath10k_wmi_tpc_stats_final_disp_tables(ar, ev, tpc_stats,
+ rate_code, pream_table,
+ WMI_TPC_TABLE_TYPE_TXBF);
+
+ ath10k_debug_tpc_stats_final_process(ar, tpc_stats);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi event tpc final table channel %d mode %d ctl %d regd %d gain %d %d limit %d max_power %d tx_chanins %d rates %d\n",
+ __le32_to_cpu(ev->chan_freq),
+ __le32_to_cpu(ev->phy_mode),
+ __le32_to_cpu(ev->ctl),
+ __le32_to_cpu(ev->reg_domain),
+ a_sle32_to_cpu(ev->twice_antenna_gain),
+ __le32_to_cpu(ev->twice_antenna_reduction),
+ __le32_to_cpu(ev->power_limit),
+ __le32_to_cpu(ev->twice_max_rd_power) / 2,
+ __le32_to_cpu(ev->num_tx_chain),
+ __le32_to_cpu(ev->rate_max));
+}
+
+static void
+ath10k_wmi_handle_tdls_peer_event(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct wmi_tdls_peer_event *ev;
+ struct ath10k_peer *peer;
+ struct ath10k_vif *arvif;
+ int vdev_id;
+ int peer_status;
+ int peer_reason;
+ u8 reason;
+
+ if (skb->len < sizeof(*ev)) {
+ ath10k_err(ar, "received tdls peer event with invalid size (%d bytes)\n",
+ skb->len);
+ return;
+ }
+
+ ev = (struct wmi_tdls_peer_event *)skb->data;
+ vdev_id = __le32_to_cpu(ev->vdev_id);
+ peer_status = __le32_to_cpu(ev->peer_status);
+ peer_reason = __le32_to_cpu(ev->peer_reason);
+
+ spin_lock_bh(&ar->data_lock);
+ peer = ath10k_peer_find(ar, vdev_id, ev->peer_macaddr.addr);
+ spin_unlock_bh(&ar->data_lock);
+
+ if (!peer) {
+ ath10k_warn(ar, "failed to find peer entry for %pM\n",
+ ev->peer_macaddr.addr);
+ return;
+ }
+
+ switch (peer_status) {
+ case WMI_TDLS_SHOULD_TEARDOWN:
+ switch (peer_reason) {
+ case WMI_TDLS_TEARDOWN_REASON_PTR_TIMEOUT:
+ case WMI_TDLS_TEARDOWN_REASON_NO_RESPONSE:
+ case WMI_TDLS_TEARDOWN_REASON_RSSI:
+ reason = WLAN_REASON_TDLS_TEARDOWN_UNREACHABLE;
+ break;
+ default:
+ reason = WLAN_REASON_TDLS_TEARDOWN_UNSPECIFIED;
+ break;
+ }
+
+ arvif = ath10k_get_arvif(ar, vdev_id);
+ if (!arvif) {
+ ath10k_warn(ar, "received tdls peer event for invalid vdev id %u\n",
+ vdev_id);
+ return;
+ }
+
+ ieee80211_tdls_oper_request(arvif->vif, ev->peer_macaddr.addr,
+ NL80211_TDLS_TEARDOWN, reason,
+ GFP_ATOMIC);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "received tdls teardown event for peer %pM reason %u\n",
+ ev->peer_macaddr.addr, peer_reason);
+ break;
+ default:
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "received unknown tdls peer event %u\n",
+ peer_status);
+ break;
+ }
+}
+
+static void
+ath10k_wmi_event_peer_sta_ps_state_chg(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct wmi_peer_sta_ps_state_chg_event *ev;
+ struct ieee80211_sta *sta;
+ struct ath10k_sta *arsta;
+ u8 peer_addr[ETH_ALEN];
+
+ lockdep_assert_held(&ar->data_lock);
+
+ ev = (struct wmi_peer_sta_ps_state_chg_event *)skb->data;
+ ether_addr_copy(peer_addr, ev->peer_macaddr.addr);
+
+ rcu_read_lock();
+
+ sta = ieee80211_find_sta_by_ifaddr(ar->hw, peer_addr, NULL);
+
+ if (!sta) {
+ ath10k_warn(ar, "failed to find station entry %pM\n",
+ peer_addr);
+ goto exit;
+ }
+
+ arsta = (struct ath10k_sta *)sta->drv_priv;
+ arsta->peer_ps_state = __le32_to_cpu(ev->peer_ps_state);
+
+exit:
+ rcu_read_unlock();
+}
+
+void ath10k_wmi_event_pdev_ftm_intg(struct ath10k *ar, struct sk_buff *skb)
+{
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PDEV_FTM_INTG_EVENTID\n");
+}
+
+void ath10k_wmi_event_gtk_offload_status(struct ath10k *ar, struct sk_buff *skb)
+{
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_GTK_OFFLOAD_STATUS_EVENTID\n");
+}
+
+void ath10k_wmi_event_gtk_rekey_fail(struct ath10k *ar, struct sk_buff *skb)
+{
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_GTK_REKEY_FAIL_EVENTID\n");
+}
+
+void ath10k_wmi_event_delba_complete(struct ath10k *ar, struct sk_buff *skb)
+{
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TX_DELBA_COMPLETE_EVENTID\n");
+}
+
+void ath10k_wmi_event_addba_complete(struct ath10k *ar, struct sk_buff *skb)
+{
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TX_ADDBA_COMPLETE_EVENTID\n");
+}
+
+void ath10k_wmi_event_vdev_install_key_complete(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID\n");
+}
+
+void ath10k_wmi_event_inst_rssi_stats(struct ath10k *ar, struct sk_buff *skb)
+{
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_INST_RSSI_STATS_EVENTID\n");
+}
+
+void ath10k_wmi_event_vdev_standby_req(struct ath10k *ar, struct sk_buff *skb)
+{
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_STANDBY_REQ_EVENTID\n");
+}
+
+void ath10k_wmi_event_vdev_resume_req(struct ath10k *ar, struct sk_buff *skb)
+{
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_RESUME_REQ_EVENTID\n");
+}
+
+static int ath10k_wmi_alloc_chunk(struct ath10k *ar, u32 req_id,
+ u32 num_units, u32 unit_len)
+{
+ dma_addr_t paddr;
+ u32 pool_size;
+ int idx = ar->wmi.num_mem_chunks;
+ void *vaddr;
+
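+ /* The pool holds num_units copies of the unit length rounded up
+ * to a 4-byte boundary.
+ */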
+ pool_size = num_units * round_up(unit_len, 4);
+ vaddr = dma_alloc_coherent(ar->dev, pool_size, &paddr, GFP_KERNEL);
+
+ if (!vaddr)
+ return -ENOMEM;
+
+ ar->wmi.mem_chunks[idx].vaddr = vaddr;
+ ar->wmi.mem_chunks[idx].paddr = paddr;
+ ar->wmi.mem_chunks[idx].len = pool_size;
+ ar->wmi.mem_chunks[idx].req_id = req_id;
+ ar->wmi.num_mem_chunks++;
+
+ return num_units;
+}
+
+static int ath10k_wmi_alloc_host_mem(struct ath10k *ar, u32 req_id,
+ u32 num_units, u32 unit_len)
+{
+ int ret;
+
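+ /* ath10k_wmi_alloc_chunk() returns the number of units placed
+ * in the new pool; loop until the request is fully satisfied.
+ */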
+ while (num_units) {
+ ret = ath10k_wmi_alloc_chunk(ar, req_id, num_units, unit_len);
+ if (ret < 0)
+ return ret;
+
+ num_units -= ret;
+ }
+
+ return 0;
+}
+
+static bool
+ath10k_wmi_is_host_mem_allocated(struct ath10k *ar,
+ const struct wlan_host_mem_req **mem_reqs,
+ u32 num_mem_reqs)
+{
+ u32 req_id, num_units, unit_size, num_unit_info;
+ u32 pool_size;
+ int i, j;
+ bool found;
+
+ if (ar->wmi.num_mem_chunks != num_mem_reqs)
+ return false;
+
+ for (i = 0; i < num_mem_reqs; ++i) {
+ req_id = __le32_to_cpu(mem_reqs[i]->req_id);
+ num_units = __le32_to_cpu(mem_reqs[i]->num_units);
+ unit_size = __le32_to_cpu(mem_reqs[i]->unit_size);
+ num_unit_info = __le32_to_cpu(mem_reqs[i]->num_unit_info);
+
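+ /* Some requests scale with runtime limits (active peers, peers,
+ * vdevs) rather than the literal num_units field.
+ */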
+ if (num_unit_info & NUM_UNITS_IS_NUM_ACTIVE_PEERS) {
+ if (ar->num_active_peers)
+ num_units = ar->num_active_peers + 1;
+ else
+ num_units = ar->max_num_peers + 1;
+ } else if (num_unit_info & NUM_UNITS_IS_NUM_PEERS) {
+ num_units = ar->max_num_peers + 1;
+ } else if (num_unit_info & NUM_UNITS_IS_NUM_VDEVS) {
+ num_units = ar->max_num_vdevs + 1;
+ }
+
+ found = false;
+ for (j = 0; j < ar->wmi.num_mem_chunks; j++) {
+ if (ar->wmi.mem_chunks[j].req_id == req_id) {
+ pool_size = num_units * round_up(unit_size, 4);
+ if (ar->wmi.mem_chunks[j].len == pool_size) {
+ found = true;
+ break;
+ }
+ }
+ }
+ if (!found)
+ return false;
+ }
+
+ return true;
+}
+
+static int
+ath10k_wmi_main_op_pull_svc_rdy_ev(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_svc_rdy_ev_arg *arg)
+{
+ struct wmi_service_ready_event *ev;
+ size_t i, n;
+
+ if (skb->len < sizeof(*ev))
+ return -EPROTO;
+
+ ev = (void *)skb->data;
+ skb_pull(skb, sizeof(*ev));
+ arg->min_tx_power = ev->hw_min_tx_power;
+ arg->max_tx_power = ev->hw_max_tx_power;
+ arg->ht_cap = ev->ht_cap_info;
+ arg->vht_cap = ev->vht_cap_info;
+ arg->vht_supp_mcs = ev->vht_supp_mcs;
+ arg->sw_ver0 = ev->sw_version;
+ arg->sw_ver1 = ev->sw_version_1;
+ arg->phy_capab = ev->phy_capability;
+ arg->num_rf_chains = ev->num_rf_chains;
+ arg->eeprom_rd = ev->hal_reg_capabilities.eeprom_rd;
+ arg->low_2ghz_chan = ev->hal_reg_capabilities.low_2ghz_chan;
+ arg->high_2ghz_chan = ev->hal_reg_capabilities.high_2ghz_chan;
+ arg->low_5ghz_chan = ev->hal_reg_capabilities.low_5ghz_chan;
+ arg->high_5ghz_chan = ev->hal_reg_capabilities.high_5ghz_chan;
+ arg->num_mem_reqs = ev->num_mem_reqs;
+ arg->service_map = ev->wmi_service_bitmap;
+ arg->service_map_len = sizeof(ev->wmi_service_bitmap);
+
+ n = min_t(size_t, __le32_to_cpu(arg->num_mem_reqs),
+ ARRAY_SIZE(arg->mem_reqs));
+ for (i = 0; i < n; i++)
+ arg->mem_reqs[i] = &ev->mem_reqs[i];
+
+ if (skb->len <
+ __le32_to_cpu(arg->num_mem_reqs) * sizeof(arg->mem_reqs[0]))
+ return -EPROTO;
+
+ return 0;
+}
+
+static int
+ath10k_wmi_10x_op_pull_svc_rdy_ev(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_svc_rdy_ev_arg *arg)
+{
+ struct wmi_10x_service_ready_event *ev;
+ size_t i, n;
+
+ if (skb->len < sizeof(*ev))
+ return -EPROTO;
+
+ ev = (void *)skb->data;
+ skb_pull(skb, sizeof(*ev));
+ arg->min_tx_power = ev->hw_min_tx_power;
+ arg->max_tx_power = ev->hw_max_tx_power;
+ arg->ht_cap = ev->ht_cap_info;
+ arg->vht_cap = ev->vht_cap_info;
+ arg->vht_supp_mcs = ev->vht_supp_mcs;
+ arg->sw_ver0 = ev->sw_version;
+ arg->phy_capab = ev->phy_capability;
+ arg->num_rf_chains = ev->num_rf_chains;
+ arg->eeprom_rd = ev->hal_reg_capabilities.eeprom_rd;
+ arg->low_2ghz_chan = ev->hal_reg_capabilities.low_2ghz_chan;
+ arg->high_2ghz_chan = ev->hal_reg_capabilities.high_2ghz_chan;
+ arg->low_5ghz_chan = ev->hal_reg_capabilities.low_5ghz_chan;
+ arg->high_5ghz_chan = ev->hal_reg_capabilities.high_5ghz_chan;
+ arg->num_mem_reqs = ev->num_mem_reqs;
+ arg->service_map = ev->wmi_service_bitmap;
+ arg->service_map_len = sizeof(ev->wmi_service_bitmap);
+
+ /* Deliberately skip ev->sys_cap_info as WMI and WMI-TLV carry
+ * different values there. We would need a translation layer to
+ * handle that, but since we don't currently need anything from
+ * sys_cap_info on the WMI interface (only on WMI-TLV), it is
+ * safest to skip it.
+ */
+
+ n = min_t(size_t, __le32_to_cpu(arg->num_mem_reqs),
+ ARRAY_SIZE(arg->mem_reqs));
+ for (i = 0; i < n; i++)
+ arg->mem_reqs[i] = &ev->mem_reqs[i];
+
+ if (skb->len <
+ __le32_to_cpu(arg->num_mem_reqs) * sizeof(arg->mem_reqs[0]))
+ return -EPROTO;
+
+ return 0;
+}
+
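+/* Service ready handling runs from a worker because it may have to
+ * allocate host memory via dma_alloc_coherent(GFP_KERNEL), which can
+ * sleep and so cannot be done directly in the WMI RX path.
+ */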
+static void ath10k_wmi_event_service_ready_work(struct work_struct *work)
+{
+ struct ath10k *ar = container_of(work, struct ath10k, svc_rdy_work);
+ struct sk_buff *skb = ar->svc_rdy_skb;
+ struct wmi_svc_rdy_ev_arg arg = {};
+ u32 num_units, req_id, unit_size, num_mem_reqs, num_unit_info, i;
+ int ret;
+ bool allocated;
+
+ if (!skb) {
+ ath10k_warn(ar, "invalid service ready event skb\n");
+ return;
+ }
+
+ ret = ath10k_wmi_pull_svc_rdy(ar, skb, &arg);
+ if (ret) {
+ ath10k_warn(ar, "failed to parse service ready: %d\n", ret);
+ return;
+ }
+
+ ath10k_wmi_map_svc(ar, arg.service_map, ar->wmi.svc_map,
+ arg.service_map_len);
+
+ ar->hw_min_tx_power = __le32_to_cpu(arg.min_tx_power);
+ ar->hw_max_tx_power = __le32_to_cpu(arg.max_tx_power);
+ ar->ht_cap_info = __le32_to_cpu(arg.ht_cap);
+ ar->vht_cap_info = __le32_to_cpu(arg.vht_cap);
+ ar->vht_supp_mcs = __le32_to_cpu(arg.vht_supp_mcs);
+ ar->fw_version_major =
+ (__le32_to_cpu(arg.sw_ver0) & 0xff000000) >> 24;
+ ar->fw_version_minor = (__le32_to_cpu(arg.sw_ver0) & 0x00ffffff);
+ ar->fw_version_release =
+ (__le32_to_cpu(arg.sw_ver1) & 0xffff0000) >> 16;
+ ar->fw_version_build = (__le32_to_cpu(arg.sw_ver1) & 0x0000ffff);
+ ar->phy_capability = __le32_to_cpu(arg.phy_capab);
+ ar->num_rf_chains = __le32_to_cpu(arg.num_rf_chains);
+ ar->hw_eeprom_rd = __le32_to_cpu(arg.eeprom_rd);
+ ar->low_2ghz_chan = __le32_to_cpu(arg.low_2ghz_chan);
+ ar->high_2ghz_chan = __le32_to_cpu(arg.high_2ghz_chan);
+ ar->low_5ghz_chan = __le32_to_cpu(arg.low_5ghz_chan);
+ ar->high_5ghz_chan = __le32_to_cpu(arg.high_5ghz_chan);
+ ar->sys_cap_info = __le32_to_cpu(arg.sys_cap_info);
+
+ ath10k_dbg_dump(ar, ATH10K_DBG_WMI, NULL, "wmi svc: ",
+ arg.service_map, arg.service_map_len);
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi sys_cap_info 0x%x\n",
+ ar->sys_cap_info);
+
+ if (ar->num_rf_chains > ar->max_spatial_stream) {
+ ath10k_warn(ar, "hardware advertises support for more spatial streams than it should (%d > %d)\n",
+ ar->num_rf_chains, ar->max_spatial_stream);
+ ar->num_rf_chains = ar->max_spatial_stream;
+ }
+
+ if (!ar->cfg_tx_chainmask) {
+ ar->cfg_tx_chainmask = (1 << ar->num_rf_chains) - 1;
+ ar->cfg_rx_chainmask = (1 << ar->num_rf_chains) - 1;
+ }
+
+ if (strlen(ar->hw->wiphy->fw_version) == 0) {
+ snprintf(ar->hw->wiphy->fw_version,
+ sizeof(ar->hw->wiphy->fw_version),
+ "%u.%u.%u.%u",
+ ar->fw_version_major,
+ ar->fw_version_minor,
+ ar->fw_version_release,
+ ar->fw_version_build);
+ }
+
+ num_mem_reqs = __le32_to_cpu(arg.num_mem_reqs);
+ if (num_mem_reqs > WMI_MAX_MEM_REQS) {
+ ath10k_warn(ar, "requested memory chunks number (%d) exceeds the limit\n",
+ num_mem_reqs);
+ return;
+ }
+
+ if (test_bit(WMI_SERVICE_PEER_CACHING, ar->wmi.svc_map)) {
+ if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
+ ar->running_fw->fw_file.fw_features))
+ ar->num_active_peers = TARGET_10_4_QCACHE_ACTIVE_PEERS_PFC +
+ ar->max_num_vdevs;
+ else
+ ar->num_active_peers = TARGET_10_4_QCACHE_ACTIVE_PEERS +
+ ar->max_num_vdevs;
+
+ ar->max_num_peers = TARGET_10_4_NUM_QCACHE_PEERS_MAX +
+ ar->max_num_vdevs;
+ ar->num_tids = ar->num_active_peers * 2;
+ ar->max_num_stations = TARGET_10_4_NUM_QCACHE_PEERS_MAX;
+ }
+
+ /* TODO: Adjust max peer count for cases like WMI_SERVICE_RATECTRL_CACHE
+ * and WMI_SERVICE_IRAM_TIDS, etc.
+ */
+
+ allocated = ath10k_wmi_is_host_mem_allocated(ar, arg.mem_reqs,
+ num_mem_reqs);
+ if (allocated)
+ goto skip_mem_alloc;
+
+ /* Either this event is received during boot time or there is a
+ * change in the memory requirements from firmware compared to the
+ * last request. Free any old memory and do a fresh allocation
+ * based on the current requirement.
+ */
+ ath10k_wmi_free_host_mem(ar);
+
+ for (i = 0; i < num_mem_reqs; ++i) {
+ req_id = __le32_to_cpu(arg.mem_reqs[i]->req_id);
+ num_units = __le32_to_cpu(arg.mem_reqs[i]->num_units);
+ unit_size = __le32_to_cpu(arg.mem_reqs[i]->unit_size);
+ num_unit_info = __le32_to_cpu(arg.mem_reqs[i]->num_unit_info);
+
+ if (num_unit_info & NUM_UNITS_IS_NUM_ACTIVE_PEERS) {
+ if (ar->num_active_peers)
+ num_units = ar->num_active_peers + 1;
+ else
+ num_units = ar->max_num_peers + 1;
+ } else if (num_unit_info & NUM_UNITS_IS_NUM_PEERS) {
+ /* The number of units to allocate is the number of
+ * peers plus one extra for the self peer on the
+ * target. Host and target need to agree on this,
+ * otherwise the two can get out of sync.
+ */
+ num_units = ar->max_num_peers + 1;
+ } else if (num_unit_info & NUM_UNITS_IS_NUM_VDEVS) {
+ num_units = ar->max_num_vdevs + 1;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi mem_req_id %d num_units %d num_unit_info %d unit size %d actual units %d\n",
+ req_id,
+ __le32_to_cpu(arg.mem_reqs[i]->num_units),
+ num_unit_info,
+ unit_size,
+ num_units);
+
+ ret = ath10k_wmi_alloc_host_mem(ar, req_id, num_units,
+ unit_size);
+ if (ret)
+ return;
+ }
+
+skip_mem_alloc:
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi event service ready min_tx_power 0x%08x max_tx_power 0x%08x ht_cap 0x%08x vht_cap 0x%08x vht_supp_mcs 0x%08x sw_ver0 0x%08x sw_ver1 0x%08x fw_build 0x%08x phy_capab 0x%08x num_rf_chains 0x%08x eeprom_rd 0x%08x low_2ghz_chan %d high_2ghz_chan %d low_5ghz_chan %d high_5ghz_chan %d num_mem_reqs 0x%08x\n",
+ __le32_to_cpu(arg.min_tx_power),
+ __le32_to_cpu(arg.max_tx_power),
+ __le32_to_cpu(arg.ht_cap),
+ __le32_to_cpu(arg.vht_cap),
+ __le32_to_cpu(arg.vht_supp_mcs),
+ __le32_to_cpu(arg.sw_ver0),
+ __le32_to_cpu(arg.sw_ver1),
+ __le32_to_cpu(arg.fw_build),
+ __le32_to_cpu(arg.phy_capab),
+ __le32_to_cpu(arg.num_rf_chains),
+ __le32_to_cpu(arg.eeprom_rd),
+ __le32_to_cpu(arg.low_2ghz_chan),
+ __le32_to_cpu(arg.high_2ghz_chan),
+ __le32_to_cpu(arg.low_5ghz_chan),
+ __le32_to_cpu(arg.high_5ghz_chan),
+ __le32_to_cpu(arg.num_mem_reqs));
+
+ dev_kfree_skb(skb);
+ ar->svc_rdy_skb = NULL;
+ complete(&ar->wmi.service_ready);
+}
+
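+/* Entry point from the RX dispatchers: stash the skb and defer the
+ * heavy lifting to ath10k_wmi_event_service_ready_work(). The worker
+ * owns the skb from here on and frees it when done.
+ */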
+void ath10k_wmi_event_service_ready(struct ath10k *ar, struct sk_buff *skb)
+{
+ ar->svc_rdy_skb = skb;
+ queue_work(ar->workqueue_aux, &ar->svc_rdy_work);
+}
+
+static int ath10k_wmi_op_pull_rdy_ev(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_rdy_ev_arg *arg)
+{
+ struct wmi_ready_event *ev = (void *)skb->data;
+
+ if (skb->len < sizeof(*ev))
+ return -EPROTO;
+
+ skb_pull(skb, sizeof(*ev));
+ arg->sw_version = ev->sw_version;
+ arg->abi_version = ev->abi_version;
+ arg->status = ev->status;
+ arg->mac_addr = ev->mac_addr.addr;
+
+ return 0;
+}
+
+static int ath10k_wmi_op_pull_roam_ev(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_roam_ev_arg *arg)
+{
+ struct wmi_roam_ev *ev = (void *)skb->data;
+
+ if (skb->len < sizeof(*ev))
+ return -EPROTO;
+
+ skb_pull(skb, sizeof(*ev));
+ arg->vdev_id = ev->vdev_id;
+ arg->reason = ev->reason;
+
+ return 0;
+}
+
+static int ath10k_wmi_op_pull_echo_ev(struct ath10k *ar,
+ struct sk_buff *skb,
+ struct wmi_echo_ev_arg *arg)
+{
+ struct wmi_echo_event *ev = (void *)skb->data;
+
+ /* Match the other pull helpers and reject truncated events
+ * before dereferencing the payload.
+ */
+ if (skb->len < sizeof(*ev))
+ return -EPROTO;
+
+ arg->value = ev->value;
+
+ return 0;
+}
+
+int ath10k_wmi_event_ready(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct wmi_rdy_ev_arg arg = {};
+ int ret;
+
+ ret = ath10k_wmi_pull_rdy(ar, skb, &arg);
+ if (ret) {
+ ath10k_warn(ar, "failed to parse ready event: %d\n", ret);
+ return ret;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi event ready sw_version 0x%08x abi_version %u mac_addr %pM status %d\n",
+ __le32_to_cpu(arg.sw_version),
+ __le32_to_cpu(arg.abi_version),
+ arg.mac_addr,
+ __le32_to_cpu(arg.status));
+
+ if (is_zero_ether_addr(ar->mac_addr))
+ ether_addr_copy(ar->mac_addr, arg.mac_addr);
+ complete(&ar->wmi.unified_ready);
+ return 0;
+}
+
+void ath10k_wmi_event_service_available(struct ath10k *ar, struct sk_buff *skb)
+{
+ int ret;
+ struct wmi_svc_avail_ev_arg arg = {};
+
+ ret = ath10k_wmi_pull_svc_avail(ar, skb, &arg);
+ if (ret) {
+ ath10k_warn(ar, "failed to parse service available event: %d\n",
+ ret);
+ }
+
+ /*
+ * If the pull op above failed, "arg" still holds its zero
+ * initialization, so service_map_ext_valid stays false and the
+ * extended service map below is simply not applied.
+ */
+ if (arg.service_map_ext_valid)
+ ath10k_wmi_map_svc_ext(ar, arg.service_map_ext, ar->wmi.svc_map,
+ __le32_to_cpu(arg.service_map_ext_len));
+}
+
+static int ath10k_wmi_event_temperature(struct ath10k *ar, struct sk_buff *skb)
+{
+ const struct wmi_pdev_temperature_event *ev;
+
+ ev = (struct wmi_pdev_temperature_event *)skb->data;
+ if (WARN_ON(skb->len < sizeof(*ev)))
+ return -EPROTO;
+
+ ath10k_thermal_event_temperature(ar, __le32_to_cpu(ev->temperature));
+ return 0;
+}
+
+static int ath10k_wmi_event_pdev_bss_chan_info(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ struct wmi_pdev_bss_chan_info_event *ev;
+ struct survey_info *survey;
+ u64 busy, total, tx, rx, rx_bss;
+ u32 freq, noise_floor;
+ u32 cc_freq_hz = ar->hw_params.channel_counters_freq_hz;
+ int idx;
+
+ ev = (struct wmi_pdev_bss_chan_info_event *)skb->data;
+ if (WARN_ON(skb->len < sizeof(*ev)))
+ return -EPROTO;
+
+ freq = __le32_to_cpu(ev->freq);
+ noise_floor = __le32_to_cpu(ev->noise_floor);
+ busy = __le64_to_cpu(ev->cycle_busy);
+ total = __le64_to_cpu(ev->cycle_total);
+ tx = __le64_to_cpu(ev->cycle_tx);
+ rx = __le64_to_cpu(ev->cycle_rx);
+ rx_bss = __le64_to_cpu(ev->cycle_rx_bss);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi event pdev bss chan info:\n freq: %d noise: %d cycle: busy %llu total %llu tx %llu rx %llu rx_bss %llu\n",
+ freq, noise_floor, busy, total, tx, rx, rx_bss);
+
+ spin_lock_bh(&ar->data_lock);
+ idx = freq_to_idx(ar, freq);
+ if (idx >= ARRAY_SIZE(ar->survey)) {
+ ath10k_warn(ar, "bss chan info: invalid frequency %d (idx %d out of bounds)\n",
+ freq, idx);
+ goto exit;
+ }
+
+ survey = &ar->survey[idx];
+
+ survey->noise = noise_floor;
+ survey->time = div_u64(total, cc_freq_hz);
+ survey->time_busy = div_u64(busy, cc_freq_hz);
+ survey->time_rx = div_u64(rx_bss, cc_freq_hz);
+ survey->time_tx = div_u64(tx, cc_freq_hz);
+ survey->filled |= (SURVEY_INFO_NOISE_DBM |
+ SURVEY_INFO_TIME |
+ SURVEY_INFO_TIME_BUSY |
+ SURVEY_INFO_TIME_RX |
+ SURVEY_INFO_TIME_TX);
+exit:
+ spin_unlock_bh(&ar->data_lock);
+ complete(&ar->bss_survey_done);
+ return 0;
+}
+
+static inline void ath10k_wmi_queue_set_coverage_class_work(struct ath10k *ar)
+{
+ if (ar->hw_params.hw_ops->set_coverage_class) {
+ spin_lock_bh(&ar->data_lock);
+
+ /* This call only ensures that the modified coverage class
+ * persists in case the firmware sets the registers back to
+ * their default value. So calling it is only necessary if the
+ * coverage class has a non-zero value.
+ */
+ if (ar->fw_coverage.coverage_class)
+ queue_work(ar->workqueue, &ar->set_coverage_class_work);
+
+ spin_unlock_bh(&ar->data_lock);
+ }
+}
+
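+/* Main-WMI RX dispatcher: strip the command header, trace the event
+ * and fan out by event id. Note the asymmetric skb ownership: the
+ * mgmt rx and service ready handlers take over the skb (hence the
+ * early returns), all other paths fall through to the dev_kfree_skb()
+ * at the end.
+ */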
+static void ath10k_wmi_op_rx(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct wmi_cmd_hdr *cmd_hdr;
+ enum wmi_event_id id;
+
+ cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
+ id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
+
+ if (!skb_pull(skb, sizeof(struct wmi_cmd_hdr)))
+ goto out;
+
+ trace_ath10k_wmi_event(ar, id, skb->data, skb->len);
+
+ switch (id) {
+ case WMI_MGMT_RX_EVENTID:
+ ath10k_wmi_event_mgmt_rx(ar, skb);
+ /* mgmt_rx() owns the skb now! */
+ return;
+ case WMI_SCAN_EVENTID:
+ ath10k_wmi_event_scan(ar, skb);
+ ath10k_wmi_queue_set_coverage_class_work(ar);
+ break;
+ case WMI_CHAN_INFO_EVENTID:
+ ath10k_wmi_event_chan_info(ar, skb);
+ break;
+ case WMI_ECHO_EVENTID:
+ ath10k_wmi_event_echo(ar, skb);
+ break;
+ case WMI_DEBUG_MESG_EVENTID:
+ ath10k_wmi_event_debug_mesg(ar, skb);
+ ath10k_wmi_queue_set_coverage_class_work(ar);
+ break;
+ case WMI_UPDATE_STATS_EVENTID:
+ ath10k_wmi_event_update_stats(ar, skb);
+ break;
+ case WMI_VDEV_START_RESP_EVENTID:
+ ath10k_wmi_event_vdev_start_resp(ar, skb);
+ ath10k_wmi_queue_set_coverage_class_work(ar);
+ break;
+ case WMI_VDEV_STOPPED_EVENTID:
+ ath10k_wmi_event_vdev_stopped(ar, skb);
+ ath10k_wmi_queue_set_coverage_class_work(ar);
+ break;
+ case WMI_PEER_STA_KICKOUT_EVENTID:
+ ath10k_wmi_event_peer_sta_kickout(ar, skb);
+ break;
+ case WMI_HOST_SWBA_EVENTID:
+ ath10k_wmi_event_host_swba(ar, skb);
+ break;
+ case WMI_TBTTOFFSET_UPDATE_EVENTID:
+ ath10k_wmi_event_tbttoffset_update(ar, skb);
+ break;
+ case WMI_PHYERR_EVENTID:
+ ath10k_wmi_event_phyerr(ar, skb);
+ break;
+ case WMI_ROAM_EVENTID:
+ ath10k_wmi_event_roam(ar, skb);
+ ath10k_wmi_queue_set_coverage_class_work(ar);
+ break;
+ case WMI_PROFILE_MATCH:
+ ath10k_wmi_event_profile_match(ar, skb);
+ break;
+ case WMI_DEBUG_PRINT_EVENTID:
+ ath10k_wmi_event_debug_print(ar, skb);
+ ath10k_wmi_queue_set_coverage_class_work(ar);
+ break;
+ case WMI_PDEV_QVIT_EVENTID:
+ ath10k_wmi_event_pdev_qvit(ar, skb);
+ break;
+ case WMI_WLAN_PROFILE_DATA_EVENTID:
+ ath10k_wmi_event_wlan_profile_data(ar, skb);
+ break;
+ case WMI_RTT_MEASUREMENT_REPORT_EVENTID:
+ ath10k_wmi_event_rtt_measurement_report(ar, skb);
+ break;
+ case WMI_TSF_MEASUREMENT_REPORT_EVENTID:
+ ath10k_wmi_event_tsf_measurement_report(ar, skb);
+ break;
+ case WMI_RTT_ERROR_REPORT_EVENTID:
+ ath10k_wmi_event_rtt_error_report(ar, skb);
+ break;
+ case WMI_WOW_WAKEUP_HOST_EVENTID:
+ ath10k_wmi_event_wow_wakeup_host(ar, skb);
+ break;
+ case WMI_DCS_INTERFERENCE_EVENTID:
+ ath10k_wmi_event_dcs_interference(ar, skb);
+ break;
+ case WMI_PDEV_TPC_CONFIG_EVENTID:
+ ath10k_wmi_event_pdev_tpc_config(ar, skb);
+ break;
+ case WMI_PDEV_FTM_INTG_EVENTID:
+ ath10k_wmi_event_pdev_ftm_intg(ar, skb);
+ break;
+ case WMI_GTK_OFFLOAD_STATUS_EVENTID:
+ ath10k_wmi_event_gtk_offload_status(ar, skb);
+ break;
+ case WMI_GTK_REKEY_FAIL_EVENTID:
+ ath10k_wmi_event_gtk_rekey_fail(ar, skb);
+ break;
+ case WMI_TX_DELBA_COMPLETE_EVENTID:
+ ath10k_wmi_event_delba_complete(ar, skb);
+ break;
+ case WMI_TX_ADDBA_COMPLETE_EVENTID:
+ ath10k_wmi_event_addba_complete(ar, skb);
+ break;
+ case WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID:
+ ath10k_wmi_event_vdev_install_key_complete(ar, skb);
+ break;
+ case WMI_SERVICE_READY_EVENTID:
+ ath10k_wmi_event_service_ready(ar, skb);
+ return;
+ case WMI_READY_EVENTID:
+ ath10k_wmi_event_ready(ar, skb);
+ ath10k_wmi_queue_set_coverage_class_work(ar);
+ break;
+ case WMI_SERVICE_AVAILABLE_EVENTID:
+ ath10k_wmi_event_service_available(ar, skb);
+ break;
+ default:
+ ath10k_warn(ar, "Unknown eventid: %d\n", id);
+ break;
+ }
+
+out:
+ dev_kfree_skb(skb);
+}
+
+static void ath10k_wmi_10_1_op_rx(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct wmi_cmd_hdr *cmd_hdr;
+ enum wmi_10x_event_id id;
+ bool consumed;
+
+ cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
+ id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
+
+ if (!skb_pull(skb, sizeof(struct wmi_cmd_hdr)))
+ goto out;
+
+ trace_ath10k_wmi_event(ar, id, skb->data, skb->len);
+
+ consumed = ath10k_tm_event_wmi(ar, id, skb);
+
+ /* The ready event must be handled normally even in UTF mode so
+ * that we know the UTF firmware has booted; all other WMI events
+ * are simply passed through to testmode.
+ */
+ if (consumed && id != WMI_10X_READY_EVENTID) {
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi testmode consumed 0x%x\n", id);
+ goto out;
+ }
+
+ switch (id) {
+ case WMI_10X_MGMT_RX_EVENTID:
+ ath10k_wmi_event_mgmt_rx(ar, skb);
+ /* mgmt_rx() owns the skb now! */
+ return;
+ case WMI_10X_SCAN_EVENTID:
+ ath10k_wmi_event_scan(ar, skb);
+ ath10k_wmi_queue_set_coverage_class_work(ar);
+ break;
+ case WMI_10X_CHAN_INFO_EVENTID:
+ ath10k_wmi_event_chan_info(ar, skb);
+ break;
+ case WMI_10X_ECHO_EVENTID:
+ ath10k_wmi_event_echo(ar, skb);
+ break;
+ case WMI_10X_DEBUG_MESG_EVENTID:
+ ath10k_wmi_event_debug_mesg(ar, skb);
+ ath10k_wmi_queue_set_coverage_class_work(ar);
+ break;
+ case WMI_10X_UPDATE_STATS_EVENTID:
+ ath10k_wmi_event_update_stats(ar, skb);
+ break;
+ case WMI_10X_VDEV_START_RESP_EVENTID:
+ ath10k_wmi_event_vdev_start_resp(ar, skb);
+ ath10k_wmi_queue_set_coverage_class_work(ar);
+ break;
+ case WMI_10X_VDEV_STOPPED_EVENTID:
+ ath10k_wmi_event_vdev_stopped(ar, skb);
+ ath10k_wmi_queue_set_coverage_class_work(ar);
+ break;
+ case WMI_10X_PEER_STA_KICKOUT_EVENTID:
+ ath10k_wmi_event_peer_sta_kickout(ar, skb);
+ break;
+ case WMI_10X_HOST_SWBA_EVENTID:
+ ath10k_wmi_event_host_swba(ar, skb);
+ break;
+ case WMI_10X_TBTTOFFSET_UPDATE_EVENTID:
+ ath10k_wmi_event_tbttoffset_update(ar, skb);
+ break;
+ case WMI_10X_PHYERR_EVENTID:
+ ath10k_wmi_event_phyerr(ar, skb);
+ break;
+ case WMI_10X_ROAM_EVENTID:
+ ath10k_wmi_event_roam(ar, skb);
+ ath10k_wmi_queue_set_coverage_class_work(ar);
+ break;
+ case WMI_10X_PROFILE_MATCH:
+ ath10k_wmi_event_profile_match(ar, skb);
+ break;
+ case WMI_10X_DEBUG_PRINT_EVENTID:
+ ath10k_wmi_event_debug_print(ar, skb);
+ ath10k_wmi_queue_set_coverage_class_work(ar);
+ break;
+ case WMI_10X_PDEV_QVIT_EVENTID:
+ ath10k_wmi_event_pdev_qvit(ar, skb);
+ break;
+ case WMI_10X_WLAN_PROFILE_DATA_EVENTID:
+ ath10k_wmi_event_wlan_profile_data(ar, skb);
+ break;
+ case WMI_10X_RTT_MEASUREMENT_REPORT_EVENTID:
+ ath10k_wmi_event_rtt_measurement_report(ar, skb);
+ break;
+ case WMI_10X_TSF_MEASUREMENT_REPORT_EVENTID:
+ ath10k_wmi_event_tsf_measurement_report(ar, skb);
+ break;
+ case WMI_10X_RTT_ERROR_REPORT_EVENTID:
+ ath10k_wmi_event_rtt_error_report(ar, skb);
+ break;
+ case WMI_10X_WOW_WAKEUP_HOST_EVENTID:
+ ath10k_wmi_event_wow_wakeup_host(ar, skb);
+ break;
+ case WMI_10X_DCS_INTERFERENCE_EVENTID:
+ ath10k_wmi_event_dcs_interference(ar, skb);
+ break;
+ case WMI_10X_PDEV_TPC_CONFIG_EVENTID:
+ ath10k_wmi_event_pdev_tpc_config(ar, skb);
+ break;
+ case WMI_10X_INST_RSSI_STATS_EVENTID:
+ ath10k_wmi_event_inst_rssi_stats(ar, skb);
+ break;
+ case WMI_10X_VDEV_STANDBY_REQ_EVENTID:
+ ath10k_wmi_event_vdev_standby_req(ar, skb);
+ break;
+ case WMI_10X_VDEV_RESUME_REQ_EVENTID:
+ ath10k_wmi_event_vdev_resume_req(ar, skb);
+ break;
+ case WMI_10X_SERVICE_READY_EVENTID:
+ ath10k_wmi_event_service_ready(ar, skb);
+ return;
+ case WMI_10X_READY_EVENTID:
+ ath10k_wmi_event_ready(ar, skb);
+ ath10k_wmi_queue_set_coverage_class_work(ar);
+ break;
+ case WMI_10X_PDEV_UTF_EVENTID:
+ /* ignore utf events */
+ break;
+ default:
+ ath10k_warn(ar, "Unknown eventid: %d\n", id);
+ break;
+ }
+
+out:
+ dev_kfree_skb(skb);
+}
+
+static void ath10k_wmi_10_2_op_rx(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct wmi_cmd_hdr *cmd_hdr;
+ enum wmi_10_2_event_id id;
+ bool consumed;
+
+ cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
+ id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
+
+ if (!skb_pull(skb, sizeof(struct wmi_cmd_hdr)))
+ goto out;
+
+ trace_ath10k_wmi_event(ar, id, skb->data, skb->len);
+
+ consumed = ath10k_tm_event_wmi(ar, id, skb);
+
+ /* The ready event must be handled normally even in UTF mode so
+ * that we know the UTF firmware has booted; all other WMI events
+ * are simply passed through to testmode.
+ */
+ if (consumed && id != WMI_10_2_READY_EVENTID) {
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi testmode consumed 0x%x\n", id);
+ goto out;
+ }
+
+ switch (id) {
+ case WMI_10_2_MGMT_RX_EVENTID:
+ ath10k_wmi_event_mgmt_rx(ar, skb);
+ /* mgmt_rx() owns the skb now! */
+ return;
+ case WMI_10_2_SCAN_EVENTID:
+ ath10k_wmi_event_scan(ar, skb);
+ ath10k_wmi_queue_set_coverage_class_work(ar);
+ break;
+ case WMI_10_2_CHAN_INFO_EVENTID:
+ ath10k_wmi_event_chan_info(ar, skb);
+ break;
+ case WMI_10_2_ECHO_EVENTID:
+ ath10k_wmi_event_echo(ar, skb);
+ break;
+ case WMI_10_2_DEBUG_MESG_EVENTID:
+ ath10k_wmi_event_debug_mesg(ar, skb);
+ ath10k_wmi_queue_set_coverage_class_work(ar);
+ break;
+ case WMI_10_2_UPDATE_STATS_EVENTID:
+ ath10k_wmi_event_update_stats(ar, skb);
+ break;
+ case WMI_10_2_VDEV_START_RESP_EVENTID:
+ ath10k_wmi_event_vdev_start_resp(ar, skb);
+ ath10k_wmi_queue_set_coverage_class_work(ar);
+ break;
+ case WMI_10_2_VDEV_STOPPED_EVENTID:
+ ath10k_wmi_event_vdev_stopped(ar, skb);
+ ath10k_wmi_queue_set_coverage_class_work(ar);
+ break;
+ case WMI_10_2_PEER_STA_KICKOUT_EVENTID:
+ ath10k_wmi_event_peer_sta_kickout(ar, skb);
+ break;
+ case WMI_10_2_HOST_SWBA_EVENTID:
+ ath10k_wmi_event_host_swba(ar, skb);
+ break;
+ case WMI_10_2_TBTTOFFSET_UPDATE_EVENTID:
+ ath10k_wmi_event_tbttoffset_update(ar, skb);
+ break;
+ case WMI_10_2_PHYERR_EVENTID:
+ ath10k_wmi_event_phyerr(ar, skb);
+ break;
+ case WMI_10_2_ROAM_EVENTID:
+ ath10k_wmi_event_roam(ar, skb);
+ ath10k_wmi_queue_set_coverage_class_work(ar);
+ break;
+ case WMI_10_2_PROFILE_MATCH:
+ ath10k_wmi_event_profile_match(ar, skb);
+ break;
+ case WMI_10_2_DEBUG_PRINT_EVENTID:
+ ath10k_wmi_event_debug_print(ar, skb);
+ ath10k_wmi_queue_set_coverage_class_work(ar);
+ break;
+ case WMI_10_2_PDEV_QVIT_EVENTID:
+ ath10k_wmi_event_pdev_qvit(ar, skb);
+ break;
+ case WMI_10_2_WLAN_PROFILE_DATA_EVENTID:
+ ath10k_wmi_event_wlan_profile_data(ar, skb);
+ break;
+ case WMI_10_2_RTT_MEASUREMENT_REPORT_EVENTID:
+ ath10k_wmi_event_rtt_measurement_report(ar, skb);
+ break;
+ case WMI_10_2_TSF_MEASUREMENT_REPORT_EVENTID:
+ ath10k_wmi_event_tsf_measurement_report(ar, skb);
+ break;
+ case WMI_10_2_RTT_ERROR_REPORT_EVENTID:
+ ath10k_wmi_event_rtt_error_report(ar, skb);
+ break;
+ case WMI_10_2_WOW_WAKEUP_HOST_EVENTID:
+ ath10k_wmi_event_wow_wakeup_host(ar, skb);
+ break;
+ case WMI_10_2_DCS_INTERFERENCE_EVENTID:
+ ath10k_wmi_event_dcs_interference(ar, skb);
+ break;
+ case WMI_10_2_PDEV_TPC_CONFIG_EVENTID:
+ ath10k_wmi_event_pdev_tpc_config(ar, skb);
+ break;
+ case WMI_10_2_INST_RSSI_STATS_EVENTID:
+ ath10k_wmi_event_inst_rssi_stats(ar, skb);
+ break;
+ case WMI_10_2_VDEV_STANDBY_REQ_EVENTID:
+ ath10k_wmi_event_vdev_standby_req(ar, skb);
+ ath10k_wmi_queue_set_coverage_class_work(ar);
+ break;
+ case WMI_10_2_VDEV_RESUME_REQ_EVENTID:
+ ath10k_wmi_event_vdev_resume_req(ar, skb);
+ ath10k_wmi_queue_set_coverage_class_work(ar);
+ break;
+ case WMI_10_2_SERVICE_READY_EVENTID:
+ ath10k_wmi_event_service_ready(ar, skb);
+ return;
+ case WMI_10_2_READY_EVENTID:
+ ath10k_wmi_event_ready(ar, skb);
+ ath10k_wmi_queue_set_coverage_class_work(ar);
+ break;
+ case WMI_10_2_PDEV_TEMPERATURE_EVENTID:
+ ath10k_wmi_event_temperature(ar, skb);
+ break;
+ case WMI_10_2_PDEV_BSS_CHAN_INFO_EVENTID:
+ ath10k_wmi_event_pdev_bss_chan_info(ar, skb);
+ break;
+ case WMI_10_2_RTT_KEEPALIVE_EVENTID:
+ case WMI_10_2_GPIO_INPUT_EVENTID:
+ case WMI_10_2_PEER_RATECODE_LIST_EVENTID:
+ case WMI_10_2_GENERIC_BUFFER_EVENTID:
+ case WMI_10_2_MCAST_BUF_RELEASE_EVENTID:
+ case WMI_10_2_MCAST_LIST_AGEOUT_EVENTID:
+ case WMI_10_2_WDS_PEER_EVENTID:
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "received event id %d not implemented\n", id);
+ break;
+ case WMI_10_2_PEER_STA_PS_STATECHG_EVENTID:
+ ath10k_wmi_event_peer_sta_ps_state_chg(ar, skb);
+ break;
+ default:
+ ath10k_warn(ar, "Unknown eventid: %d\n", id);
+ break;
+ }
+
+out:
+ dev_kfree_skb(skb);
+}
+
+static void ath10k_wmi_10_4_op_rx(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct wmi_cmd_hdr *cmd_hdr;
+ enum wmi_10_4_event_id id;
+ bool consumed;
+
+ cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
+ id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
+
+ if (!skb_pull(skb, sizeof(struct wmi_cmd_hdr)))
+ goto out;
+
+ trace_ath10k_wmi_event(ar, id, skb->data, skb->len);
+
+ consumed = ath10k_tm_event_wmi(ar, id, skb);
+
+ /* The ready event must be handled normally even in UTF mode so
+ * that we know the UTF firmware has booted; all other WMI events
+ * are simply passed through to testmode.
+ */
+ if (consumed && id != WMI_10_4_READY_EVENTID) {
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi testmode consumed 0x%x\n", id);
+ goto out;
+ }
+
+ switch (id) {
+ case WMI_10_4_MGMT_RX_EVENTID:
+ ath10k_wmi_event_mgmt_rx(ar, skb);
+ /* mgmt_rx() owns the skb now! */
+ return;
+ case WMI_10_4_ECHO_EVENTID:
+ ath10k_wmi_event_echo(ar, skb);
+ break;
+ case WMI_10_4_DEBUG_MESG_EVENTID:
+ ath10k_wmi_event_debug_mesg(ar, skb);
+ ath10k_wmi_queue_set_coverage_class_work(ar);
+ break;
+ case WMI_10_4_SERVICE_READY_EVENTID:
+ ath10k_wmi_event_service_ready(ar, skb);
+ return;
+ case WMI_10_4_SCAN_EVENTID:
+ ath10k_wmi_event_scan(ar, skb);
+ ath10k_wmi_queue_set_coverage_class_work(ar);
+ break;
+ case WMI_10_4_CHAN_INFO_EVENTID:
+ ath10k_wmi_event_chan_info(ar, skb);
+ break;
+ case WMI_10_4_PHYERR_EVENTID:
+ ath10k_wmi_event_phyerr(ar, skb);
+ break;
+ case WMI_10_4_READY_EVENTID:
+ ath10k_wmi_event_ready(ar, skb);
+ ath10k_wmi_queue_set_coverage_class_work(ar);
+ break;
+ case WMI_10_4_PEER_STA_KICKOUT_EVENTID:
+ ath10k_wmi_event_peer_sta_kickout(ar, skb);
+ break;
+ case WMI_10_4_ROAM_EVENTID:
+ ath10k_wmi_event_roam(ar, skb);
+ ath10k_wmi_queue_set_coverage_class_work(ar);
+ break;
+ case WMI_10_4_HOST_SWBA_EVENTID:
+ ath10k_wmi_event_host_swba(ar, skb);
+ break;
+ case WMI_10_4_TBTTOFFSET_UPDATE_EVENTID:
+ ath10k_wmi_event_tbttoffset_update(ar, skb);
+ break;
+ case WMI_10_4_DEBUG_PRINT_EVENTID:
+ ath10k_wmi_event_debug_print(ar, skb);
+ ath10k_wmi_queue_set_coverage_class_work(ar);
+ break;
+ case WMI_10_4_VDEV_START_RESP_EVENTID:
+ ath10k_wmi_event_vdev_start_resp(ar, skb);
+ ath10k_wmi_queue_set_coverage_class_work(ar);
+ break;
+ case WMI_10_4_VDEV_STOPPED_EVENTID:
+ ath10k_wmi_event_vdev_stopped(ar, skb);
+ ath10k_wmi_queue_set_coverage_class_work(ar);
+ break;
+ case WMI_10_4_WOW_WAKEUP_HOST_EVENTID:
+ case WMI_10_4_PEER_RATECODE_LIST_EVENTID:
+ case WMI_10_4_WDS_PEER_EVENTID:
+ case WMI_10_4_DEBUG_FATAL_CONDITION_EVENTID:
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "received event id %d not implemented\n", id);
+ break;
+ case WMI_10_4_UPDATE_STATS_EVENTID:
+ ath10k_wmi_event_update_stats(ar, skb);
+ break;
+ case WMI_10_4_PDEV_TEMPERATURE_EVENTID:
+ ath10k_wmi_event_temperature(ar, skb);
+ break;
+ case WMI_10_4_PDEV_BSS_CHAN_INFO_EVENTID:
+ ath10k_wmi_event_pdev_bss_chan_info(ar, skb);
+ break;
+ case WMI_10_4_PDEV_TPC_CONFIG_EVENTID:
+ ath10k_wmi_event_pdev_tpc_config(ar, skb);
+ break;
+ case WMI_10_4_TDLS_PEER_EVENTID:
+ ath10k_wmi_handle_tdls_peer_event(ar, skb);
+ break;
+ case WMI_10_4_PDEV_TPC_TABLE_EVENTID:
+ ath10k_wmi_event_tpc_final_table(ar, skb);
+ break;
+ case WMI_10_4_DFS_STATUS_CHECK_EVENTID:
+ ath10k_wmi_event_dfs_status_check(ar, skb);
+ break;
+ case WMI_10_4_PEER_STA_PS_STATECHG_EVENTID:
+ ath10k_wmi_event_peer_sta_ps_state_chg(ar, skb);
+ break;
+ default:
+ ath10k_warn(ar, "Unknown eventid: %d\n", id);
+ break;
+ }
+
+out:
+ dev_kfree_skb(skb);
+}
+
+static void ath10k_wmi_process_rx(struct ath10k *ar, struct sk_buff *skb)
+{
+ int ret;
+
+ ret = ath10k_wmi_rx(ar, skb);
+ if (ret)
+ ath10k_warn(ar, "failed to process wmi rx: %d\n", ret);
+}
+
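+/* Connect the WMI control service endpoint over HTC. On success the
+ * endpoint id is cached in ar->wmi.eid for subsequent WMI command
+ * transmission.
+ */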
+int ath10k_wmi_connect(struct ath10k *ar)
+{
+ int status;
+ struct ath10k_htc_svc_conn_req conn_req;
+ struct ath10k_htc_svc_conn_resp conn_resp;
+
+ memset(&ar->wmi.svc_map, 0, sizeof(ar->wmi.svc_map));
+
+ memset(&conn_req, 0, sizeof(conn_req));
+ memset(&conn_resp, 0, sizeof(conn_resp));
+
+ /* these fields are the same for all service endpoints */
+ conn_req.ep_ops.ep_tx_complete = ath10k_wmi_htc_tx_complete;
+ conn_req.ep_ops.ep_rx_complete = ath10k_wmi_process_rx;
+ conn_req.ep_ops.ep_tx_credits = ath10k_wmi_op_ep_tx_credits;
+
+ /* connect to control service */
+ conn_req.service_id = ATH10K_HTC_SVC_ID_WMI_CONTROL;
+
+ status = ath10k_htc_connect_service(&ar->htc, &conn_req, &conn_resp);
+ if (status) {
+ ath10k_warn(ar, "failed to connect to WMI CONTROL service status: %d\n",
+ status);
+ return status;
+ }
+
+ ar->wmi.eid = conn_resp.eid;
+ return 0;
+}
+
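+/* The gen_* helpers below only build command skbs; they do not send
+ * them. A rough usage sketch (assuming ath10k_wmi_cmd_send() and the
+ * per-version command id map, which live elsewhere in the driver):
+ *
+ *    skb = ath10k_wmi_op_gen_pdev_suspend(ar, suspend_opt);
+ *    if (IS_ERR(skb))
+ *            return PTR_ERR(skb);
+ *    return ath10k_wmi_cmd_send(ar, skb,
+ *                               ar->wmi.cmd->pdev_suspend_cmdid);
+ */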
+static struct sk_buff *
+ath10k_wmi_op_gen_pdev_set_base_macaddr(struct ath10k *ar,
+ const u8 macaddr[ETH_ALEN])
+{
+ struct wmi_pdev_set_base_macaddr_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_pdev_set_base_macaddr_cmd *)skb->data;
+ ether_addr_copy(cmd->mac_addr.addr, macaddr);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi pdev basemac %pM\n", macaddr);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_pdev_set_rd(struct ath10k *ar, u16 rd, u16 rd2g, u16 rd5g,
+ u16 ctl2g, u16 ctl5g,
+ enum wmi_dfs_region dfs_reg)
+{
+ struct wmi_pdev_set_regdomain_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_pdev_set_regdomain_cmd *)skb->data;
+ cmd->reg_domain = __cpu_to_le32(rd);
+ cmd->reg_domain_2G = __cpu_to_le32(rd2g);
+ cmd->reg_domain_5G = __cpu_to_le32(rd5g);
+ cmd->conformance_test_limit_2G = __cpu_to_le32(ctl2g);
+ cmd->conformance_test_limit_5G = __cpu_to_le32(ctl5g);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi pdev regdomain rd %x rd2g %x rd5g %x ctl2g %x ctl5g %x\n",
+ rd, rd2g, rd5g, ctl2g, ctl5g);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_10x_op_gen_pdev_set_rd(struct ath10k *ar, u16 rd, u16 rd2g, u16
+ rd5g, u16 ctl2g, u16 ctl5g,
+ enum wmi_dfs_region dfs_reg)
+{
+ struct wmi_pdev_set_regdomain_cmd_10x *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_pdev_set_regdomain_cmd_10x *)skb->data;
+ cmd->reg_domain = __cpu_to_le32(rd);
+ cmd->reg_domain_2G = __cpu_to_le32(rd2g);
+ cmd->reg_domain_5G = __cpu_to_le32(rd5g);
+ cmd->conformance_test_limit_2G = __cpu_to_le32(ctl2g);
+ cmd->conformance_test_limit_5G = __cpu_to_le32(ctl5g);
+ cmd->dfs_domain = __cpu_to_le32(dfs_reg);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi pdev regdomain rd %x rd2g %x rd5g %x ctl2g %x ctl5g %x dfs_region %x\n",
+ rd, rd2g, rd5g, ctl2g, ctl5g, dfs_reg);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_pdev_suspend(struct ath10k *ar, u32 suspend_opt)
+{
+ struct wmi_pdev_suspend_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_pdev_suspend_cmd *)skb->data;
+ cmd->suspend_opt = __cpu_to_le32(suspend_opt);
+
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_pdev_resume(struct ath10k *ar)
+{
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, 0);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_pdev_set_param(struct ath10k *ar, u32 id, u32 value)
+{
+ struct wmi_pdev_set_param_cmd *cmd;
+ struct sk_buff *skb;
+
+ if (id == WMI_PDEV_PARAM_UNSUPPORTED) {
+ ath10k_warn(ar, "pdev param %d not supported by firmware\n",
+ id);
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_pdev_set_param_cmd *)skb->data;
+ cmd->param_id = __cpu_to_le32(id);
+ cmd->param_value = __cpu_to_le32(value);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi pdev set param %d value %d\n",
+ id, value);
+ return skb;
+}
+
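+/* Serialize the host-allocated memory chunks into the wmi_host_mem_chunks
+ * block of an init command, handing the firmware the DMA address, length
+ * and request id of every chunk allocated for the service ready event.
+ */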
+void ath10k_wmi_put_host_mem_chunks(struct ath10k *ar,
+ struct wmi_host_mem_chunks *chunks)
+{
+ struct host_memory_chunk *chunk;
+ int i;
+
+ chunks->count = __cpu_to_le32(ar->wmi.num_mem_chunks);
+
+ for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
+ chunk = &chunks->items[i];
+ chunk->ptr = __cpu_to_le32(ar->wmi.mem_chunks[i].paddr);
+ chunk->size = __cpu_to_le32(ar->wmi.mem_chunks[i].len);
+ chunk->req_id = __cpu_to_le32(ar->wmi.mem_chunks[i].req_id);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi chunk %d len %d requested, addr 0x%llx\n",
+ i,
+ ar->wmi.mem_chunks[i].len,
+ (unsigned long long)ar->wmi.mem_chunks[i].paddr);
+ }
+}
+
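+/* Build the main-WMI init command: a wmi_resource_config filled with
+ * the TARGET_* compile-time defaults plus the host memory chunk list.
+ * The 10.x, 10.2 and 10.4 variants below follow the same pattern with
+ * their own resource config layouts and target constants.
+ */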
+static struct sk_buff *ath10k_wmi_op_gen_init(struct ath10k *ar)
+{
+ struct wmi_init_cmd *cmd;
+ struct sk_buff *buf;
+ struct wmi_resource_config config = {};
+ u32 val;
+
+ config.num_vdevs = __cpu_to_le32(TARGET_NUM_VDEVS);
+ config.num_peers = __cpu_to_le32(TARGET_NUM_PEERS);
+ config.num_offload_peers = __cpu_to_le32(TARGET_NUM_OFFLOAD_PEERS);
+
+ config.num_offload_reorder_bufs =
+ __cpu_to_le32(TARGET_NUM_OFFLOAD_REORDER_BUFS);
+
+ config.num_peer_keys = __cpu_to_le32(TARGET_NUM_PEER_KEYS);
+ config.num_tids = __cpu_to_le32(TARGET_NUM_TIDS);
+ config.ast_skid_limit = __cpu_to_le32(TARGET_AST_SKID_LIMIT);
+ config.tx_chain_mask = __cpu_to_le32(TARGET_TX_CHAIN_MASK);
+ config.rx_chain_mask = __cpu_to_le32(TARGET_RX_CHAIN_MASK);
+ config.rx_timeout_pri_vo = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI);
+ config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI);
+ config.rx_timeout_pri_be = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI);
+ config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_RX_TIMEOUT_HI_PRI);
+ config.rx_decap_mode = __cpu_to_le32(ar->wmi.rx_decap_mode);
+ config.scan_max_pending_reqs =
+ __cpu_to_le32(TARGET_SCAN_MAX_PENDING_REQS);
+
+ config.bmiss_offload_max_vdev =
+ __cpu_to_le32(TARGET_BMISS_OFFLOAD_MAX_VDEV);
+
+ config.roam_offload_max_vdev =
+ __cpu_to_le32(TARGET_ROAM_OFFLOAD_MAX_VDEV);
+
+ config.roam_offload_max_ap_profiles =
+ __cpu_to_le32(TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES);
+
+ config.num_mcast_groups = __cpu_to_le32(TARGET_NUM_MCAST_GROUPS);
+ config.num_mcast_table_elems =
+ __cpu_to_le32(TARGET_NUM_MCAST_TABLE_ELEMS);
+
+ config.mcast2ucast_mode = __cpu_to_le32(TARGET_MCAST2UCAST_MODE);
+ config.tx_dbg_log_size = __cpu_to_le32(TARGET_TX_DBG_LOG_SIZE);
+ config.num_wds_entries = __cpu_to_le32(TARGET_NUM_WDS_ENTRIES);
+ config.dma_burst_size = __cpu_to_le32(TARGET_DMA_BURST_SIZE);
+ config.mac_aggr_delim = __cpu_to_le32(TARGET_MAC_AGGR_DELIM);
+
+ val = TARGET_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
+ config.rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(val);
+
+ config.vow_config = __cpu_to_le32(TARGET_VOW_CONFIG);
+
+ config.gtk_offload_max_vdev =
+ __cpu_to_le32(TARGET_GTK_OFFLOAD_MAX_VDEV);
+
+ config.num_msdu_desc = __cpu_to_le32(TARGET_NUM_MSDU_DESC);
+ config.max_frag_entries = __cpu_to_le32(TARGET_MAX_FRAG_ENTRIES);
+
+ buf = ath10k_wmi_alloc_skb(ar, struct_size(cmd, mem_chunks.items,
+ ar->wmi.num_mem_chunks));
+ if (!buf)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_init_cmd *)buf->data;
+
+ memcpy(&cmd->resource_config, &config, sizeof(config));
+ ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init\n");
+ return buf;
+}
+
+static struct sk_buff *ath10k_wmi_10_1_op_gen_init(struct ath10k *ar)
+{
+ struct wmi_init_cmd_10x *cmd;
+ struct sk_buff *buf;
+ struct wmi_resource_config_10x config = {};
+ u32 val;
+
+ config.num_vdevs = __cpu_to_le32(TARGET_10X_NUM_VDEVS);
+ config.num_peers = __cpu_to_le32(TARGET_10X_NUM_PEERS);
+ config.num_peer_keys = __cpu_to_le32(TARGET_10X_NUM_PEER_KEYS);
+ config.num_tids = __cpu_to_le32(TARGET_10X_NUM_TIDS);
+ config.ast_skid_limit = __cpu_to_le32(TARGET_10X_AST_SKID_LIMIT);
+ config.tx_chain_mask = __cpu_to_le32(TARGET_10X_TX_CHAIN_MASK);
+ config.rx_chain_mask = __cpu_to_le32(TARGET_10X_RX_CHAIN_MASK);
+ config.rx_timeout_pri_vo = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
+ config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
+ config.rx_timeout_pri_be = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
+ config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_HI_PRI);
+ config.rx_decap_mode = __cpu_to_le32(ar->wmi.rx_decap_mode);
+ config.scan_max_pending_reqs =
+ __cpu_to_le32(TARGET_10X_SCAN_MAX_PENDING_REQS);
+
+ config.bmiss_offload_max_vdev =
+ __cpu_to_le32(TARGET_10X_BMISS_OFFLOAD_MAX_VDEV);
+
+ config.roam_offload_max_vdev =
+ __cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_VDEV);
+
+ config.roam_offload_max_ap_profiles =
+ __cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_AP_PROFILES);
+
+ config.num_mcast_groups = __cpu_to_le32(TARGET_10X_NUM_MCAST_GROUPS);
+ config.num_mcast_table_elems =
+ __cpu_to_le32(TARGET_10X_NUM_MCAST_TABLE_ELEMS);
+
+ config.mcast2ucast_mode = __cpu_to_le32(TARGET_10X_MCAST2UCAST_MODE);
+ config.tx_dbg_log_size = __cpu_to_le32(TARGET_10X_TX_DBG_LOG_SIZE);
+ config.num_wds_entries = __cpu_to_le32(TARGET_10X_NUM_WDS_ENTRIES);
+ config.dma_burst_size = __cpu_to_le32(TARGET_10X_DMA_BURST_SIZE);
+ config.mac_aggr_delim = __cpu_to_le32(TARGET_10X_MAC_AGGR_DELIM);
+
+ val = TARGET_10X_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
+ config.rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(val);
+
+ config.vow_config = __cpu_to_le32(TARGET_10X_VOW_CONFIG);
+
+ config.num_msdu_desc = __cpu_to_le32(TARGET_10X_NUM_MSDU_DESC);
+ config.max_frag_entries = __cpu_to_le32(TARGET_10X_MAX_FRAG_ENTRIES);
+
+ buf = ath10k_wmi_alloc_skb(ar, struct_size(cmd, mem_chunks.items,
+ ar->wmi.num_mem_chunks));
+ if (!buf)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_init_cmd_10x *)buf->data;
+
+ memcpy(&cmd->resource_config, &config, sizeof(config));
+ ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init 10x\n");
+ return buf;
+}
+
+static struct sk_buff *ath10k_wmi_10_2_op_gen_init(struct ath10k *ar)
+{
+ struct wmi_init_cmd_10_2 *cmd;
+ struct sk_buff *buf;
+ struct wmi_resource_config_10x config = {};
+ u32 val, features;
+
+ config.num_vdevs = __cpu_to_le32(TARGET_10X_NUM_VDEVS);
+ config.num_peer_keys = __cpu_to_le32(TARGET_10X_NUM_PEER_KEYS);
+
+ if (ath10k_peer_stats_enabled(ar)) {
+ config.num_peers = __cpu_to_le32(TARGET_10X_TX_STATS_NUM_PEERS);
+ config.num_tids = __cpu_to_le32(TARGET_10X_TX_STATS_NUM_TIDS);
+ } else {
+ config.num_peers = __cpu_to_le32(TARGET_10X_NUM_PEERS);
+ config.num_tids = __cpu_to_le32(TARGET_10X_NUM_TIDS);
+ }
+
+ config.ast_skid_limit = __cpu_to_le32(TARGET_10X_AST_SKID_LIMIT);
+ config.tx_chain_mask = __cpu_to_le32(TARGET_10X_TX_CHAIN_MASK);
+ config.rx_chain_mask = __cpu_to_le32(TARGET_10X_RX_CHAIN_MASK);
+ config.rx_timeout_pri_vo = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
+ config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
+ config.rx_timeout_pri_be = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
+ config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_HI_PRI);
+ config.rx_decap_mode = __cpu_to_le32(ar->wmi.rx_decap_mode);
+
+ config.scan_max_pending_reqs =
+ __cpu_to_le32(TARGET_10X_SCAN_MAX_PENDING_REQS);
+
+ config.bmiss_offload_max_vdev =
+ __cpu_to_le32(TARGET_10X_BMISS_OFFLOAD_MAX_VDEV);
+
+ config.roam_offload_max_vdev =
+ __cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_VDEV);
+
+ config.roam_offload_max_ap_profiles =
+ __cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_AP_PROFILES);
+
+ config.num_mcast_groups = __cpu_to_le32(TARGET_10X_NUM_MCAST_GROUPS);
+ config.num_mcast_table_elems =
+ __cpu_to_le32(TARGET_10X_NUM_MCAST_TABLE_ELEMS);
+
+ config.mcast2ucast_mode = __cpu_to_le32(TARGET_10X_MCAST2UCAST_MODE);
+ config.tx_dbg_log_size = __cpu_to_le32(TARGET_10X_TX_DBG_LOG_SIZE);
+ config.num_wds_entries = __cpu_to_le32(TARGET_10X_NUM_WDS_ENTRIES);
+ config.dma_burst_size = __cpu_to_le32(TARGET_10_2_DMA_BURST_SIZE);
+ config.mac_aggr_delim = __cpu_to_le32(TARGET_10X_MAC_AGGR_DELIM);
+
+ val = TARGET_10X_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
+ config.rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(val);
+
+ config.vow_config = __cpu_to_le32(TARGET_10X_VOW_CONFIG);
+
+ config.num_msdu_desc = __cpu_to_le32(TARGET_10X_NUM_MSDU_DESC);
+ config.max_frag_entries = __cpu_to_le32(TARGET_10X_MAX_FRAG_ENTRIES);
+
+ buf = ath10k_wmi_alloc_skb(ar, struct_size(cmd, mem_chunks.items,
+ ar->wmi.num_mem_chunks));
+ if (!buf)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_init_cmd_10_2 *)buf->data;
+
+ features = WMI_10_2_RX_BATCH_MODE;
+
+ if (test_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags) &&
+ test_bit(WMI_SERVICE_COEX_GPIO, ar->wmi.svc_map))
+ features |= WMI_10_2_COEX_GPIO;
+
+ if (ath10k_peer_stats_enabled(ar))
+ features |= WMI_10_2_PEER_STATS;
+
+ if (test_bit(WMI_SERVICE_BSS_CHANNEL_INFO_64, ar->wmi.svc_map))
+ features |= WMI_10_2_BSS_CHAN_INFO;
+
+ cmd->resource_config.feature_mask = __cpu_to_le32(features);
+
+ memcpy(&cmd->resource_config.common, &config, sizeof(config));
+ ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init 10.2\n");
+ return buf;
+}
+
+static struct sk_buff *ath10k_wmi_10_4_op_gen_init(struct ath10k *ar)
+{
+ struct wmi_init_cmd_10_4 *cmd;
+ struct sk_buff *buf;
+ struct wmi_resource_config_10_4 config = {};
+
+ config.num_vdevs = __cpu_to_le32(ar->max_num_vdevs);
+ config.num_peers = __cpu_to_le32(ar->max_num_peers);
+ config.num_active_peers = __cpu_to_le32(ar->num_active_peers);
+ config.num_tids = __cpu_to_le32(ar->num_tids);
+
+ config.num_offload_peers = __cpu_to_le32(TARGET_10_4_NUM_OFFLOAD_PEERS);
+ config.num_offload_reorder_buffs =
+ __cpu_to_le32(TARGET_10_4_NUM_OFFLOAD_REORDER_BUFFS);
+ config.num_peer_keys = __cpu_to_le32(TARGET_10_4_NUM_PEER_KEYS);
+ config.ast_skid_limit = __cpu_to_le32(TARGET_10_4_AST_SKID_LIMIT);
+ config.tx_chain_mask = __cpu_to_le32(ar->hw_params.tx_chain_mask);
+ config.rx_chain_mask = __cpu_to_le32(ar->hw_params.rx_chain_mask);
+
+ config.rx_timeout_pri[0] = __cpu_to_le32(TARGET_10_4_RX_TIMEOUT_LO_PRI);
+ config.rx_timeout_pri[1] = __cpu_to_le32(TARGET_10_4_RX_TIMEOUT_LO_PRI);
+ config.rx_timeout_pri[2] = __cpu_to_le32(TARGET_10_4_RX_TIMEOUT_LO_PRI);
+ config.rx_timeout_pri[3] = __cpu_to_le32(TARGET_10_4_RX_TIMEOUT_HI_PRI);
+
+ config.rx_decap_mode = __cpu_to_le32(ar->wmi.rx_decap_mode);
+ config.scan_max_pending_req = __cpu_to_le32(TARGET_10_4_SCAN_MAX_REQS);
+ config.bmiss_offload_max_vdev =
+ __cpu_to_le32(TARGET_10_4_BMISS_OFFLOAD_MAX_VDEV);
+ config.roam_offload_max_vdev =
+ __cpu_to_le32(TARGET_10_4_ROAM_OFFLOAD_MAX_VDEV);
+ config.roam_offload_max_ap_profiles =
+ __cpu_to_le32(TARGET_10_4_ROAM_OFFLOAD_MAX_PROFILES);
+ config.num_mcast_groups = __cpu_to_le32(TARGET_10_4_NUM_MCAST_GROUPS);
+ config.num_mcast_table_elems =
+ __cpu_to_le32(TARGET_10_4_NUM_MCAST_TABLE_ELEMS);
+
+ config.mcast2ucast_mode = __cpu_to_le32(TARGET_10_4_MCAST2UCAST_MODE);
+ config.tx_dbg_log_size = __cpu_to_le32(TARGET_10_4_TX_DBG_LOG_SIZE);
+ config.num_wds_entries = __cpu_to_le32(TARGET_10_4_NUM_WDS_ENTRIES);
+ config.dma_burst_size = __cpu_to_le32(TARGET_10_4_DMA_BURST_SIZE);
+ config.mac_aggr_delim = __cpu_to_le32(TARGET_10_4_MAC_AGGR_DELIM);
+
+ config.rx_skip_defrag_timeout_dup_detection_check =
+ __cpu_to_le32(TARGET_10_4_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK);
+
+ config.vow_config = __cpu_to_le32(TARGET_10_4_VOW_CONFIG);
+ config.gtk_offload_max_vdev =
+ __cpu_to_le32(TARGET_10_4_GTK_OFFLOAD_MAX_VDEV);
+ config.num_msdu_desc = __cpu_to_le32(ar->htt.max_num_pending_tx);
+ config.max_frag_entries = __cpu_to_le32(TARGET_10_4_11AC_TX_MAX_FRAGS);
+ config.max_peer_ext_stats =
+ __cpu_to_le32(TARGET_10_4_MAX_PEER_EXT_STATS);
+ config.smart_ant_cap = __cpu_to_le32(TARGET_10_4_SMART_ANT_CAP);
+
+ config.bk_minfree = __cpu_to_le32(TARGET_10_4_BK_MIN_FREE);
+ config.be_minfree = __cpu_to_le32(TARGET_10_4_BE_MIN_FREE);
+ config.vi_minfree = __cpu_to_le32(TARGET_10_4_VI_MIN_FREE);
+ config.vo_minfree = __cpu_to_le32(TARGET_10_4_VO_MIN_FREE);
+
+ config.rx_batchmode = __cpu_to_le32(TARGET_10_4_RX_BATCH_MODE);
+ config.tt_support =
+ __cpu_to_le32(TARGET_10_4_THERMAL_THROTTLING_CONFIG);
+ config.atf_config = __cpu_to_le32(TARGET_10_4_ATF_CONFIG);
+ config.iphdr_pad_config = __cpu_to_le32(TARGET_10_4_IPHDR_PAD_CONFIG);
+ config.qwrap_config = __cpu_to_le32(TARGET_10_4_QWRAP_CONFIG);
+
+ buf = ath10k_wmi_alloc_skb(ar, struct_size(cmd, mem_chunks.items,
+ ar->wmi.num_mem_chunks));
+ if (!buf)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_init_cmd_10_4 *)buf->data;
+ memcpy(&cmd->resource_config, &config, sizeof(config));
+ ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init 10.4\n");
+ return buf;
+}
+
+int ath10k_wmi_start_scan_verify(const struct wmi_start_scan_arg *arg)
+{
+ if (arg->ie_len > WLAN_SCAN_PARAMS_MAX_IE_LEN)
+ return -EINVAL;
+ if (arg->n_channels > ARRAY_SIZE(arg->channels))
+ return -EINVAL;
+ if (arg->n_ssids > WLAN_SCAN_PARAMS_MAX_SSID)
+ return -EINVAL;
+ if (arg->n_bssids > WLAN_SCAN_PARAMS_MAX_BSSID)
+ return -EINVAL;
+
+ return 0;
+}
+
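+/* Compute the space needed for the optional scan TLVs. This must match
+ * the layout written by ath10k_wmi_put_start_scan_tlvs() below: each
+ * present element contributes its list header plus its entries, with
+ * the IE payload padded to a 4-byte boundary.
+ */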
+static size_t
+ath10k_wmi_start_scan_tlvs_len(const struct wmi_start_scan_arg *arg)
+{
+ int len = 0;
+
+ if (arg->ie_len) {
+ len += sizeof(struct wmi_ie_data);
+ len += roundup(arg->ie_len, 4);
+ }
+
+ if (arg->n_channels) {
+ len += sizeof(struct wmi_chan_list);
+ len += sizeof(__le32) * arg->n_channels;
+ }
+
+ if (arg->n_ssids) {
+ len += sizeof(struct wmi_ssid_list);
+ len += sizeof(struct wmi_ssid) * arg->n_ssids;
+ }
+
+ if (arg->n_bssids) {
+ len += sizeof(struct wmi_bssid_list);
+ len += sizeof(struct wmi_mac_addr) * arg->n_bssids;
+ }
+
+ return len;
+}
+
+void ath10k_wmi_put_start_scan_common(struct wmi_start_scan_common *cmn,
+ const struct wmi_start_scan_arg *arg)
+{
+ u32 scan_id;
+ u32 scan_req_id;
+
+ scan_id = WMI_HOST_SCAN_REQ_ID_PREFIX;
+ scan_id |= arg->scan_id;
+
+ scan_req_id = WMI_HOST_SCAN_REQUESTOR_ID_PREFIX;
+ scan_req_id |= arg->scan_req_id;
+
+ cmn->scan_id = __cpu_to_le32(scan_id);
+ cmn->scan_req_id = __cpu_to_le32(scan_req_id);
+ cmn->vdev_id = __cpu_to_le32(arg->vdev_id);
+ cmn->scan_priority = __cpu_to_le32(arg->scan_priority);
+ cmn->notify_scan_events = __cpu_to_le32(arg->notify_scan_events);
+ cmn->dwell_time_active = __cpu_to_le32(arg->dwell_time_active);
+ cmn->dwell_time_passive = __cpu_to_le32(arg->dwell_time_passive);
+ cmn->min_rest_time = __cpu_to_le32(arg->min_rest_time);
+ cmn->max_rest_time = __cpu_to_le32(arg->max_rest_time);
+ cmn->repeat_probe_time = __cpu_to_le32(arg->repeat_probe_time);
+ cmn->probe_spacing_time = __cpu_to_le32(arg->probe_spacing_time);
+ cmn->idle_time = __cpu_to_le32(arg->idle_time);
+ cmn->max_scan_time = __cpu_to_le32(arg->max_scan_time);
+ cmn->probe_delay = __cpu_to_le32(arg->probe_delay);
+ cmn->scan_ctrl_flags = __cpu_to_le32(arg->scan_ctrl_flags);
+}
+
+static void
+ath10k_wmi_put_start_scan_tlvs(u8 *tlvs,
+ const struct wmi_start_scan_arg *arg)
+{
+ struct wmi_ie_data *ie;
+ struct wmi_chan_list *channels;
+ struct wmi_ssid_list *ssids;
+ struct wmi_bssid_list *bssids;
+ void *ptr = tlvs;
+ int i;
+
+ if (arg->n_channels) {
+ channels = ptr;
+ channels->tag = __cpu_to_le32(WMI_CHAN_LIST_TAG);
+ channels->num_chan = __cpu_to_le32(arg->n_channels);
+
+ for (i = 0; i < arg->n_channels; i++)
+ channels->channel_list[i].freq =
+ __cpu_to_le16(arg->channels[i]);
+
+ ptr += sizeof(*channels);
+ ptr += sizeof(__le32) * arg->n_channels;
+ }
+
+ if (arg->n_ssids) {
+ ssids = ptr;
+ ssids->tag = __cpu_to_le32(WMI_SSID_LIST_TAG);
+ ssids->num_ssids = __cpu_to_le32(arg->n_ssids);
+
+ for (i = 0; i < arg->n_ssids; i++) {
+ ssids->ssids[i].ssid_len =
+ __cpu_to_le32(arg->ssids[i].len);
+ memcpy(&ssids->ssids[i].ssid,
+ arg->ssids[i].ssid,
+ arg->ssids[i].len);
+ }
+
+ ptr += sizeof(*ssids);
+ ptr += sizeof(struct wmi_ssid) * arg->n_ssids;
+ }
+
+ if (arg->n_bssids) {
+ bssids = ptr;
+ bssids->tag = __cpu_to_le32(WMI_BSSID_LIST_TAG);
+ bssids->num_bssid = __cpu_to_le32(arg->n_bssids);
+
+ for (i = 0; i < arg->n_bssids; i++)
+ ether_addr_copy(bssids->bssid_list[i].addr,
+ arg->bssids[i].bssid);
+
+ ptr += sizeof(*bssids);
+ ptr += sizeof(struct wmi_mac_addr) * arg->n_bssids;
+ }
+
+ if (arg->ie_len) {
+ ie = ptr;
+ ie->tag = __cpu_to_le32(WMI_IE_TAG);
+ ie->ie_len = __cpu_to_le32(arg->ie_len);
+ memcpy(ie->ie_data, arg->ie, arg->ie_len);
+
+ ptr += sizeof(*ie);
+ ptr += roundup(arg->ie_len, 4);
+ }
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_start_scan(struct ath10k *ar,
+ const struct wmi_start_scan_arg *arg)
+{
+ struct wmi_start_scan_cmd *cmd;
+ struct sk_buff *skb;
+ size_t len;
+ int ret;
+
+ ret = ath10k_wmi_start_scan_verify(arg);
+ if (ret)
+ return ERR_PTR(ret);
+
+ len = sizeof(*cmd) + ath10k_wmi_start_scan_tlvs_len(arg);
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_start_scan_cmd *)skb->data;
+
+ ath10k_wmi_put_start_scan_common(&cmd->common, arg);
+ ath10k_wmi_put_start_scan_tlvs(cmd->tlvs, arg);
+
+ cmd->burst_duration_ms = __cpu_to_le32(0);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi start scan\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_10x_op_gen_start_scan(struct ath10k *ar,
+ const struct wmi_start_scan_arg *arg)
+{
+ struct wmi_10x_start_scan_cmd *cmd;
+ struct sk_buff *skb;
+ size_t len;
+ int ret;
+
+ ret = ath10k_wmi_start_scan_verify(arg);
+ if (ret)
+ return ERR_PTR(ret);
+
+ len = sizeof(*cmd) + ath10k_wmi_start_scan_tlvs_len(arg);
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_10x_start_scan_cmd *)skb->data;
+
+ ath10k_wmi_put_start_scan_common(&cmd->common, arg);
+ ath10k_wmi_put_start_scan_tlvs(cmd->tlvs, arg);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi 10x start scan\n");
+ return skb;
+}
+
+void ath10k_wmi_start_scan_init(struct ath10k *ar,
+ struct wmi_start_scan_arg *arg)
+{
+ /* set up commonly used values */
+ arg->scan_req_id = 1;
+ arg->scan_priority = WMI_SCAN_PRIORITY_LOW;
+ arg->dwell_time_active = 50;
+ arg->dwell_time_passive = 150;
+ arg->min_rest_time = 50;
+ arg->max_rest_time = 500;
+ arg->repeat_probe_time = 0;
+ arg->probe_spacing_time = 0;
+ arg->idle_time = 0;
+ arg->max_scan_time = 20000;
+ arg->probe_delay = 5;
+ arg->notify_scan_events = WMI_SCAN_EVENT_STARTED
+ | WMI_SCAN_EVENT_COMPLETED
+ | WMI_SCAN_EVENT_BSS_CHANNEL
+ | WMI_SCAN_EVENT_FOREIGN_CHANNEL
+ | WMI_SCAN_EVENT_FOREIGN_CHANNEL_EXIT
+ | WMI_SCAN_EVENT_DEQUEUED;
+ arg->scan_ctrl_flags |= WMI_SCAN_CHAN_STAT_EVENT;
+ arg->n_bssids = 1;
+ arg->bssids[0].bssid = "\xFF\xFF\xFF\xFF\xFF\xFF";
+}
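+
+/* A typical scan issue path, sketched (assuming ath10k_wmi_cmd_send()
+ * and ATH10K_SCAN_ID as used elsewhere in the driver):
+ *
+ *    struct wmi_start_scan_arg arg = {};
+ *
+ *    ath10k_wmi_start_scan_init(ar, &arg);
+ *    arg.vdev_id = vdev_id;
+ *    arg.scan_id = ATH10K_SCAN_ID;
+ *    (fill channels, ssids and ie as needed)
+ *    skb = ath10k_wmi_op_gen_start_scan(ar, &arg);
+ *    if (!IS_ERR(skb))
+ *            ath10k_wmi_cmd_send(ar, skb,
+ *                                ar->wmi.cmd->start_scan_cmdid);
+ */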
+
+static struct sk_buff *
+ath10k_wmi_op_gen_stop_scan(struct ath10k *ar,
+ const struct wmi_stop_scan_arg *arg)
+{
+ struct wmi_stop_scan_cmd *cmd;
+ struct sk_buff *skb;
+ u32 scan_id;
+ u32 req_id;
+
+ if (arg->req_id > 0xFFF)
+ return ERR_PTR(-EINVAL);
+ if (arg->req_type == WMI_SCAN_STOP_ONE && arg->u.scan_id > 0xFFF)
+ return ERR_PTR(-EINVAL);
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ scan_id = arg->u.scan_id;
+ scan_id |= WMI_HOST_SCAN_REQ_ID_PREFIX;
+
+ req_id = arg->req_id;
+ req_id |= WMI_HOST_SCAN_REQUESTOR_ID_PREFIX;
+
+ cmd = (struct wmi_stop_scan_cmd *)skb->data;
+ cmd->req_type = __cpu_to_le32(arg->req_type);
+ cmd->vdev_id = __cpu_to_le32(arg->u.vdev_id);
+ cmd->scan_id = __cpu_to_le32(scan_id);
+ cmd->scan_req_id = __cpu_to_le32(req_id);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi stop scan reqid %d req_type %d vdev/scan_id %d\n",
+ arg->req_id, arg->req_type, arg->u.scan_id);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_vdev_create(struct ath10k *ar, u32 vdev_id,
+ enum wmi_vdev_type type,
+ enum wmi_vdev_subtype subtype,
+ const u8 macaddr[ETH_ALEN])
+{
+ struct wmi_vdev_create_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_vdev_create_cmd *)skb->data;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->vdev_type = __cpu_to_le32(type);
+ cmd->vdev_subtype = __cpu_to_le32(subtype);
+ ether_addr_copy(cmd->vdev_macaddr.addr, macaddr);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "WMI vdev create: id %d type %d subtype %d macaddr %pM\n",
+ vdev_id, type, subtype, macaddr);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_vdev_delete(struct ath10k *ar, u32 vdev_id)
+{
+ struct wmi_vdev_delete_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_vdev_delete_cmd *)skb->data;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "WMI vdev delete id %d\n", vdev_id);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_vdev_start(struct ath10k *ar,
+ const struct wmi_vdev_start_request_arg *arg,
+ bool restart)
+{
+ struct wmi_vdev_start_request_cmd *cmd;
+ struct sk_buff *skb;
+ const char *cmdname;
+ u32 flags = 0;
+
+ if (WARN_ON(arg->hidden_ssid && !arg->ssid))
+ return ERR_PTR(-EINVAL);
+ if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
+ return ERR_PTR(-EINVAL);
+
+ if (restart)
+ cmdname = "restart";
+ else
+ cmdname = "start";
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ if (arg->hidden_ssid)
+ flags |= WMI_VDEV_START_HIDDEN_SSID;
+ if (arg->pmf_enabled)
+ flags |= WMI_VDEV_START_PMF_ENABLED;
+
+ cmd = (struct wmi_vdev_start_request_cmd *)skb->data;
+ cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
+ cmd->disable_hw_ack = __cpu_to_le32(arg->disable_hw_ack);
+ cmd->beacon_interval = __cpu_to_le32(arg->bcn_intval);
+ cmd->dtim_period = __cpu_to_le32(arg->dtim_period);
+ cmd->flags = __cpu_to_le32(flags);
+ cmd->bcn_tx_rate = __cpu_to_le32(arg->bcn_tx_rate);
+ cmd->bcn_tx_power = __cpu_to_le32(arg->bcn_tx_power);
+
+ if (arg->ssid) {
+ cmd->ssid.ssid_len = __cpu_to_le32(arg->ssid_len);
+ memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len);
+ }
+
+ ath10k_wmi_put_wmi_channel(ar, &cmd->chan, &arg->channel);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi vdev %s id 0x%x flags: 0x%0X, freq %d, mode %d, ch_flags: 0x%0X, max_power: %d\n",
+ cmdname, arg->vdev_id,
+ flags, arg->channel.freq, arg->channel.mode,
+ cmd->chan.flags, arg->channel.max_power);
+
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_vdev_stop(struct ath10k *ar, u32 vdev_id)
+{
+ struct wmi_vdev_stop_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_vdev_stop_cmd *)skb->data;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi vdev stop id 0x%x\n", vdev_id);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid,
+ const u8 *bssid)
+{
+ struct wmi_vdev_up_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_vdev_up_cmd *)skb->data;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->vdev_assoc_id = __cpu_to_le32(aid);
+ ether_addr_copy(cmd->vdev_bssid.addr, bssid);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi mgmt vdev up id 0x%x assoc id %d bssid %pM\n",
+ vdev_id, aid, bssid);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_vdev_down(struct ath10k *ar, u32 vdev_id)
+{
+ struct wmi_vdev_down_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_vdev_down_cmd *)skb->data;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi mgmt vdev down id 0x%x\n", vdev_id);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_vdev_set_param(struct ath10k *ar, u32 vdev_id,
+ u32 param_id, u32 param_value)
+{
+ struct wmi_vdev_set_param_cmd *cmd;
+ struct sk_buff *skb;
+
+ if (param_id == WMI_VDEV_PARAM_UNSUPPORTED) {
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "vdev param %d not supported by firmware\n",
+ param_id);
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_vdev_set_param_cmd *)skb->data;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->param_id = __cpu_to_le32(param_id);
+ cmd->param_value = __cpu_to_le32(param_value);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi vdev id 0x%x set param %d value %d\n",
+ vdev_id, param_id, param_value);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_vdev_install_key(struct ath10k *ar,
+ const struct wmi_vdev_install_key_arg *arg)
+{
+ struct wmi_vdev_install_key_cmd *cmd;
+ struct sk_buff *skb;
+
+ if (arg->key_cipher == WMI_CIPHER_NONE && arg->key_data != NULL)
+ return ERR_PTR(-EINVAL);
+ if (arg->key_cipher != WMI_CIPHER_NONE && arg->key_data == NULL)
+ return ERR_PTR(-EINVAL);
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd) + arg->key_len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_vdev_install_key_cmd *)skb->data;
+ cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
+ cmd->key_idx = __cpu_to_le32(arg->key_idx);
+ cmd->key_flags = __cpu_to_le32(arg->key_flags);
+ cmd->key_cipher = __cpu_to_le32(arg->key_cipher);
+ cmd->key_len = __cpu_to_le32(arg->key_len);
+ cmd->key_txmic_len = __cpu_to_le32(arg->key_txmic_len);
+ cmd->key_rxmic_len = __cpu_to_le32(arg->key_rxmic_len);
+
+ if (arg->macaddr)
+ ether_addr_copy(cmd->peer_macaddr.addr, arg->macaddr);
+ if (arg->key_data)
+ memcpy(cmd->key_data, arg->key_data, arg->key_len);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi vdev install key idx %d cipher %d len %d\n",
+ arg->key_idx, arg->key_cipher, arg->key_len);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_vdev_spectral_conf(struct ath10k *ar,
+ const struct wmi_vdev_spectral_conf_arg *arg)
+{
+ struct wmi_vdev_spectral_conf_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_vdev_spectral_conf_cmd *)skb->data;
+ cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
+ cmd->scan_count = __cpu_to_le32(arg->scan_count);
+ cmd->scan_period = __cpu_to_le32(arg->scan_period);
+ cmd->scan_priority = __cpu_to_le32(arg->scan_priority);
+ cmd->scan_fft_size = __cpu_to_le32(arg->scan_fft_size);
+ cmd->scan_gc_ena = __cpu_to_le32(arg->scan_gc_ena);
+ cmd->scan_restart_ena = __cpu_to_le32(arg->scan_restart_ena);
+ cmd->scan_noise_floor_ref = __cpu_to_le32(arg->scan_noise_floor_ref);
+ cmd->scan_init_delay = __cpu_to_le32(arg->scan_init_delay);
+ cmd->scan_nb_tone_thr = __cpu_to_le32(arg->scan_nb_tone_thr);
+ cmd->scan_str_bin_thr = __cpu_to_le32(arg->scan_str_bin_thr);
+ cmd->scan_wb_rpt_mode = __cpu_to_le32(arg->scan_wb_rpt_mode);
+ cmd->scan_rssi_rpt_mode = __cpu_to_le32(arg->scan_rssi_rpt_mode);
+ cmd->scan_rssi_thr = __cpu_to_le32(arg->scan_rssi_thr);
+ cmd->scan_pwr_format = __cpu_to_le32(arg->scan_pwr_format);
+ cmd->scan_rpt_mode = __cpu_to_le32(arg->scan_rpt_mode);
+ cmd->scan_bin_scale = __cpu_to_le32(arg->scan_bin_scale);
+ cmd->scan_dbm_adj = __cpu_to_le32(arg->scan_dbm_adj);
+ cmd->scan_chn_mask = __cpu_to_le32(arg->scan_chn_mask);
+
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id,
+ u32 trigger, u32 enable)
+{
+ struct wmi_vdev_spectral_enable_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_vdev_spectral_enable_cmd *)skb->data;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->trigger_cmd = __cpu_to_le32(trigger);
+ cmd->enable_cmd = __cpu_to_le32(enable);
+
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_peer_create(struct ath10k *ar, u32 vdev_id,
+ const u8 peer_addr[ETH_ALEN],
+ enum wmi_peer_type peer_type)
+{
+ struct wmi_peer_create_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_peer_create_cmd *)skb->data;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
+ cmd->peer_type = __cpu_to_le32(peer_type);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi peer create vdev_id %d peer_addr %pM\n",
+ vdev_id, peer_addr);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_peer_delete(struct ath10k *ar, u32 vdev_id,
+ const u8 peer_addr[ETH_ALEN])
+{
+ struct wmi_peer_delete_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_peer_delete_cmd *)skb->data;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi peer delete vdev_id %d peer_addr %pM\n",
+ vdev_id, peer_addr);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_peer_flush(struct ath10k *ar, u32 vdev_id,
+ const u8 peer_addr[ETH_ALEN], u32 tid_bitmap)
+{
+ struct wmi_peer_flush_tids_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_peer_flush_tids_cmd *)skb->data;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->peer_tid_bitmap = __cpu_to_le32(tid_bitmap);
+ ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi peer flush vdev_id %d peer_addr %pM tids %08x\n",
+ vdev_id, peer_addr, tid_bitmap);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_peer_set_param(struct ath10k *ar, u32 vdev_id,
+ const u8 *peer_addr,
+ enum wmi_peer_param param_id,
+ u32 param_value)
+{
+ struct wmi_peer_set_param_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_peer_set_param_cmd *)skb->data;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->param_id = __cpu_to_le32(param_id);
+ cmd->param_value = __cpu_to_le32(param_value);
+ ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi vdev %d peer 0x%pM set param %d value %d\n",
+ vdev_id, peer_addr, param_id, param_value);
+ return skb;
+}
+
+static struct sk_buff *ath10k_wmi_op_gen_gpio_config(struct ath10k *ar,
+ u32 gpio_num, u32 input,
+ u32 pull_type, u32 intr_mode)
+{
+ struct wmi_gpio_config_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_gpio_config_cmd *)skb->data;
+ cmd->pull_type = __cpu_to_le32(pull_type);
+ cmd->gpio_num = __cpu_to_le32(gpio_num);
+ cmd->input = __cpu_to_le32(input);
+ cmd->intr_mode = __cpu_to_le32(intr_mode);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi gpio_config gpio_num 0x%08x input 0x%08x pull_type 0x%08x intr_mode 0x%08x\n",
+ gpio_num, input, pull_type, intr_mode);
+
+ return skb;
+}
+
+static struct sk_buff *ath10k_wmi_op_gen_gpio_output(struct ath10k *ar,
+ u32 gpio_num, u32 set)
+{
+ struct wmi_gpio_output_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_gpio_output_cmd *)skb->data;
+ cmd->gpio_num = __cpu_to_le32(gpio_num);
+ cmd->set = __cpu_to_le32(set);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi gpio_output gpio_num 0x%08x set 0x%08x\n",
+ gpio_num, set);
+
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_set_psmode(struct ath10k *ar, u32 vdev_id,
+ enum wmi_sta_ps_mode psmode)
+{
+ struct wmi_sta_powersave_mode_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_sta_powersave_mode_cmd *)skb->data;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->sta_ps_mode = __cpu_to_le32(psmode);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi set powersave id 0x%x mode %d\n",
+ vdev_id, psmode);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_set_sta_ps(struct ath10k *ar, u32 vdev_id,
+ enum wmi_sta_powersave_param param_id,
+ u32 value)
+{
+ struct wmi_sta_powersave_param_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_sta_powersave_param_cmd *)skb->data;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->param_id = __cpu_to_le32(param_id);
+ cmd->param_value = __cpu_to_le32(value);
+
+ ath10k_dbg(ar, ATH10K_DBG_STA,
+ "wmi sta ps param vdev_id 0x%x param %d value %d\n",
+ vdev_id, param_id, value);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_set_ap_ps(struct ath10k *ar, u32 vdev_id, const u8 *mac,
+ enum wmi_ap_ps_peer_param param_id, u32 value)
+{
+ struct wmi_ap_ps_peer_cmd *cmd;
+ struct sk_buff *skb;
+
+ if (!mac)
+ return ERR_PTR(-EINVAL);
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_ap_ps_peer_cmd *)skb->data;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->param_id = __cpu_to_le32(param_id);
+ cmd->param_value = __cpu_to_le32(value);
+ ether_addr_copy(cmd->peer_macaddr.addr, mac);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi ap ps param vdev_id 0x%X param %d value %d mac_addr %pM\n",
+ vdev_id, param_id, value, mac);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_scan_chan_list(struct ath10k *ar,
+ const struct wmi_scan_chan_list_arg *arg)
+{
+ struct wmi_scan_chan_list_cmd *cmd;
+ struct sk_buff *skb;
+ struct wmi_channel_arg *ch;
+ struct wmi_channel *ci;
+ int i;
+
+ skb = ath10k_wmi_alloc_skb(ar, struct_size(cmd, chan_info, arg->n_channels));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_scan_chan_list_cmd *)skb->data;
+ cmd->num_scan_chans = __cpu_to_le32(arg->n_channels);
+
+ for (i = 0; i < arg->n_channels; i++) {
+ ch = &arg->channels[i];
+ ci = &cmd->chan_info[i];
+
+ ath10k_wmi_put_wmi_channel(ar, ci, ch);
+ }
+
+ return skb;
+}
+
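+ /* Fill the fields shared by every peer-assoc command layout; the
+ * branch-specific fill variants below call this first and then append
+ * their own extras.
+ */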
+static void
+ath10k_wmi_peer_assoc_fill(struct ath10k *ar, void *buf,
+ const struct wmi_peer_assoc_complete_arg *arg)
+{
+ struct wmi_common_peer_assoc_complete_cmd *cmd = buf;
+
+ cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
+ cmd->peer_new_assoc = __cpu_to_le32(arg->peer_reassoc ? 0 : 1);
+ cmd->peer_associd = __cpu_to_le32(arg->peer_aid);
+ cmd->peer_flags = __cpu_to_le32(arg->peer_flags);
+ cmd->peer_caps = __cpu_to_le32(arg->peer_caps);
+ cmd->peer_listen_intval = __cpu_to_le32(arg->peer_listen_intval);
+ cmd->peer_ht_caps = __cpu_to_le32(arg->peer_ht_caps);
+ cmd->peer_max_mpdu = __cpu_to_le32(arg->peer_max_mpdu);
+ cmd->peer_mpdu_density = __cpu_to_le32(arg->peer_mpdu_density);
+ cmd->peer_rate_caps = __cpu_to_le32(arg->peer_rate_caps);
+ cmd->peer_nss = __cpu_to_le32(arg->peer_num_spatial_streams);
+ cmd->peer_vht_caps = __cpu_to_le32(arg->peer_vht_caps);
+ cmd->peer_phymode = __cpu_to_le32(arg->peer_phymode);
+
+ ether_addr_copy(cmd->peer_macaddr.addr, arg->addr);
+
+ cmd->peer_legacy_rates.num_rates =
+ __cpu_to_le32(arg->peer_legacy_rates.num_rates);
+ memcpy(cmd->peer_legacy_rates.rates, arg->peer_legacy_rates.rates,
+ arg->peer_legacy_rates.num_rates);
+
+ cmd->peer_ht_rates.num_rates =
+ __cpu_to_le32(arg->peer_ht_rates.num_rates);
+ memcpy(cmd->peer_ht_rates.rates, arg->peer_ht_rates.rates,
+ arg->peer_ht_rates.num_rates);
+
+ cmd->peer_vht_rates.rx_max_rate =
+ __cpu_to_le32(arg->peer_vht_rates.rx_max_rate);
+ cmd->peer_vht_rates.rx_mcs_set =
+ __cpu_to_le32(arg->peer_vht_rates.rx_mcs_set);
+ cmd->peer_vht_rates.tx_max_rate =
+ __cpu_to_le32(arg->peer_vht_rates.tx_max_rate);
+ cmd->peer_vht_rates.tx_mcs_set =
+ __cpu_to_le32(arg->peer_vht_rates.tx_mcs_set);
+}
+
+static void
+ath10k_wmi_peer_assoc_fill_main(struct ath10k *ar, void *buf,
+ const struct wmi_peer_assoc_complete_arg *arg)
+{
+ struct wmi_main_peer_assoc_complete_cmd *cmd = buf;
+
+ ath10k_wmi_peer_assoc_fill(ar, buf, arg);
+ memset(cmd->peer_ht_info, 0, sizeof(cmd->peer_ht_info));
+}
+
+static void
+ath10k_wmi_peer_assoc_fill_10_1(struct ath10k *ar, void *buf,
+ const struct wmi_peer_assoc_complete_arg *arg)
+{
+ ath10k_wmi_peer_assoc_fill(ar, buf, arg);
+}
+
+static void
+ath10k_wmi_peer_assoc_fill_10_2(struct ath10k *ar, void *buf,
+ const struct wmi_peer_assoc_complete_arg *arg)
+{
+ struct wmi_10_2_peer_assoc_complete_cmd *cmd = buf;
+ int max_mcs, max_nss;
+ u32 info0;
+
+ /* TODO: Is using max values okay with firmware? */
+ max_mcs = 0xf;
+ max_nss = 0xf;
+
+ info0 = SM(max_mcs, WMI_PEER_ASSOC_INFO0_MAX_MCS_IDX) |
+ SM(max_nss, WMI_PEER_ASSOC_INFO0_MAX_NSS);
+
+ ath10k_wmi_peer_assoc_fill(ar, buf, arg);
+ cmd->info0 = __cpu_to_le32(info0);
+}
+
+static void
+ath10k_wmi_peer_assoc_fill_10_4(struct ath10k *ar, void *buf,
+ const struct wmi_peer_assoc_complete_arg *arg)
+{
+ struct wmi_10_4_peer_assoc_complete_cmd *cmd = buf;
+
+ ath10k_wmi_peer_assoc_fill_10_2(ar, buf, arg);
+ cmd->peer_bw_rxnss_override =
+ __cpu_to_le32(arg->peer_bw_rxnss_override);
+}
+
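+ /* Validate a peer-assoc argument before any of the gen_peer_assoc
+ * variants copy it into a fixed-size firmware command.
+ */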
+static int
+ath10k_wmi_peer_assoc_check_arg(const struct wmi_peer_assoc_complete_arg *arg)
+{
+ if (arg->peer_mpdu_density > 16)
+ return -EINVAL;
+ if (arg->peer_legacy_rates.num_rates > MAX_SUPPORTED_RATES)
+ return -EINVAL;
+ if (arg->peer_ht_rates.num_rates > MAX_SUPPORTED_RATES)
+ return -EINVAL;
+
+ return 0;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_peer_assoc(struct ath10k *ar,
+ const struct wmi_peer_assoc_complete_arg *arg)
+{
+ size_t len = sizeof(struct wmi_main_peer_assoc_complete_cmd);
+ struct sk_buff *skb;
+ int ret;
+
+ ret = ath10k_wmi_peer_assoc_check_arg(arg);
+ if (ret)
+ return ERR_PTR(ret);
+
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ath10k_wmi_peer_assoc_fill_main(ar, skb->data, arg);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi peer assoc vdev %d addr %pM (%s)\n",
+ arg->vdev_id, arg->addr,
+ arg->peer_reassoc ? "reassociate" : "new");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_10_1_op_gen_peer_assoc(struct ath10k *ar,
+ const struct wmi_peer_assoc_complete_arg *arg)
+{
+ size_t len = sizeof(struct wmi_10_1_peer_assoc_complete_cmd);
+ struct sk_buff *skb;
+ int ret;
+
+ ret = ath10k_wmi_peer_assoc_check_arg(arg);
+ if (ret)
+ return ERR_PTR(ret);
+
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ath10k_wmi_peer_assoc_fill_10_1(ar, skb->data, arg);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi peer assoc vdev %d addr %pM (%s)\n",
+ arg->vdev_id, arg->addr,
+ arg->peer_reassoc ? "reassociate" : "new");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_10_2_op_gen_peer_assoc(struct ath10k *ar,
+ const struct wmi_peer_assoc_complete_arg *arg)
+{
+ size_t len = sizeof(struct wmi_10_2_peer_assoc_complete_cmd);
+ struct sk_buff *skb;
+ int ret;
+
+ ret = ath10k_wmi_peer_assoc_check_arg(arg);
+ if (ret)
+ return ERR_PTR(ret);
+
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ath10k_wmi_peer_assoc_fill_10_2(ar, skb->data, arg);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi peer assoc vdev %d addr %pM (%s)\n",
+ arg->vdev_id, arg->addr,
+ arg->peer_reassoc ? "reassociate" : "new");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_10_4_op_gen_peer_assoc(struct ath10k *ar,
+ const struct wmi_peer_assoc_complete_arg *arg)
+{
+ size_t len = sizeof(struct wmi_10_4_peer_assoc_complete_cmd);
+ struct sk_buff *skb;
+ int ret;
+
+ ret = ath10k_wmi_peer_assoc_check_arg(arg);
+ if (ret)
+ return ERR_PTR(ret);
+
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ath10k_wmi_peer_assoc_fill_10_4(ar, skb->data, arg);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi peer assoc vdev %d addr %pM (%s)\n",
+ arg->vdev_id, arg->addr,
+ arg->peer_reassoc ? "reassociate" : "new");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_10_2_op_gen_pdev_get_temperature(struct ath10k *ar)
+{
+ struct sk_buff *skb;
+
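+ /* This command has no payload; an empty WMI buffer is enough. */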
+ skb = ath10k_wmi_alloc_skb(ar, 0);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi pdev get temperature\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_10_2_op_gen_pdev_bss_chan_info(struct ath10k *ar,
+ enum wmi_bss_survey_req_type type)
+{
+ struct wmi_pdev_chan_info_req_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_pdev_chan_info_req_cmd *)skb->data;
+ cmd->type = __cpu_to_le32(type);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi pdev bss info request type %d\n", type);
+
+ return skb;
+}
+
+/* This function assumes the beacon is already DMA mapped */
+static struct sk_buff *
+ath10k_wmi_op_gen_beacon_dma(struct ath10k *ar, u32 vdev_id, const void *bcn,
+ size_t bcn_len, u32 bcn_paddr, bool dtim_zero,
+ bool deliver_cab)
+{
+ struct wmi_bcn_tx_ref_cmd *cmd;
+ struct sk_buff *skb;
+ struct ieee80211_hdr *hdr;
+ u16 fc;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ hdr = (struct ieee80211_hdr *)bcn;
+ fc = le16_to_cpu(hdr->frame_control);
+
+ cmd = (struct wmi_bcn_tx_ref_cmd *)skb->data;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->data_len = __cpu_to_le32(bcn_len);
+ cmd->data_ptr = __cpu_to_le32(bcn_paddr);
+ cmd->msdu_id = 0;
+ cmd->frame_control = __cpu_to_le32(fc);
+ cmd->flags = 0;
+ cmd->antenna_mask = __cpu_to_le32(WMI_BCN_TX_REF_DEF_ANTENNA);
+
+ if (dtim_zero)
+ cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DTIM_ZERO);
+
+ if (deliver_cab)
+ cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DELIVER_CAB);
+
+ return skb;
+}
+
+void ath10k_wmi_set_wmm_param(struct wmi_wmm_params *params,
+ const struct wmi_wmm_params_arg *arg)
+{
+ params->cwmin = __cpu_to_le32(arg->cwmin);
+ params->cwmax = __cpu_to_le32(arg->cwmax);
+ params->aifs = __cpu_to_le32(arg->aifs);
+ params->txop = __cpu_to_le32(arg->txop);
+ params->acm = __cpu_to_le32(arg->acm);
+ params->no_ack = __cpu_to_le32(arg->no_ack);
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_pdev_set_wmm(struct ath10k *ar,
+ const struct wmi_wmm_params_all_arg *arg)
+{
+ struct wmi_pdev_set_wmm_params *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_pdev_set_wmm_params *)skb->data;
+ ath10k_wmi_set_wmm_param(&cmd->ac_be, &arg->ac_be);
+ ath10k_wmi_set_wmm_param(&cmd->ac_bk, &arg->ac_bk);
+ ath10k_wmi_set_wmm_param(&cmd->ac_vi, &arg->ac_vi);
+ ath10k_wmi_set_wmm_param(&cmd->ac_vo, &arg->ac_vo);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi pdev set wmm params\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_request_stats(struct ath10k *ar, u32 stats_mask)
+{
+ struct wmi_request_stats_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_request_stats_cmd *)skb->data;
+ cmd->stats_id = __cpu_to_le32(stats_mask);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi request stats 0x%08x\n",
+ stats_mask);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_force_fw_hang(struct ath10k *ar,
+ enum wmi_force_fw_hang_type type, u32 delay_ms)
+{
+ struct wmi_force_fw_hang_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_force_fw_hang_cmd *)skb->data;
+ cmd->type = __cpu_to_le32(type);
+ cmd->delay_ms = __cpu_to_le32(delay_ms);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi force fw hang %d delay %d\n",
+ type, delay_ms);
+ return skb;
+}
+
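+ /* Build the dbglog configuration command. A non-zero module_enable
+ * mask selects which firmware log modules run at the requested level;
+ * a zero mask falls back to enabling all modules at WARN level.
+ */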
+static struct sk_buff *
+ath10k_wmi_op_gen_dbglog_cfg(struct ath10k *ar, u64 module_enable,
+ u32 log_level)
+{
+ struct wmi_dbglog_cfg_cmd *cmd;
+ struct sk_buff *skb;
+ u32 cfg;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_dbglog_cfg_cmd *)skb->data;
+
+ if (module_enable) {
+ cfg = SM(log_level,
+ ATH10K_DBGLOG_CFG_LOG_LVL);
+ } else {
+ /* fall back to defaults: all modules at WARN level */
+ cfg = SM(ATH10K_DBGLOG_LEVEL_WARN,
+ ATH10K_DBGLOG_CFG_LOG_LVL);
+ module_enable = ~0;
+ }
+
+ cmd->module_enable = __cpu_to_le32(module_enable);
+ cmd->module_valid = __cpu_to_le32(~0);
+ cmd->config_enable = __cpu_to_le32(cfg);
+ cmd->config_valid = __cpu_to_le32(ATH10K_DBGLOG_CFG_LOG_LVL_MASK);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi dbglog cfg modules %08x %08x config %08x %08x\n",
+ __le32_to_cpu(cmd->module_enable),
+ __le32_to_cpu(cmd->module_valid),
+ __le32_to_cpu(cmd->config_enable),
+ __le32_to_cpu(cmd->config_valid));
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_10_4_op_gen_dbglog_cfg(struct ath10k *ar, u64 module_enable,
+ u32 log_level)
+{
+ struct wmi_10_4_dbglog_cfg_cmd *cmd;
+ struct sk_buff *skb;
+ u32 cfg;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_10_4_dbglog_cfg_cmd *)skb->data;
+
+ if (module_enable) {
+ cfg = SM(log_level,
+ ATH10K_DBGLOG_CFG_LOG_LVL);
+ } else {
+ /* fall back to defaults: all modules at WARN level */
+ cfg = SM(ATH10K_DBGLOG_LEVEL_WARN,
+ ATH10K_DBGLOG_CFG_LOG_LVL);
+ module_enable = ~0;
+ }
+
+ cmd->module_enable = __cpu_to_le64(module_enable);
+ cmd->module_valid = __cpu_to_le64(~0);
+ cmd->config_enable = __cpu_to_le32(cfg);
+ cmd->config_valid = __cpu_to_le32(ATH10K_DBGLOG_CFG_LOG_LVL_MASK);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi dbglog cfg modules 0x%016llx 0x%016llx config %08x %08x\n",
+ __le64_to_cpu(cmd->module_enable),
+ __le64_to_cpu(cmd->module_valid),
+ __le32_to_cpu(cmd->config_enable),
+ __le32_to_cpu(cmd->config_valid));
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_pktlog_enable(struct ath10k *ar, u32 ev_bitmap)
+{
+ struct wmi_pdev_pktlog_enable_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
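+ /* Restrict the filter to the pktlog event classes ath10k defines. */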
+ ev_bitmap &= ATH10K_PKTLOG_ANY;
+
+ cmd = (struct wmi_pdev_pktlog_enable_cmd *)skb->data;
+ cmd->ev_bitmap = __cpu_to_le32(ev_bitmap);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi enable pktlog filter 0x%08x\n",
+ ev_bitmap);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_pktlog_disable(struct ath10k *ar)
+{
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, 0);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi disable pktlog\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_pdev_set_quiet_mode(struct ath10k *ar, u32 period,
+ u32 duration, u32 next_offset,
+ u32 enabled)
+{
+ struct wmi_pdev_set_quiet_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_pdev_set_quiet_cmd *)skb->data;
+ cmd->period = __cpu_to_le32(period);
+ cmd->duration = __cpu_to_le32(duration);
+ cmd->next_start = __cpu_to_le32(next_offset);
+ cmd->enabled = __cpu_to_le32(enabled);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi quiet param: period %u duration %u enabled %d\n",
+ period, duration, enabled);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_addba_clear_resp(struct ath10k *ar, u32 vdev_id,
+ const u8 *mac)
+{
+ struct wmi_addba_clear_resp_cmd *cmd;
+ struct sk_buff *skb;
+
+ if (!mac)
+ return ERR_PTR(-EINVAL);
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_addba_clear_resp_cmd *)skb->data;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ ether_addr_copy(cmd->peer_macaddr.addr, mac);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi addba clear resp vdev_id 0x%X mac_addr %pM\n",
+ vdev_id, mac);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_addba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
+ u32 tid, u32 buf_size)
+{
+ struct wmi_addba_send_cmd *cmd;
+ struct sk_buff *skb;
+
+ if (!mac)
+ return ERR_PTR(-EINVAL);
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_addba_send_cmd *)skb->data;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ ether_addr_copy(cmd->peer_macaddr.addr, mac);
+ cmd->tid = __cpu_to_le32(tid);
+ cmd->buffersize = __cpu_to_le32(buf_size);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi addba send vdev_id 0x%X mac_addr %pM tid %u bufsize %u\n",
+ vdev_id, mac, tid, buf_size);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_addba_set_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac,
+ u32 tid, u32 status)
+{
+ struct wmi_addba_setresponse_cmd *cmd;
+ struct sk_buff *skb;
+
+ if (!mac)
+ return ERR_PTR(-EINVAL);
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_addba_setresponse_cmd *)skb->data;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ ether_addr_copy(cmd->peer_macaddr.addr, mac);
+ cmd->tid = __cpu_to_le32(tid);
+ cmd->statuscode = __cpu_to_le32(status);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi addba set resp vdev_id 0x%X mac_addr %pM tid %u status %u\n",
+ vdev_id, mac, tid, status);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_delba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
+ u32 tid, u32 initiator, u32 reason)
+{
+ struct wmi_delba_send_cmd *cmd;
+ struct sk_buff *skb;
+
+ if (!mac)
+ return ERR_PTR(-EINVAL);
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_delba_send_cmd *)skb->data;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ ether_addr_copy(cmd->peer_macaddr.addr, mac);
+ cmd->tid = __cpu_to_le32(tid);
+ cmd->initiator = __cpu_to_le32(initiator);
+ cmd->reasoncode = __cpu_to_le32(reason);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi delba send vdev_id 0x%X mac_addr %pM tid %u initiator %u reason %u\n",
+ vdev_id, mac, tid, initiator, reason);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_10_2_4_op_gen_pdev_get_tpc_config(struct ath10k *ar, u32 param)
+{
+ struct wmi_pdev_get_tpc_config_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_pdev_get_tpc_config_cmd *)skb->data;
+ cmd->param = __cpu_to_le32(param);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi pdev get tpc config param %d\n", param);
+ return skb;
+}
+
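+ /* The *_stats_fill() helpers below render firmware statistics as text
+ * into a caller-provided buffer of ATH10K_FW_STATS_BUF_SIZE bytes; the
+ * running offset is threaded through *length so the helpers can be
+ * chained.
+ */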
+static void
+ath10k_wmi_fw_pdev_base_stats_fill(const struct ath10k_fw_stats_pdev *pdev,
+ char *buf, u32 *length)
+{
+ u32 len = *length;
+ u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ len += scnprintf(buf + len, buf_len - len, "%30s\n",
+ "ath10k PDEV stats");
+ len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+ "=================");
+
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Channel noise floor", pdev->ch_noise_floor);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Channel TX power", pdev->chan_tx_power);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "TX frame count", pdev->tx_frame_count);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "RX frame count", pdev->rx_frame_count);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "RX clear count", pdev->rx_clear_count);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Cycle count", pdev->cycle_count);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "PHY error count", pdev->phy_err_count);
+
+ *length = len;
+}
+
+static void
+ath10k_wmi_fw_pdev_extra_stats_fill(const struct ath10k_fw_stats_pdev *pdev,
+ char *buf, u32 *length)
+{
+ u32 len = *length;
+ u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "RTS bad count", pdev->rts_bad);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "RTS good count", pdev->rts_good);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "FCS bad count", pdev->fcs_bad);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "No beacon count", pdev->no_beacons);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "MIB int count", pdev->mib_int_count);
+
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ *length = len;
+}
+
+static void
+ath10k_wmi_fw_pdev_tx_stats_fill(const struct ath10k_fw_stats_pdev *pdev,
+ char *buf, u32 *length)
+{
+ u32 len = *length;
+ u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "\n%30s\n",
+ "ath10k PDEV TX stats");
+ len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+ "=================");
+
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "HTT cookies queued", pdev->comp_queued);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "HTT cookies disp.", pdev->comp_delivered);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MSDU queued", pdev->msdu_enqued);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MPDU queued", pdev->mpdu_enqued);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MSDUs dropped", pdev->wmm_drop);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Local enqued", pdev->local_enqued);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Local freed", pdev->local_freed);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "HW queued", pdev->hw_queued);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "PPDUs reaped", pdev->hw_reaped);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Num underruns", pdev->underrun);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "PPDUs cleaned", pdev->tx_abort);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MPDUs requeued", pdev->mpdus_requeued);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Excessive retries", pdev->tx_ko);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "HW rate", pdev->data_rc);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Sched self triggers", pdev->self_triggers);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Dropped due to SW retries",
+ pdev->sw_retry_failure);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Illegal rate phy errors",
+ pdev->illgl_rate_phy_err);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Pdev continuous xretry", pdev->pdev_cont_xretry);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "TX timeout", pdev->pdev_tx_timeout);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "PDEV resets", pdev->pdev_resets);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "PHY underrun", pdev->phy_underrun);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MPDU is more than txop limit", pdev->txop_ovf);
+ *length = len;
+}
+
+static void
+ath10k_wmi_fw_pdev_rx_stats_fill(const struct ath10k_fw_stats_pdev *pdev,
+ char *buf, u32 *length)
+{
+ u32 len = *length;
+ u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "\n%30s\n",
+ "ath10k PDEV RX stats");
+ len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+ "=================");
+
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Mid PPDU route change",
+ pdev->mid_ppdu_route_change);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Tot. number of statuses", pdev->status_rcvd);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Extra frags on rings 0", pdev->r0_frags);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Extra frags on rings 1", pdev->r1_frags);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Extra frags on rings 2", pdev->r2_frags);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Extra frags on rings 3", pdev->r3_frags);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MSDUs delivered to HTT", pdev->htt_msdus);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MPDUs delivered to HTT", pdev->htt_mpdus);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MSDUs delivered to stack", pdev->loc_msdus);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MPDUs delivered to stack", pdev->loc_mpdus);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Oversized AMSDUs", pdev->oversize_amsdu);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "PHY errors", pdev->phy_errs);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "PHY errors drops", pdev->phy_err_drop);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MPDU errors (FCS, MIC, ENC)", pdev->mpdu_errs);
+ *length = len;
+}
+
+static void
+ath10k_wmi_fw_vdev_stats_fill(const struct ath10k_fw_stats_vdev *vdev,
+ char *buf, u32 *length)
+{
+ u32 len = *length;
+ u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
+ int i;
+
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "vdev id", vdev->vdev_id);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "beacon snr", vdev->beacon_snr);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "data snr", vdev->data_snr);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "num rx frames", vdev->num_rx_frames);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "num rts fail", vdev->num_rts_fail);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "num rts success", vdev->num_rts_success);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "num rx err", vdev->num_rx_err);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "num rx discard", vdev->num_rx_discard);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "num tx not acked", vdev->num_tx_not_acked);
+
+ for (i = 0; i < ARRAY_SIZE(vdev->num_tx_frames); i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%25s [%02d] %u\n",
+ "num tx frames", i,
+ vdev->num_tx_frames[i]);
+
+ for (i = 0; i < ARRAY_SIZE(vdev->num_tx_frames_retries); i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%25s [%02d] %u\n",
+ "num tx frames retries", i,
+ vdev->num_tx_frames_retries[i]);
+
+ for (i = 0; i < ARRAY_SIZE(vdev->num_tx_frames_failures); i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%25s [%02d] %u\n",
+ "num tx frames failures", i,
+ vdev->num_tx_frames_failures[i]);
+
+ for (i = 0; i < ARRAY_SIZE(vdev->tx_rate_history); i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%25s [%02d] 0x%08x\n",
+ "tx rate history", i,
+ vdev->tx_rate_history[i]);
+
+ for (i = 0; i < ARRAY_SIZE(vdev->beacon_rssi_history); i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%25s [%02d] %u\n",
+ "beacon rssi history", i,
+ vdev->beacon_rssi_history[i]);
+
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ *length = len;
+}
+
+static void
+ath10k_wmi_fw_peer_stats_fill(const struct ath10k_fw_stats_peer *peer,
+ char *buf, u32 *length, bool extended_peer)
+{
+ u32 len = *length;
+ u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "%30s %pM\n",
+ "Peer MAC address", peer->peer_macaddr);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "Peer RSSI", peer->peer_rssi);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "Peer TX rate", peer->peer_tx_rate);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "Peer RX rate", peer->peer_rx_rate);
+ if (!extended_peer)
+ len += scnprintf(buf + len, buf_len - len, "%30s %llu\n",
+ "Peer RX duration", peer->rx_duration);
+
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ *length = len;
+}
+
+static void
+ath10k_wmi_fw_extd_peer_stats_fill(const struct ath10k_fw_extd_stats_peer *peer,
+ char *buf, u32 *length)
+{
+ u32 len = *length;
+ u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "%30s %pM\n",
+ "Peer MAC address", peer->peer_macaddr);
+ len += scnprintf(buf + len, buf_len - len, "%30s %llu\n",
+ "Peer RX duration", peer->rx_duration);
+}
+
+void ath10k_wmi_main_op_fw_stats_fill(struct ath10k *ar,
+ struct ath10k_fw_stats *fw_stats,
+ char *buf)
+{
+ u32 len = 0;
+ u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
+ const struct ath10k_fw_stats_pdev *pdev;
+ const struct ath10k_fw_stats_vdev *vdev;
+ const struct ath10k_fw_stats_peer *peer;
+ size_t num_peers;
+ size_t num_vdevs;
+
+ spin_lock_bh(&ar->data_lock);
+
+ pdev = list_first_entry_or_null(&fw_stats->pdevs,
+ struct ath10k_fw_stats_pdev, list);
+ if (!pdev) {
+ ath10k_warn(ar, "failed to get pdev stats\n");
+ goto unlock;
+ }
+
+ num_peers = list_count_nodes(&fw_stats->peers);
+ num_vdevs = list_count_nodes(&fw_stats->vdevs);
+
+ ath10k_wmi_fw_pdev_base_stats_fill(pdev, buf, &len);
+ ath10k_wmi_fw_pdev_tx_stats_fill(pdev, buf, &len);
+ ath10k_wmi_fw_pdev_rx_stats_fill(pdev, buf, &len);
+
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
+ "ath10k VDEV stats", num_vdevs);
+ len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+ "=================");
+
+ list_for_each_entry(vdev, &fw_stats->vdevs, list) {
+ ath10k_wmi_fw_vdev_stats_fill(vdev, buf, &len);
+ }
+
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
+ "ath10k PEER stats", num_peers);
+ len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+ "=================");
+
+ list_for_each_entry(peer, &fw_stats->peers, list) {
+ ath10k_wmi_fw_peer_stats_fill(peer, buf, &len,
+ fw_stats->extended);
+ }
+
+unlock:
+ spin_unlock_bh(&ar->data_lock);
+
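+ /* Terminate the string, dropping the last byte if the output
+ * filled the buffer completely.
+ */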
+ if (len >= buf_len)
+ buf[len - 1] = 0;
+ else
+ buf[len] = 0;
+}
+
+void ath10k_wmi_10x_op_fw_stats_fill(struct ath10k *ar,
+ struct ath10k_fw_stats *fw_stats,
+ char *buf)
+{
+ unsigned int len = 0;
+ unsigned int buf_len = ATH10K_FW_STATS_BUF_SIZE;
+ const struct ath10k_fw_stats_pdev *pdev;
+ const struct ath10k_fw_stats_vdev *vdev;
+ const struct ath10k_fw_stats_peer *peer;
+ size_t num_peers;
+ size_t num_vdevs;
+
+ spin_lock_bh(&ar->data_lock);
+
+ pdev = list_first_entry_or_null(&fw_stats->pdevs,
+ struct ath10k_fw_stats_pdev, list);
+ if (!pdev) {
+ ath10k_warn(ar, "failed to get pdev stats\n");
+ goto unlock;
+ }
+
+ num_peers = list_count_nodes(&fw_stats->peers);
+ num_vdevs = list_count_nodes(&fw_stats->vdevs);
+
+ ath10k_wmi_fw_pdev_base_stats_fill(pdev, buf, &len);
+ ath10k_wmi_fw_pdev_extra_stats_fill(pdev, buf, &len);
+ ath10k_wmi_fw_pdev_tx_stats_fill(pdev, buf, &len);
+ ath10k_wmi_fw_pdev_rx_stats_fill(pdev, buf, &len);
+
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
+ "ath10k VDEV stats", num_vdevs);
+ len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+ "=================");
+
+ list_for_each_entry(vdev, &fw_stats->vdevs, list) {
+ ath10k_wmi_fw_vdev_stats_fill(vdev, buf, &len);
+ }
+
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
+ "ath10k PEER stats", num_peers);
+ len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+ "=================");
+
+ list_for_each_entry(peer, &fw_stats->peers, list) {
+ ath10k_wmi_fw_peer_stats_fill(peer, buf, &len,
+ fw_stats->extended);
+ }
+
+unlock:
+ spin_unlock_bh(&ar->data_lock);
+
+ if (len >= buf_len)
+ buf[len - 1] = 0;
+ else
+ buf[len] = 0;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_pdev_enable_adaptive_cca(struct ath10k *ar, u8 enable,
+ u32 detect_level, u32 detect_margin)
+{
+ struct wmi_pdev_set_adaptive_cca_params *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_pdev_set_adaptive_cca_params *)skb->data;
+ cmd->enable = __cpu_to_le32(enable);
+ cmd->cca_detect_level = __cpu_to_le32(detect_level);
+ cmd->cca_detect_margin = __cpu_to_le32(detect_margin);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi pdev set adaptive cca params enable:%d detection level:%d detection margin:%d\n",
+ enable, detect_level, detect_margin);
+ return skb;
+}
+
+static void
+ath10k_wmi_fw_vdev_stats_extd_fill(const struct ath10k_fw_stats_vdev_extd *vdev,
+ char *buf, u32 *length)
+{
+ u32 len = *length;
+ u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
+ u32 val;
+
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "vdev id", vdev->vdev_id);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "ppdu aggr count", vdev->ppdu_aggr_cnt);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "ppdu noack", vdev->ppdu_noack);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "mpdu queued", vdev->mpdu_queued);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "ppdu nonaggr count", vdev->ppdu_nonaggr_cnt);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "mpdu sw requeued", vdev->mpdu_sw_requeued);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "mpdu success retry", vdev->mpdu_suc_retry);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "mpdu success multitry", vdev->mpdu_suc_multitry);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "mpdu fail retry", vdev->mpdu_fail_retry);
+ val = vdev->tx_ftm_suc;
+ if (val & WMI_VDEV_STATS_FTM_COUNT_VALID)
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "tx ftm success",
+ MS(val, WMI_VDEV_STATS_FTM_COUNT));
+ val = vdev->tx_ftm_suc_retry;
+ if (val & WMI_VDEV_STATS_FTM_COUNT_VALID)
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "tx ftm success retry",
+ MS(val, WMI_VDEV_STATS_FTM_COUNT));
+ val = vdev->tx_ftm_fail;
+ if (val & WMI_VDEV_STATS_FTM_COUNT_VALID)
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "tx ftm fail",
+ MS(val, WMI_VDEV_STATS_FTM_COUNT));
+ val = vdev->rx_ftmr_cnt;
+ if (val & WMI_VDEV_STATS_FTM_COUNT_VALID)
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "rx ftm request count",
+ MS(val, WMI_VDEV_STATS_FTM_COUNT));
+ val = vdev->rx_ftmr_dup_cnt;
+ if (val & WMI_VDEV_STATS_FTM_COUNT_VALID)
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "rx ftm request dup count",
+ MS(val, WMI_VDEV_STATS_FTM_COUNT));
+ val = vdev->rx_iftmr_cnt;
+ if (val & WMI_VDEV_STATS_FTM_COUNT_VALID)
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "rx initial ftm req count",
+ MS(val, WMI_VDEV_STATS_FTM_COUNT));
+ val = vdev->rx_iftmr_dup_cnt;
+ if (val & WMI_VDEV_STATS_FTM_COUNT_VALID)
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "rx initial ftm req dup cnt",
+ MS(val, WMI_VDEV_STATS_FTM_COUNT));
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ *length = len;
+}
+
+void ath10k_wmi_10_4_op_fw_stats_fill(struct ath10k *ar,
+ struct ath10k_fw_stats *fw_stats,
+ char *buf)
+{
+ u32 len = 0;
+ u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
+ const struct ath10k_fw_stats_pdev *pdev;
+ const struct ath10k_fw_stats_vdev_extd *vdev;
+ const struct ath10k_fw_stats_peer *peer;
+ const struct ath10k_fw_extd_stats_peer *extd_peer;
+ size_t num_peers;
+ size_t num_vdevs;
+
+ spin_lock_bh(&ar->data_lock);
+
+ pdev = list_first_entry_or_null(&fw_stats->pdevs,
+ struct ath10k_fw_stats_pdev, list);
+ if (!pdev) {
+ ath10k_warn(ar, "failed to get pdev stats\n");
+ goto unlock;
+ }
+
+ num_peers = list_count_nodes(&fw_stats->peers);
+ num_vdevs = list_count_nodes(&fw_stats->vdevs);
+
+ ath10k_wmi_fw_pdev_base_stats_fill(pdev, buf, &len);
+ ath10k_wmi_fw_pdev_extra_stats_fill(pdev, buf, &len);
+ ath10k_wmi_fw_pdev_tx_stats_fill(pdev, buf, &len);
+
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "HW paused", pdev->hw_paused);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Seqs posted", pdev->seq_posted);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Seqs failed queueing", pdev->seq_failed_queueing);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Seqs completed", pdev->seq_completed);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Seqs restarted", pdev->seq_restarted);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MU Seqs posted", pdev->mu_seq_posted);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MPDUs SW flushed", pdev->mpdus_sw_flush);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MPDUs HW filtered", pdev->mpdus_hw_filter);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MPDUs truncated", pdev->mpdus_truncated);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MPDUs receive no ACK", pdev->mpdus_ack_failed);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MPDUs expired", pdev->mpdus_expired);
+
+ ath10k_wmi_fw_pdev_rx_stats_fill(pdev, buf, &len);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Num Rx Overflow errors", pdev->rx_ovfl_errs);
+
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
+ "ath10k VDEV stats", num_vdevs);
+ len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+ "=================");
+ list_for_each_entry(vdev, &fw_stats->vdevs, list) {
+ ath10k_wmi_fw_vdev_stats_extd_fill(vdev, buf, &len);
+ }
+
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
+ "ath10k PEER stats", num_peers);
+ len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+ "=================");
+
+ list_for_each_entry(peer, &fw_stats->peers, list) {
+ ath10k_wmi_fw_peer_stats_fill(peer, buf, &len,
+ fw_stats->extended);
+ }
+
+ if (fw_stats->extended) {
+ list_for_each_entry(extd_peer, &fw_stats->peers_extd, list) {
+ ath10k_wmi_fw_extd_peer_stats_fill(extd_peer, buf,
+ &len);
+ }
+ }
+
+unlock:
+ spin_unlock_bh(&ar->data_lock);
+
+ if (len >= buf_len)
+ buf[len - 1] = 0;
+ else
+ buf[len] = 0;
+}
+
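+ /* Map the generic vdev subtype onto the value this firmware branch
+ * expects; subtypes the branch does not support yield -EOPNOTSUPP.
+ */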
+int ath10k_wmi_op_get_vdev_subtype(struct ath10k *ar,
+ enum wmi_vdev_subtype subtype)
+{
+ switch (subtype) {
+ case WMI_VDEV_SUBTYPE_NONE:
+ return WMI_VDEV_SUBTYPE_LEGACY_NONE;
+ case WMI_VDEV_SUBTYPE_P2P_DEVICE:
+ return WMI_VDEV_SUBTYPE_LEGACY_P2P_DEV;
+ case WMI_VDEV_SUBTYPE_P2P_CLIENT:
+ return WMI_VDEV_SUBTYPE_LEGACY_P2P_CLI;
+ case WMI_VDEV_SUBTYPE_P2P_GO:
+ return WMI_VDEV_SUBTYPE_LEGACY_P2P_GO;
+ case WMI_VDEV_SUBTYPE_PROXY_STA:
+ return WMI_VDEV_SUBTYPE_LEGACY_PROXY_STA;
+ case WMI_VDEV_SUBTYPE_MESH_11S:
+ case WMI_VDEV_SUBTYPE_MESH_NON_11S:
+ return -EOPNOTSUPP;
+ }
+ return -EOPNOTSUPP;
+}
+
+static int ath10k_wmi_10_2_4_op_get_vdev_subtype(struct ath10k *ar,
+ enum wmi_vdev_subtype subtype)
+{
+ switch (subtype) {
+ case WMI_VDEV_SUBTYPE_NONE:
+ return WMI_VDEV_SUBTYPE_10_2_4_NONE;
+ case WMI_VDEV_SUBTYPE_P2P_DEVICE:
+ return WMI_VDEV_SUBTYPE_10_2_4_P2P_DEV;
+ case WMI_VDEV_SUBTYPE_P2P_CLIENT:
+ return WMI_VDEV_SUBTYPE_10_2_4_P2P_CLI;
+ case WMI_VDEV_SUBTYPE_P2P_GO:
+ return WMI_VDEV_SUBTYPE_10_2_4_P2P_GO;
+ case WMI_VDEV_SUBTYPE_PROXY_STA:
+ return WMI_VDEV_SUBTYPE_10_2_4_PROXY_STA;
+ case WMI_VDEV_SUBTYPE_MESH_11S:
+ return WMI_VDEV_SUBTYPE_10_2_4_MESH_11S;
+ case WMI_VDEV_SUBTYPE_MESH_NON_11S:
+ return -EOPNOTSUPP;
+ }
+ return -EOPNOTSUPP;
+}
+
+static int ath10k_wmi_10_4_op_get_vdev_subtype(struct ath10k *ar,
+ enum wmi_vdev_subtype subtype)
+{
+ switch (subtype) {
+ case WMI_VDEV_SUBTYPE_NONE:
+ return WMI_VDEV_SUBTYPE_10_4_NONE;
+ case WMI_VDEV_SUBTYPE_P2P_DEVICE:
+ return WMI_VDEV_SUBTYPE_10_4_P2P_DEV;
+ case WMI_VDEV_SUBTYPE_P2P_CLIENT:
+ return WMI_VDEV_SUBTYPE_10_4_P2P_CLI;
+ case WMI_VDEV_SUBTYPE_P2P_GO:
+ return WMI_VDEV_SUBTYPE_10_4_P2P_GO;
+ case WMI_VDEV_SUBTYPE_PROXY_STA:
+ return WMI_VDEV_SUBTYPE_10_4_PROXY_STA;
+ case WMI_VDEV_SUBTYPE_MESH_11S:
+ return WMI_VDEV_SUBTYPE_10_4_MESH_11S;
+ case WMI_VDEV_SUBTYPE_MESH_NON_11S:
+ return WMI_VDEV_SUBTYPE_10_4_MESH_NON_11S;
+ }
+ return -EOPNOTSUPP;
+}
+
+static struct sk_buff *
+ath10k_wmi_10_4_ext_resource_config(struct ath10k *ar,
+ enum wmi_host_platform_type type,
+ u32 fw_feature_bitmap)
+{
+ struct wmi_ext_resource_config_10_4_cmd *cmd;
+ struct sk_buff *skb;
+ u32 num_tdls_sleep_sta = 0;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ if (test_bit(WMI_SERVICE_TDLS_UAPSD_SLEEP_STA, ar->wmi.svc_map))
+ num_tdls_sleep_sta = TARGET_10_4_NUM_TDLS_SLEEP_STA;
+
+ cmd = (struct wmi_ext_resource_config_10_4_cmd *)skb->data;
+ cmd->host_platform_config = __cpu_to_le32(type);
+ cmd->fw_feature_bitmap = __cpu_to_le32(fw_feature_bitmap);
+ cmd->wlan_gpio_priority = __cpu_to_le32(ar->coex_gpio_pin);
+ cmd->coex_version = __cpu_to_le32(WMI_NO_COEX_VERSION_SUPPORT);
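+ /* The remaining coex GPIO pins are unused here; -1 presumably marks
+ * them as invalid to the firmware.
+ */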
+ cmd->coex_gpio_pin1 = __cpu_to_le32(-1);
+ cmd->coex_gpio_pin2 = __cpu_to_le32(-1);
+ cmd->coex_gpio_pin3 = __cpu_to_le32(-1);
+ cmd->num_tdls_vdevs = __cpu_to_le32(TARGET_10_4_NUM_TDLS_VDEVS);
+ cmd->num_tdls_conn_table_entries = __cpu_to_le32(20);
+ cmd->max_tdls_concurrent_sleep_sta = __cpu_to_le32(num_tdls_sleep_sta);
+ cmd->max_tdls_concurrent_buffer_sta =
+ __cpu_to_le32(TARGET_10_4_NUM_TDLS_BUFFER_STA);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi ext resource config host type %d firmware feature bitmap %08x\n",
+ type, fw_feature_bitmap);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_10_4_gen_update_fw_tdls_state(struct ath10k *ar, u32 vdev_id,
+ enum wmi_tdls_state state)
+{
+ struct wmi_10_4_tdls_set_state_cmd *cmd;
+ struct sk_buff *skb;
+ u32 options = 0;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ if (test_bit(WMI_SERVICE_TDLS_EXPLICIT_MODE_ONLY, ar->wmi.svc_map) &&
+ state == WMI_TDLS_ENABLE_ACTIVE)
+ state = WMI_TDLS_ENABLE_PASSIVE;
+
+ if (test_bit(WMI_SERVICE_TDLS_UAPSD_BUFFER_STA, ar->wmi.svc_map))
+ options |= WMI_TDLS_BUFFER_STA_EN;
+
+ cmd = (struct wmi_10_4_tdls_set_state_cmd *)skb->data;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->state = __cpu_to_le32(state);
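+ /* The intervals and thresholds below are fixed driver defaults. */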
+ cmd->notification_interval_ms = __cpu_to_le32(5000);
+ cmd->tx_discovery_threshold = __cpu_to_le32(100);
+ cmd->tx_teardown_threshold = __cpu_to_le32(5);
+ cmd->rssi_teardown_threshold = __cpu_to_le32(-75);
+ cmd->rssi_delta = __cpu_to_le32(-20);
+ cmd->tdls_options = __cpu_to_le32(options);
+ cmd->tdls_peer_traffic_ind_window = __cpu_to_le32(2);
+ cmd->tdls_peer_traffic_response_timeout_ms = __cpu_to_le32(5000);
+ cmd->tdls_puapsd_mask = __cpu_to_le32(0xf);
+ cmd->tdls_puapsd_inactivity_time_ms = __cpu_to_le32(0);
+ cmd->tdls_puapsd_rx_frame_threshold = __cpu_to_le32(10);
+ cmd->teardown_notification_ms = __cpu_to_le32(10);
+ cmd->tdls_peer_kickout_threshold = __cpu_to_le32(96);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi update fw tdls state %d for vdev %i\n",
+ state, vdev_id);
+ return skb;
+}
+
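+ /* Translate the mac80211 U-APSD queue bitmap and max service period
+ * into the WMI TDLS peer QoS word.
+ */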
+static u32 ath10k_wmi_prepare_peer_qos(u8 uapsd_queues, u8 sp)
+{
+ u32 peer_qos = 0;
+
+ if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
+ peer_qos |= WMI_TDLS_PEER_QOS_AC_VO;
+ if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
+ peer_qos |= WMI_TDLS_PEER_QOS_AC_VI;
+ if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
+ peer_qos |= WMI_TDLS_PEER_QOS_AC_BK;
+ if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
+ peer_qos |= WMI_TDLS_PEER_QOS_AC_BE;
+
+ peer_qos |= SM(sp, WMI_TDLS_PEER_SP);
+
+ return peer_qos;
+}
+
+static struct sk_buff *
+ath10k_wmi_10_4_op_gen_pdev_get_tpc_table_cmdid(struct ath10k *ar, u32 param)
+{
+ struct wmi_pdev_get_tpc_table_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_pdev_get_tpc_table_cmd *)skb->data;
+ cmd->param = __cpu_to_le32(param);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi pdev get tpc table param:%d\n", param);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_10_4_gen_tdls_peer_update(struct ath10k *ar,
+ const struct wmi_tdls_peer_update_cmd_arg *arg,
+ const struct wmi_tdls_peer_capab_arg *cap,
+ const struct wmi_channel_arg *chan_arg)
+{
+ struct wmi_10_4_tdls_peer_update_cmd *cmd;
+ struct wmi_tdls_peer_capabilities *peer_cap;
+ struct wmi_channel *chan;
+ struct sk_buff *skb;
+ u32 peer_qos;
+ int len, chan_len;
+ int i;
+
+ /* The command struct already embeds one wmi_channel as a placeholder,
+ * so only peer_chan_len - 1 additional channels need to be appended.
+ */
+ chan_len = cap->peer_chan_len ? (cap->peer_chan_len - 1) : 0;
+
+ len = sizeof(*cmd) + chan_len * sizeof(*chan);
+
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_10_4_tdls_peer_update_cmd *)skb->data;
+ cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
+ ether_addr_copy(cmd->peer_macaddr.addr, arg->addr);
+ cmd->peer_state = __cpu_to_le32(arg->peer_state);
+
+ peer_qos = ath10k_wmi_prepare_peer_qos(cap->peer_uapsd_queues,
+ cap->peer_max_sp);
+
+ peer_cap = &cmd->peer_capab;
+ peer_cap->peer_qos = __cpu_to_le32(peer_qos);
+ peer_cap->buff_sta_support = __cpu_to_le32(cap->buff_sta_support);
+ peer_cap->off_chan_support = __cpu_to_le32(cap->off_chan_support);
+ peer_cap->peer_curr_operclass = __cpu_to_le32(cap->peer_curr_operclass);
+ peer_cap->self_curr_operclass = __cpu_to_le32(cap->self_curr_operclass);
+ peer_cap->peer_chan_len = __cpu_to_le32(cap->peer_chan_len);
+ peer_cap->peer_operclass_len = __cpu_to_le32(cap->peer_operclass_len);
+
+ for (i = 0; i < WMI_TDLS_MAX_SUPP_OPER_CLASSES; i++)
+ peer_cap->peer_operclass[i] = cap->peer_operclass[i];
+
+ peer_cap->is_peer_responder = __cpu_to_le32(cap->is_peer_responder);
+ peer_cap->pref_offchan_num = __cpu_to_le32(cap->pref_offchan_num);
+ peer_cap->pref_offchan_bw = __cpu_to_le32(cap->pref_offchan_bw);
+
+ for (i = 0; i < cap->peer_chan_len; i++) {
+ chan = (struct wmi_channel *)&peer_cap->peer_chan_list[i];
+ ath10k_wmi_put_wmi_channel(ar, chan, &chan_arg[i]);
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi tdls peer update vdev %i state %d n_chans %u\n",
+ arg->vdev_id, arg->peer_state, cap->peer_chan_len);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_10_4_gen_radar_found(struct ath10k *ar,
+ const struct ath10k_radar_found_info *arg)
+{
+ struct wmi_radar_found_info *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_radar_found_info *)skb->data;
+ cmd->pri_min = __cpu_to_le32(arg->pri_min);
+ cmd->pri_max = __cpu_to_le32(arg->pri_max);
+ cmd->width_min = __cpu_to_le32(arg->width_min);
+ cmd->width_max = __cpu_to_le32(arg->width_max);
+ cmd->sidx_min = __cpu_to_le32(arg->sidx_min);
+ cmd->sidx_max = __cpu_to_le32(arg->sidx_max);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi radar found pri_min %d pri_max %d width_min %d width_max %d sidx_min %d sidx_max %d\n",
+ arg->pri_min, arg->pri_max, arg->width_min,
+ arg->width_max, arg->sidx_min, arg->sidx_max);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_10_4_gen_per_peer_per_tid_cfg(struct ath10k *ar,
+ const struct wmi_per_peer_per_tid_cfg_arg *arg)
+{
+ struct wmi_peer_per_tid_cfg_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ memset(skb->data, 0, sizeof(*cmd));
+
+ cmd = (struct wmi_peer_per_tid_cfg_cmd *)skb->data;
+ cmd->vdev_id = cpu_to_le32(arg->vdev_id);
+ ether_addr_copy(cmd->peer_macaddr.addr, arg->peer_macaddr.addr);
+ cmd->tid = cpu_to_le32(arg->tid);
+ cmd->ack_policy = cpu_to_le32(arg->ack_policy);
+ cmd->aggr_control = cpu_to_le32(arg->aggr_control);
+ cmd->rate_control = cpu_to_le32(arg->rate_ctrl);
+ cmd->retry_count = cpu_to_le32(arg->retry_count);
+ cmd->rcode_flags = cpu_to_le32(arg->rcode_flags);
+ cmd->ext_tid_cfg_bitmap = cpu_to_le32(arg->ext_tid_cfg_bitmap);
+ cmd->rtscts_ctrl = cpu_to_le32(arg->rtscts_ctrl);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi noack tid %d vdev id %d ack_policy %d aggr %u rate_ctrl %u rcflag %u retry_count %d rtscts %d ext_tid_cfg_bitmap %d mac_addr %pM\n",
+ arg->tid, arg->vdev_id, arg->ack_policy, arg->aggr_control,
+ arg->rate_ctrl, arg->rcode_flags, arg->retry_count,
+ arg->rtscts_ctrl, arg->ext_tid_cfg_bitmap, arg->peer_macaddr.addr);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_echo(struct ath10k *ar, u32 value)
+{
+ struct wmi_echo_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_echo_cmd *)skb->data;
+ cmd->value = cpu_to_le32(value);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi echo value 0x%08x\n", value);
+ return skb;
+}
+
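+ /* Drain the WMI pipeline by sending an echo and waiting for its
+ * reply: since the firmware handles WMI commands in order, completion
+ * of the echo implies every command queued before it has been
+ * processed.
+ */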
+int
+ath10k_wmi_barrier(struct ath10k *ar)
+{
+ int ret;
+ unsigned long time_left;
+
+ spin_lock_bh(&ar->data_lock);
+ reinit_completion(&ar->wmi.barrier);
+ spin_unlock_bh(&ar->data_lock);
+
+ ret = ath10k_wmi_echo(ar, ATH10K_WMI_BARRIER_ECHO_ID);
+ if (ret) {
+ ath10k_warn(ar, "failed to submit wmi echo: %d\n", ret);
+ return ret;
+ }
+
+ time_left = wait_for_completion_timeout(&ar->wmi.barrier,
+ ATH10K_WMI_BARRIER_TIMEOUT_HZ);
+ if (!time_left)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static struct sk_buff *
+ath10k_wmi_10_2_4_op_gen_bb_timing(struct ath10k *ar,
+ const struct wmi_bb_timing_cfg_arg *arg)
+{
+ struct wmi_pdev_bb_timing_cfg_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_pdev_bb_timing_cfg_cmd *)skb->data;
+ cmd->bb_tx_timing = __cpu_to_le32(arg->bb_tx_timing);
+ cmd->bb_xpa_timing = __cpu_to_le32(arg->bb_xpa_timing);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi pdev bb_tx_timing 0x%x bb_xpa_timing 0x%x\n",
+ arg->bb_tx_timing, arg->bb_xpa_timing);
+ return skb;
+}
+
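+ /* Per-firmware-branch op tables. Each table binds the abstract WMI
+ * calls to the encoders/decoders the branch uses; ops left NULL
+ * (marked "not implemented") make the corresponding wmi-ops.h wrapper
+ * reject the call.
+ */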
+static const struct wmi_ops wmi_ops = {
+ .rx = ath10k_wmi_op_rx,
+ .map_svc = wmi_main_svc_map,
+
+ .pull_scan = ath10k_wmi_op_pull_scan_ev,
+ .pull_mgmt_rx = ath10k_wmi_op_pull_mgmt_rx_ev,
+ .pull_ch_info = ath10k_wmi_op_pull_ch_info_ev,
+ .pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev,
+ .pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev,
+ .pull_swba = ath10k_wmi_op_pull_swba_ev,
+ .pull_phyerr_hdr = ath10k_wmi_op_pull_phyerr_ev_hdr,
+ .pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
+ .pull_svc_rdy = ath10k_wmi_main_op_pull_svc_rdy_ev,
+ .pull_rdy = ath10k_wmi_op_pull_rdy_ev,
+ .pull_fw_stats = ath10k_wmi_main_op_pull_fw_stats,
+ .pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
+ .pull_echo_ev = ath10k_wmi_op_pull_echo_ev,
+
+ .gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
+ .gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
+ .gen_pdev_set_rd = ath10k_wmi_op_gen_pdev_set_rd,
+ .gen_pdev_set_param = ath10k_wmi_op_gen_pdev_set_param,
+ .gen_init = ath10k_wmi_op_gen_init,
+ .gen_start_scan = ath10k_wmi_op_gen_start_scan,
+ .gen_stop_scan = ath10k_wmi_op_gen_stop_scan,
+ .gen_vdev_create = ath10k_wmi_op_gen_vdev_create,
+ .gen_vdev_delete = ath10k_wmi_op_gen_vdev_delete,
+ .gen_vdev_start = ath10k_wmi_op_gen_vdev_start,
+ .gen_vdev_stop = ath10k_wmi_op_gen_vdev_stop,
+ .gen_vdev_up = ath10k_wmi_op_gen_vdev_up,
+ .gen_vdev_down = ath10k_wmi_op_gen_vdev_down,
+ .gen_vdev_set_param = ath10k_wmi_op_gen_vdev_set_param,
+ .gen_vdev_install_key = ath10k_wmi_op_gen_vdev_install_key,
+ .gen_vdev_spectral_conf = ath10k_wmi_op_gen_vdev_spectral_conf,
+ .gen_vdev_spectral_enable = ath10k_wmi_op_gen_vdev_spectral_enable,
+ /* .gen_vdev_wmm_conf not implemented */
+ .gen_peer_create = ath10k_wmi_op_gen_peer_create,
+ .gen_peer_delete = ath10k_wmi_op_gen_peer_delete,
+ .gen_peer_flush = ath10k_wmi_op_gen_peer_flush,
+ .gen_peer_set_param = ath10k_wmi_op_gen_peer_set_param,
+ .gen_peer_assoc = ath10k_wmi_op_gen_peer_assoc,
+ .gen_set_psmode = ath10k_wmi_op_gen_set_psmode,
+ .gen_set_sta_ps = ath10k_wmi_op_gen_set_sta_ps,
+ .gen_set_ap_ps = ath10k_wmi_op_gen_set_ap_ps,
+ .gen_scan_chan_list = ath10k_wmi_op_gen_scan_chan_list,
+ .gen_beacon_dma = ath10k_wmi_op_gen_beacon_dma,
+ .gen_pdev_set_wmm = ath10k_wmi_op_gen_pdev_set_wmm,
+ .gen_request_stats = ath10k_wmi_op_gen_request_stats,
+ .gen_force_fw_hang = ath10k_wmi_op_gen_force_fw_hang,
+ .gen_mgmt_tx = ath10k_wmi_op_gen_mgmt_tx,
+ .gen_dbglog_cfg = ath10k_wmi_op_gen_dbglog_cfg,
+ .gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable,
+ .gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable,
+ .gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode,
+ /* .gen_pdev_get_temperature not implemented */
+ .gen_addba_clear_resp = ath10k_wmi_op_gen_addba_clear_resp,
+ .gen_addba_send = ath10k_wmi_op_gen_addba_send,
+ .gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
+ .gen_delba_send = ath10k_wmi_op_gen_delba_send,
+ .fw_stats_fill = ath10k_wmi_main_op_fw_stats_fill,
+ .get_vdev_subtype = ath10k_wmi_op_get_vdev_subtype,
+ .gen_echo = ath10k_wmi_op_gen_echo,
+ .gen_gpio_config = ath10k_wmi_op_gen_gpio_config,
+ .gen_gpio_output = ath10k_wmi_op_gen_gpio_output,
+
+ /* .gen_bcn_tmpl not implemented */
+ /* .gen_prb_tmpl not implemented */
+ /* .gen_p2p_go_bcn_ie not implemented */
+ /* .gen_adaptive_qcs not implemented */
+ /* .gen_pdev_enable_adaptive_cca not implemented */
+};
+
+static const struct wmi_ops wmi_10_1_ops = {
+ .rx = ath10k_wmi_10_1_op_rx,
+ .map_svc = wmi_10x_svc_map,
+ .pull_svc_rdy = ath10k_wmi_10x_op_pull_svc_rdy_ev,
+ .pull_fw_stats = ath10k_wmi_10x_op_pull_fw_stats,
+ .gen_init = ath10k_wmi_10_1_op_gen_init,
+ .gen_pdev_set_rd = ath10k_wmi_10x_op_gen_pdev_set_rd,
+ .gen_start_scan = ath10k_wmi_10x_op_gen_start_scan,
+ .gen_peer_assoc = ath10k_wmi_10_1_op_gen_peer_assoc,
+ /* .gen_pdev_get_temperature not implemented */
+
+ /* shared with main branch */
+ .pull_scan = ath10k_wmi_op_pull_scan_ev,
+ .pull_mgmt_rx = ath10k_wmi_op_pull_mgmt_rx_ev,
+ .pull_ch_info = ath10k_wmi_op_pull_ch_info_ev,
+ .pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev,
+ .pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev,
+ .pull_swba = ath10k_wmi_op_pull_swba_ev,
+ .pull_phyerr_hdr = ath10k_wmi_op_pull_phyerr_ev_hdr,
+ .pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
+ .pull_rdy = ath10k_wmi_op_pull_rdy_ev,
+ .pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
+ .pull_echo_ev = ath10k_wmi_op_pull_echo_ev,
+
+ .gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
+ .gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
+ .gen_pdev_set_param = ath10k_wmi_op_gen_pdev_set_param,
+ .gen_stop_scan = ath10k_wmi_op_gen_stop_scan,
+ .gen_vdev_create = ath10k_wmi_op_gen_vdev_create,
+ .gen_vdev_delete = ath10k_wmi_op_gen_vdev_delete,
+ .gen_vdev_start = ath10k_wmi_op_gen_vdev_start,
+ .gen_vdev_stop = ath10k_wmi_op_gen_vdev_stop,
+ .gen_vdev_up = ath10k_wmi_op_gen_vdev_up,
+ .gen_vdev_down = ath10k_wmi_op_gen_vdev_down,
+ .gen_vdev_set_param = ath10k_wmi_op_gen_vdev_set_param,
+ .gen_vdev_install_key = ath10k_wmi_op_gen_vdev_install_key,
+ .gen_vdev_spectral_conf = ath10k_wmi_op_gen_vdev_spectral_conf,
+ .gen_vdev_spectral_enable = ath10k_wmi_op_gen_vdev_spectral_enable,
+ /* .gen_vdev_wmm_conf not implemented */
+ .gen_peer_create = ath10k_wmi_op_gen_peer_create,
+ .gen_peer_delete = ath10k_wmi_op_gen_peer_delete,
+ .gen_peer_flush = ath10k_wmi_op_gen_peer_flush,
+ .gen_peer_set_param = ath10k_wmi_op_gen_peer_set_param,
+ .gen_set_psmode = ath10k_wmi_op_gen_set_psmode,
+ .gen_set_sta_ps = ath10k_wmi_op_gen_set_sta_ps,
+ .gen_set_ap_ps = ath10k_wmi_op_gen_set_ap_ps,
+ .gen_scan_chan_list = ath10k_wmi_op_gen_scan_chan_list,
+ .gen_beacon_dma = ath10k_wmi_op_gen_beacon_dma,
+ .gen_pdev_set_wmm = ath10k_wmi_op_gen_pdev_set_wmm,
+ .gen_request_stats = ath10k_wmi_op_gen_request_stats,
+ .gen_force_fw_hang = ath10k_wmi_op_gen_force_fw_hang,
+ .gen_mgmt_tx = ath10k_wmi_op_gen_mgmt_tx,
+ .gen_dbglog_cfg = ath10k_wmi_op_gen_dbglog_cfg,
+ .gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable,
+ .gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable,
+ .gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode,
+ .gen_addba_clear_resp = ath10k_wmi_op_gen_addba_clear_resp,
+ .gen_addba_send = ath10k_wmi_op_gen_addba_send,
+ .gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
+ .gen_delba_send = ath10k_wmi_op_gen_delba_send,
+ .fw_stats_fill = ath10k_wmi_10x_op_fw_stats_fill,
+ .get_vdev_subtype = ath10k_wmi_op_get_vdev_subtype,
+ .gen_echo = ath10k_wmi_op_gen_echo,
+ .gen_gpio_config = ath10k_wmi_op_gen_gpio_config,
+ .gen_gpio_output = ath10k_wmi_op_gen_gpio_output,
+ /* .gen_bcn_tmpl not implemented */
+ /* .gen_prb_tmpl not implemented */
+ /* .gen_p2p_go_bcn_ie not implemented */
+ /* .gen_adaptive_qcs not implemented */
+ /* .gen_pdev_enable_adaptive_cca not implemented */
+};
+
+static const struct wmi_ops wmi_10_2_ops = {
+ .rx = ath10k_wmi_10_2_op_rx,
+ .pull_fw_stats = ath10k_wmi_10_2_op_pull_fw_stats,
+ .gen_init = ath10k_wmi_10_2_op_gen_init,
+ .gen_peer_assoc = ath10k_wmi_10_2_op_gen_peer_assoc,
+ /* .gen_pdev_get_temperature not implemented */
+
+ /* shared with 10.1 */
+ .map_svc = wmi_10x_svc_map,
+ .pull_svc_rdy = ath10k_wmi_10x_op_pull_svc_rdy_ev,
+ .gen_pdev_set_rd = ath10k_wmi_10x_op_gen_pdev_set_rd,
+ .gen_start_scan = ath10k_wmi_10x_op_gen_start_scan,
+ .gen_echo = ath10k_wmi_op_gen_echo,
+
+ .pull_scan = ath10k_wmi_op_pull_scan_ev,
+ .pull_mgmt_rx = ath10k_wmi_op_pull_mgmt_rx_ev,
+ .pull_ch_info = ath10k_wmi_op_pull_ch_info_ev,
+ .pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev,
+ .pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev,
+ .pull_swba = ath10k_wmi_op_pull_swba_ev,
+ .pull_phyerr_hdr = ath10k_wmi_op_pull_phyerr_ev_hdr,
+ .pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
+ .pull_rdy = ath10k_wmi_op_pull_rdy_ev,
+ .pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
+ .pull_echo_ev = ath10k_wmi_op_pull_echo_ev,
+
+ .gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
+ .gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
+ .gen_pdev_set_param = ath10k_wmi_op_gen_pdev_set_param,
+ .gen_stop_scan = ath10k_wmi_op_gen_stop_scan,
+ .gen_vdev_create = ath10k_wmi_op_gen_vdev_create,
+ .gen_vdev_delete = ath10k_wmi_op_gen_vdev_delete,
+ .gen_vdev_start = ath10k_wmi_op_gen_vdev_start,
+ .gen_vdev_stop = ath10k_wmi_op_gen_vdev_stop,
+ .gen_vdev_up = ath10k_wmi_op_gen_vdev_up,
+ .gen_vdev_down = ath10k_wmi_op_gen_vdev_down,
+ .gen_vdev_set_param = ath10k_wmi_op_gen_vdev_set_param,
+ .gen_vdev_install_key = ath10k_wmi_op_gen_vdev_install_key,
+ .gen_vdev_spectral_conf = ath10k_wmi_op_gen_vdev_spectral_conf,
+ .gen_vdev_spectral_enable = ath10k_wmi_op_gen_vdev_spectral_enable,
+ /* .gen_vdev_wmm_conf not implemented */
+ .gen_peer_create = ath10k_wmi_op_gen_peer_create,
+ .gen_peer_delete = ath10k_wmi_op_gen_peer_delete,
+ .gen_peer_flush = ath10k_wmi_op_gen_peer_flush,
+ .gen_pdev_set_base_macaddr = ath10k_wmi_op_gen_pdev_set_base_macaddr,
+ .gen_peer_set_param = ath10k_wmi_op_gen_peer_set_param,
+ .gen_set_psmode = ath10k_wmi_op_gen_set_psmode,
+ .gen_set_sta_ps = ath10k_wmi_op_gen_set_sta_ps,
+ .gen_set_ap_ps = ath10k_wmi_op_gen_set_ap_ps,
+ .gen_scan_chan_list = ath10k_wmi_op_gen_scan_chan_list,
+ .gen_beacon_dma = ath10k_wmi_op_gen_beacon_dma,
+ .gen_pdev_set_wmm = ath10k_wmi_op_gen_pdev_set_wmm,
+ .gen_request_stats = ath10k_wmi_op_gen_request_stats,
+ .gen_force_fw_hang = ath10k_wmi_op_gen_force_fw_hang,
+ .gen_mgmt_tx = ath10k_wmi_op_gen_mgmt_tx,
+ .gen_dbglog_cfg = ath10k_wmi_op_gen_dbglog_cfg,
+ .gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable,
+ .gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable,
+ .gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode,
+ .gen_addba_clear_resp = ath10k_wmi_op_gen_addba_clear_resp,
+ .gen_addba_send = ath10k_wmi_op_gen_addba_send,
+ .gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
+ .gen_delba_send = ath10k_wmi_op_gen_delba_send,
+ .fw_stats_fill = ath10k_wmi_10x_op_fw_stats_fill,
+ .get_vdev_subtype = ath10k_wmi_op_get_vdev_subtype,
+ .gen_gpio_config = ath10k_wmi_op_gen_gpio_config,
+ .gen_gpio_output = ath10k_wmi_op_gen_gpio_output,
+ /* .gen_pdev_enable_adaptive_cca not implemented */
+};
+
+static const struct wmi_ops wmi_10_2_4_ops = {
+ .rx = ath10k_wmi_10_2_op_rx,
+ .pull_fw_stats = ath10k_wmi_10_2_4_op_pull_fw_stats,
+ .gen_init = ath10k_wmi_10_2_op_gen_init,
+ .gen_peer_assoc = ath10k_wmi_10_2_op_gen_peer_assoc,
+ .gen_pdev_get_temperature = ath10k_wmi_10_2_op_gen_pdev_get_temperature,
+ .gen_pdev_bss_chan_info_req = ath10k_wmi_10_2_op_gen_pdev_bss_chan_info,
+
+ /* shared with 10.1 */
+ .map_svc = wmi_10x_svc_map,
+ .pull_svc_rdy = ath10k_wmi_10x_op_pull_svc_rdy_ev,
+ .gen_pdev_set_rd = ath10k_wmi_10x_op_gen_pdev_set_rd,
+ .gen_start_scan = ath10k_wmi_10x_op_gen_start_scan,
+ .gen_echo = ath10k_wmi_op_gen_echo,
+
+ .pull_scan = ath10k_wmi_op_pull_scan_ev,
+ .pull_mgmt_rx = ath10k_wmi_op_pull_mgmt_rx_ev,
+ .pull_ch_info = ath10k_wmi_op_pull_ch_info_ev,
+ .pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev,
+ .pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev,
+ .pull_swba = ath10k_wmi_10_2_4_op_pull_swba_ev,
+ .pull_phyerr_hdr = ath10k_wmi_op_pull_phyerr_ev_hdr,
+ .pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
+ .pull_rdy = ath10k_wmi_op_pull_rdy_ev,
+ .pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
+ .pull_echo_ev = ath10k_wmi_op_pull_echo_ev,
+
+ .gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
+ .gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
+ .gen_pdev_set_param = ath10k_wmi_op_gen_pdev_set_param,
+ .gen_stop_scan = ath10k_wmi_op_gen_stop_scan,
+ .gen_vdev_create = ath10k_wmi_op_gen_vdev_create,
+ .gen_vdev_delete = ath10k_wmi_op_gen_vdev_delete,
+ .gen_vdev_start = ath10k_wmi_op_gen_vdev_start,
+ .gen_vdev_stop = ath10k_wmi_op_gen_vdev_stop,
+ .gen_vdev_up = ath10k_wmi_op_gen_vdev_up,
+ .gen_vdev_down = ath10k_wmi_op_gen_vdev_down,
+ .gen_vdev_set_param = ath10k_wmi_op_gen_vdev_set_param,
+ .gen_vdev_install_key = ath10k_wmi_op_gen_vdev_install_key,
+ .gen_vdev_spectral_conf = ath10k_wmi_op_gen_vdev_spectral_conf,
+ .gen_vdev_spectral_enable = ath10k_wmi_op_gen_vdev_spectral_enable,
+ .gen_peer_create = ath10k_wmi_op_gen_peer_create,
+ .gen_peer_delete = ath10k_wmi_op_gen_peer_delete,
+ .gen_peer_flush = ath10k_wmi_op_gen_peer_flush,
+ .gen_peer_set_param = ath10k_wmi_op_gen_peer_set_param,
+ .gen_set_psmode = ath10k_wmi_op_gen_set_psmode,
+ .gen_set_sta_ps = ath10k_wmi_op_gen_set_sta_ps,
+ .gen_set_ap_ps = ath10k_wmi_op_gen_set_ap_ps,
+ .gen_scan_chan_list = ath10k_wmi_op_gen_scan_chan_list,
+ .gen_beacon_dma = ath10k_wmi_op_gen_beacon_dma,
+ .gen_pdev_set_wmm = ath10k_wmi_op_gen_pdev_set_wmm,
+ .gen_request_stats = ath10k_wmi_op_gen_request_stats,
+ .gen_force_fw_hang = ath10k_wmi_op_gen_force_fw_hang,
+ .gen_mgmt_tx = ath10k_wmi_op_gen_mgmt_tx,
+ .gen_dbglog_cfg = ath10k_wmi_op_gen_dbglog_cfg,
+ .gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable,
+ .gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable,
+ .gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode,
+ .gen_addba_clear_resp = ath10k_wmi_op_gen_addba_clear_resp,
+ .gen_addba_send = ath10k_wmi_op_gen_addba_send,
+ .gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
+ .gen_delba_send = ath10k_wmi_op_gen_delba_send,
+ .gen_pdev_get_tpc_config = ath10k_wmi_10_2_4_op_gen_pdev_get_tpc_config,
+ .fw_stats_fill = ath10k_wmi_10x_op_fw_stats_fill,
+ .gen_pdev_enable_adaptive_cca =
+ ath10k_wmi_op_gen_pdev_enable_adaptive_cca,
+ .get_vdev_subtype = ath10k_wmi_10_2_4_op_get_vdev_subtype,
+ .gen_bb_timing = ath10k_wmi_10_2_4_op_gen_bb_timing,
+ .gen_gpio_config = ath10k_wmi_op_gen_gpio_config,
+ .gen_gpio_output = ath10k_wmi_op_gen_gpio_output,
+ /* .gen_bcn_tmpl not implemented */
+ /* .gen_prb_tmpl not implemented */
+ /* .gen_p2p_go_bcn_ie not implemented */
+ /* .gen_adaptive_qcs not implemented */
+};
+
+static const struct wmi_ops wmi_10_4_ops = {
+ .rx = ath10k_wmi_10_4_op_rx,
+ .map_svc = wmi_10_4_svc_map,
+
+ .pull_fw_stats = ath10k_wmi_10_4_op_pull_fw_stats,
+ .pull_scan = ath10k_wmi_op_pull_scan_ev,
+ .pull_mgmt_rx = ath10k_wmi_10_4_op_pull_mgmt_rx_ev,
+ .pull_ch_info = ath10k_wmi_10_4_op_pull_ch_info_ev,
+ .pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev,
+ .pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev,
+ .pull_swba = ath10k_wmi_10_4_op_pull_swba_ev,
+ .pull_phyerr_hdr = ath10k_wmi_10_4_op_pull_phyerr_ev_hdr,
+ .pull_phyerr = ath10k_wmi_10_4_op_pull_phyerr_ev,
+ .pull_svc_rdy = ath10k_wmi_main_op_pull_svc_rdy_ev,
+ .pull_rdy = ath10k_wmi_op_pull_rdy_ev,
+ .pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
+ .pull_dfs_status_ev = ath10k_wmi_10_4_op_pull_dfs_status_ev,
+ .get_txbf_conf_scheme = ath10k_wmi_10_4_txbf_conf_scheme,
+
+ .gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
+ .gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
+ .gen_pdev_set_base_macaddr = ath10k_wmi_op_gen_pdev_set_base_macaddr,
+ .gen_pdev_set_rd = ath10k_wmi_10x_op_gen_pdev_set_rd,
+ .gen_pdev_set_param = ath10k_wmi_op_gen_pdev_set_param,
+ .gen_init = ath10k_wmi_10_4_op_gen_init,
+ .gen_start_scan = ath10k_wmi_op_gen_start_scan,
+ .gen_stop_scan = ath10k_wmi_op_gen_stop_scan,
+ .gen_vdev_create = ath10k_wmi_op_gen_vdev_create,
+ .gen_vdev_delete = ath10k_wmi_op_gen_vdev_delete,
+ .gen_vdev_start = ath10k_wmi_op_gen_vdev_start,
+ .gen_vdev_stop = ath10k_wmi_op_gen_vdev_stop,
+ .gen_vdev_up = ath10k_wmi_op_gen_vdev_up,
+ .gen_vdev_down = ath10k_wmi_op_gen_vdev_down,
+ .gen_vdev_set_param = ath10k_wmi_op_gen_vdev_set_param,
+ .gen_vdev_install_key = ath10k_wmi_op_gen_vdev_install_key,
+ .gen_vdev_spectral_conf = ath10k_wmi_op_gen_vdev_spectral_conf,
+ .gen_vdev_spectral_enable = ath10k_wmi_op_gen_vdev_spectral_enable,
+ .gen_peer_create = ath10k_wmi_op_gen_peer_create,
+ .gen_peer_delete = ath10k_wmi_op_gen_peer_delete,
+ .gen_peer_flush = ath10k_wmi_op_gen_peer_flush,
+ .gen_peer_set_param = ath10k_wmi_op_gen_peer_set_param,
+ .gen_peer_assoc = ath10k_wmi_10_4_op_gen_peer_assoc,
+ .gen_set_psmode = ath10k_wmi_op_gen_set_psmode,
+ .gen_set_sta_ps = ath10k_wmi_op_gen_set_sta_ps,
+ .gen_set_ap_ps = ath10k_wmi_op_gen_set_ap_ps,
+ .gen_scan_chan_list = ath10k_wmi_op_gen_scan_chan_list,
+ .gen_beacon_dma = ath10k_wmi_op_gen_beacon_dma,
+ .gen_pdev_set_wmm = ath10k_wmi_op_gen_pdev_set_wmm,
+ .gen_force_fw_hang = ath10k_wmi_op_gen_force_fw_hang,
+ .gen_mgmt_tx = ath10k_wmi_op_gen_mgmt_tx,
+ .gen_dbglog_cfg = ath10k_wmi_10_4_op_gen_dbglog_cfg,
+ .gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable,
+ .gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable,
+ .gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode,
+ .gen_addba_clear_resp = ath10k_wmi_op_gen_addba_clear_resp,
+ .gen_addba_send = ath10k_wmi_op_gen_addba_send,
+ .gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
+ .gen_delba_send = ath10k_wmi_op_gen_delba_send,
+ .fw_stats_fill = ath10k_wmi_10_4_op_fw_stats_fill,
+ .ext_resource_config = ath10k_wmi_10_4_ext_resource_config,
+ .gen_update_fw_tdls_state = ath10k_wmi_10_4_gen_update_fw_tdls_state,
+ .gen_tdls_peer_update = ath10k_wmi_10_4_gen_tdls_peer_update,
+ .gen_pdev_get_tpc_table_cmdid =
+ ath10k_wmi_10_4_op_gen_pdev_get_tpc_table_cmdid,
+ .gen_radar_found = ath10k_wmi_10_4_gen_radar_found,
+ .gen_per_peer_per_tid_cfg = ath10k_wmi_10_4_gen_per_peer_per_tid_cfg,
+
+ /* shared with 10.2 */
+ .pull_echo_ev = ath10k_wmi_op_pull_echo_ev,
+ .gen_request_stats = ath10k_wmi_op_gen_request_stats,
+ .gen_pdev_get_temperature = ath10k_wmi_10_2_op_gen_pdev_get_temperature,
+ .get_vdev_subtype = ath10k_wmi_10_4_op_get_vdev_subtype,
+ .gen_pdev_bss_chan_info_req = ath10k_wmi_10_2_op_gen_pdev_bss_chan_info,
+ .gen_echo = ath10k_wmi_op_gen_echo,
+ .gen_pdev_get_tpc_config = ath10k_wmi_10_2_4_op_gen_pdev_get_tpc_config,
+ .gen_gpio_config = ath10k_wmi_op_gen_gpio_config,
+ .gen_gpio_output = ath10k_wmi_op_gen_gpio_output,
+};
+
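+/*
+ * Editor's note: ath10k_wmi_attach() below only installs the per-branch
+ * tables; dispatch then goes through the ar->wmi.ops indirection, so a
+ * call such as (sketch, wrapper lives in wmi-ops.h):
+ *
+ *	ath10k_wmi_echo(ar, ATH10K_WMI_BARRIER_ECHO_ID);
+ *
+ * picks the right gen_echo and echo_cmdid for whichever firmware ABI
+ * was detected at attach time.
+ */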
+int ath10k_wmi_attach(struct ath10k *ar)
+{
+ switch (ar->running_fw->fw_file.wmi_op_version) {
+ case ATH10K_FW_WMI_OP_VERSION_10_4:
+ ar->wmi.ops = &wmi_10_4_ops;
+ ar->wmi.cmd = &wmi_10_4_cmd_map;
+ ar->wmi.vdev_param = &wmi_10_4_vdev_param_map;
+ ar->wmi.pdev_param = &wmi_10_4_pdev_param_map;
+ ar->wmi.peer_param = &wmi_peer_param_map;
+ ar->wmi.peer_flags = &wmi_10_2_peer_flags_map;
+ ar->wmi_key_cipher = wmi_key_cipher_suites;
+ break;
+ case ATH10K_FW_WMI_OP_VERSION_10_2_4:
+ ar->wmi.cmd = &wmi_10_2_4_cmd_map;
+ ar->wmi.ops = &wmi_10_2_4_ops;
+ ar->wmi.vdev_param = &wmi_10_2_4_vdev_param_map;
+ ar->wmi.pdev_param = &wmi_10_2_4_pdev_param_map;
+ ar->wmi.peer_param = &wmi_peer_param_map;
+ ar->wmi.peer_flags = &wmi_10_2_peer_flags_map;
+ ar->wmi_key_cipher = wmi_key_cipher_suites;
+ break;
+ case ATH10K_FW_WMI_OP_VERSION_10_2:
+ ar->wmi.cmd = &wmi_10_2_cmd_map;
+ ar->wmi.ops = &wmi_10_2_ops;
+ ar->wmi.vdev_param = &wmi_10x_vdev_param_map;
+ ar->wmi.pdev_param = &wmi_10x_pdev_param_map;
+ ar->wmi.peer_param = &wmi_peer_param_map;
+ ar->wmi.peer_flags = &wmi_10_2_peer_flags_map;
+ ar->wmi_key_cipher = wmi_key_cipher_suites;
+ break;
+ case ATH10K_FW_WMI_OP_VERSION_10_1:
+ ar->wmi.cmd = &wmi_10x_cmd_map;
+ ar->wmi.ops = &wmi_10_1_ops;
+ ar->wmi.vdev_param = &wmi_10x_vdev_param_map;
+ ar->wmi.pdev_param = &wmi_10x_pdev_param_map;
+ ar->wmi.peer_param = &wmi_peer_param_map;
+ ar->wmi.peer_flags = &wmi_10x_peer_flags_map;
+ ar->wmi_key_cipher = wmi_key_cipher_suites;
+ break;
+ case ATH10K_FW_WMI_OP_VERSION_MAIN:
+ ar->wmi.cmd = &wmi_cmd_map;
+ ar->wmi.ops = &wmi_ops;
+ ar->wmi.vdev_param = &wmi_vdev_param_map;
+ ar->wmi.pdev_param = &wmi_pdev_param_map;
+ ar->wmi.peer_param = &wmi_peer_param_map;
+ ar->wmi.peer_flags = &wmi_peer_flags_map;
+ ar->wmi_key_cipher = wmi_key_cipher_suites;
+ break;
+ case ATH10K_FW_WMI_OP_VERSION_TLV:
+ ath10k_wmi_tlv_attach(ar);
+ ar->wmi_key_cipher = wmi_tlv_key_cipher_suites;
+ break;
+ case ATH10K_FW_WMI_OP_VERSION_UNSET:
+ case ATH10K_FW_WMI_OP_VERSION_MAX:
+ ath10k_err(ar, "unsupported WMI op version: %d\n",
+ ar->running_fw->fw_file.wmi_op_version);
+ return -EINVAL;
+ }
+
+ init_completion(&ar->wmi.service_ready);
+ init_completion(&ar->wmi.unified_ready);
+ init_completion(&ar->wmi.barrier);
+ init_completion(&ar->wmi.radar_confirm);
+
+ INIT_WORK(&ar->svc_rdy_work, ath10k_wmi_event_service_ready_work);
+ INIT_WORK(&ar->radar_confirmation_work,
+ ath10k_radar_confirmation_work);
+
+ if (test_bit(ATH10K_FW_FEATURE_MGMT_TX_BY_REF,
+ ar->running_fw->fw_file.fw_features)) {
+ idr_init(&ar->wmi.mgmt_pending_tx);
+ }
+
+ return 0;
+}
+
+void ath10k_wmi_free_host_mem(struct ath10k *ar)
+{
+ int i;
+
+ /* free the host memory chunks requested by firmware */
+ for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
+ dma_free_coherent(ar->dev,
+ ar->wmi.mem_chunks[i].len,
+ ar->wmi.mem_chunks[i].vaddr,
+ ar->wmi.mem_chunks[i].paddr);
+ }
+
+ ar->wmi.num_mem_chunks = 0;
+}
+
+static int ath10k_wmi_mgmt_tx_clean_up_pending(int msdu_id, void *ptr,
+ void *ctx)
+{
+ struct ath10k_mgmt_tx_pkt_addr *pkt_addr = ptr;
+ struct ath10k *ar = ctx;
+ struct sk_buff *msdu;
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "force cleanup mgmt msdu_id %u\n", msdu_id);
+
+ msdu = pkt_addr->vaddr;
+ dma_unmap_single(ar->dev, pkt_addr->paddr,
+ msdu->len, DMA_TO_DEVICE);
+ ieee80211_free_txskb(ar->hw, msdu);
+ kfree(pkt_addr);
+
+ return 0;
+}
+
+void ath10k_wmi_detach(struct ath10k *ar)
+{
+ if (test_bit(ATH10K_FW_FEATURE_MGMT_TX_BY_REF,
+ ar->running_fw->fw_file.fw_features)) {
+ spin_lock_bh(&ar->data_lock);
+ idr_for_each(&ar->wmi.mgmt_pending_tx,
+ ath10k_wmi_mgmt_tx_clean_up_pending, ar);
+ idr_destroy(&ar->wmi.mgmt_pending_tx);
+ spin_unlock_bh(&ar->data_lock);
+ }
+
+ cancel_work_sync(&ar->svc_rdy_work);
+ dev_kfree_skb(ar->svc_rdy_skb);
+}
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
new file mode 100644
index 000000000000..7f50a1de6b97
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -0,0 +1,7542 @@
+/* SPDX-License-Identifier: ISC */
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#ifndef _WMI_H_
+#define _WMI_H_
+
+#include <linux/types.h>
+#include <linux/ieee80211.h>
+
+/*
+ * This file specifies the WMI interface for the Unified Software
+ * Architecture.
+ *
+ * It includes definitions of all the commands and events. Commands are
+ * messages from the host to the target. Events and Replies are messages
+ * from the target to the host.
+ *
+ * Responsibility for the correctness of WMI commands lies with the host
+ * driver; the target is not required to validate parameters for value,
+ * proper range, or anything else.
+ *
+ * Guidelines for extending this interface are below.
+ *
+ * 1. Add new WMI commands ONLY within the specified range - 0x9000 - 0x9fff
+ *
+ * 2. Use ONLY u32 type for defining member variables within WMI
+ * command/event structures. Do not use u8, u16, bool or
+ * enum types within these structures.
+ *
+ * 3. DO NOT define bit fields within structures. Implement bit fields
+ * using masks if necessary. Do not use the programming language's bit
+ * field definition.
+ *
+ * 4. Define macros for encode/decode of u8, u16 fields within
+ * the u32 variables. Use these macros for set/get of these fields.
+ * Try to use this to optimize the structure without bloating it with
+ * u32 variables for every lower sized field.
+ *
+ * 5. Do not use PACK/UNPACK attributes for the structures as each member
+ * variable is already 4-byte aligned by virtue of being a u32
+ * type.
+ *
+ * 6. Comment each parameter of a WMI command/event structure by opening
+ * the C comment with two stars instead of one, so that HTML documentation
+ * can be generated with Doxygen.
+ *
+ */
+
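+/*
+ * Illustrative example for guideline 4 above (hypothetical field, not
+ * part of this interface): packing an 8-bit tid into a u32 word.
+ *
+ *	#define WMI_EXAMPLE_TID_MASK	0x000000ff
+ *	#define WMI_EXAMPLE_TID_LSB	0
+ *	#define WMI_EXAMPLE_TID_SET(word, tid) \
+ *		(((word) & ~WMI_EXAMPLE_TID_MASK) | \
+ *		 (((tid) << WMI_EXAMPLE_TID_LSB) & WMI_EXAMPLE_TID_MASK))
+ *	#define WMI_EXAMPLE_TID_GET(word) \
+ *		(((word) & WMI_EXAMPLE_TID_MASK) >> WMI_EXAMPLE_TID_LSB)
+ */
+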
+/* Control Path */
+struct wmi_cmd_hdr {
+ __le32 cmd_id;
+} __packed;
+
+#define WMI_CMD_HDR_CMD_ID_MASK 0x00FFFFFF
+#define WMI_CMD_HDR_CMD_ID_LSB 0
+#define WMI_CMD_HDR_PLT_PRIV_MASK 0xFF000000
+#define WMI_CMD_HDR_PLT_PRIV_LSB 24
+
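+/*
+ * Illustrative decode of the header above (sketch, not a new helper):
+ *
+ *	u32 cmd_id = (__le32_to_cpu(hdr->cmd_id) & WMI_CMD_HDR_CMD_ID_MASK)
+ *		     >> WMI_CMD_HDR_CMD_ID_LSB;
+ */
+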
+#define HTC_PROTOCOL_VERSION 0x0002
+#define WMI_PROTOCOL_VERSION 0x0002
+
+/*
+ * There is no signed version of __le32, so as a temporary solution we
+ * define our own. The idea is from fs/ntfs/endian.h.
+ *
+ * Use the a_ prefix so that it doesn't conflict if proper support is
+ * ever added to linux/types.h.
+ */
+typedef __s32 __bitwise a_sle32;
+
+static inline a_sle32 a_cpu_to_sle32(s32 val)
+{
+ return (__force a_sle32)cpu_to_le32(val);
+}
+
+static inline s32 a_sle32_to_cpu(a_sle32 val)
+{
+ return le32_to_cpu((__force __le32)val);
+}
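+
+/*
+ * Example round trip (illustrative): a signed value crosses the
+ * interface as little endian and comes back intact.
+ *
+ *	a_sle32 wire = a_cpu_to_sle32(-95);
+ *	s32 rssi = a_sle32_to_cpu(wire);	(rssi == -95)
+ */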
+
+enum wmi_service {
+ WMI_SERVICE_BEACON_OFFLOAD = 0,
+ WMI_SERVICE_SCAN_OFFLOAD,
+ WMI_SERVICE_ROAM_OFFLOAD,
+ WMI_SERVICE_BCN_MISS_OFFLOAD,
+ WMI_SERVICE_STA_PWRSAVE,
+ WMI_SERVICE_STA_ADVANCED_PWRSAVE,
+ WMI_SERVICE_AP_UAPSD,
+ WMI_SERVICE_AP_DFS,
+ WMI_SERVICE_11AC,
+ WMI_SERVICE_BLOCKACK,
+ WMI_SERVICE_PHYERR,
+ WMI_SERVICE_BCN_FILTER,
+ WMI_SERVICE_RTT,
+ WMI_SERVICE_RATECTRL,
+ WMI_SERVICE_WOW,
+ WMI_SERVICE_RATECTRL_CACHE,
+ WMI_SERVICE_IRAM_TIDS,
+ WMI_SERVICE_ARPNS_OFFLOAD,
+ WMI_SERVICE_NLO,
+ WMI_SERVICE_GTK_OFFLOAD,
+ WMI_SERVICE_SCAN_SCH,
+ WMI_SERVICE_CSA_OFFLOAD,
+ WMI_SERVICE_CHATTER,
+ WMI_SERVICE_COEX_FREQAVOID,
+ WMI_SERVICE_PACKET_POWER_SAVE,
+ WMI_SERVICE_FORCE_FW_HANG,
+ WMI_SERVICE_GPIO,
+ WMI_SERVICE_STA_DTIM_PS_MODULATED_DTIM,
+ WMI_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG,
+ WMI_SERVICE_STA_UAPSD_VAR_AUTO_TRIG,
+ WMI_SERVICE_STA_KEEP_ALIVE,
+ WMI_SERVICE_TX_ENCAP,
+ WMI_SERVICE_BURST,
+ WMI_SERVICE_SMART_ANTENNA_SW_SUPPORT,
+ WMI_SERVICE_SMART_ANTENNA_HW_SUPPORT,
+ WMI_SERVICE_ROAM_SCAN_OFFLOAD,
+ WMI_SERVICE_AP_PS_DETECT_OUT_OF_SYNC,
+ WMI_SERVICE_EARLY_RX,
+ WMI_SERVICE_STA_SMPS,
+ WMI_SERVICE_FWTEST,
+ WMI_SERVICE_STA_WMMAC,
+ WMI_SERVICE_TDLS,
+ WMI_SERVICE_MCC_BCN_INTERVAL_CHANGE,
+ WMI_SERVICE_ADAPTIVE_OCS,
+ WMI_SERVICE_BA_SSN_SUPPORT,
+ WMI_SERVICE_FILTER_IPSEC_NATKEEPALIVE,
+ WMI_SERVICE_WLAN_HB,
+ WMI_SERVICE_LTE_ANT_SHARE_SUPPORT,
+ WMI_SERVICE_BATCH_SCAN,
+ WMI_SERVICE_QPOWER,
+ WMI_SERVICE_PLMREQ,
+ WMI_SERVICE_THERMAL_MGMT,
+ WMI_SERVICE_RMC,
+ WMI_SERVICE_MHF_OFFLOAD,
+ WMI_SERVICE_COEX_SAR,
+ WMI_SERVICE_BCN_TXRATE_OVERRIDE,
+ WMI_SERVICE_NAN,
+ WMI_SERVICE_L1SS_STAT,
+ WMI_SERVICE_ESTIMATE_LINKSPEED,
+ WMI_SERVICE_OBSS_SCAN,
+ WMI_SERVICE_TDLS_OFFCHAN,
+ WMI_SERVICE_TDLS_UAPSD_BUFFER_STA,
+ WMI_SERVICE_TDLS_UAPSD_SLEEP_STA,
+ WMI_SERVICE_IBSS_PWRSAVE,
+ WMI_SERVICE_LPASS,
+ WMI_SERVICE_EXTSCAN,
+ WMI_SERVICE_D0WOW,
+ WMI_SERVICE_HSOFFLOAD,
+ WMI_SERVICE_ROAM_HO_OFFLOAD,
+ WMI_SERVICE_RX_FULL_REORDER,
+ WMI_SERVICE_DHCP_OFFLOAD,
+ WMI_SERVICE_STA_RX_IPA_OFFLOAD_SUPPORT,
+ WMI_SERVICE_MDNS_OFFLOAD,
+ WMI_SERVICE_SAP_AUTH_OFFLOAD,
+ WMI_SERVICE_ATF,
+ WMI_SERVICE_COEX_GPIO,
+ WMI_SERVICE_ENHANCED_PROXY_STA,
+ WMI_SERVICE_TT,
+ WMI_SERVICE_PEER_CACHING,
+ WMI_SERVICE_AUX_SPECTRAL_INTF,
+ WMI_SERVICE_AUX_CHAN_LOAD_INTF,
+ WMI_SERVICE_BSS_CHANNEL_INFO_64,
+ WMI_SERVICE_EXT_RES_CFG_SUPPORT,
+ WMI_SERVICE_MESH_11S,
+ WMI_SERVICE_MESH_NON_11S,
+ WMI_SERVICE_PEER_STATS,
+ WMI_SERVICE_RESTRT_CHNL_SUPPORT,
+ WMI_SERVICE_PERIODIC_CHAN_STAT_SUPPORT,
+ WMI_SERVICE_TX_MODE_PUSH_ONLY,
+ WMI_SERVICE_TX_MODE_PUSH_PULL,
+ WMI_SERVICE_TX_MODE_DYNAMIC,
+ WMI_SERVICE_VDEV_RX_FILTER,
+ WMI_SERVICE_BTCOEX,
+ WMI_SERVICE_CHECK_CAL_VERSION,
+ WMI_SERVICE_DBGLOG_WARN2,
+ WMI_SERVICE_BTCOEX_DUTY_CYCLE,
+ WMI_SERVICE_4_WIRE_COEX_SUPPORT,
+ WMI_SERVICE_EXTENDED_NSS_SUPPORT,
+ WMI_SERVICE_PROG_GPIO_BAND_SELECT,
+ WMI_SERVICE_SMART_LOGGING_SUPPORT,
+ WMI_SERVICE_TDLS_CONN_TRACKER_IN_HOST_MODE,
+ WMI_SERVICE_TDLS_EXPLICIT_MODE_ONLY,
+ WMI_SERVICE_MGMT_TX_WMI,
+ WMI_SERVICE_TDLS_WIDER_BANDWIDTH,
+ WMI_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS,
+ WMI_SERVICE_HOST_DFS_CHECK_SUPPORT,
+ WMI_SERVICE_TPC_STATS_FINAL,
+ WMI_SERVICE_RESET_CHIP,
+ WMI_SERVICE_SPOOF_MAC_SUPPORT,
+ WMI_SERVICE_TX_DATA_ACK_RSSI,
+ WMI_SERVICE_VDEV_DIFFERENT_BEACON_INTERVAL_SUPPORT,
+ WMI_SERVICE_VDEV_DISABLE_4_ADDR_SRC_LRN_SUPPORT,
+ WMI_SERVICE_BB_TIMING_CONFIG_SUPPORT,
+ WMI_SERVICE_THERM_THROT,
+ WMI_SERVICE_RTT_RESPONDER_ROLE,
+ WMI_SERVICE_PER_PACKET_SW_ENCRYPT,
+ WMI_SERVICE_REPORT_AIRTIME,
+ WMI_SERVICE_SYNC_DELETE_CMDS,
+ WMI_SERVICE_TX_PWR_PER_PEER,
+ WMI_SERVICE_SUPPORT_EXTEND_ADDRESS,
+ WMI_SERVICE_PEER_TID_CONFIGS_SUPPORT,
+ WMI_SERVICE_EXT_PEER_TID_CONFIGS_SUPPORT,
+
+ /* Remember to add the new value to wmi_service_name()! */
+
+ /* keep last */
+ WMI_SERVICE_MAX,
+};
+
+enum wmi_10x_service {
+ WMI_10X_SERVICE_BEACON_OFFLOAD = 0,
+ WMI_10X_SERVICE_SCAN_OFFLOAD,
+ WMI_10X_SERVICE_ROAM_OFFLOAD,
+ WMI_10X_SERVICE_BCN_MISS_OFFLOAD,
+ WMI_10X_SERVICE_STA_PWRSAVE,
+ WMI_10X_SERVICE_STA_ADVANCED_PWRSAVE,
+ WMI_10X_SERVICE_AP_UAPSD,
+ WMI_10X_SERVICE_AP_DFS,
+ WMI_10X_SERVICE_11AC,
+ WMI_10X_SERVICE_BLOCKACK,
+ WMI_10X_SERVICE_PHYERR,
+ WMI_10X_SERVICE_BCN_FILTER,
+ WMI_10X_SERVICE_RTT,
+ WMI_10X_SERVICE_RATECTRL,
+ WMI_10X_SERVICE_WOW,
+ WMI_10X_SERVICE_RATECTRL_CACHE,
+ WMI_10X_SERVICE_IRAM_TIDS,
+ WMI_10X_SERVICE_BURST,
+
+ /* introduced in 10.2 */
+ WMI_10X_SERVICE_SMART_ANTENNA_SW_SUPPORT,
+ WMI_10X_SERVICE_FORCE_FW_HANG,
+ WMI_10X_SERVICE_SMART_ANTENNA_HW_SUPPORT,
+ WMI_10X_SERVICE_ATF,
+ WMI_10X_SERVICE_COEX_GPIO,
+ WMI_10X_SERVICE_AUX_SPECTRAL_INTF,
+ WMI_10X_SERVICE_AUX_CHAN_LOAD_INTF,
+ WMI_10X_SERVICE_BSS_CHANNEL_INFO_64,
+ WMI_10X_SERVICE_MESH,
+ WMI_10X_SERVICE_EXT_RES_CFG_SUPPORT,
+ WMI_10X_SERVICE_PEER_STATS,
+ WMI_10X_SERVICE_RESET_CHIP,
+ WMI_10X_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS,
+ WMI_10X_SERVICE_VDEV_BCN_RATE_CONTROL,
+ WMI_10X_SERVICE_PER_PACKET_SW_ENCRYPT,
+ WMI_10X_SERVICE_BB_TIMING_CONFIG_SUPPORT,
+};
+
+enum wmi_main_service {
+ WMI_MAIN_SERVICE_BEACON_OFFLOAD = 0,
+ WMI_MAIN_SERVICE_SCAN_OFFLOAD,
+ WMI_MAIN_SERVICE_ROAM_OFFLOAD,
+ WMI_MAIN_SERVICE_BCN_MISS_OFFLOAD,
+ WMI_MAIN_SERVICE_STA_PWRSAVE,
+ WMI_MAIN_SERVICE_STA_ADVANCED_PWRSAVE,
+ WMI_MAIN_SERVICE_AP_UAPSD,
+ WMI_MAIN_SERVICE_AP_DFS,
+ WMI_MAIN_SERVICE_11AC,
+ WMI_MAIN_SERVICE_BLOCKACK,
+ WMI_MAIN_SERVICE_PHYERR,
+ WMI_MAIN_SERVICE_BCN_FILTER,
+ WMI_MAIN_SERVICE_RTT,
+ WMI_MAIN_SERVICE_RATECTRL,
+ WMI_MAIN_SERVICE_WOW,
+ WMI_MAIN_SERVICE_RATECTRL_CACHE,
+ WMI_MAIN_SERVICE_IRAM_TIDS,
+ WMI_MAIN_SERVICE_ARPNS_OFFLOAD,
+ WMI_MAIN_SERVICE_NLO,
+ WMI_MAIN_SERVICE_GTK_OFFLOAD,
+ WMI_MAIN_SERVICE_SCAN_SCH,
+ WMI_MAIN_SERVICE_CSA_OFFLOAD,
+ WMI_MAIN_SERVICE_CHATTER,
+ WMI_MAIN_SERVICE_COEX_FREQAVOID,
+ WMI_MAIN_SERVICE_PACKET_POWER_SAVE,
+ WMI_MAIN_SERVICE_FORCE_FW_HANG,
+ WMI_MAIN_SERVICE_GPIO,
+ WMI_MAIN_SERVICE_STA_DTIM_PS_MODULATED_DTIM,
+ WMI_MAIN_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG,
+ WMI_MAIN_SERVICE_STA_UAPSD_VAR_AUTO_TRIG,
+ WMI_MAIN_SERVICE_STA_KEEP_ALIVE,
+ WMI_MAIN_SERVICE_TX_ENCAP,
+};
+
+enum wmi_10_4_service {
+ WMI_10_4_SERVICE_BEACON_OFFLOAD = 0,
+ WMI_10_4_SERVICE_SCAN_OFFLOAD,
+ WMI_10_4_SERVICE_ROAM_OFFLOAD,
+ WMI_10_4_SERVICE_BCN_MISS_OFFLOAD,
+ WMI_10_4_SERVICE_STA_PWRSAVE,
+ WMI_10_4_SERVICE_STA_ADVANCED_PWRSAVE,
+ WMI_10_4_SERVICE_AP_UAPSD,
+ WMI_10_4_SERVICE_AP_DFS,
+ WMI_10_4_SERVICE_11AC,
+ WMI_10_4_SERVICE_BLOCKACK,
+ WMI_10_4_SERVICE_PHYERR,
+ WMI_10_4_SERVICE_BCN_FILTER,
+ WMI_10_4_SERVICE_RTT,
+ WMI_10_4_SERVICE_RATECTRL,
+ WMI_10_4_SERVICE_WOW,
+ WMI_10_4_SERVICE_RATECTRL_CACHE,
+ WMI_10_4_SERVICE_IRAM_TIDS,
+ WMI_10_4_SERVICE_BURST,
+ WMI_10_4_SERVICE_SMART_ANTENNA_SW_SUPPORT,
+ WMI_10_4_SERVICE_GTK_OFFLOAD,
+ WMI_10_4_SERVICE_SCAN_SCH,
+ WMI_10_4_SERVICE_CSA_OFFLOAD,
+ WMI_10_4_SERVICE_CHATTER,
+ WMI_10_4_SERVICE_COEX_FREQAVOID,
+ WMI_10_4_SERVICE_PACKET_POWER_SAVE,
+ WMI_10_4_SERVICE_FORCE_FW_HANG,
+ WMI_10_4_SERVICE_SMART_ANTENNA_HW_SUPPORT,
+ WMI_10_4_SERVICE_GPIO,
+ WMI_10_4_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG,
+ WMI_10_4_SERVICE_STA_UAPSD_VAR_AUTO_TRIG,
+ WMI_10_4_SERVICE_STA_KEEP_ALIVE,
+ WMI_10_4_SERVICE_TX_ENCAP,
+ WMI_10_4_SERVICE_AP_PS_DETECT_OUT_OF_SYNC,
+ WMI_10_4_SERVICE_EARLY_RX,
+ WMI_10_4_SERVICE_ENHANCED_PROXY_STA,
+ WMI_10_4_SERVICE_TT,
+ WMI_10_4_SERVICE_ATF,
+ WMI_10_4_SERVICE_PEER_CACHING,
+ WMI_10_4_SERVICE_COEX_GPIO,
+ WMI_10_4_SERVICE_AUX_SPECTRAL_INTF,
+ WMI_10_4_SERVICE_AUX_CHAN_LOAD_INTF,
+ WMI_10_4_SERVICE_BSS_CHANNEL_INFO_64,
+ WMI_10_4_SERVICE_EXT_RES_CFG_SUPPORT,
+ WMI_10_4_SERVICE_MESH_NON_11S,
+ WMI_10_4_SERVICE_RESTRT_CHNL_SUPPORT,
+ WMI_10_4_SERVICE_PEER_STATS,
+ WMI_10_4_SERVICE_MESH_11S,
+ WMI_10_4_SERVICE_PERIODIC_CHAN_STAT_SUPPORT,
+ WMI_10_4_SERVICE_TX_MODE_PUSH_ONLY,
+ WMI_10_4_SERVICE_TX_MODE_PUSH_PULL,
+ WMI_10_4_SERVICE_TX_MODE_DYNAMIC,
+ WMI_10_4_SERVICE_VDEV_RX_FILTER,
+ WMI_10_4_SERVICE_BTCOEX,
+ WMI_10_4_SERVICE_CHECK_CAL_VERSION,
+ WMI_10_4_SERVICE_DBGLOG_WARN2,
+ WMI_10_4_SERVICE_BTCOEX_DUTY_CYCLE,
+ WMI_10_4_SERVICE_4_WIRE_COEX_SUPPORT,
+ WMI_10_4_SERVICE_EXTENDED_NSS_SUPPORT,
+ WMI_10_4_SERVICE_PROG_GPIO_BAND_SELECT,
+ WMI_10_4_SERVICE_SMART_LOGGING_SUPPORT,
+ WMI_10_4_SERVICE_TDLS,
+ WMI_10_4_SERVICE_TDLS_OFFCHAN,
+ WMI_10_4_SERVICE_TDLS_UAPSD_BUFFER_STA,
+ WMI_10_4_SERVICE_TDLS_UAPSD_SLEEP_STA,
+ WMI_10_4_SERVICE_TDLS_CONN_TRACKER_IN_HOST_MODE,
+ WMI_10_4_SERVICE_TDLS_EXPLICIT_MODE_ONLY,
+ WMI_10_4_SERVICE_TDLS_WIDER_BANDWIDTH,
+ WMI_10_4_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS,
+ WMI_10_4_SERVICE_HOST_DFS_CHECK_SUPPORT,
+ WMI_10_4_SERVICE_TPC_STATS_FINAL,
+ WMI_10_4_SERVICE_CFR_CAPTURE_SUPPORT,
+ WMI_10_4_SERVICE_TX_DATA_ACK_RSSI,
+ WMI_10_4_SERVICE_CFR_CAPTURE_IND_MSG_TYPE_LEGACY,
+ WMI_10_4_SERVICE_PER_PACKET_SW_ENCRYPT,
+ WMI_10_4_SERVICE_PEER_TID_CONFIGS_SUPPORT,
+ WMI_10_4_SERVICE_VDEV_BCN_RATE_CONTROL,
+ WMI_10_4_SERVICE_VDEV_DIFFERENT_BEACON_INTERVAL_SUPPORT,
+ WMI_10_4_SERVICE_HTT_ASSERT_TRIGGER_SUPPORT,
+ WMI_10_4_SERVICE_VDEV_FILTER_NEIGHBOR_RX_PACKETS,
+ WMI_10_4_SERVICE_VDEV_DISABLE_4_ADDR_SRC_LRN_SUPPORT,
+ WMI_10_4_SERVICE_PEER_CHWIDTH_CHANGE,
+ WMI_10_4_SERVICE_RX_FILTER_OUT_COUNT,
+ WMI_10_4_SERVICE_RTT_RESPONDER_ROLE,
+ WMI_10_4_SERVICE_EXT_PEER_TID_CONFIGS_SUPPORT,
+ WMI_10_4_SERVICE_REPORT_AIRTIME,
+ WMI_10_4_SERVICE_TX_PWR_PER_PEER,
+ WMI_10_4_SERVICE_FETCH_PEER_TX_PN,
+ WMI_10_4_SERVICE_MULTIPLE_VDEV_RESTART,
+ WMI_10_4_SERVICE_ENHANCED_RADIO_COUNTERS,
+ WMI_10_4_SERVICE_QINQ_SUPPORT,
+ WMI_10_4_SERVICE_RESET_CHIP,
+};
+
+static inline char *wmi_service_name(enum wmi_service service_id)
+{
+#define SVCSTR(x) case x: return #x
+
+ switch (service_id) {
+ SVCSTR(WMI_SERVICE_BEACON_OFFLOAD);
+ SVCSTR(WMI_SERVICE_SCAN_OFFLOAD);
+ SVCSTR(WMI_SERVICE_ROAM_OFFLOAD);
+ SVCSTR(WMI_SERVICE_BCN_MISS_OFFLOAD);
+ SVCSTR(WMI_SERVICE_STA_PWRSAVE);
+ SVCSTR(WMI_SERVICE_STA_ADVANCED_PWRSAVE);
+ SVCSTR(WMI_SERVICE_AP_UAPSD);
+ SVCSTR(WMI_SERVICE_AP_DFS);
+ SVCSTR(WMI_SERVICE_11AC);
+ SVCSTR(WMI_SERVICE_BLOCKACK);
+ SVCSTR(WMI_SERVICE_PHYERR);
+ SVCSTR(WMI_SERVICE_BCN_FILTER);
+ SVCSTR(WMI_SERVICE_RTT);
+ SVCSTR(WMI_SERVICE_RATECTRL);
+ SVCSTR(WMI_SERVICE_WOW);
+ SVCSTR(WMI_SERVICE_RATECTRL_CACHE);
+ SVCSTR(WMI_SERVICE_IRAM_TIDS);
+ SVCSTR(WMI_SERVICE_ARPNS_OFFLOAD);
+ SVCSTR(WMI_SERVICE_NLO);
+ SVCSTR(WMI_SERVICE_GTK_OFFLOAD);
+ SVCSTR(WMI_SERVICE_SCAN_SCH);
+ SVCSTR(WMI_SERVICE_CSA_OFFLOAD);
+ SVCSTR(WMI_SERVICE_CHATTER);
+ SVCSTR(WMI_SERVICE_COEX_FREQAVOID);
+ SVCSTR(WMI_SERVICE_PACKET_POWER_SAVE);
+ SVCSTR(WMI_SERVICE_FORCE_FW_HANG);
+ SVCSTR(WMI_SERVICE_GPIO);
+ SVCSTR(WMI_SERVICE_STA_DTIM_PS_MODULATED_DTIM);
+ SVCSTR(WMI_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG);
+ SVCSTR(WMI_SERVICE_STA_UAPSD_VAR_AUTO_TRIG);
+ SVCSTR(WMI_SERVICE_STA_KEEP_ALIVE);
+ SVCSTR(WMI_SERVICE_TX_ENCAP);
+ SVCSTR(WMI_SERVICE_BURST);
+ SVCSTR(WMI_SERVICE_SMART_ANTENNA_SW_SUPPORT);
+ SVCSTR(WMI_SERVICE_SMART_ANTENNA_HW_SUPPORT);
+ SVCSTR(WMI_SERVICE_ROAM_SCAN_OFFLOAD);
+ SVCSTR(WMI_SERVICE_AP_PS_DETECT_OUT_OF_SYNC);
+ SVCSTR(WMI_SERVICE_EARLY_RX);
+ SVCSTR(WMI_SERVICE_STA_SMPS);
+ SVCSTR(WMI_SERVICE_FWTEST);
+ SVCSTR(WMI_SERVICE_STA_WMMAC);
+ SVCSTR(WMI_SERVICE_TDLS);
+ SVCSTR(WMI_SERVICE_MCC_BCN_INTERVAL_CHANGE);
+ SVCSTR(WMI_SERVICE_ADAPTIVE_OCS);
+ SVCSTR(WMI_SERVICE_BA_SSN_SUPPORT);
+ SVCSTR(WMI_SERVICE_FILTER_IPSEC_NATKEEPALIVE);
+ SVCSTR(WMI_SERVICE_WLAN_HB);
+ SVCSTR(WMI_SERVICE_LTE_ANT_SHARE_SUPPORT);
+ SVCSTR(WMI_SERVICE_BATCH_SCAN);
+ SVCSTR(WMI_SERVICE_QPOWER);
+ SVCSTR(WMI_SERVICE_PLMREQ);
+ SVCSTR(WMI_SERVICE_THERMAL_MGMT);
+ SVCSTR(WMI_SERVICE_RMC);
+ SVCSTR(WMI_SERVICE_MHF_OFFLOAD);
+ SVCSTR(WMI_SERVICE_COEX_SAR);
+ SVCSTR(WMI_SERVICE_BCN_TXRATE_OVERRIDE);
+ SVCSTR(WMI_SERVICE_NAN);
+ SVCSTR(WMI_SERVICE_L1SS_STAT);
+ SVCSTR(WMI_SERVICE_ESTIMATE_LINKSPEED);
+ SVCSTR(WMI_SERVICE_OBSS_SCAN);
+ SVCSTR(WMI_SERVICE_TDLS_OFFCHAN);
+ SVCSTR(WMI_SERVICE_TDLS_UAPSD_BUFFER_STA);
+ SVCSTR(WMI_SERVICE_TDLS_UAPSD_SLEEP_STA);
+ SVCSTR(WMI_SERVICE_IBSS_PWRSAVE);
+ SVCSTR(WMI_SERVICE_LPASS);
+ SVCSTR(WMI_SERVICE_EXTSCAN);
+ SVCSTR(WMI_SERVICE_D0WOW);
+ SVCSTR(WMI_SERVICE_HSOFFLOAD);
+ SVCSTR(WMI_SERVICE_ROAM_HO_OFFLOAD);
+ SVCSTR(WMI_SERVICE_RX_FULL_REORDER);
+ SVCSTR(WMI_SERVICE_DHCP_OFFLOAD);
+ SVCSTR(WMI_SERVICE_STA_RX_IPA_OFFLOAD_SUPPORT);
+ SVCSTR(WMI_SERVICE_MDNS_OFFLOAD);
+ SVCSTR(WMI_SERVICE_SAP_AUTH_OFFLOAD);
+ SVCSTR(WMI_SERVICE_ATF);
+ SVCSTR(WMI_SERVICE_COEX_GPIO);
+ SVCSTR(WMI_SERVICE_ENHANCED_PROXY_STA);
+ SVCSTR(WMI_SERVICE_TT);
+ SVCSTR(WMI_SERVICE_PEER_CACHING);
+ SVCSTR(WMI_SERVICE_AUX_SPECTRAL_INTF);
+ SVCSTR(WMI_SERVICE_AUX_CHAN_LOAD_INTF);
+ SVCSTR(WMI_SERVICE_BSS_CHANNEL_INFO_64);
+ SVCSTR(WMI_SERVICE_EXT_RES_CFG_SUPPORT);
+ SVCSTR(WMI_SERVICE_MESH_11S);
+ SVCSTR(WMI_SERVICE_MESH_NON_11S);
+ SVCSTR(WMI_SERVICE_PEER_STATS);
+ SVCSTR(WMI_SERVICE_RESTRT_CHNL_SUPPORT);
+ SVCSTR(WMI_SERVICE_PERIODIC_CHAN_STAT_SUPPORT);
+ SVCSTR(WMI_SERVICE_TX_MODE_PUSH_ONLY);
+ SVCSTR(WMI_SERVICE_TX_MODE_PUSH_PULL);
+ SVCSTR(WMI_SERVICE_TX_MODE_DYNAMIC);
+ SVCSTR(WMI_SERVICE_VDEV_RX_FILTER);
+ SVCSTR(WMI_SERVICE_BTCOEX);
+ SVCSTR(WMI_SERVICE_CHECK_CAL_VERSION);
+ SVCSTR(WMI_SERVICE_DBGLOG_WARN2);
+ SVCSTR(WMI_SERVICE_BTCOEX_DUTY_CYCLE);
+ SVCSTR(WMI_SERVICE_4_WIRE_COEX_SUPPORT);
+ SVCSTR(WMI_SERVICE_EXTENDED_NSS_SUPPORT);
+ SVCSTR(WMI_SERVICE_PROG_GPIO_BAND_SELECT);
+ SVCSTR(WMI_SERVICE_SMART_LOGGING_SUPPORT);
+ SVCSTR(WMI_SERVICE_TDLS_CONN_TRACKER_IN_HOST_MODE);
+ SVCSTR(WMI_SERVICE_TDLS_EXPLICIT_MODE_ONLY);
+ SVCSTR(WMI_SERVICE_MGMT_TX_WMI);
+ SVCSTR(WMI_SERVICE_TDLS_WIDER_BANDWIDTH);
+ SVCSTR(WMI_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS);
+ SVCSTR(WMI_SERVICE_HOST_DFS_CHECK_SUPPORT);
+ SVCSTR(WMI_SERVICE_TPC_STATS_FINAL);
+ SVCSTR(WMI_SERVICE_RESET_CHIP);
+ SVCSTR(WMI_SERVICE_SPOOF_MAC_SUPPORT);
+ SVCSTR(WMI_SERVICE_TX_DATA_ACK_RSSI);
+ SVCSTR(WMI_SERVICE_VDEV_DIFFERENT_BEACON_INTERVAL_SUPPORT);
+ SVCSTR(WMI_SERVICE_VDEV_DISABLE_4_ADDR_SRC_LRN_SUPPORT);
+ SVCSTR(WMI_SERVICE_BB_TIMING_CONFIG_SUPPORT);
+ SVCSTR(WMI_SERVICE_THERM_THROT);
+ SVCSTR(WMI_SERVICE_RTT_RESPONDER_ROLE);
+ SVCSTR(WMI_SERVICE_PER_PACKET_SW_ENCRYPT);
+ SVCSTR(WMI_SERVICE_REPORT_AIRTIME);
+ SVCSTR(WMI_SERVICE_SYNC_DELETE_CMDS);
+ SVCSTR(WMI_SERVICE_TX_PWR_PER_PEER);
+ SVCSTR(WMI_SERVICE_SUPPORT_EXTEND_ADDRESS);
+ SVCSTR(WMI_SERVICE_PEER_TID_CONFIGS_SUPPORT);
+ SVCSTR(WMI_SERVICE_EXT_PEER_TID_CONFIGS_SUPPORT);
+
+ case WMI_SERVICE_MAX:
+ return NULL;
+ }
+
+#undef SVCSTR
+
+ return NULL;
+}
+
+#define WMI_SERVICE_IS_ENABLED(wmi_svc_bmap, svc_id, len) \
+ ((svc_id) < (len) && \
+ __le32_to_cpu((wmi_svc_bmap)[(svc_id) / (sizeof(u32))]) & \
+ BIT((svc_id) % (sizeof(u32))))
+
+/* This extension is required to accommodate new services: the current
+ * limit for wmi_services is 64 because the target uses only 4 bits of
+ * each 32-bit wmi_service word. The extended macro below makes use of
+ * the remaining unused bits of each word for new services.
+ */
+#define WMI_EXT_SERVICE_IS_ENABLED(wmi_svc_bmap, svc_id, len) \
+ ((svc_id) >= (len) && \
+ __le32_to_cpu((wmi_svc_bmap)[((svc_id) - (len)) / 28]) & \
+ BIT(((((svc_id) - (len)) % 28) & 0x1f) + 4))
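+
+/*
+ * Worked example of the layout the two macros above assume: with
+ * len == 64, plain services occupy bits 0-3 of each 32-bit word, so
+ * svc_id 5 tests word 1, bit 1; extended services use the remaining
+ * bits 4-31, so svc_id 70 (70 - 64 = 6) tests word 0, bit 10.
+ */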
+
+#define SVCMAP(x, y, len) \
+ do { \
+ if ((WMI_SERVICE_IS_ENABLED((in), (x), (len))) || \
+ (WMI_EXT_SERVICE_IS_ENABLED((in), (x), (len)))) \
+ __set_bit(y, out); \
+ } while (0)
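+
+/*
+ * SVCMAP translates a firmware-ABI service index (x) into the unified
+ * enum wmi_service bit (y) used by the rest of the driver; per-branch
+ * maps are needed because the indices diverge between firmware branches
+ * (e.g. WMI_10X_SERVICE_MESH maps onto WMI_SERVICE_MESH_11S below).
+ */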
+
+static inline void wmi_10x_svc_map(const __le32 *in, unsigned long *out,
+ size_t len)
+{
+ SVCMAP(WMI_10X_SERVICE_BEACON_OFFLOAD,
+ WMI_SERVICE_BEACON_OFFLOAD, len);
+ SVCMAP(WMI_10X_SERVICE_SCAN_OFFLOAD,
+ WMI_SERVICE_SCAN_OFFLOAD, len);
+ SVCMAP(WMI_10X_SERVICE_ROAM_OFFLOAD,
+ WMI_SERVICE_ROAM_OFFLOAD, len);
+ SVCMAP(WMI_10X_SERVICE_BCN_MISS_OFFLOAD,
+ WMI_SERVICE_BCN_MISS_OFFLOAD, len);
+ SVCMAP(WMI_10X_SERVICE_STA_PWRSAVE,
+ WMI_SERVICE_STA_PWRSAVE, len);
+ SVCMAP(WMI_10X_SERVICE_STA_ADVANCED_PWRSAVE,
+ WMI_SERVICE_STA_ADVANCED_PWRSAVE, len);
+ SVCMAP(WMI_10X_SERVICE_AP_UAPSD,
+ WMI_SERVICE_AP_UAPSD, len);
+ SVCMAP(WMI_10X_SERVICE_AP_DFS,
+ WMI_SERVICE_AP_DFS, len);
+ SVCMAP(WMI_10X_SERVICE_11AC,
+ WMI_SERVICE_11AC, len);
+ SVCMAP(WMI_10X_SERVICE_BLOCKACK,
+ WMI_SERVICE_BLOCKACK, len);
+ SVCMAP(WMI_10X_SERVICE_PHYERR,
+ WMI_SERVICE_PHYERR, len);
+ SVCMAP(WMI_10X_SERVICE_BCN_FILTER,
+ WMI_SERVICE_BCN_FILTER, len);
+ SVCMAP(WMI_10X_SERVICE_RTT,
+ WMI_SERVICE_RTT, len);
+ SVCMAP(WMI_10X_SERVICE_RATECTRL,
+ WMI_SERVICE_RATECTRL, len);
+ SVCMAP(WMI_10X_SERVICE_WOW,
+ WMI_SERVICE_WOW, len);
+ SVCMAP(WMI_10X_SERVICE_RATECTRL_CACHE,
+ WMI_SERVICE_RATECTRL_CACHE, len);
+ SVCMAP(WMI_10X_SERVICE_IRAM_TIDS,
+ WMI_SERVICE_IRAM_TIDS, len);
+ SVCMAP(WMI_10X_SERVICE_BURST,
+ WMI_SERVICE_BURST, len);
+ SVCMAP(WMI_10X_SERVICE_SMART_ANTENNA_SW_SUPPORT,
+ WMI_SERVICE_SMART_ANTENNA_SW_SUPPORT, len);
+ SVCMAP(WMI_10X_SERVICE_FORCE_FW_HANG,
+ WMI_SERVICE_FORCE_FW_HANG, len);
+ SVCMAP(WMI_10X_SERVICE_SMART_ANTENNA_HW_SUPPORT,
+ WMI_SERVICE_SMART_ANTENNA_HW_SUPPORT, len);
+ SVCMAP(WMI_10X_SERVICE_ATF,
+ WMI_SERVICE_ATF, len);
+ SVCMAP(WMI_10X_SERVICE_COEX_GPIO,
+ WMI_SERVICE_COEX_GPIO, len);
+ SVCMAP(WMI_10X_SERVICE_AUX_SPECTRAL_INTF,
+ WMI_SERVICE_AUX_SPECTRAL_INTF, len);
+ SVCMAP(WMI_10X_SERVICE_AUX_CHAN_LOAD_INTF,
+ WMI_SERVICE_AUX_CHAN_LOAD_INTF, len);
+ SVCMAP(WMI_10X_SERVICE_BSS_CHANNEL_INFO_64,
+ WMI_SERVICE_BSS_CHANNEL_INFO_64, len);
+ SVCMAP(WMI_10X_SERVICE_MESH,
+ WMI_SERVICE_MESH_11S, len);
+ SVCMAP(WMI_10X_SERVICE_EXT_RES_CFG_SUPPORT,
+ WMI_SERVICE_EXT_RES_CFG_SUPPORT, len);
+ SVCMAP(WMI_10X_SERVICE_PEER_STATS,
+ WMI_SERVICE_PEER_STATS, len);
+ SVCMAP(WMI_10X_SERVICE_RESET_CHIP,
+ WMI_SERVICE_RESET_CHIP, len);
+ SVCMAP(WMI_10X_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS,
+ WMI_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS, len);
+ SVCMAP(WMI_10X_SERVICE_BB_TIMING_CONFIG_SUPPORT,
+ WMI_SERVICE_BB_TIMING_CONFIG_SUPPORT, len);
+ SVCMAP(WMI_10X_SERVICE_PER_PACKET_SW_ENCRYPT,
+ WMI_SERVICE_PER_PACKET_SW_ENCRYPT, len);
+}
+
+static inline void wmi_main_svc_map(const __le32 *in, unsigned long *out,
+ size_t len)
+{
+ SVCMAP(WMI_MAIN_SERVICE_BEACON_OFFLOAD,
+ WMI_SERVICE_BEACON_OFFLOAD, len);
+ SVCMAP(WMI_MAIN_SERVICE_SCAN_OFFLOAD,
+ WMI_SERVICE_SCAN_OFFLOAD, len);
+ SVCMAP(WMI_MAIN_SERVICE_ROAM_OFFLOAD,
+ WMI_SERVICE_ROAM_OFFLOAD, len);
+ SVCMAP(WMI_MAIN_SERVICE_BCN_MISS_OFFLOAD,
+ WMI_SERVICE_BCN_MISS_OFFLOAD, len);
+ SVCMAP(WMI_MAIN_SERVICE_STA_PWRSAVE,
+ WMI_SERVICE_STA_PWRSAVE, len);
+ SVCMAP(WMI_MAIN_SERVICE_STA_ADVANCED_PWRSAVE,
+ WMI_SERVICE_STA_ADVANCED_PWRSAVE, len);
+ SVCMAP(WMI_MAIN_SERVICE_AP_UAPSD,
+ WMI_SERVICE_AP_UAPSD, len);
+ SVCMAP(WMI_MAIN_SERVICE_AP_DFS,
+ WMI_SERVICE_AP_DFS, len);
+ SVCMAP(WMI_MAIN_SERVICE_11AC,
+ WMI_SERVICE_11AC, len);
+ SVCMAP(WMI_MAIN_SERVICE_BLOCKACK,
+ WMI_SERVICE_BLOCKACK, len);
+ SVCMAP(WMI_MAIN_SERVICE_PHYERR,
+ WMI_SERVICE_PHYERR, len);
+ SVCMAP(WMI_MAIN_SERVICE_BCN_FILTER,
+ WMI_SERVICE_BCN_FILTER, len);
+ SVCMAP(WMI_MAIN_SERVICE_RTT,
+ WMI_SERVICE_RTT, len);
+ SVCMAP(WMI_MAIN_SERVICE_RATECTRL,
+ WMI_SERVICE_RATECTRL, len);
+ SVCMAP(WMI_MAIN_SERVICE_WOW,
+ WMI_SERVICE_WOW, len);
+ SVCMAP(WMI_MAIN_SERVICE_RATECTRL_CACHE,
+ WMI_SERVICE_RATECTRL_CACHE, len);
+ SVCMAP(WMI_MAIN_SERVICE_IRAM_TIDS,
+ WMI_SERVICE_IRAM_TIDS, len);
+ SVCMAP(WMI_MAIN_SERVICE_ARPNS_OFFLOAD,
+ WMI_SERVICE_ARPNS_OFFLOAD, len);
+ SVCMAP(WMI_MAIN_SERVICE_NLO,
+ WMI_SERVICE_NLO, len);
+ SVCMAP(WMI_MAIN_SERVICE_GTK_OFFLOAD,
+ WMI_SERVICE_GTK_OFFLOAD, len);
+ SVCMAP(WMI_MAIN_SERVICE_SCAN_SCH,
+ WMI_SERVICE_SCAN_SCH, len);
+ SVCMAP(WMI_MAIN_SERVICE_CSA_OFFLOAD,
+ WMI_SERVICE_CSA_OFFLOAD, len);
+ SVCMAP(WMI_MAIN_SERVICE_CHATTER,
+ WMI_SERVICE_CHATTER, len);
+ SVCMAP(WMI_MAIN_SERVICE_COEX_FREQAVOID,
+ WMI_SERVICE_COEX_FREQAVOID, len);
+ SVCMAP(WMI_MAIN_SERVICE_PACKET_POWER_SAVE,
+ WMI_SERVICE_PACKET_POWER_SAVE, len);
+ SVCMAP(WMI_MAIN_SERVICE_FORCE_FW_HANG,
+ WMI_SERVICE_FORCE_FW_HANG, len);
+ SVCMAP(WMI_MAIN_SERVICE_GPIO,
+ WMI_SERVICE_GPIO, len);
+ SVCMAP(WMI_MAIN_SERVICE_STA_DTIM_PS_MODULATED_DTIM,
+ WMI_SERVICE_STA_DTIM_PS_MODULATED_DTIM, len);
+ SVCMAP(WMI_MAIN_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG,
+ WMI_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG, len);
+ SVCMAP(WMI_MAIN_SERVICE_STA_UAPSD_VAR_AUTO_TRIG,
+ WMI_SERVICE_STA_UAPSD_VAR_AUTO_TRIG, len);
+ SVCMAP(WMI_MAIN_SERVICE_STA_KEEP_ALIVE,
+ WMI_SERVICE_STA_KEEP_ALIVE, len);
+ SVCMAP(WMI_MAIN_SERVICE_TX_ENCAP,
+ WMI_SERVICE_TX_ENCAP, len);
+}
+
+static inline void wmi_10_4_svc_map(const __le32 *in, unsigned long *out,
+ size_t len)
+{
+ SVCMAP(WMI_10_4_SERVICE_BEACON_OFFLOAD,
+ WMI_SERVICE_BEACON_OFFLOAD, len);
+ SVCMAP(WMI_10_4_SERVICE_SCAN_OFFLOAD,
+ WMI_SERVICE_SCAN_OFFLOAD, len);
+ SVCMAP(WMI_10_4_SERVICE_ROAM_OFFLOAD,
+ WMI_SERVICE_ROAM_OFFLOAD, len);
+ SVCMAP(WMI_10_4_SERVICE_BCN_MISS_OFFLOAD,
+ WMI_SERVICE_BCN_MISS_OFFLOAD, len);
+ SVCMAP(WMI_10_4_SERVICE_STA_PWRSAVE,
+ WMI_SERVICE_STA_PWRSAVE, len);
+ SVCMAP(WMI_10_4_SERVICE_STA_ADVANCED_PWRSAVE,
+ WMI_SERVICE_STA_ADVANCED_PWRSAVE, len);
+ SVCMAP(WMI_10_4_SERVICE_AP_UAPSD,
+ WMI_SERVICE_AP_UAPSD, len);
+ SVCMAP(WMI_10_4_SERVICE_AP_DFS,
+ WMI_SERVICE_AP_DFS, len);
+ SVCMAP(WMI_10_4_SERVICE_11AC,
+ WMI_SERVICE_11AC, len);
+ SVCMAP(WMI_10_4_SERVICE_BLOCKACK,
+ WMI_SERVICE_BLOCKACK, len);
+ SVCMAP(WMI_10_4_SERVICE_PHYERR,
+ WMI_SERVICE_PHYERR, len);
+ SVCMAP(WMI_10_4_SERVICE_BCN_FILTER,
+ WMI_SERVICE_BCN_FILTER, len);
+ SVCMAP(WMI_10_4_SERVICE_RTT,
+ WMI_SERVICE_RTT, len);
+ SVCMAP(WMI_10_4_SERVICE_RATECTRL,
+ WMI_SERVICE_RATECTRL, len);
+ SVCMAP(WMI_10_4_SERVICE_WOW,
+ WMI_SERVICE_WOW, len);
+ SVCMAP(WMI_10_4_SERVICE_RATECTRL_CACHE,
+ WMI_SERVICE_RATECTRL_CACHE, len);
+ SVCMAP(WMI_10_4_SERVICE_IRAM_TIDS,
+ WMI_SERVICE_IRAM_TIDS, len);
+ SVCMAP(WMI_10_4_SERVICE_BURST,
+ WMI_SERVICE_BURST, len);
+ SVCMAP(WMI_10_4_SERVICE_SMART_ANTENNA_SW_SUPPORT,
+ WMI_SERVICE_SMART_ANTENNA_SW_SUPPORT, len);
+ SVCMAP(WMI_10_4_SERVICE_GTK_OFFLOAD,
+ WMI_SERVICE_GTK_OFFLOAD, len);
+ SVCMAP(WMI_10_4_SERVICE_SCAN_SCH,
+ WMI_SERVICE_SCAN_SCH, len);
+ SVCMAP(WMI_10_4_SERVICE_CSA_OFFLOAD,
+ WMI_SERVICE_CSA_OFFLOAD, len);
+ SVCMAP(WMI_10_4_SERVICE_CHATTER,
+ WMI_SERVICE_CHATTER, len);
+ SVCMAP(WMI_10_4_SERVICE_COEX_FREQAVOID,
+ WMI_SERVICE_COEX_FREQAVOID, len);
+ SVCMAP(WMI_10_4_SERVICE_PACKET_POWER_SAVE,
+ WMI_SERVICE_PACKET_POWER_SAVE, len);
+ SVCMAP(WMI_10_4_SERVICE_FORCE_FW_HANG,
+ WMI_SERVICE_FORCE_FW_HANG, len);
+ SVCMAP(WMI_10_4_SERVICE_SMART_ANTENNA_HW_SUPPORT,
+ WMI_SERVICE_SMART_ANTENNA_HW_SUPPORT, len);
+ SVCMAP(WMI_10_4_SERVICE_GPIO,
+ WMI_SERVICE_GPIO, len);
+ SVCMAP(WMI_10_4_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG,
+ WMI_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG, len);
+ SVCMAP(WMI_10_4_SERVICE_STA_UAPSD_VAR_AUTO_TRIG,
+ WMI_SERVICE_STA_UAPSD_VAR_AUTO_TRIG, len);
+ SVCMAP(WMI_10_4_SERVICE_STA_KEEP_ALIVE,
+ WMI_SERVICE_STA_KEEP_ALIVE, len);
+ SVCMAP(WMI_10_4_SERVICE_TX_ENCAP,
+ WMI_SERVICE_TX_ENCAP, len);
+ SVCMAP(WMI_10_4_SERVICE_AP_PS_DETECT_OUT_OF_SYNC,
+ WMI_SERVICE_AP_PS_DETECT_OUT_OF_SYNC, len);
+ SVCMAP(WMI_10_4_SERVICE_EARLY_RX,
+ WMI_SERVICE_EARLY_RX, len);
+ SVCMAP(WMI_10_4_SERVICE_ENHANCED_PROXY_STA,
+ WMI_SERVICE_ENHANCED_PROXY_STA, len);
+ SVCMAP(WMI_10_4_SERVICE_TT,
+ WMI_SERVICE_TT, len);
+ SVCMAP(WMI_10_4_SERVICE_ATF,
+ WMI_SERVICE_ATF, len);
+ SVCMAP(WMI_10_4_SERVICE_PEER_CACHING,
+ WMI_SERVICE_PEER_CACHING, len);
+ SVCMAP(WMI_10_4_SERVICE_COEX_GPIO,
+ WMI_SERVICE_COEX_GPIO, len);
+ SVCMAP(WMI_10_4_SERVICE_AUX_SPECTRAL_INTF,
+ WMI_SERVICE_AUX_SPECTRAL_INTF, len);
+ SVCMAP(WMI_10_4_SERVICE_AUX_CHAN_LOAD_INTF,
+ WMI_SERVICE_AUX_CHAN_LOAD_INTF, len);
+ SVCMAP(WMI_10_4_SERVICE_BSS_CHANNEL_INFO_64,
+ WMI_SERVICE_BSS_CHANNEL_INFO_64, len);
+ SVCMAP(WMI_10_4_SERVICE_EXT_RES_CFG_SUPPORT,
+ WMI_SERVICE_EXT_RES_CFG_SUPPORT, len);
+ SVCMAP(WMI_10_4_SERVICE_MESH_NON_11S,
+ WMI_SERVICE_MESH_NON_11S, len);
+ SVCMAP(WMI_10_4_SERVICE_RESTRT_CHNL_SUPPORT,
+ WMI_SERVICE_RESTRT_CHNL_SUPPORT, len);
+ SVCMAP(WMI_10_4_SERVICE_PEER_STATS,
+ WMI_SERVICE_PEER_STATS, len);
+ SVCMAP(WMI_10_4_SERVICE_MESH_11S,
+ WMI_SERVICE_MESH_11S, len);
+ SVCMAP(WMI_10_4_SERVICE_PERIODIC_CHAN_STAT_SUPPORT,
+ WMI_SERVICE_PERIODIC_CHAN_STAT_SUPPORT, len);
+ SVCMAP(WMI_10_4_SERVICE_TX_MODE_PUSH_ONLY,
+ WMI_SERVICE_TX_MODE_PUSH_ONLY, len);
+ SVCMAP(WMI_10_4_SERVICE_TX_MODE_PUSH_PULL,
+ WMI_SERVICE_TX_MODE_PUSH_PULL, len);
+ SVCMAP(WMI_10_4_SERVICE_TX_MODE_DYNAMIC,
+ WMI_SERVICE_TX_MODE_DYNAMIC, len);
+ SVCMAP(WMI_10_4_SERVICE_VDEV_RX_FILTER,
+ WMI_SERVICE_VDEV_RX_FILTER, len);
+ SVCMAP(WMI_10_4_SERVICE_BTCOEX,
+ WMI_SERVICE_BTCOEX, len);
+ SVCMAP(WMI_10_4_SERVICE_CHECK_CAL_VERSION,
+ WMI_SERVICE_CHECK_CAL_VERSION, len);
+ SVCMAP(WMI_10_4_SERVICE_DBGLOG_WARN2,
+ WMI_SERVICE_DBGLOG_WARN2, len);
+ SVCMAP(WMI_10_4_SERVICE_BTCOEX_DUTY_CYCLE,
+ WMI_SERVICE_BTCOEX_DUTY_CYCLE, len);
+ SVCMAP(WMI_10_4_SERVICE_4_WIRE_COEX_SUPPORT,
+ WMI_SERVICE_4_WIRE_COEX_SUPPORT, len);
+ SVCMAP(WMI_10_4_SERVICE_EXTENDED_NSS_SUPPORT,
+ WMI_SERVICE_EXTENDED_NSS_SUPPORT, len);
+ SVCMAP(WMI_10_4_SERVICE_PROG_GPIO_BAND_SELECT,
+ WMI_SERVICE_PROG_GPIO_BAND_SELECT, len);
+ SVCMAP(WMI_10_4_SERVICE_SMART_LOGGING_SUPPORT,
+ WMI_SERVICE_SMART_LOGGING_SUPPORT, len);
+ SVCMAP(WMI_10_4_SERVICE_TDLS,
+ WMI_SERVICE_TDLS, len);
+ SVCMAP(WMI_10_4_SERVICE_TDLS_OFFCHAN,
+ WMI_SERVICE_TDLS_OFFCHAN, len);
+ SVCMAP(WMI_10_4_SERVICE_TDLS_UAPSD_BUFFER_STA,
+ WMI_SERVICE_TDLS_UAPSD_BUFFER_STA, len);
+ SVCMAP(WMI_10_4_SERVICE_TDLS_UAPSD_SLEEP_STA,
+ WMI_SERVICE_TDLS_UAPSD_SLEEP_STA, len);
+ SVCMAP(WMI_10_4_SERVICE_TDLS_CONN_TRACKER_IN_HOST_MODE,
+ WMI_SERVICE_TDLS_CONN_TRACKER_IN_HOST_MODE, len);
+ SVCMAP(WMI_10_4_SERVICE_TDLS_EXPLICIT_MODE_ONLY,
+ WMI_SERVICE_TDLS_EXPLICIT_MODE_ONLY, len);
+ SVCMAP(WMI_10_4_SERVICE_TDLS_WIDER_BANDWIDTH,
+ WMI_SERVICE_TDLS_WIDER_BANDWIDTH, len);
+ SVCMAP(WMI_10_4_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS,
+ WMI_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS, len);
+ SVCMAP(WMI_10_4_SERVICE_HOST_DFS_CHECK_SUPPORT,
+ WMI_SERVICE_HOST_DFS_CHECK_SUPPORT, len);
+ SVCMAP(WMI_10_4_SERVICE_TPC_STATS_FINAL,
+ WMI_SERVICE_TPC_STATS_FINAL, len);
+ SVCMAP(WMI_10_4_SERVICE_TX_DATA_ACK_RSSI,
+ WMI_SERVICE_TX_DATA_ACK_RSSI, len);
+ SVCMAP(WMI_10_4_SERVICE_VDEV_DIFFERENT_BEACON_INTERVAL_SUPPORT,
+ WMI_SERVICE_VDEV_DIFFERENT_BEACON_INTERVAL_SUPPORT, len);
+ SVCMAP(WMI_10_4_SERVICE_VDEV_DISABLE_4_ADDR_SRC_LRN_SUPPORT,
+ WMI_SERVICE_VDEV_DISABLE_4_ADDR_SRC_LRN_SUPPORT, len);
+ SVCMAP(WMI_10_4_SERVICE_RTT_RESPONDER_ROLE,
+ WMI_SERVICE_RTT_RESPONDER_ROLE, len);
+ SVCMAP(WMI_10_4_SERVICE_PER_PACKET_SW_ENCRYPT,
+ WMI_SERVICE_PER_PACKET_SW_ENCRYPT, len);
+ SVCMAP(WMI_10_4_SERVICE_REPORT_AIRTIME,
+ WMI_SERVICE_REPORT_AIRTIME, len);
+ SVCMAP(WMI_10_4_SERVICE_TX_PWR_PER_PEER,
+ WMI_SERVICE_TX_PWR_PER_PEER, len);
+ SVCMAP(WMI_10_4_SERVICE_RESET_CHIP,
+ WMI_SERVICE_RESET_CHIP, len);
+ SVCMAP(WMI_10_4_SERVICE_PEER_TID_CONFIGS_SUPPORT,
+ WMI_SERVICE_PEER_TID_CONFIGS_SUPPORT, len);
+ SVCMAP(WMI_10_4_SERVICE_EXT_PEER_TID_CONFIGS_SUPPORT,
+ WMI_SERVICE_EXT_PEER_TID_CONFIGS_SUPPORT, len);
+}
+
+#undef SVCMAP
+
+/* 2 word representation of MAC addr */
+struct wmi_mac_addr {
+ union {
+ u8 addr[6];
+ struct {
+ u32 word0;
+ u32 word1;
+ } __packed;
+ } __packed;
+} __packed;
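+
+/*
+ * Illustrative use (as seen with the per-TID config command earlier in
+ * this patch): the byte view of the union is the one normally written,
+ * e.g.
+ *
+ *	ether_addr_copy(cmd->peer_macaddr.addr, arg->peer_macaddr.addr);
+ *
+ * while the word0/word1 view serves firmware paths that consume the
+ * address as two 32-bit words.
+ */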
+
+struct wmi_cmd_map {
+ u32 init_cmdid;
+ u32 start_scan_cmdid;
+ u32 stop_scan_cmdid;
+ u32 scan_chan_list_cmdid;
+ u32 scan_sch_prio_tbl_cmdid;
+ u32 scan_prob_req_oui_cmdid;
+ u32 pdev_set_regdomain_cmdid;
+ u32 pdev_set_channel_cmdid;
+ u32 pdev_set_param_cmdid;
+ u32 pdev_pktlog_enable_cmdid;
+ u32 pdev_pktlog_disable_cmdid;
+ u32 pdev_set_wmm_params_cmdid;
+ u32 pdev_set_ht_cap_ie_cmdid;
+ u32 pdev_set_vht_cap_ie_cmdid;
+ u32 pdev_set_dscp_tid_map_cmdid;
+ u32 pdev_set_quiet_mode_cmdid;
+ u32 pdev_green_ap_ps_enable_cmdid;
+ u32 pdev_get_tpc_config_cmdid;
+ u32 pdev_set_base_macaddr_cmdid;
+ u32 vdev_create_cmdid;
+ u32 vdev_delete_cmdid;
+ u32 vdev_start_request_cmdid;
+ u32 vdev_restart_request_cmdid;
+ u32 vdev_up_cmdid;
+ u32 vdev_stop_cmdid;
+ u32 vdev_down_cmdid;
+ u32 vdev_set_param_cmdid;
+ u32 vdev_install_key_cmdid;
+ u32 peer_create_cmdid;
+ u32 peer_delete_cmdid;
+ u32 peer_flush_tids_cmdid;
+ u32 peer_set_param_cmdid;
+ u32 peer_assoc_cmdid;
+ u32 peer_add_wds_entry_cmdid;
+ u32 peer_remove_wds_entry_cmdid;
+ u32 peer_mcast_group_cmdid;
+ u32 bcn_tx_cmdid;
+ u32 pdev_send_bcn_cmdid;
+ u32 bcn_tmpl_cmdid;
+ u32 bcn_filter_rx_cmdid;
+ u32 prb_req_filter_rx_cmdid;
+ u32 mgmt_tx_cmdid;
+ u32 mgmt_tx_send_cmdid;
+ u32 prb_tmpl_cmdid;
+ u32 addba_clear_resp_cmdid;
+ u32 addba_send_cmdid;
+ u32 addba_status_cmdid;
+ u32 delba_send_cmdid;
+ u32 addba_set_resp_cmdid;
+ u32 send_singleamsdu_cmdid;
+ u32 sta_powersave_mode_cmdid;
+ u32 sta_powersave_param_cmdid;
+ u32 sta_mimo_ps_mode_cmdid;
+ u32 pdev_dfs_enable_cmdid;
+ u32 pdev_dfs_disable_cmdid;
+ u32 roam_scan_mode;
+ u32 roam_scan_rssi_threshold;
+ u32 roam_scan_period;
+ u32 roam_scan_rssi_change_threshold;
+ u32 roam_ap_profile;
+ u32 ofl_scan_add_ap_profile;
+ u32 ofl_scan_remove_ap_profile;
+ u32 ofl_scan_period;
+ u32 p2p_dev_set_device_info;
+ u32 p2p_dev_set_discoverability;
+ u32 p2p_go_set_beacon_ie;
+ u32 p2p_go_set_probe_resp_ie;
+ u32 p2p_set_vendor_ie_data_cmdid;
+ u32 ap_ps_peer_param_cmdid;
+ u32 ap_ps_peer_uapsd_coex_cmdid;
+ u32 peer_rate_retry_sched_cmdid;
+ u32 wlan_profile_trigger_cmdid;
+ u32 wlan_profile_set_hist_intvl_cmdid;
+ u32 wlan_profile_get_profile_data_cmdid;
+ u32 wlan_profile_enable_profile_id_cmdid;
+ u32 wlan_profile_list_profile_id_cmdid;
+ u32 pdev_suspend_cmdid;
+ u32 pdev_resume_cmdid;
+ u32 add_bcn_filter_cmdid;
+ u32 rmv_bcn_filter_cmdid;
+ u32 wow_add_wake_pattern_cmdid;
+ u32 wow_del_wake_pattern_cmdid;
+ u32 wow_enable_disable_wake_event_cmdid;
+ u32 wow_enable_cmdid;
+ u32 wow_hostwakeup_from_sleep_cmdid;
+ u32 rtt_measreq_cmdid;
+ u32 rtt_tsf_cmdid;
+ u32 vdev_spectral_scan_configure_cmdid;
+ u32 vdev_spectral_scan_enable_cmdid;
+ u32 request_stats_cmdid;
+ u32 request_peer_stats_info_cmdid;
+ u32 set_arp_ns_offload_cmdid;
+ u32 network_list_offload_config_cmdid;
+ u32 gtk_offload_cmdid;
+ u32 csa_offload_enable_cmdid;
+ u32 csa_offload_chanswitch_cmdid;
+ u32 chatter_set_mode_cmdid;
+ u32 peer_tid_addba_cmdid;
+ u32 peer_tid_delba_cmdid;
+ u32 sta_dtim_ps_method_cmdid;
+ u32 sta_uapsd_auto_trig_cmdid;
+ u32 sta_keepalive_cmd;
+ u32 echo_cmdid;
+ u32 pdev_utf_cmdid;
+ u32 dbglog_cfg_cmdid;
+ u32 pdev_qvit_cmdid;
+ u32 pdev_ftm_intg_cmdid;
+ u32 vdev_set_keepalive_cmdid;
+ u32 vdev_get_keepalive_cmdid;
+ u32 force_fw_hang_cmdid;
+ u32 gpio_config_cmdid;
+ u32 gpio_output_cmdid;
+ u32 pdev_get_temperature_cmdid;
+ u32 vdev_set_wmm_params_cmdid;
+ u32 tdls_set_state_cmdid;
+ u32 tdls_peer_update_cmdid;
+ u32 adaptive_qcs_cmdid;
+ u32 scan_update_request_cmdid;
+ u32 vdev_standby_response_cmdid;
+ u32 vdev_resume_response_cmdid;
+ u32 wlan_peer_caching_add_peer_cmdid;
+ u32 wlan_peer_caching_evict_peer_cmdid;
+ u32 wlan_peer_caching_restore_peer_cmdid;
+ u32 wlan_peer_caching_print_all_peers_info_cmdid;
+ u32 peer_update_wds_entry_cmdid;
+ u32 peer_add_proxy_sta_entry_cmdid;
+ u32 rtt_keepalive_cmdid;
+ u32 oem_req_cmdid;
+ u32 nan_cmdid;
+ u32 vdev_ratemask_cmdid;
+ u32 qboost_cfg_cmdid;
+ u32 pdev_smart_ant_enable_cmdid;
+ u32 pdev_smart_ant_set_rx_antenna_cmdid;
+ u32 peer_smart_ant_set_tx_antenna_cmdid;
+ u32 peer_smart_ant_set_train_info_cmdid;
+ u32 peer_smart_ant_set_node_config_ops_cmdid;
+ u32 pdev_set_antenna_switch_table_cmdid;
+ u32 pdev_set_ctl_table_cmdid;
+ u32 pdev_set_mimogain_table_cmdid;
+ u32 pdev_ratepwr_table_cmdid;
+ u32 pdev_ratepwr_chainmsk_table_cmdid;
+ u32 pdev_fips_cmdid;
+ u32 tt_set_conf_cmdid;
+ u32 fwtest_cmdid;
+ u32 vdev_atf_request_cmdid;
+ u32 peer_atf_request_cmdid;
+ u32 pdev_get_ani_cck_config_cmdid;
+ u32 pdev_get_ani_ofdm_config_cmdid;
+ u32 pdev_reserve_ast_entry_cmdid;
+ u32 pdev_get_nfcal_power_cmdid;
+ u32 pdev_get_tpc_cmdid;
+ u32 pdev_get_ast_info_cmdid;
+ u32 vdev_set_dscp_tid_map_cmdid;
+ u32 pdev_get_info_cmdid;
+ u32 vdev_get_info_cmdid;
+ u32 vdev_filter_neighbor_rx_packets_cmdid;
+ u32 mu_cal_start_cmdid;
+ u32 set_cca_params_cmdid;
+ u32 pdev_bss_chan_info_request_cmdid;
+ u32 pdev_enable_adaptive_cca_cmdid;
+ u32 ext_resource_cfg_cmdid;
+ u32 vdev_set_ie_cmdid;
+ u32 set_lteu_config_cmdid;
+ u32 atf_ssid_grouping_request_cmdid;
+ u32 peer_atf_ext_request_cmdid;
+ u32 set_periodic_channel_stats_cfg_cmdid;
+ u32 peer_bwf_request_cmdid;
+ u32 btcoex_cfg_cmdid;
+ u32 peer_tx_mu_txmit_count_cmdid;
+ u32 peer_tx_mu_txmit_rstcnt_cmdid;
+ u32 peer_gid_userpos_list_cmdid;
+ u32 pdev_check_cal_version_cmdid;
+ u32 coex_version_cfg_cmid;
+ u32 pdev_get_rx_filter_cmdid;
+ u32 pdev_extended_nss_cfg_cmdid;
+ u32 vdev_set_scan_nac_rssi_cmdid;
+ u32 prog_gpio_band_select_cmdid;
+ u32 config_smart_logging_cmdid;
+ u32 debug_fatal_condition_cmdid;
+ u32 get_tsf_timer_cmdid;
+ u32 pdev_get_tpc_table_cmdid;
+ u32 vdev_sifs_trigger_time_cmdid;
+ u32 pdev_wds_entry_list_cmdid;
+ u32 tdls_set_offchan_mode_cmdid;
+ u32 radar_found_cmdid;
+ u32 set_bb_timing_cmdid;
+ u32 per_peer_per_tid_config_cmdid;
+};
+
+/*
+ * wmi command groups.
+ */
+enum wmi_cmd_group {
+ /* 0 to 2 are reserved */
+ WMI_GRP_START = 0x3,
+ WMI_GRP_SCAN = WMI_GRP_START,
+ WMI_GRP_PDEV,
+ WMI_GRP_VDEV,
+ WMI_GRP_PEER,
+ WMI_GRP_MGMT,
+ WMI_GRP_BA_NEG,
+ WMI_GRP_STA_PS,
+ WMI_GRP_DFS,
+ WMI_GRP_ROAM,
+ WMI_GRP_OFL_SCAN,
+ WMI_GRP_P2P,
+ WMI_GRP_AP_PS,
+ WMI_GRP_RATE_CTRL,
+ WMI_GRP_PROFILE,
+ WMI_GRP_SUSPEND,
+ WMI_GRP_BCN_FILTER,
+ WMI_GRP_WOW,
+ WMI_GRP_RTT,
+ WMI_GRP_SPECTRAL,
+ WMI_GRP_STATS,
+ WMI_GRP_ARP_NS_OFL,
+ WMI_GRP_NLO_OFL,
+ WMI_GRP_GTK_OFL,
+ WMI_GRP_CSA_OFL,
+ WMI_GRP_CHATTER,
+ WMI_GRP_TID_ADDBA,
+ WMI_GRP_MISC,
+ WMI_GRP_GPIO,
+};
+
+#define WMI_CMD_GRP(grp_id) (((grp_id) << 12) | 0x1)
+#define WMI_EVT_GRP_START_ID(grp_id) (((grp_id) << 12) | 0x1)
+
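+/*
+ * Worked example (derived from the macros above): WMI_GRP_PDEV is 0x4,
+ * so WMI_CMD_GRP(WMI_GRP_PDEV) = (0x4 << 12) | 0x1 = 0x4001. Hence
+ * WMI_PDEV_SET_REGDOMAIN_CMDID below is 0x4001 and the remaining pdev
+ * commands follow sequentially (0x4002, 0x4003, ...).
+ */
+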
+#define WMI_CMD_UNSUPPORTED 0
+
+/* Command IDs and event IDs for MAIN FW. */
+enum wmi_cmd_id {
+ WMI_INIT_CMDID = 0x1,
+
+ /* Scan specific commands */
+ WMI_START_SCAN_CMDID = WMI_CMD_GRP(WMI_GRP_SCAN),
+ WMI_STOP_SCAN_CMDID,
+ WMI_SCAN_CHAN_LIST_CMDID,
+ WMI_SCAN_SCH_PRIO_TBL_CMDID,
+
+ /* PDEV (physical device) specific commands */
+ WMI_PDEV_SET_REGDOMAIN_CMDID = WMI_CMD_GRP(WMI_GRP_PDEV),
+ WMI_PDEV_SET_CHANNEL_CMDID,
+ WMI_PDEV_SET_PARAM_CMDID,
+ WMI_PDEV_PKTLOG_ENABLE_CMDID,
+ WMI_PDEV_PKTLOG_DISABLE_CMDID,
+ WMI_PDEV_SET_WMM_PARAMS_CMDID,
+ WMI_PDEV_SET_HT_CAP_IE_CMDID,
+ WMI_PDEV_SET_VHT_CAP_IE_CMDID,
+ WMI_PDEV_SET_DSCP_TID_MAP_CMDID,
+ WMI_PDEV_SET_QUIET_MODE_CMDID,
+ WMI_PDEV_GREEN_AP_PS_ENABLE_CMDID,
+ WMI_PDEV_GET_TPC_CONFIG_CMDID,
+ WMI_PDEV_SET_BASE_MACADDR_CMDID,
+
+ /* VDEV (virtual device) specific commands */
+ WMI_VDEV_CREATE_CMDID = WMI_CMD_GRP(WMI_GRP_VDEV),
+ WMI_VDEV_DELETE_CMDID,
+ WMI_VDEV_START_REQUEST_CMDID,
+ WMI_VDEV_RESTART_REQUEST_CMDID,
+ WMI_VDEV_UP_CMDID,
+ WMI_VDEV_STOP_CMDID,
+ WMI_VDEV_DOWN_CMDID,
+ WMI_VDEV_SET_PARAM_CMDID,
+ WMI_VDEV_INSTALL_KEY_CMDID,
+
+ /* peer specific commands */
+ WMI_PEER_CREATE_CMDID = WMI_CMD_GRP(WMI_GRP_PEER),
+ WMI_PEER_DELETE_CMDID,
+ WMI_PEER_FLUSH_TIDS_CMDID,
+ WMI_PEER_SET_PARAM_CMDID,
+ WMI_PEER_ASSOC_CMDID,
+ WMI_PEER_ADD_WDS_ENTRY_CMDID,
+ WMI_PEER_REMOVE_WDS_ENTRY_CMDID,
+ WMI_PEER_MCAST_GROUP_CMDID,
+
+ /* beacon/management specific commands */
+ WMI_BCN_TX_CMDID = WMI_CMD_GRP(WMI_GRP_MGMT),
+ WMI_PDEV_SEND_BCN_CMDID,
+ WMI_BCN_TMPL_CMDID,
+ WMI_BCN_FILTER_RX_CMDID,
+ WMI_PRB_REQ_FILTER_RX_CMDID,
+ WMI_MGMT_TX_CMDID,
+ WMI_PRB_TMPL_CMDID,
+
+ /* commands to control BA negotiation directly from host. */
+ WMI_ADDBA_CLEAR_RESP_CMDID = WMI_CMD_GRP(WMI_GRP_BA_NEG),
+ WMI_ADDBA_SEND_CMDID,
+ WMI_ADDBA_STATUS_CMDID,
+ WMI_DELBA_SEND_CMDID,
+ WMI_ADDBA_SET_RESP_CMDID,
+ WMI_SEND_SINGLEAMSDU_CMDID,
+
+ /* Station power save specific config */
+ WMI_STA_POWERSAVE_MODE_CMDID = WMI_CMD_GRP(WMI_GRP_STA_PS),
+ WMI_STA_POWERSAVE_PARAM_CMDID,
+ WMI_STA_MIMO_PS_MODE_CMDID,
+
+ /* DFS-specific commands */
+ WMI_PDEV_DFS_ENABLE_CMDID = WMI_CMD_GRP(WMI_GRP_DFS),
+ WMI_PDEV_DFS_DISABLE_CMDID,
+
+ /* Roaming specific commands */
+ WMI_ROAM_SCAN_MODE = WMI_CMD_GRP(WMI_GRP_ROAM),
+ WMI_ROAM_SCAN_RSSI_THRESHOLD,
+ WMI_ROAM_SCAN_PERIOD,
+ WMI_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
+ WMI_ROAM_AP_PROFILE,
+
+ /* offload scan specific commands */
+ WMI_OFL_SCAN_ADD_AP_PROFILE = WMI_CMD_GRP(WMI_GRP_OFL_SCAN),
+ WMI_OFL_SCAN_REMOVE_AP_PROFILE,
+ WMI_OFL_SCAN_PERIOD,
+
+ /* P2P specific commands */
+ WMI_P2P_DEV_SET_DEVICE_INFO = WMI_CMD_GRP(WMI_GRP_P2P),
+ WMI_P2P_DEV_SET_DISCOVERABILITY,
+ WMI_P2P_GO_SET_BEACON_IE,
+ WMI_P2P_GO_SET_PROBE_RESP_IE,
+ WMI_P2P_SET_VENDOR_IE_DATA_CMDID,
+
+ /* AP power save specific config */
+ WMI_AP_PS_PEER_PARAM_CMDID = WMI_CMD_GRP(WMI_GRP_AP_PS),
+ WMI_AP_PS_PEER_UAPSD_COEX_CMDID,
+
+ /* Rate-control specific commands */
+ WMI_PEER_RATE_RETRY_SCHED_CMDID =
+ WMI_CMD_GRP(WMI_GRP_RATE_CTRL),
+
+ /* WLAN Profiling commands. */
+ WMI_WLAN_PROFILE_TRIGGER_CMDID = WMI_CMD_GRP(WMI_GRP_PROFILE),
+ WMI_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
+ WMI_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
+ WMI_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
+ WMI_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
+
+ /* Suspend resume command Ids */
+ WMI_PDEV_SUSPEND_CMDID = WMI_CMD_GRP(WMI_GRP_SUSPEND),
+ WMI_PDEV_RESUME_CMDID,
+
+ /* Beacon filter commands */
+ WMI_ADD_BCN_FILTER_CMDID = WMI_CMD_GRP(WMI_GRP_BCN_FILTER),
+ WMI_RMV_BCN_FILTER_CMDID,
+
+ /* WOW specific WMI commands */
+ WMI_WOW_ADD_WAKE_PATTERN_CMDID = WMI_CMD_GRP(WMI_GRP_WOW),
+ WMI_WOW_DEL_WAKE_PATTERN_CMDID,
+ WMI_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
+ WMI_WOW_ENABLE_CMDID,
+ WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
+
+ /* RTT measurement related cmd */
+ WMI_RTT_MEASREQ_CMDID = WMI_CMD_GRP(WMI_GRP_RTT),
+ WMI_RTT_TSF_CMDID,
+
+ /* spectral scan commands */
+ WMI_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID = WMI_CMD_GRP(WMI_GRP_SPECTRAL),
+ WMI_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
+
+ /* F/W stats */
+ WMI_REQUEST_STATS_CMDID = WMI_CMD_GRP(WMI_GRP_STATS),
+
+ /* ARP offload request */
+ WMI_SET_ARP_NS_OFFLOAD_CMDID = WMI_CMD_GRP(WMI_GRP_ARP_NS_OFL),
+
+ /* Network list offload (NLO) config */
+ WMI_NETWORK_LIST_OFFLOAD_CONFIG_CMDID = WMI_CMD_GRP(WMI_GRP_NLO_OFL),
+
+ /* GTK offload specific WMI commands */
+ WMI_GTK_OFFLOAD_CMDID = WMI_CMD_GRP(WMI_GRP_GTK_OFL),
+
+ /* CSA offload specific WMI commands */
+ WMI_CSA_OFFLOAD_ENABLE_CMDID = WMI_CMD_GRP(WMI_GRP_CSA_OFL),
+ WMI_CSA_OFFLOAD_CHANSWITCH_CMDID,
+
+ /* Chatter commands */
+ WMI_CHATTER_SET_MODE_CMDID = WMI_CMD_GRP(WMI_GRP_CHATTER),
+
+ /* addba specific commands */
+ WMI_PEER_TID_ADDBA_CMDID = WMI_CMD_GRP(WMI_GRP_TID_ADDBA),
+ WMI_PEER_TID_DELBA_CMDID,
+
+ /* set station mimo powersave method */
+ WMI_STA_DTIM_PS_METHOD_CMDID,
+ /* Configure the Station UAPSD AC Auto Trigger Parameters */
+ WMI_STA_UAPSD_AUTO_TRIG_CMDID,
+
+ /* STA keepalive parameter configuration;
+ * requires WMI_SERVICE_STA_KEEP_ALIVE
+ */
+ WMI_STA_KEEPALIVE_CMD,
+
+ /* misc command group */
+ WMI_ECHO_CMDID = WMI_CMD_GRP(WMI_GRP_MISC),
+ WMI_PDEV_UTF_CMDID,
+ WMI_DBGLOG_CFG_CMDID,
+ WMI_PDEV_QVIT_CMDID,
+ WMI_PDEV_FTM_INTG_CMDID,
+ WMI_VDEV_SET_KEEPALIVE_CMDID,
+ WMI_VDEV_GET_KEEPALIVE_CMDID,
+ WMI_FORCE_FW_HANG_CMDID,
+
+ /* GPIO Configuration */
+ WMI_GPIO_CONFIG_CMDID = WMI_CMD_GRP(WMI_GRP_GPIO),
+ WMI_GPIO_OUTPUT_CMDID,
+};
+
+enum wmi_event_id {
+ WMI_SERVICE_READY_EVENTID = 0x1,
+ WMI_READY_EVENTID,
+ WMI_SERVICE_AVAILABLE_EVENTID,
+
+ /* Scan specific events */
+ WMI_SCAN_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_SCAN),
+
+ /* PDEV specific events */
+ WMI_PDEV_TPC_CONFIG_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_PDEV),
+ WMI_CHAN_INFO_EVENTID,
+ WMI_PHYERR_EVENTID,
+
+ /* VDEV specific events */
+ WMI_VDEV_START_RESP_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_VDEV),
+ WMI_VDEV_STOPPED_EVENTID,
+ WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID,
+
+ /* peer specific events */
+ WMI_PEER_STA_KICKOUT_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_PEER),
+
+ /* beacon/mgmt specific events */
+ WMI_MGMT_RX_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_MGMT),
+ WMI_HOST_SWBA_EVENTID,
+ WMI_TBTTOFFSET_UPDATE_EVENTID,
+
+ /* ADDBA Related WMI Events*/
+ WMI_TX_DELBA_COMPLETE_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_BA_NEG),
+ WMI_TX_ADDBA_COMPLETE_EVENTID,
+
+ /* Roam event to trigger roaming on host */
+ WMI_ROAM_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_ROAM),
+ WMI_PROFILE_MATCH,
+
+ /* WoW */
+ WMI_WOW_WAKEUP_HOST_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_WOW),
+
+ /* RTT */
+ WMI_RTT_MEASUREMENT_REPORT_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_RTT),
+ WMI_TSF_MEASUREMENT_REPORT_EVENTID,
+ WMI_RTT_ERROR_REPORT_EVENTID,
+
+ /* GTK offload */
+ WMI_GTK_OFFLOAD_STATUS_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_GTK_OFL),
+ WMI_GTK_REKEY_FAIL_EVENTID,
+
+ /* CSA IE received event */
+ WMI_CSA_HANDLING_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_CSA_OFL),
+
+ /* Misc events */
+ WMI_ECHO_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_MISC),
+ WMI_PDEV_UTF_EVENTID,
+ WMI_DEBUG_MESG_EVENTID,
+ WMI_UPDATE_STATS_EVENTID,
+ WMI_DEBUG_PRINT_EVENTID,
+ WMI_DCS_INTERFERENCE_EVENTID,
+ WMI_PDEV_QVIT_EVENTID,
+ WMI_WLAN_PROFILE_DATA_EVENTID,
+ WMI_PDEV_FTM_INTG_EVENTID,
+ WMI_WLAN_FREQ_AVOID_EVENTID,
+ WMI_VDEV_GET_KEEPALIVE_EVENTID,
+
+ /* GPIO Event */
+ WMI_GPIO_INPUT_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_GPIO),
+};
+
+/* Command IDs and event IDs for 10.X firmware */
+enum wmi_10x_cmd_id {
+ WMI_10X_START_CMDID = 0x9000,
+ WMI_10X_END_CMDID = 0x9FFF,
+
+ /* initialize the wlan sub system */
+ WMI_10X_INIT_CMDID,
+
+ /* Scan specific commands */
+
+ WMI_10X_START_SCAN_CMDID = WMI_10X_START_CMDID,
+ WMI_10X_STOP_SCAN_CMDID,
+ WMI_10X_SCAN_CHAN_LIST_CMDID,
+ WMI_10X_ECHO_CMDID,
+
+ /* PDEV(physical device) specific commands */
+ WMI_10X_PDEV_SET_REGDOMAIN_CMDID,
+ WMI_10X_PDEV_SET_CHANNEL_CMDID,
+ WMI_10X_PDEV_SET_PARAM_CMDID,
+ WMI_10X_PDEV_PKTLOG_ENABLE_CMDID,
+ WMI_10X_PDEV_PKTLOG_DISABLE_CMDID,
+ WMI_10X_PDEV_SET_WMM_PARAMS_CMDID,
+ WMI_10X_PDEV_SET_HT_CAP_IE_CMDID,
+ WMI_10X_PDEV_SET_VHT_CAP_IE_CMDID,
+ WMI_10X_PDEV_SET_BASE_MACADDR_CMDID,
+ WMI_10X_PDEV_SET_DSCP_TID_MAP_CMDID,
+ WMI_10X_PDEV_SET_QUIET_MODE_CMDID,
+ WMI_10X_PDEV_GREEN_AP_PS_ENABLE_CMDID,
+ WMI_10X_PDEV_GET_TPC_CONFIG_CMDID,
+
+ /* VDEV(virtual device) specific commands */
+ WMI_10X_VDEV_CREATE_CMDID,
+ WMI_10X_VDEV_DELETE_CMDID,
+ WMI_10X_VDEV_START_REQUEST_CMDID,
+ WMI_10X_VDEV_RESTART_REQUEST_CMDID,
+ WMI_10X_VDEV_UP_CMDID,
+ WMI_10X_VDEV_STOP_CMDID,
+ WMI_10X_VDEV_DOWN_CMDID,
+ WMI_10X_VDEV_STANDBY_RESPONSE_CMDID,
+ WMI_10X_VDEV_RESUME_RESPONSE_CMDID,
+ WMI_10X_VDEV_SET_PARAM_CMDID,
+ WMI_10X_VDEV_INSTALL_KEY_CMDID,
+
+ /* peer specific commands */
+ WMI_10X_PEER_CREATE_CMDID,
+ WMI_10X_PEER_DELETE_CMDID,
+ WMI_10X_PEER_FLUSH_TIDS_CMDID,
+ WMI_10X_PEER_SET_PARAM_CMDID,
+ WMI_10X_PEER_ASSOC_CMDID,
+ WMI_10X_PEER_ADD_WDS_ENTRY_CMDID,
+ WMI_10X_PEER_REMOVE_WDS_ENTRY_CMDID,
+ WMI_10X_PEER_MCAST_GROUP_CMDID,
+
+ /* beacon/management specific commands */
+
+ WMI_10X_BCN_TX_CMDID,
+ WMI_10X_BCN_PRB_TMPL_CMDID,
+ WMI_10X_BCN_FILTER_RX_CMDID,
+ WMI_10X_PRB_REQ_FILTER_RX_CMDID,
+ WMI_10X_MGMT_TX_CMDID,
+
+ /* commands to control ba negotiation directly from host. */
+ WMI_10X_ADDBA_CLEAR_RESP_CMDID,
+ WMI_10X_ADDBA_SEND_CMDID,
+ WMI_10X_ADDBA_STATUS_CMDID,
+ WMI_10X_DELBA_SEND_CMDID,
+ WMI_10X_ADDBA_SET_RESP_CMDID,
+ WMI_10X_SEND_SINGLEAMSDU_CMDID,
+
+ /* Station power save specific config */
+ WMI_10X_STA_POWERSAVE_MODE_CMDID,
+ WMI_10X_STA_POWERSAVE_PARAM_CMDID,
+ WMI_10X_STA_MIMO_PS_MODE_CMDID,
+
+ /* set debug log config */
+ WMI_10X_DBGLOG_CFG_CMDID,
+
+ /* DFS-specific commands */
+ WMI_10X_PDEV_DFS_ENABLE_CMDID,
+ WMI_10X_PDEV_DFS_DISABLE_CMDID,
+
+ /* QVIT specific command id */
+ WMI_10X_PDEV_QVIT_CMDID,
+
+ /* Offload Scan and Roaming related commands */
+ WMI_10X_ROAM_SCAN_MODE,
+ WMI_10X_ROAM_SCAN_RSSI_THRESHOLD,
+ WMI_10X_ROAM_SCAN_PERIOD,
+ WMI_10X_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
+ WMI_10X_ROAM_AP_PROFILE,
+ WMI_10X_OFL_SCAN_ADD_AP_PROFILE,
+ WMI_10X_OFL_SCAN_REMOVE_AP_PROFILE,
+ WMI_10X_OFL_SCAN_PERIOD,
+
+ /* P2P specific commands */
+ WMI_10X_P2P_DEV_SET_DEVICE_INFO,
+ WMI_10X_P2P_DEV_SET_DISCOVERABILITY,
+ WMI_10X_P2P_GO_SET_BEACON_IE,
+ WMI_10X_P2P_GO_SET_PROBE_RESP_IE,
+
+ /* AP power save specific config */
+ WMI_10X_AP_PS_PEER_PARAM_CMDID,
+ WMI_10X_AP_PS_PEER_UAPSD_COEX_CMDID,
+
+ /* Rate-control specific commands */
+ WMI_10X_PEER_RATE_RETRY_SCHED_CMDID,
+
+ /* WLAN Profiling commands. */
+ WMI_10X_WLAN_PROFILE_TRIGGER_CMDID,
+ WMI_10X_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
+ WMI_10X_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
+ WMI_10X_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
+ WMI_10X_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
+
+ /* Suspend resume command Ids */
+ WMI_10X_PDEV_SUSPEND_CMDID,
+ WMI_10X_PDEV_RESUME_CMDID,
+
+ /* Beacon filter commands */
+ WMI_10X_ADD_BCN_FILTER_CMDID,
+ WMI_10X_RMV_BCN_FILTER_CMDID,
+
+ /* WOW specific WMI commands */
+ WMI_10X_WOW_ADD_WAKE_PATTERN_CMDID,
+ WMI_10X_WOW_DEL_WAKE_PATTERN_CMDID,
+ WMI_10X_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
+ WMI_10X_WOW_ENABLE_CMDID,
+ WMI_10X_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
+
+ /* RTT measurement related cmd */
+ WMI_10X_RTT_MEASREQ_CMDID,
+ WMI_10X_RTT_TSF_CMDID,
+
+ /* transmit beacon by value */
+ WMI_10X_PDEV_SEND_BCN_CMDID,
+
+ /* F/W stats */
+ WMI_10X_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
+ WMI_10X_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
+ WMI_10X_REQUEST_STATS_CMDID,
+
+ /* GPIO Configuration */
+ WMI_10X_GPIO_CONFIG_CMDID,
+ WMI_10X_GPIO_OUTPUT_CMDID,
+
+ WMI_10X_PDEV_UTF_CMDID = WMI_10X_END_CMDID - 1,
+};
+
+enum wmi_10x_event_id {
+ WMI_10X_SERVICE_READY_EVENTID = 0x8000,
+ WMI_10X_READY_EVENTID,
+ WMI_10X_START_EVENTID = 0x9000,
+ WMI_10X_END_EVENTID = 0x9FFF,
+
+ /* Scan specific events */
+ WMI_10X_SCAN_EVENTID = WMI_10X_START_EVENTID,
+ WMI_10X_ECHO_EVENTID,
+ WMI_10X_DEBUG_MESG_EVENTID,
+ WMI_10X_UPDATE_STATS_EVENTID,
+
+ /* Instantaneous RSSI event */
+ WMI_10X_INST_RSSI_STATS_EVENTID,
+
+ /* VDEV specific events */
+ WMI_10X_VDEV_START_RESP_EVENTID,
+ WMI_10X_VDEV_STANDBY_REQ_EVENTID,
+ WMI_10X_VDEV_RESUME_REQ_EVENTID,
+ WMI_10X_VDEV_STOPPED_EVENTID,
+
+ /* peer specific events */
+ WMI_10X_PEER_STA_KICKOUT_EVENTID,
+
+ /* beacon/mgmt specific events */
+ WMI_10X_HOST_SWBA_EVENTID,
+ WMI_10X_TBTTOFFSET_UPDATE_EVENTID,
+ WMI_10X_MGMT_RX_EVENTID,
+
+ /* Channel stats event */
+ WMI_10X_CHAN_INFO_EVENTID,
+
+ /* PHY Error specific WMI event */
+ WMI_10X_PHYERR_EVENTID,
+
+ /* Roam event to trigger roaming on host */
+ WMI_10X_ROAM_EVENTID,
+
+ /* matching AP found from list of profiles */
+ WMI_10X_PROFILE_MATCH,
+
+ /* debug print message used for tracing FW code while debugging */
+ WMI_10X_DEBUG_PRINT_EVENTID,
+ /* VI specific event */
+ WMI_10X_PDEV_QVIT_EVENTID,
+ /* FW code profile data in response to profile request */
+ WMI_10X_WLAN_PROFILE_DATA_EVENTID,
+
+ /* RTT related event ID */
+ WMI_10X_RTT_MEASUREMENT_REPORT_EVENTID,
+ WMI_10X_TSF_MEASUREMENT_REPORT_EVENTID,
+ WMI_10X_RTT_ERROR_REPORT_EVENTID,
+
+ WMI_10X_WOW_WAKEUP_HOST_EVENTID,
+ WMI_10X_DCS_INTERFERENCE_EVENTID,
+
+ /* TPC config for the current operating channel */
+ WMI_10X_PDEV_TPC_CONFIG_EVENTID,
+
+ WMI_10X_GPIO_INPUT_EVENTID,
+ WMI_10X_PDEV_UTF_EVENTID = WMI_10X_END_EVENTID - 1,
+};
+
+enum wmi_10_2_cmd_id {
+ WMI_10_2_START_CMDID = 0x9000,
+ WMI_10_2_END_CMDID = 0x9FFF,
+ WMI_10_2_INIT_CMDID,
+ WMI_10_2_START_SCAN_CMDID = WMI_10_2_START_CMDID,
+ WMI_10_2_STOP_SCAN_CMDID,
+ WMI_10_2_SCAN_CHAN_LIST_CMDID,
+ WMI_10_2_ECHO_CMDID,
+ WMI_10_2_PDEV_SET_REGDOMAIN_CMDID,
+ WMI_10_2_PDEV_SET_CHANNEL_CMDID,
+ WMI_10_2_PDEV_SET_PARAM_CMDID,
+ WMI_10_2_PDEV_PKTLOG_ENABLE_CMDID,
+ WMI_10_2_PDEV_PKTLOG_DISABLE_CMDID,
+ WMI_10_2_PDEV_SET_WMM_PARAMS_CMDID,
+ WMI_10_2_PDEV_SET_HT_CAP_IE_CMDID,
+ WMI_10_2_PDEV_SET_VHT_CAP_IE_CMDID,
+ WMI_10_2_PDEV_SET_BASE_MACADDR_CMDID,
+ WMI_10_2_PDEV_SET_QUIET_MODE_CMDID,
+ WMI_10_2_PDEV_GREEN_AP_PS_ENABLE_CMDID,
+ WMI_10_2_PDEV_GET_TPC_CONFIG_CMDID,
+ WMI_10_2_VDEV_CREATE_CMDID,
+ WMI_10_2_VDEV_DELETE_CMDID,
+ WMI_10_2_VDEV_START_REQUEST_CMDID,
+ WMI_10_2_VDEV_RESTART_REQUEST_CMDID,
+ WMI_10_2_VDEV_UP_CMDID,
+ WMI_10_2_VDEV_STOP_CMDID,
+ WMI_10_2_VDEV_DOWN_CMDID,
+ WMI_10_2_VDEV_STANDBY_RESPONSE_CMDID,
+ WMI_10_2_VDEV_RESUME_RESPONSE_CMDID,
+ WMI_10_2_VDEV_SET_PARAM_CMDID,
+ WMI_10_2_VDEV_INSTALL_KEY_CMDID,
+ WMI_10_2_VDEV_SET_DSCP_TID_MAP_CMDID,
+ WMI_10_2_PEER_CREATE_CMDID,
+ WMI_10_2_PEER_DELETE_CMDID,
+ WMI_10_2_PEER_FLUSH_TIDS_CMDID,
+ WMI_10_2_PEER_SET_PARAM_CMDID,
+ WMI_10_2_PEER_ASSOC_CMDID,
+ WMI_10_2_PEER_ADD_WDS_ENTRY_CMDID,
+ WMI_10_2_PEER_UPDATE_WDS_ENTRY_CMDID,
+ WMI_10_2_PEER_REMOVE_WDS_ENTRY_CMDID,
+ WMI_10_2_PEER_MCAST_GROUP_CMDID,
+ WMI_10_2_BCN_TX_CMDID,
+ WMI_10_2_BCN_PRB_TMPL_CMDID,
+ WMI_10_2_BCN_FILTER_RX_CMDID,
+ WMI_10_2_PRB_REQ_FILTER_RX_CMDID,
+ WMI_10_2_MGMT_TX_CMDID,
+ WMI_10_2_ADDBA_CLEAR_RESP_CMDID,
+ WMI_10_2_ADDBA_SEND_CMDID,
+ WMI_10_2_ADDBA_STATUS_CMDID,
+ WMI_10_2_DELBA_SEND_CMDID,
+ WMI_10_2_ADDBA_SET_RESP_CMDID,
+ WMI_10_2_SEND_SINGLEAMSDU_CMDID,
+ WMI_10_2_STA_POWERSAVE_MODE_CMDID,
+ WMI_10_2_STA_POWERSAVE_PARAM_CMDID,
+ WMI_10_2_STA_MIMO_PS_MODE_CMDID,
+ WMI_10_2_DBGLOG_CFG_CMDID,
+ WMI_10_2_PDEV_DFS_ENABLE_CMDID,
+ WMI_10_2_PDEV_DFS_DISABLE_CMDID,
+ WMI_10_2_PDEV_QVIT_CMDID,
+ WMI_10_2_ROAM_SCAN_MODE,
+ WMI_10_2_ROAM_SCAN_RSSI_THRESHOLD,
+ WMI_10_2_ROAM_SCAN_PERIOD,
+ WMI_10_2_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
+ WMI_10_2_ROAM_AP_PROFILE,
+ WMI_10_2_OFL_SCAN_ADD_AP_PROFILE,
+ WMI_10_2_OFL_SCAN_REMOVE_AP_PROFILE,
+ WMI_10_2_OFL_SCAN_PERIOD,
+ WMI_10_2_P2P_DEV_SET_DEVICE_INFO,
+ WMI_10_2_P2P_DEV_SET_DISCOVERABILITY,
+ WMI_10_2_P2P_GO_SET_BEACON_IE,
+ WMI_10_2_P2P_GO_SET_PROBE_RESP_IE,
+ WMI_10_2_AP_PS_PEER_PARAM_CMDID,
+ WMI_10_2_AP_PS_PEER_UAPSD_COEX_CMDID,
+ WMI_10_2_PEER_RATE_RETRY_SCHED_CMDID,
+ WMI_10_2_WLAN_PROFILE_TRIGGER_CMDID,
+ WMI_10_2_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
+ WMI_10_2_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
+ WMI_10_2_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
+ WMI_10_2_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
+ WMI_10_2_PDEV_SUSPEND_CMDID,
+ WMI_10_2_PDEV_RESUME_CMDID,
+ WMI_10_2_ADD_BCN_FILTER_CMDID,
+ WMI_10_2_RMV_BCN_FILTER_CMDID,
+ WMI_10_2_WOW_ADD_WAKE_PATTERN_CMDID,
+ WMI_10_2_WOW_DEL_WAKE_PATTERN_CMDID,
+ WMI_10_2_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
+ WMI_10_2_WOW_ENABLE_CMDID,
+ WMI_10_2_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
+ WMI_10_2_RTT_MEASREQ_CMDID,
+ WMI_10_2_RTT_TSF_CMDID,
+ WMI_10_2_RTT_KEEPALIVE_CMDID,
+ WMI_10_2_PDEV_SEND_BCN_CMDID,
+ WMI_10_2_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
+ WMI_10_2_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
+ WMI_10_2_REQUEST_STATS_CMDID,
+ WMI_10_2_GPIO_CONFIG_CMDID,
+ WMI_10_2_GPIO_OUTPUT_CMDID,
+ WMI_10_2_VDEV_RATEMASK_CMDID,
+ WMI_10_2_PDEV_SMART_ANT_ENABLE_CMDID,
+ WMI_10_2_PDEV_SMART_ANT_SET_RX_ANTENNA_CMDID,
+ WMI_10_2_PEER_SMART_ANT_SET_TX_ANTENNA_CMDID,
+ WMI_10_2_PEER_SMART_ANT_SET_TRAIN_INFO_CMDID,
+ WMI_10_2_PEER_SMART_ANT_SET_NODE_CONFIG_OPS_CMDID,
+ WMI_10_2_FORCE_FW_HANG_CMDID,
+ WMI_10_2_PDEV_SET_ANTENNA_SWITCH_TABLE_CMDID,
+ WMI_10_2_PDEV_SET_CTL_TABLE_CMDID,
+ WMI_10_2_PDEV_SET_MIMOGAIN_TABLE_CMDID,
+ WMI_10_2_PDEV_RATEPWR_TABLE_CMDID,
+ WMI_10_2_PDEV_RATEPWR_CHAINMSK_TABLE_CMDID,
+ WMI_10_2_PDEV_GET_INFO,
+ WMI_10_2_VDEV_GET_INFO,
+ WMI_10_2_VDEV_ATF_REQUEST_CMDID,
+ WMI_10_2_PEER_ATF_REQUEST_CMDID,
+ WMI_10_2_PDEV_GET_TEMPERATURE_CMDID,
+ WMI_10_2_MU_CAL_START_CMDID,
+ WMI_10_2_SET_LTEU_CONFIG_CMDID,
+ WMI_10_2_SET_CCA_PARAMS,
+ WMI_10_2_PDEV_BSS_CHAN_INFO_REQUEST_CMDID,
+ WMI_10_2_FWTEST_CMDID,
+ WMI_10_2_PDEV_SET_BB_TIMING_CONFIG_CMDID,
+ WMI_10_2_PDEV_UTF_CMDID = WMI_10_2_END_CMDID - 1,
+};
+
+enum wmi_10_2_event_id {
+ WMI_10_2_SERVICE_READY_EVENTID = 0x8000,
+ WMI_10_2_READY_EVENTID,
+ WMI_10_2_DEBUG_MESG_EVENTID,
+ WMI_10_2_START_EVENTID = 0x9000,
+ WMI_10_2_END_EVENTID = 0x9FFF,
+ WMI_10_2_SCAN_EVENTID = WMI_10_2_START_EVENTID,
+ WMI_10_2_ECHO_EVENTID,
+ WMI_10_2_UPDATE_STATS_EVENTID,
+ WMI_10_2_INST_RSSI_STATS_EVENTID,
+ WMI_10_2_VDEV_START_RESP_EVENTID,
+ WMI_10_2_VDEV_STANDBY_REQ_EVENTID,
+ WMI_10_2_VDEV_RESUME_REQ_EVENTID,
+ WMI_10_2_VDEV_STOPPED_EVENTID,
+ WMI_10_2_PEER_STA_KICKOUT_EVENTID,
+ WMI_10_2_HOST_SWBA_EVENTID,
+ WMI_10_2_TBTTOFFSET_UPDATE_EVENTID,
+ WMI_10_2_MGMT_RX_EVENTID,
+ WMI_10_2_CHAN_INFO_EVENTID,
+ WMI_10_2_PHYERR_EVENTID,
+ WMI_10_2_ROAM_EVENTID,
+ WMI_10_2_PROFILE_MATCH,
+ WMI_10_2_DEBUG_PRINT_EVENTID,
+ WMI_10_2_PDEV_QVIT_EVENTID,
+ WMI_10_2_WLAN_PROFILE_DATA_EVENTID,
+ WMI_10_2_RTT_MEASUREMENT_REPORT_EVENTID,
+ WMI_10_2_TSF_MEASUREMENT_REPORT_EVENTID,
+ WMI_10_2_RTT_ERROR_REPORT_EVENTID,
+ WMI_10_2_RTT_KEEPALIVE_EVENTID,
+ WMI_10_2_WOW_WAKEUP_HOST_EVENTID,
+ WMI_10_2_DCS_INTERFERENCE_EVENTID,
+ WMI_10_2_PDEV_TPC_CONFIG_EVENTID,
+ WMI_10_2_GPIO_INPUT_EVENTID,
+ WMI_10_2_PEER_RATECODE_LIST_EVENTID,
+ WMI_10_2_GENERIC_BUFFER_EVENTID,
+ WMI_10_2_MCAST_BUF_RELEASE_EVENTID,
+ WMI_10_2_MCAST_LIST_AGEOUT_EVENTID,
+ WMI_10_2_WDS_PEER_EVENTID,
+ WMI_10_2_PEER_STA_PS_STATECHG_EVENTID,
+ WMI_10_2_PDEV_TEMPERATURE_EVENTID,
+ WMI_10_2_MU_REPORT_EVENTID,
+ WMI_10_2_PDEV_BSS_CHAN_INFO_EVENTID,
+ WMI_10_2_PDEV_UTF_EVENTID = WMI_10_2_END_EVENTID - 1,
+};
+
+enum wmi_10_4_cmd_id {
+ WMI_10_4_START_CMDID = 0x9000,
+ WMI_10_4_END_CMDID = 0x9FFF,
+ WMI_10_4_INIT_CMDID,
+ WMI_10_4_START_SCAN_CMDID = WMI_10_4_START_CMDID,
+ WMI_10_4_STOP_SCAN_CMDID,
+ WMI_10_4_SCAN_CHAN_LIST_CMDID,
+ WMI_10_4_SCAN_SCH_PRIO_TBL_CMDID,
+ WMI_10_4_SCAN_UPDATE_REQUEST_CMDID,
+ WMI_10_4_ECHO_CMDID,
+ WMI_10_4_PDEV_SET_REGDOMAIN_CMDID,
+ WMI_10_4_PDEV_SET_CHANNEL_CMDID,
+ WMI_10_4_PDEV_SET_PARAM_CMDID,
+ WMI_10_4_PDEV_PKTLOG_ENABLE_CMDID,
+ WMI_10_4_PDEV_PKTLOG_DISABLE_CMDID,
+ WMI_10_4_PDEV_SET_WMM_PARAMS_CMDID,
+ WMI_10_4_PDEV_SET_HT_CAP_IE_CMDID,
+ WMI_10_4_PDEV_SET_VHT_CAP_IE_CMDID,
+ WMI_10_4_PDEV_SET_BASE_MACADDR_CMDID,
+ WMI_10_4_PDEV_SET_DSCP_TID_MAP_CMDID,
+ WMI_10_4_PDEV_SET_QUIET_MODE_CMDID,
+ WMI_10_4_PDEV_GREEN_AP_PS_ENABLE_CMDID,
+ WMI_10_4_PDEV_GET_TPC_CONFIG_CMDID,
+ WMI_10_4_VDEV_CREATE_CMDID,
+ WMI_10_4_VDEV_DELETE_CMDID,
+ WMI_10_4_VDEV_START_REQUEST_CMDID,
+ WMI_10_4_VDEV_RESTART_REQUEST_CMDID,
+ WMI_10_4_VDEV_UP_CMDID,
+ WMI_10_4_VDEV_STOP_CMDID,
+ WMI_10_4_VDEV_DOWN_CMDID,
+ WMI_10_4_VDEV_STANDBY_RESPONSE_CMDID,
+ WMI_10_4_VDEV_RESUME_RESPONSE_CMDID,
+ WMI_10_4_VDEV_SET_PARAM_CMDID,
+ WMI_10_4_VDEV_INSTALL_KEY_CMDID,
+ WMI_10_4_WLAN_PEER_CACHING_ADD_PEER_CMDID,
+ WMI_10_4_WLAN_PEER_CACHING_EVICT_PEER_CMDID,
+ WMI_10_4_WLAN_PEER_CACHING_RESTORE_PEER_CMDID,
+ WMI_10_4_WLAN_PEER_CACHING_PRINT_ALL_PEERS_INFO_CMDID,
+ WMI_10_4_PEER_CREATE_CMDID,
+ WMI_10_4_PEER_DELETE_CMDID,
+ WMI_10_4_PEER_FLUSH_TIDS_CMDID,
+ WMI_10_4_PEER_SET_PARAM_CMDID,
+ WMI_10_4_PEER_ASSOC_CMDID,
+ WMI_10_4_PEER_ADD_WDS_ENTRY_CMDID,
+ WMI_10_4_PEER_UPDATE_WDS_ENTRY_CMDID,
+ WMI_10_4_PEER_REMOVE_WDS_ENTRY_CMDID,
+ WMI_10_4_PEER_ADD_PROXY_STA_ENTRY_CMDID,
+ WMI_10_4_PEER_MCAST_GROUP_CMDID,
+ WMI_10_4_BCN_TX_CMDID,
+ WMI_10_4_PDEV_SEND_BCN_CMDID,
+ WMI_10_4_BCN_PRB_TMPL_CMDID,
+ WMI_10_4_BCN_FILTER_RX_CMDID,
+ WMI_10_4_PRB_REQ_FILTER_RX_CMDID,
+ WMI_10_4_MGMT_TX_CMDID,
+ WMI_10_4_PRB_TMPL_CMDID,
+ WMI_10_4_ADDBA_CLEAR_RESP_CMDID,
+ WMI_10_4_ADDBA_SEND_CMDID,
+ WMI_10_4_ADDBA_STATUS_CMDID,
+ WMI_10_4_DELBA_SEND_CMDID,
+ WMI_10_4_ADDBA_SET_RESP_CMDID,
+ WMI_10_4_SEND_SINGLEAMSDU_CMDID,
+ WMI_10_4_STA_POWERSAVE_MODE_CMDID,
+ WMI_10_4_STA_POWERSAVE_PARAM_CMDID,
+ WMI_10_4_STA_MIMO_PS_MODE_CMDID,
+ WMI_10_4_DBGLOG_CFG_CMDID,
+ WMI_10_4_PDEV_DFS_ENABLE_CMDID,
+ WMI_10_4_PDEV_DFS_DISABLE_CMDID,
+ WMI_10_4_PDEV_QVIT_CMDID,
+ WMI_10_4_ROAM_SCAN_MODE,
+ WMI_10_4_ROAM_SCAN_RSSI_THRESHOLD,
+ WMI_10_4_ROAM_SCAN_PERIOD,
+ WMI_10_4_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
+ WMI_10_4_ROAM_AP_PROFILE,
+ WMI_10_4_OFL_SCAN_ADD_AP_PROFILE,
+ WMI_10_4_OFL_SCAN_REMOVE_AP_PROFILE,
+ WMI_10_4_OFL_SCAN_PERIOD,
+ WMI_10_4_P2P_DEV_SET_DEVICE_INFO,
+ WMI_10_4_P2P_DEV_SET_DISCOVERABILITY,
+ WMI_10_4_P2P_GO_SET_BEACON_IE,
+ WMI_10_4_P2P_GO_SET_PROBE_RESP_IE,
+ WMI_10_4_P2P_SET_VENDOR_IE_DATA_CMDID,
+ WMI_10_4_AP_PS_PEER_PARAM_CMDID,
+ WMI_10_4_AP_PS_PEER_UAPSD_COEX_CMDID,
+ WMI_10_4_PEER_RATE_RETRY_SCHED_CMDID,
+ WMI_10_4_WLAN_PROFILE_TRIGGER_CMDID,
+ WMI_10_4_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
+ WMI_10_4_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
+ WMI_10_4_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
+ WMI_10_4_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
+ WMI_10_4_PDEV_SUSPEND_CMDID,
+ WMI_10_4_PDEV_RESUME_CMDID,
+ WMI_10_4_ADD_BCN_FILTER_CMDID,
+ WMI_10_4_RMV_BCN_FILTER_CMDID,
+ WMI_10_4_WOW_ADD_WAKE_PATTERN_CMDID,
+ WMI_10_4_WOW_DEL_WAKE_PATTERN_CMDID,
+ WMI_10_4_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
+ WMI_10_4_WOW_ENABLE_CMDID,
+ WMI_10_4_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
+ WMI_10_4_RTT_MEASREQ_CMDID,
+ WMI_10_4_RTT_TSF_CMDID,
+ WMI_10_4_RTT_KEEPALIVE_CMDID,
+ WMI_10_4_OEM_REQ_CMDID,
+ WMI_10_4_NAN_CMDID,
+ WMI_10_4_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
+ WMI_10_4_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
+ WMI_10_4_REQUEST_STATS_CMDID,
+ WMI_10_4_GPIO_CONFIG_CMDID,
+ WMI_10_4_GPIO_OUTPUT_CMDID,
+ WMI_10_4_VDEV_RATEMASK_CMDID,
+ WMI_10_4_CSA_OFFLOAD_ENABLE_CMDID,
+ WMI_10_4_GTK_OFFLOAD_CMDID,
+ WMI_10_4_QBOOST_CFG_CMDID,
+ WMI_10_4_CSA_OFFLOAD_CHANSWITCH_CMDID,
+ WMI_10_4_PDEV_SMART_ANT_ENABLE_CMDID,
+ WMI_10_4_PDEV_SMART_ANT_SET_RX_ANTENNA_CMDID,
+ WMI_10_4_PEER_SMART_ANT_SET_TX_ANTENNA_CMDID,
+ WMI_10_4_PEER_SMART_ANT_SET_TRAIN_INFO_CMDID,
+ WMI_10_4_PEER_SMART_ANT_SET_NODE_CONFIG_OPS_CMDID,
+ WMI_10_4_VDEV_SET_KEEPALIVE_CMDID,
+ WMI_10_4_VDEV_GET_KEEPALIVE_CMDID,
+ WMI_10_4_FORCE_FW_HANG_CMDID,
+ WMI_10_4_PDEV_SET_ANTENNA_SWITCH_TABLE_CMDID,
+ WMI_10_4_PDEV_SET_CTL_TABLE_CMDID,
+ WMI_10_4_PDEV_SET_MIMOGAIN_TABLE_CMDID,
+ WMI_10_4_PDEV_RATEPWR_TABLE_CMDID,
+ WMI_10_4_PDEV_RATEPWR_CHAINMSK_TABLE_CMDID,
+ WMI_10_4_PDEV_FIPS_CMDID,
+ WMI_10_4_TT_SET_CONF_CMDID,
+ WMI_10_4_FWTEST_CMDID,
+ WMI_10_4_VDEV_ATF_REQUEST_CMDID,
+ WMI_10_4_PEER_ATF_REQUEST_CMDID,
+ WMI_10_4_PDEV_GET_ANI_CCK_CONFIG_CMDID,
+ WMI_10_4_PDEV_GET_ANI_OFDM_CONFIG_CMDID,
+ WMI_10_4_PDEV_RESERVE_AST_ENTRY_CMDID,
+ WMI_10_4_PDEV_GET_NFCAL_POWER_CMDID,
+ WMI_10_4_PDEV_GET_TPC_CMDID,
+ WMI_10_4_PDEV_GET_AST_INFO_CMDID,
+ WMI_10_4_VDEV_SET_DSCP_TID_MAP_CMDID,
+ WMI_10_4_PDEV_GET_TEMPERATURE_CMDID,
+ WMI_10_4_PDEV_GET_INFO_CMDID,
+ WMI_10_4_VDEV_GET_INFO_CMDID,
+ WMI_10_4_VDEV_FILTER_NEIGHBOR_RX_PACKETS_CMDID,
+ WMI_10_4_MU_CAL_START_CMDID,
+ WMI_10_4_SET_CCA_PARAMS_CMDID,
+ WMI_10_4_PDEV_BSS_CHAN_INFO_REQUEST_CMDID,
+ WMI_10_4_EXT_RESOURCE_CFG_CMDID,
+ WMI_10_4_VDEV_SET_IE_CMDID,
+ WMI_10_4_SET_LTEU_CONFIG_CMDID,
+ WMI_10_4_ATF_SSID_GROUPING_REQUEST_CMDID,
+ WMI_10_4_PEER_ATF_EXT_REQUEST_CMDID,
+ WMI_10_4_SET_PERIODIC_CHANNEL_STATS_CONFIG,
+ WMI_10_4_PEER_BWF_REQUEST_CMDID,
+ WMI_10_4_BTCOEX_CFG_CMDID,
+ WMI_10_4_PEER_TX_MU_TXMIT_COUNT_CMDID,
+ WMI_10_4_PEER_TX_MU_TXMIT_RSTCNT_CMDID,
+ WMI_10_4_PEER_GID_USERPOS_LIST_CMDID,
+ WMI_10_4_PDEV_CHECK_CAL_VERSION_CMDID,
+ WMI_10_4_COEX_VERSION_CFG_CMID,
+ WMI_10_4_PDEV_GET_RX_FILTER_CMDID,
+ WMI_10_4_PDEV_EXTENDED_NSS_CFG_CMDID,
+ WMI_10_4_VDEV_SET_SCAN_NAC_RSSI_CMDID,
+ WMI_10_4_PROG_GPIO_BAND_SELECT_CMDID,
+ WMI_10_4_CONFIG_SMART_LOGGING_CMDID,
+ WMI_10_4_DEBUG_FATAL_CONDITION_CMDID,
+ WMI_10_4_GET_TSF_TIMER_CMDID,
+ WMI_10_4_PDEV_GET_TPC_TABLE_CMDID,
+ WMI_10_4_VDEV_SIFS_TRIGGER_TIME_CMDID,
+ WMI_10_4_PDEV_WDS_ENTRY_LIST_CMDID,
+ WMI_10_4_TDLS_SET_STATE_CMDID,
+ WMI_10_4_TDLS_PEER_UPDATE_CMDID,
+ WMI_10_4_TDLS_SET_OFFCHAN_MODE_CMDID,
+ WMI_10_4_PDEV_SEND_FD_CMDID,
+ WMI_10_4_ENABLE_FILS_CMDID,
+ WMI_10_4_PDEV_SET_BRIDGE_MACADDR_CMDID,
+ WMI_10_4_ATF_GROUP_WMM_AC_CONFIG_REQUEST_CMDID,
+ WMI_10_4_RADAR_FOUND_CMDID,
+ WMI_10_4_PEER_CFR_CAPTURE_CMDID,
+ WMI_10_4_PER_PEER_PER_TID_CONFIG_CMDID,
+ WMI_10_4_PDEV_UTF_CMDID = WMI_10_4_END_CMDID - 1,
+};
+
+enum wmi_10_4_event_id {
+ WMI_10_4_SERVICE_READY_EVENTID = 0x8000,
+ WMI_10_4_READY_EVENTID,
+ WMI_10_4_DEBUG_MESG_EVENTID,
+ WMI_10_4_START_EVENTID = 0x9000,
+ WMI_10_4_END_EVENTID = 0x9FFF,
+ WMI_10_4_SCAN_EVENTID = WMI_10_4_START_EVENTID,
+ WMI_10_4_ECHO_EVENTID,
+ WMI_10_4_UPDATE_STATS_EVENTID,
+ WMI_10_4_INST_RSSI_STATS_EVENTID,
+ WMI_10_4_VDEV_START_RESP_EVENTID,
+ WMI_10_4_VDEV_STANDBY_REQ_EVENTID,
+ WMI_10_4_VDEV_RESUME_REQ_EVENTID,
+ WMI_10_4_VDEV_STOPPED_EVENTID,
+ WMI_10_4_PEER_STA_KICKOUT_EVENTID,
+ WMI_10_4_HOST_SWBA_EVENTID,
+ WMI_10_4_TBTTOFFSET_UPDATE_EVENTID,
+ WMI_10_4_MGMT_RX_EVENTID,
+ WMI_10_4_CHAN_INFO_EVENTID,
+ WMI_10_4_PHYERR_EVENTID,
+ WMI_10_4_ROAM_EVENTID,
+ WMI_10_4_PROFILE_MATCH,
+ WMI_10_4_DEBUG_PRINT_EVENTID,
+ WMI_10_4_PDEV_QVIT_EVENTID,
+ WMI_10_4_WLAN_PROFILE_DATA_EVENTID,
+ WMI_10_4_RTT_MEASUREMENT_REPORT_EVENTID,
+ WMI_10_4_TSF_MEASUREMENT_REPORT_EVENTID,
+ WMI_10_4_RTT_ERROR_REPORT_EVENTID,
+ WMI_10_4_RTT_KEEPALIVE_EVENTID,
+ WMI_10_4_OEM_CAPABILITY_EVENTID,
+ WMI_10_4_OEM_MEASUREMENT_REPORT_EVENTID,
+ WMI_10_4_OEM_ERROR_REPORT_EVENTID,
+ WMI_10_4_NAN_EVENTID,
+ WMI_10_4_WOW_WAKEUP_HOST_EVENTID,
+ WMI_10_4_GTK_OFFLOAD_STATUS_EVENTID,
+ WMI_10_4_GTK_REKEY_FAIL_EVENTID,
+ WMI_10_4_DCS_INTERFERENCE_EVENTID,
+ WMI_10_4_PDEV_TPC_CONFIG_EVENTID,
+ WMI_10_4_CSA_HANDLING_EVENTID,
+ WMI_10_4_GPIO_INPUT_EVENTID,
+ WMI_10_4_PEER_RATECODE_LIST_EVENTID,
+ WMI_10_4_GENERIC_BUFFER_EVENTID,
+ WMI_10_4_MCAST_BUF_RELEASE_EVENTID,
+ WMI_10_4_MCAST_LIST_AGEOUT_EVENTID,
+ WMI_10_4_VDEV_GET_KEEPALIVE_EVENTID,
+ WMI_10_4_WDS_PEER_EVENTID,
+ WMI_10_4_PEER_STA_PS_STATECHG_EVENTID,
+ WMI_10_4_PDEV_FIPS_EVENTID,
+ WMI_10_4_TT_STATS_EVENTID,
+ WMI_10_4_PDEV_CHANNEL_HOPPING_EVENTID,
+ WMI_10_4_PDEV_ANI_CCK_LEVEL_EVENTID,
+ WMI_10_4_PDEV_ANI_OFDM_LEVEL_EVENTID,
+ WMI_10_4_PDEV_RESERVE_AST_ENTRY_EVENTID,
+ WMI_10_4_PDEV_NFCAL_POWER_EVENTID,
+ WMI_10_4_PDEV_TPC_EVENTID,
+ WMI_10_4_PDEV_GET_AST_INFO_EVENTID,
+ WMI_10_4_PDEV_TEMPERATURE_EVENTID,
+ WMI_10_4_PDEV_NFCAL_POWER_ALL_CHANNELS_EVENTID,
+ WMI_10_4_PDEV_BSS_CHAN_INFO_EVENTID,
+ WMI_10_4_MU_REPORT_EVENTID,
+ WMI_10_4_TX_DATA_TRAFFIC_CTRL_EVENTID,
+ WMI_10_4_PEER_TX_MU_TXMIT_COUNT_EVENTID,
+ WMI_10_4_PEER_GID_USERPOS_LIST_EVENTID,
+ WMI_10_4_PDEV_CHECK_CAL_VERSION_EVENTID,
+ WMI_10_4_ATF_PEER_STATS_EVENTID,
+ WMI_10_4_PDEV_GET_RX_FILTER_EVENTID,
+ WMI_10_4_NAC_RSSI_EVENTID,
+ WMI_10_4_DEBUG_FATAL_CONDITION_EVENTID,
+ WMI_10_4_GET_TSF_TIMER_RESP_EVENTID,
+ WMI_10_4_PDEV_TPC_TABLE_EVENTID,
+ WMI_10_4_PDEV_WDS_ENTRY_LIST_EVENTID,
+ WMI_10_4_TDLS_PEER_EVENTID,
+ WMI_10_4_HOST_SWFDA_EVENTID,
+ WMI_10_4_ESP_ESTIMATE_EVENTID,
+ WMI_10_4_DFS_STATUS_CHECK_EVENTID,
+ WMI_10_4_PDEV_UTF_EVENTID = WMI_10_4_END_EVENTID - 1,
+};
+
+enum wmi_phy_mode {
+ MODE_11A = 0, /* 11a Mode */
+ MODE_11G = 1, /* 11b/g Mode */
+ MODE_11B = 2, /* 11b Mode */
+ MODE_11GONLY = 3, /* 11g only Mode */
+ MODE_11NA_HT20 = 4, /* 11a HT20 mode */
+ MODE_11NG_HT20 = 5, /* 11g HT20 mode */
+ MODE_11NA_HT40 = 6, /* 11a HT40 mode */
+ MODE_11NG_HT40 = 7, /* 11g HT40 mode */
+ MODE_11AC_VHT20 = 8,
+ MODE_11AC_VHT40 = 9,
+ MODE_11AC_VHT80 = 10,
+ /* MODE_11AC_VHT160 = 11, */
+ MODE_11AC_VHT20_2G = 11,
+ MODE_11AC_VHT40_2G = 12,
+ MODE_11AC_VHT80_2G = 13,
+ MODE_11AC_VHT80_80 = 14,
+ MODE_11AC_VHT160 = 15,
+ MODE_UNKNOWN = 16,
+ MODE_MAX = 16
+};
+
+static inline const char *ath10k_wmi_phymode_str(enum wmi_phy_mode mode)
+{
+ switch (mode) {
+ case MODE_11A:
+ return "11a";
+ case MODE_11G:
+ return "11g";
+ case MODE_11B:
+ return "11b";
+ case MODE_11GONLY:
+ return "11gonly";
+ case MODE_11NA_HT20:
+ return "11na-ht20";
+ case MODE_11NG_HT20:
+ return "11ng-ht20";
+ case MODE_11NA_HT40:
+ return "11na-ht40";
+ case MODE_11NG_HT40:
+ return "11ng-ht40";
+ case MODE_11AC_VHT20:
+ return "11ac-vht20";
+ case MODE_11AC_VHT40:
+ return "11ac-vht40";
+ case MODE_11AC_VHT80:
+ return "11ac-vht80";
+ case MODE_11AC_VHT160:
+ return "11ac-vht160";
+ case MODE_11AC_VHT80_80:
+ return "11ac-vht80+80";
+ case MODE_11AC_VHT20_2G:
+ return "11ac-vht20-2g";
+ case MODE_11AC_VHT40_2G:
+ return "11ac-vht40-2g";
+ case MODE_11AC_VHT80_2G:
+ return "11ac-vht80-2g";
+ case MODE_UNKNOWN:
+ /* skip */
+ break;
+
+ /* no default handler to allow compiler to check that the
+ * enum is fully handled
+ */
+ }
+
+ return "<unknown>";
+}
+
+#define WMI_CHAN_LIST_TAG 0x1
+#define WMI_SSID_LIST_TAG 0x2
+#define WMI_BSSID_LIST_TAG 0x3
+#define WMI_IE_TAG 0x4
+
+struct wmi_channel {
+ __le32 mhz;
+ __le32 band_center_freq1;
+ __le32 band_center_freq2; /* valid for 11ac, 80plus80 */
+ union {
+ __le32 flags; /* WMI_CHAN_FLAG_ */
+ struct {
+ u8 mode; /* only 6 LSBs */
+ } __packed;
+ } __packed;
+ union {
+ __le32 reginfo0;
+ struct {
+ /* note: power unit is 0.5 dBm */
+ u8 min_power;
+ u8 max_power;
+ u8 reg_power;
+ u8 reg_classid;
+ } __packed;
+ } __packed;
+ union {
+ __le32 reginfo1;
+ struct {
+ /* note: power unit is 1 dBm */
+ u8 antenna_max;
+ /* note: power unit is 0.5 dBm */
+ u8 max_tx_power;
+ } __packed;
+ } __packed;
+} __packed;
+
+struct wmi_channel_arg {
+ u32 freq;
+ u32 band_center_freq1;
+ u32 band_center_freq2;
+ bool passive;
+ bool allow_ibss;
+ bool allow_ht;
+ bool allow_vht;
+ bool ht40plus;
+ bool chan_radar;
+ /* note: power unit is 0.5 dBm */
+ u32 min_power;
+ u32 max_power;
+ u32 max_reg_power;
+ /* note: power unit is 1 dBm */
+ u32 max_antenna_gain;
+ u32 reg_class_id;
+ enum wmi_phy_mode mode;
+};
+
+enum wmi_channel_change_cause {
+ WMI_CHANNEL_CHANGE_CAUSE_NONE = 0,
+ WMI_CHANNEL_CHANGE_CAUSE_CSA,
+};
+
+#define WMI_CHAN_FLAG_HT40_PLUS (1 << 6)
+#define WMI_CHAN_FLAG_PASSIVE (1 << 7)
+#define WMI_CHAN_FLAG_ADHOC_ALLOWED (1 << 8)
+#define WMI_CHAN_FLAG_AP_DISABLED (1 << 9)
+#define WMI_CHAN_FLAG_DFS (1 << 10)
+#define WMI_CHAN_FLAG_ALLOW_HT (1 << 11)
+#define WMI_CHAN_FLAG_ALLOW_VHT (1 << 12)
+
+/* Indicate reason for channel switch */
+#define WMI_CHANNEL_CHANGE_CAUSE_CSA (1 << 13)
+/* DFS required on channel for 2nd segment of VHT160 and VHT80+80 */
+#define WMI_CHAN_FLAG_DFS_CFREQ2 (1 << 15)
+#define WMI_MAX_SPATIAL_STREAM 3 /* default max ss */
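+
+/*
+ * Illustrative sketch (not part of the original patch): populating the
+ * flags/mode union of struct wmi_channel with the WMI_CHAN_FLAG_* bits
+ * above. The helper name and the example values are hypothetical, and
+ * 'ch' is assumed to have been zeroed by the caller. Since 'mode'
+ * aliases the 6 LSBs of 'flags', it is written first and the flag bits
+ * (which start at bit 6) are OR-ed in afterwards.
+ */
+static inline void wmi_channel_fill_example(struct wmi_channel *ch)
+{
+ u32 flags = WMI_CHAN_FLAG_ALLOW_HT | WMI_CHAN_FLAG_ALLOW_VHT;
+
+ ch->mhz = __cpu_to_le32(5180);
+ ch->band_center_freq1 = __cpu_to_le32(5210);
+ ch->mode = MODE_11AC_VHT80;
+ ch->flags |= __cpu_to_le32(flags);
+}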
+
+/* HT capabilities */
+#define WMI_HT_CAP_ENABLED 0x0001 /* HT enabled/disabled */
+#define WMI_HT_CAP_HT20_SGI 0x0002 /* Short Guard Interval with HT20 */
+#define WMI_HT_CAP_DYNAMIC_SMPS 0x0004 /* Dynamic MIMO powersave */
+#define WMI_HT_CAP_TX_STBC 0x0008 /* B3 TX STBC */
+#define WMI_HT_CAP_TX_STBC_MASK_SHIFT 3
+#define WMI_HT_CAP_RX_STBC 0x0030 /* B4-B5 RX STBC */
+#define WMI_HT_CAP_RX_STBC_MASK_SHIFT 4
+#define WMI_HT_CAP_LDPC 0x0040 /* LDPC supported */
+#define WMI_HT_CAP_L_SIG_TXOP_PROT 0x0080 /* L-SIG TXOP Protection */
+#define WMI_HT_CAP_MPDU_DENSITY 0x0700 /* MPDU Density */
+#define WMI_HT_CAP_MPDU_DENSITY_MASK_SHIFT 8
+#define WMI_HT_CAP_HT40_SGI 0x0800
+#define WMI_HT_CAP_RX_LDPC 0x1000 /* LDPC RX support */
+#define WMI_HT_CAP_TX_LDPC 0x2000 /* LDPC TX support */
+
+#define WMI_HT_CAP_DEFAULT_ALL (WMI_HT_CAP_ENABLED | \
+ WMI_HT_CAP_HT20_SGI | \
+ WMI_HT_CAP_HT40_SGI | \
+ WMI_HT_CAP_TX_STBC | \
+ WMI_HT_CAP_RX_STBC | \
+ WMI_HT_CAP_LDPC)
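+
+/*
+ * Sketch (hypothetical helper, not in the original patch): extracting a
+ * masked subfield, e.g. the MPDU density, using the mask/shift pairs
+ * defined above.
+ */
+static inline u32 wmi_ht_cap_mpdu_density_example(u32 ht_cap_info)
+{
+ return (ht_cap_info & WMI_HT_CAP_MPDU_DENSITY) >>
+        WMI_HT_CAP_MPDU_DENSITY_MASK_SHIFT;
+}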
+
+/*
+ * The WMI_VHT_CAP_* masks map to the IEEE 802.11ac VHT capability
+ * information field. Fields not defined here are unsupported or reserved.
+ * Do not change these masks; if you have to add a new one, follow the
+ * bitmask layout specified by the 802.11ac draft.
+ */
+
+#define WMI_VHT_CAP_MAX_MPDU_LEN_MASK 0x00000003
+#define WMI_VHT_CAP_RX_LDPC 0x00000010
+#define WMI_VHT_CAP_SGI_80MHZ 0x00000020
+#define WMI_VHT_CAP_SGI_160MHZ 0x00000040
+#define WMI_VHT_CAP_TX_STBC 0x00000080
+#define WMI_VHT_CAP_RX_STBC_MASK 0x00000300
+#define WMI_VHT_CAP_RX_STBC_MASK_SHIFT 8
+#define WMI_VHT_CAP_SU_BFER 0x00000800
+#define WMI_VHT_CAP_SU_BFEE 0x00001000
+#define WMI_VHT_CAP_MAX_CS_ANT_MASK 0x0000E000
+#define WMI_VHT_CAP_MAX_CS_ANT_MASK_SHIFT 13
+#define WMI_VHT_CAP_MAX_SND_DIM_MASK 0x00070000
+#define WMI_VHT_CAP_MAX_SND_DIM_MASK_SHIFT 16
+#define WMI_VHT_CAP_MU_BFER 0x00080000
+#define WMI_VHT_CAP_MU_BFEE 0x00100000
+#define WMI_VHT_CAP_MAX_AMPDU_LEN_EXP 0x03800000
+#define WMI_VHT_CAP_MAX_AMPDU_LEN_EXP_SHIFT 23
+#define WMI_VHT_CAP_RX_FIXED_ANT 0x10000000
+#define WMI_VHT_CAP_TX_FIXED_ANT 0x20000000
+
+/* The following also apply to the max HT A-MSDU */
+#define WMI_VHT_CAP_MAX_MPDU_LEN_3839 0x00000000
+#define WMI_VHT_CAP_MAX_MPDU_LEN_7935 0x00000001
+#define WMI_VHT_CAP_MAX_MPDU_LEN_11454 0x00000002
+
+#define WMI_VHT_CAP_DEFAULT_ALL (WMI_VHT_CAP_MAX_MPDU_LEN_11454 | \
+ WMI_VHT_CAP_RX_LDPC | \
+ WMI_VHT_CAP_SGI_80MHZ | \
+ WMI_VHT_CAP_TX_STBC | \
+ WMI_VHT_CAP_RX_STBC_MASK | \
+ WMI_VHT_CAP_MAX_AMPDU_LEN_EXP | \
+ WMI_VHT_CAP_RX_FIXED_ANT | \
+ WMI_VHT_CAP_TX_FIXED_ANT)
+
+/*
+ * Interested readers should refer to the Rx/Tx MCS map definition in
+ * 802.11ac.
+ */
+#define WMI_VHT_MAX_MCS_4_SS_MASK(r, ss) ((3 & (r)) << (((ss) - 1) << 1))
+#define WMI_VHT_MAX_SUPP_RATE_MASK 0x1fff0000
+#define WMI_VHT_MAX_SUPP_RATE_MASK_SHIFT 16
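+
+/*
+ * Worked example (illustrative): WMI_VHT_MAX_MCS_4_SS_MASK(2, 1) =
+ * (3 & 2) << ((1 - 1) << 1) = 0x2, i.e. the two-bit rate code 2 for
+ * spatial stream 1 occupies bits 1:0 of the MCS map.
+ */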
+
+enum {
+ REGDMN_MODE_11A = 0x00001, /* 11a channels */
+ REGDMN_MODE_TURBO = 0x00002, /* 11a turbo-only channels */
+ REGDMN_MODE_11B = 0x00004, /* 11b channels */
+ REGDMN_MODE_PUREG = 0x00008, /* 11g channels (OFDM only) */
+ REGDMN_MODE_11G = 0x00008, /* XXX historical */
+ REGDMN_MODE_108G = 0x00020, /* 11a+Turbo channels */
+ REGDMN_MODE_108A = 0x00040, /* 11g+Turbo channels */
+ REGDMN_MODE_XR = 0x00100, /* XR channels */
+ REGDMN_MODE_11A_HALF_RATE = 0x00200, /* 11A half rate channels */
+ REGDMN_MODE_11A_QUARTER_RATE = 0x00400, /* 11A quarter rate channels */
+ REGDMN_MODE_11NG_HT20 = 0x00800, /* 11N-G HT20 channels */
+ REGDMN_MODE_11NA_HT20 = 0x01000, /* 11N-A HT20 channels */
+ REGDMN_MODE_11NG_HT40PLUS = 0x02000, /* 11N-G HT40 + channels */
+ REGDMN_MODE_11NG_HT40MINUS = 0x04000, /* 11N-G HT40 - channels */
+ REGDMN_MODE_11NA_HT40PLUS = 0x08000, /* 11N-A HT40 + channels */
+ REGDMN_MODE_11NA_HT40MINUS = 0x10000, /* 11N-A HT40 - channels */
+ REGDMN_MODE_11AC_VHT20 = 0x20000, /* 5Ghz, VHT20 */
+ REGDMN_MODE_11AC_VHT40PLUS = 0x40000, /* 5Ghz, VHT40 + channels */
+ REGDMN_MODE_11AC_VHT40MINUS = 0x80000, /* 5Ghz VHT40 - channels */
+ REGDMN_MODE_11AC_VHT80 = 0x100000, /* 5Ghz, VHT80 channels */
+ REGDMN_MODE_11AC_VHT160 = 0x200000, /* 5Ghz, VHT160 channels */
+ REGDMN_MODE_11AC_VHT80_80 = 0x400000, /* 5Ghz, VHT80+80 channels */
+ REGDMN_MODE_ALL = 0xffffffff
+};
+
+#define REGDMN_CAP1_CHAN_HALF_RATE 0x00000001
+#define REGDMN_CAP1_CHAN_QUARTER_RATE 0x00000002
+#define REGDMN_CAP1_CHAN_HAL49GHZ 0x00000004
+
+/* regulatory capabilities */
+#define REGDMN_EEPROM_EEREGCAP_EN_FCC_MIDBAND 0x0040
+#define REGDMN_EEPROM_EEREGCAP_EN_KK_U1_EVEN 0x0080
+#define REGDMN_EEPROM_EEREGCAP_EN_KK_U2 0x0100
+#define REGDMN_EEPROM_EEREGCAP_EN_KK_MIDBAND 0x0200
+#define REGDMN_EEPROM_EEREGCAP_EN_KK_U1_ODD 0x0400
+#define REGDMN_EEPROM_EEREGCAP_EN_KK_NEW_11A 0x0800
+
+struct hal_reg_capabilities {
+ /* regdomain value specified in EEPROM */
+ __le32 eeprom_rd;
+ /* regdomain */
+ __le32 eeprom_rd_ext;
+ /* CAP1 capabilities bit map. */
+ __le32 regcap1;
+ /* REGDMN EEPROM CAP. */
+ __le32 regcap2;
+ /* REGDMN MODE */
+ __le32 wireless_modes;
+ __le32 low_2ghz_chan;
+ __le32 high_2ghz_chan;
+ __le32 low_5ghz_chan;
+ __le32 high_5ghz_chan;
+} __packed;
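+
+/*
+ * Sketch (hypothetical helper, not in the original patch): testing a
+ * REGDMN_MODE_* bit against the wireless_modes bitmap reported in
+ * hal_reg_capabilities.
+ */
+static inline bool hal_reg_supports_mode_example(const struct hal_reg_capabilities *cap,
+						 u32 regdmn_mode)
+{
+ return !!(__le32_to_cpu(cap->wireless_modes) & regdmn_mode);
+}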
+
+enum wlan_mode_capability {
+ WHAL_WLAN_11A_CAPABILITY = 0x1,
+ WHAL_WLAN_11G_CAPABILITY = 0x2,
+ WHAL_WLAN_11AG_CAPABILITY = 0x3,
+};
+
+/* structure used by FW for requesting host memory */
+struct wlan_host_mem_req {
+ /* ID of the request */
+ __le32 req_id;
+ /* size of each unit */
+ __le32 unit_size;
+ /* flags to indicate that
+ * the number of units is dependent
+ * on the number of resources (num vdevs, num peers, etc.)
+ */
+ __le32 num_unit_info;
+ /*
+ * actual number of units to allocate. If the flags in num_unit_info
+ * indicate that the number of units is tied to the number of a
+ * particular resource, then the num_units field is set to 0 and the
+ * host will derive the number of units from the number of resources
+ * it is requesting.
+ */
+ __le32 num_units;
+} __packed;
+
+/*
+ * The following struct holds the optional payload for
+ * wmi_service_ready_event; e.g., 802.11ac devices pass some of
+ * their capabilities to the host.
+ */
+struct wmi_service_ready_event {
+ __le32 sw_version;
+ __le32 sw_version_1;
+ __le32 abi_version;
+ /* WMI_PHY_CAPABILITY */
+ __le32 phy_capability;
+ /* Maximum number of frag table entries that SW will populate less 1 */
+ __le32 max_frag_entry;
+ __le32 wmi_service_bitmap[16];
+ __le32 num_rf_chains;
+ /*
+ * The following field is only valid for service type
+ * WMI_SERVICE_11AC
+ */
+ __le32 ht_cap_info; /* WMI HT Capability */
+ __le32 vht_cap_info; /* VHT capability info field of 802.11ac */
+ __le32 vht_supp_mcs; /* VHT Supported MCS Set field Rx/Tx same */
+ __le32 hw_min_tx_power;
+ __le32 hw_max_tx_power;
+ struct hal_reg_capabilities hal_reg_capabilities;
+ __le32 sys_cap_info;
+ __le32 min_pkt_size_enable; /* Enterprise mode short pkt enable */
+ /*
+ * Max beacon and Probe Response IE offload size
+ * (includes optional P2P IEs)
+ */
+ __le32 max_bcn_ie_size;
+ /*
+ * request to host to allocate a chunk of memory and pass it down to
+ * FW via WMI_INIT. FW uses this as FW extension memory for saving its
+ * data structures. Only valid for low latency interfaces like PCIE
+ * where FW can access this memory directly or by DMA.
+ */
+ __le32 num_mem_reqs;
+ struct wlan_host_mem_req mem_reqs[];
+} __packed;
+
+/* This is the definition from 10.X firmware branch */
+struct wmi_10x_service_ready_event {
+ __le32 sw_version;
+ __le32 abi_version;
+
+ /* WMI_PHY_CAPABILITY */
+ __le32 phy_capability;
+
+ /* Maximum number of frag table entries that SW will populate less 1 */
+ __le32 max_frag_entry;
+ __le32 wmi_service_bitmap[16];
+ __le32 num_rf_chains;
+
+ /*
+ * The following field is only valid for service type
+ * WMI_SERVICE_11AC
+ */
+ __le32 ht_cap_info; /* WMI HT Capability */
+ __le32 vht_cap_info; /* VHT capability info field of 802.11ac */
+ __le32 vht_supp_mcs; /* VHT Supported MCS Set field Rx/Tx same */
+ __le32 hw_min_tx_power;
+ __le32 hw_max_tx_power;
+
+ struct hal_reg_capabilities hal_reg_capabilities;
+
+ __le32 sys_cap_info;
+ __le32 min_pkt_size_enable; /* Enterprise mode short pkt enable */
+
+ /*
+ * request to host to allocate a chunk of memory and pass it down to
+ * FW via WMI_INIT. FW uses this as FW extension memory for saving its
+ * data structures. Only valid for low latency interfaces like PCIE
+ * where FW can access this memory directly or by DMA.
+ */
+ __le32 num_mem_reqs;
+
+ struct wlan_host_mem_req mem_reqs[];
+} __packed;
+
+#define WMI_SERVICE_READY_TIMEOUT_HZ (5 * HZ)
+#define WMI_UNIFIED_READY_TIMEOUT_HZ (5 * HZ)
+
+struct wmi_ready_event {
+ __le32 sw_version;
+ __le32 abi_version;
+ struct wmi_mac_addr mac_addr;
+ __le32 status;
+} __packed;
+
+struct wmi_resource_config {
+ /* number of virtual devices (VAPs) to support */
+ __le32 num_vdevs;
+
+ /* number of peer nodes to support */
+ __le32 num_peers;
+
+ /*
+ * In offload mode the target supports features like WOW, chatter and
+ * other protocol offloads. In order to support them some
+ * functionalities like reorder buffering and PN checking need to be
+ * done in the target. This determines the maximum number of peers
+ * supported by the target in offload mode.
+ */
+ __le32 num_offload_peers;
+
+ /* For target-based RX reordering */
+ __le32 num_offload_reorder_bufs;
+
+ /* number of keys per peer */
+ __le32 num_peer_keys;
+
+ /* total number of TX/RX data TIDs */
+ __le32 num_tids;
+
+ /*
+ * max skid for resolving hash collisions
+ *
+ * The address search table is sparse, so that if two MAC addresses
+ * result in the same hash value, the second of these conflicting
+ * entries can slide to the next index in the address search table,
+ * and use it, if it is unoccupied. This ast_skid_limit parameter
+ * specifies the upper bound on how many subsequent indices to search
+ * over to find an unoccupied space.
+ */
+ __le32 ast_skid_limit;
+
+ /*
+ * the nominal chain mask for transmit
+ *
+ * The chain mask may be modified dynamically, e.g. to operate AP
+ * tx with a reduced number of chains if no clients are associated.
+ * This configuration parameter specifies the nominal chain-mask that
+ * should be used when not operating with a reduced set of tx chains.
+ */
+ __le32 tx_chain_mask;
+
+ /*
+ * the nominal chain mask for receive
+ *
+ * The chain mask may be modified dynamically, e.g. for a client
+ * to use a reduced number of chains for receive if the traffic to
+ * the client is low enough that it doesn't require downlink MIMO
+ * or antenna diversity.
+ * This configuration parameter specifies the nominal chain-mask that
+ * should be used when not operating with a reduced set of rx chains.
+ */
+ __le32 rx_chain_mask;
+
+ /*
+ * what rx reorder timeout (ms) to use for the AC
+ *
+ * Each WMM access class (voice, video, best-effort, background) will
+ * have its own timeout value to dictate how long to wait for missing
+ * rx MPDUs to arrive before flushing subsequent MPDUs that have
+ * already been received.
+ * This parameter specifies the timeout in milliseconds for each
+ * class.
+ */
+ __le32 rx_timeout_pri_vi;
+ __le32 rx_timeout_pri_vo;
+ __le32 rx_timeout_pri_be;
+ __le32 rx_timeout_pri_bk;
+
+ /*
+ * what mode the rx should decap packets to
+ *
+ * MAC can decap to RAW (no decap), native wifi or Ethernet types
+ * This setting also determines the default TX behavior, however TX
+ * behavior can be modified on a per VAP basis during VAP init
+ */
+ __le32 rx_decap_mode;
+
+ /* what is the maximum number of scan requests that can be queued */
+ __le32 scan_max_pending_reqs;
+
+ /* maximum VDEV that could use BMISS offload */
+ __le32 bmiss_offload_max_vdev;
+
+ /* maximum VDEV that could use offload roaming */
+ __le32 roam_offload_max_vdev;
+
+ /* maximum AP profiles that would push to offload roaming */
+ __le32 roam_offload_max_ap_profiles;
+
+ /*
+ * how many groups to use for mcast->ucast conversion
+ *
+ * The target's WAL maintains a table to hold information regarding
+ * which peers belong to a given multicast group, so that if
+ * multicast->unicast conversion is enabled, the target can convert
+ * multicast tx frames to a series of unicast tx frames, to each
+ * peer within the multicast group.
+ * This num_mcast_groups configuration parameter tells the target how
+ * many multicast groups to provide storage for within its multicast
+ * group membership table.
+ */
+ __le32 num_mcast_groups;
+
+ /*
+ * size to alloc for the mcast membership table
+ *
+ * This num_mcast_table_elems configuration parameter tells the
+ * target how many peer elements it needs to provide storage for in
+ * its multicast group membership table.
+ * These multicast group membership table elements are shared by the
+ * multicast groups stored within the table.
+ */
+ __le32 num_mcast_table_elems;
+
+ /*
+ * whether/how to do multicast->unicast conversion
+ *
+ * This configuration parameter specifies whether the target should
+ * perform multicast --> unicast conversion on transmit, and if so,
+ * what to do if it finds no entries in its multicast group
+ * membership table for the multicast IP address in the tx frame.
+ * Configuration value:
+ * 0 -> Do not perform multicast to unicast conversion.
+ * 1 -> Convert multicast frames to unicast, if the IP multicast
+ * address from the tx frame is found in the multicast group
+ * membership table. If the IP multicast address is not found,
+ * drop the frame.
+ * 2 -> Convert multicast frames to unicast, if the IP multicast
+ * address from the tx frame is found in the multicast group
+ * membership table. If the IP multicast address is not found,
+ * transmit the frame as multicast.
+ */
+ __le32 mcast2ucast_mode;
+
+ /*
+ * how much memory to allocate for a tx PPDU dbg log
+ *
+ * This parameter controls how much memory the target will allocate
+ * to store a log of tx PPDU meta-information (how large the PPDU
+ * was, when it was sent, whether it was successful, etc.)
+ */
+ __le32 tx_dbg_log_size;
+
+ /* how many AST entries to be allocated for WDS */
+ __le32 num_wds_entries;
+
+ /*
+ * MAC DMA burst size, e.g. for a PCI target the limit can be
+ * 0 - default, 1 - 256B
+ */
+ __le32 dma_burst_size;
+
+ /*
+ * Fixed delimiters to be inserted after every MPDU to
+ * account for interface latency to avoid underrun.
+ */
+ __le32 mac_aggr_delim;
+
+ /*
+ * determine whether target is responsible for detecting duplicate
+ * non-aggregate MPDU and timing out stale fragments.
+ *
+ * A-MPDU reordering is always performed on the target.
+ *
+ * 0: target responsible for frag timeout and dup checking
+ * 1: host responsible for frag timeout and dup checking
+ */
+ __le32 rx_skip_defrag_timeout_dup_detection_check;
+
+ /*
+ * Configuration for VoW:
+ * number of video nodes to be supported
+ * and max number of descriptors for each video link (node).
+ */
+ __le32 vow_config;
+
+ /* maximum VDEV that could use GTK offload */
+ __le32 gtk_offload_max_vdev;
+
+ /* Number of msdu descriptors target should use */
+ __le32 num_msdu_desc;
+
+ /*
+ * Max. number of Tx fragments per MSDU
+ * This parameter controls the max number of Tx fragments per MSDU.
+ * This is sent by the target as part of the WMI_SERVICE_READY event
+ * and is overridden by the OS shim as required.
+ */
+ __le32 max_frag_entries;
+} __packed;
+
+struct wmi_resource_config_10x {
+ /* number of virtual devices (VAPs) to support */
+ __le32 num_vdevs;
+
+ /* number of peer nodes to support */
+ __le32 num_peers;
+
+ /* number of keys per peer */
+ __le32 num_peer_keys;
+
+ /* total number of TX/RX data TIDs */
+ __le32 num_tids;
+
+ /*
+ * max skid for resolving hash collisions
+ *
+ * The address search table is sparse, so that if two MAC addresses
+ * result in the same hash value, the second of these conflicting
+ * entries can slide to the next index in the address search table,
+ * and use it, if it is unoccupied. This ast_skid_limit parameter
+ * specifies the upper bound on how many subsequent indices to search
+ * over to find an unoccupied space.
+ */
+ __le32 ast_skid_limit;
+
+ /*
+ * the nominal chain mask for transmit
+ *
+ * The chain mask may be modified dynamically, e.g. to operate AP
+ * tx with a reduced number of chains if no clients are associated.
+ * This configuration parameter specifies the nominal chain-mask that
+ * should be used when not operating with a reduced set of tx chains.
+ */
+ __le32 tx_chain_mask;
+
+ /*
+ * the nominal chain mask for receive
+ *
+ * The chain mask may be modified dynamically, e.g. for a client
+ * to use a reduced number of chains for receive if the traffic to
+ * the client is low enough that it doesn't require downlink MIMO
+ * or antenna diversity.
+ * This configuration parameter specifies the nominal chain-mask that
+ * should be used when not operating with a reduced set of rx chains.
+ */
+ __le32 rx_chain_mask;
+
+ /*
+ * what rx reorder timeout (ms) to use for the AC
+ *
+ * Each WMM access class (voice, video, best-effort, background) will
+ * have its own timeout value to dictate how long to wait for missing
+ * rx MPDUs to arrive before flushing subsequent MPDUs that have
+ * already been received.
+ * This parameter specifies the timeout in milliseconds for each
+ * class.
+ */
+ __le32 rx_timeout_pri_vi;
+ __le32 rx_timeout_pri_vo;
+ __le32 rx_timeout_pri_be;
+ __le32 rx_timeout_pri_bk;
+
+ /*
+ * what mode the rx should decap packets to
+ *
+ * MAC can decap to RAW (no decap), native wifi or Ethernet types
+ * This setting also determines the default TX behavior, however TX
+ * behavior can be modified on a per VAP basis during VAP init
+ */
+ __le32 rx_decap_mode;
+
+ /* what is the maximum number of scan requests that can be queued */
+ __le32 scan_max_pending_reqs;
+
+ /* maximum VDEV that could use BMISS offload */
+ __le32 bmiss_offload_max_vdev;
+
+ /* maximum VDEV that could use offload roaming */
+ __le32 roam_offload_max_vdev;
+
+ /* maximum AP profiles that would push to offload roaming */
+ __le32 roam_offload_max_ap_profiles;
+
+ /*
+ * how many groups to use for mcast->ucast conversion
+ *
+ * The target's WAL maintains a table to hold information regarding
+ * which peers belong to a given multicast group, so that if
+ * multicast->unicast conversion is enabled, the target can convert
+ * multicast tx frames to a series of unicast tx frames, to each
+ * peer within the multicast group.
+ * This num_mcast_groups configuration parameter tells the target how
+ * many multicast groups to provide storage for within its multicast
+ * group membership table.
+ */
+ __le32 num_mcast_groups;
+
+ /*
+ * size to alloc for the mcast membership table
+ *
+ * This num_mcast_table_elems configuration parameter tells the
+ * target how many peer elements it needs to provide storage for in
+ * its multicast group membership table.
+ * These multicast group membership table elements are shared by the
+ * multicast groups stored within the table.
+ */
+ __le32 num_mcast_table_elems;
+
+ /*
+ * whether/how to do multicast->unicast conversion
+ *
+ * This configuration parameter specifies whether the target should
+ * perform multicast --> unicast conversion on transmit, and if so,
+ * what to do if it finds no entries in its multicast group
+ * membership table for the multicast IP address in the tx frame.
+ * Configuration value:
+ * 0 -> Do not perform multicast to unicast conversion.
+ * 1 -> Convert multicast frames to unicast, if the IP multicast
+ * address from the tx frame is found in the multicast group
+ * membership table. If the IP multicast address is not found,
+ * drop the frame.
+ * 2 -> Convert multicast frames to unicast, if the IP multicast
+ * address from the tx frame is found in the multicast group
+ * membership table. If the IP multicast address is not found,
+ * transmit the frame as multicast.
+ */
+ __le32 mcast2ucast_mode;
+
+ /*
+ * how much memory to allocate for a tx PPDU dbg log
+ *
+ * This parameter controls how much memory the target will allocate
+ * to store a log of tx PPDU meta-information (how large the PPDU
+ * was, when it was sent, whether it was successful, etc.)
+ */
+ __le32 tx_dbg_log_size;
+
+ /* how many AST entries to be allocated for WDS */
+ __le32 num_wds_entries;
+
+ /*
+ * MAC DMA burst size, e.g. for a PCI target the limit can be
+ * 0 - default, 1 - 256B
+ */
+ __le32 dma_burst_size;
+
+ /*
+ * Fixed delimiters to be inserted after every MPDU to
+ * account for interface latency to avoid underrun.
+ */
+ __le32 mac_aggr_delim;
+
+ /*
+ * determine whether target is responsible for detecting duplicate
+ * non-aggregate MPDU and timing out stale fragments.
+ *
+ * A-MPDU reordering is always performed on the target.
+ *
+ * 0: target responsible for frag timeout and dup checking
+ * 1: host responsible for frag timeout and dup checking
+ */
+ __le32 rx_skip_defrag_timeout_dup_detection_check;
+
+ /*
+ * Configuration for VoW:
+ * number of video nodes to be supported
+ * and max number of descriptors for each video link (node).
+ */
+ __le32 vow_config;
+
+ /* Number of msdu descriptors target should use */
+ __le32 num_msdu_desc;
+
+ /*
+ * Max. number of Tx fragments per MSDU
+ * This parameter controls the max number of Tx fragments per MSDU.
+ * This is sent by the target as part of the WMI_SERVICE_READY event
+ * and is overridden by the OS shim as required.
+ */
+ __le32 max_frag_entries;
+} __packed;
+
+enum wmi_10_2_feature_mask {
+ WMI_10_2_RX_BATCH_MODE = BIT(0),
+ WMI_10_2_ATF_CONFIG = BIT(1),
+ WMI_10_2_COEX_GPIO = BIT(3),
+ WMI_10_2_BSS_CHAN_INFO = BIT(6),
+ WMI_10_2_PEER_STATS = BIT(7),
+};
+
+struct wmi_resource_config_10_2 {
+ struct wmi_resource_config_10x common;
+ __le32 max_peer_ext_stats;
+ __le32 smart_ant_cap; /* 0-disable, 1-enable */
+ __le32 bk_min_free;
+ __le32 be_min_free;
+ __le32 vi_min_free;
+ __le32 vo_min_free;
+ __le32 feature_mask;
+} __packed;
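+
+/*
+ * Sketch (hypothetical helper, not in the original patch): enabling
+ * optional 10.2 features through the feature_mask field, using the
+ * wmi_10_2_feature_mask bits defined above.
+ */
+static inline void wmi_10_2_enable_features_example(struct wmi_resource_config_10_2 *cfg)
+{
+ cfg->feature_mask = __cpu_to_le32(WMI_10_2_RX_BATCH_MODE |
+				   WMI_10_2_PEER_STATS);
+}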
+
+#define NUM_UNITS_IS_NUM_VDEVS BIT(0)
+#define NUM_UNITS_IS_NUM_PEERS BIT(1)
+#define NUM_UNITS_IS_NUM_ACTIVE_PEERS BIT(2)
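+
+/*
+ * Illustrative sketch (not part of the original patch): deriving the unit
+ * count for a wlan_host_mem_req when num_units is tied to a resource
+ * count, per the NUM_UNITS_IS_* flags above. The helper and parameter
+ * names are hypothetical.
+ */
+static inline u32 wmi_mem_req_num_units_example(const struct wlan_host_mem_req *req,
+						u32 num_vdevs, u32 num_peers)
+{
+ u32 info = __le32_to_cpu(req->num_unit_info);
+
+ if (info & NUM_UNITS_IS_NUM_VDEVS)
+  return num_vdevs;
+ if (info & NUM_UNITS_IS_NUM_PEERS)
+  return num_peers;
+
+ return __le32_to_cpu(req->num_units);
+}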
+
+struct wmi_resource_config_10_4 {
+ /* Number of virtual devices (VAPs) to support */
+ __le32 num_vdevs;
+
+ /* Number of peer nodes to support */
+ __le32 num_peers;
+
+ /* Number of active peer nodes to support */
+ __le32 num_active_peers;
+
+ /* In offload mode, target supports features like WOW, chatter and other
+ * protocol offloads. In order to support them some functionalities like
+ * reorder buffering, PN checking need to be done in target.
+ * This determines maximum number of peers supported by target in
+ * offload mode.
+ */
+ __le32 num_offload_peers;
+
+ /* Number of reorder buffers available for doing target based reorder
+ * Rx reorder buffering
+ */
+ __le32 num_offload_reorder_buffs;
+
+ /* Number of keys per peer */
+ __le32 num_peer_keys;
+
+ /* Total number of TX/RX data TIDs */
+ __le32 num_tids;
+
+ /* Max skid for resolving hash collisions.
+ * The address search table is sparse, so that if two MAC addresses
+ * result in the same hash value, the second of these conflicting
+ * entries can slide to the next index in the address search table,
+ * and use it, if it is unoccupied. This ast_skid_limit parameter
+ * specifies the upper bound on how many subsequent indices to search
+ * over to find an unoccupied space.
+ */
+ __le32 ast_skid_limit;
+
+ /* The nominal chain mask for transmit.
+ * The chain mask may be modified dynamically, e.g. to operate AP tx
+ * with a reduced number of chains if no clients are associated.
+ * This configuration parameter specifies the nominal chain-mask that
+ * should be used when not operating with a reduced set of tx chains.
+ */
+ __le32 tx_chain_mask;
+
+ /* The nominal chain mask for receive.
+ * The chain mask may be modified dynamically, e.g. for a client to use
+ * a reduced number of chains for receive if the traffic to the client
+ * is low enough that it doesn't require downlink MIMO or antenna
+ * diversity. This configuration parameter specifies the nominal
+ * chain-mask that should be used when not operating with a reduced
+ * set of rx chains.
+ */
+ __le32 rx_chain_mask;
+
+ /* What rx reorder timeout (ms) to use for the AC.
+ * Each WMM access class (voice, video, best-effort, background) will
+ * have its own timeout value to dictate how long to wait for missing
+ * rx MPDUs to arrive before flushing subsequent MPDUs that have already
+ * been received. This parameter specifies the timeout in milliseconds
+ * for each class.
+ */
+ __le32 rx_timeout_pri[4];
+
+ /* What mode the rx should decap packets to.
+ * MAC can decap to RAW (no decap), native wifi or Ethernet types.
+ * This setting also determines the default TX behavior, however TX
+ * behavior can be modified on a per VAP basis during VAP init
+ */
+ __le32 rx_decap_mode;
+
+ __le32 scan_max_pending_req;
+
+ __le32 bmiss_offload_max_vdev;
+
+ __le32 roam_offload_max_vdev;
+
+ __le32 roam_offload_max_ap_profiles;
+
+ /* How many groups to use for mcast->ucast conversion.
+ * The target's WAL maintains a table to hold information regarding
+ * which peers belong to a given multicast group, so that if
+ * multicast->unicast conversion is enabled, the target can convert
+ * multicast tx frames to a series of unicast tx frames, to each peer
+ * within the multicast group. This num_mcast_groups configuration
+ * parameter tells the target how many multicast groups to provide
+ * storage for within its multicast group membership table.
+ */
+ __le32 num_mcast_groups;
+
+ /* Size to alloc for the mcast membership table.
+ * This num_mcast_table_elems configuration parameter tells the target
+ * how many peer elements it needs to provide storage for in its
+ * multicast group membership table. These multicast group membership
+ * table elements are shared by the multicast groups stored within
+ * the table.
+ */
+ __le32 num_mcast_table_elems;
+
+ /* Whether/how to do multicast->unicast conversion.
+ * This configuration parameter specifies whether the target should
+ * perform multicast --> unicast conversion on transmit, and if so,
+ * what to do if it finds no entries in its multicast group membership
+ * table for the multicast IP address in the tx frame.
+ * Configuration value:
+ * 0 -> Do not perform multicast to unicast conversion.
+ * 1 -> Convert multicast frames to unicast, if the IP multicast address
+ * from the tx frame is found in the multicast group membership
+ * table. If the IP multicast address is not found, drop the frame
+ * 2 -> Convert multicast frames to unicast, if the IP multicast address
+ * from the tx frame is found in the multicast group membership
+ * table. If the IP multicast address is not found, transmit the
+ * frame as multicast.
+ */
+ __le32 mcast2ucast_mode;
+
+ /* How much memory to allocate for a tx PPDU dbg log.
+ * This parameter controls how much memory the target will allocate to
+ * store a log of tx PPDU meta-information (how large the PPDU was,
+ * when it was sent, whether it was successful, etc.)
+ */
+ __le32 tx_dbg_log_size;
+
+ /* How many AST entries to be allocated for WDS */
+ __le32 num_wds_entries;
+
+ /* MAC DMA burst size. 0 - default, 1 - 256B */
+ __le32 dma_burst_size;
+
+ /* Fixed delimiters to be inserted after every MPDU to account for
+ * interface latency to avoid underrun.
+ */
+ __le32 mac_aggr_delim;
+
+ /* Determine whether target is responsible for detecting duplicate
+ * non-aggregate MPDU and timing out stale fragments. A-MPDU reordering
+ * is always performed on the target.
+ *
+ * 0: target responsible for frag timeout and dup checking
+ * 1: host responsible for frag timeout and dup checking
+ */
+ __le32 rx_skip_defrag_timeout_dup_detection_check;
+
+ /* Configuration for VoW: number of video nodes to be supported and max
+ * number of descriptors for each video link (node).
+ */
+ __le32 vow_config;
+
+ /* Maximum vdev that could use gtk offload */
+ __le32 gtk_offload_max_vdev;
+
+ /* Number of msdu descriptors target should use */
+ __le32 num_msdu_desc;
+
+ /* Max number of tx fragments per MSDU.
+ * This parameter controls the max number of tx fragments per MSDU.
+ * This will be passed by the target as part of the WMI_SERVICE_READY
+ * event and is overridden by the OS shim as required.
+ */
+ __le32 max_frag_entries;
+
+ /* Max number of extended peer stats.
+ * This parameter controls the max number of peers for which extended
+ * statistics are supported by target
+ */
+ __le32 max_peer_ext_stats;
+
+ /* Smart antenna capabilities information.
+ * 1 - Smart antenna is enabled
+ * 0 - Smart antenna is disabled
+ * In future this can contain smart antenna specific capabilities.
+ */
+ __le32 smart_ant_cap;
+
+ /* User can configure the buffers allocated for each AC (BE, BK, VI, VO)
+ * during init.
+ */
+ __le32 bk_minfree;
+ __le32 be_minfree;
+ __le32 vi_minfree;
+ __le32 vo_minfree;
+
+ /* Rx batch mode capability.
+ * 1 - Rx batch mode enabled
+ * 0 - Rx batch mode disabled
+ */
+ __le32 rx_batchmode;
+
+ /* Thermal throttling capability.
+ * 1 - Capable of thermal throttling
+ * 0 - Not capable of thermal throttling
+ */
+ __le32 tt_support;
+
+ /* ATF configuration.
+ * 1 - Enable ATF
+ * 0 - Disable ATF
+ */
+ __le32 atf_config;
+
+ /* Configure padding to manage IP header un-alignment
+ * 1 - Enable padding
+ * 0 - Disable padding
+ */
+ __le32 iphdr_pad_config;
+
+ /* qwrap configuration (bits 15-0)
+ * 1 - This is qwrap configuration
+ * 0 - This is not qwrap
+ *
+ * Bits 31-16 are alloc_frag_desc_for_data_pkt (1 enables, 0 disables).
+ * In order to get ack-RSSI reporting and to specify the tx-rate for
+ * individual frames, this option must be enabled. This uses an extra
+ * 4 bytes per tx-msdu descriptor, so don't enable it unless you need it.
+ * See the packing sketch after this struct.
+ */
+ __le32 qwrap_config;
+} __packed;
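+
+/* Illustrative packing sketch, not part of the original header. The two
+ * qwrap_config sub-fields described above combine as:
+ *
+ *	val = (alloc_frag_desc_for_data_pkt << 16) | (is_qwrap & 0xffff);
+ *	cfg->qwrap_config = __cpu_to_le32(val);
+ *
+ * where is_qwrap and alloc_frag_desc_for_data_pkt are hypothetical local
+ * variables holding the 0/1 settings.
+ */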
+
+enum wmi_coex_version {
+ WMI_NO_COEX_VERSION_SUPPORT = 0,
+ /* 3 wire coex support */
+ WMI_COEX_VERSION_1 = 1,
+ /* 2.5 wire coex support */
+ WMI_COEX_VERSION_2 = 2,
+ /* 2.5 wire coex with duty cycle support */
+ WMI_COEX_VERSION_3 = 3,
+ /* 4 wire coex support */
+ WMI_COEX_VERSION_4 = 4,
+};
+
+/**
+ * enum wmi_10_4_feature_mask - WMI 10.4 feature enable/disable flags
+ * @WMI_10_4_LTEU_SUPPORT: LTEU config
+ * @WMI_10_4_COEX_GPIO_SUPPORT: COEX GPIO config
+ * @WMI_10_4_AUX_RADIO_SPECTRAL_INTF: AUX Radio Enhancement for spectral scan
+ * @WMI_10_4_AUX_RADIO_CHAN_LOAD_INTF: AUX Radio Enhancement for chan load scan
+ * @WMI_10_4_BSS_CHANNEL_INFO_64: BSS channel info stats
+ * @WMI_10_4_PEER_STATS: Per station stats
+ * @WMI_10_4_VDEV_STATS: Per vdev stats
+ * @WMI_10_4_TDLS: Implicit TDLS support in firmware enable/disable
+ * @WMI_10_4_TDLS_OFFCHAN: TDLS offchannel support enable/disable
+ * @WMI_10_4_TDLS_UAPSD_BUFFER_STA: TDLS buffer sta support enable/disable
+ * @WMI_10_4_TDLS_UAPSD_SLEEP_STA: TDLS sleep sta support enable/disable
+ * @WMI_10_4_TDLS_CONN_TRACKER_IN_HOST_MODE: TDLS connection tracker in host
+ * enable/disable
+ * @WMI_10_4_TDLS_EXPLICIT_MODE_ONLY: Explicit TDLS mode enable/disable
+ * @WMI_10_4_TX_DATA_ACK_RSSI: Enable DATA ACK RSSI if firmware is capable
+ * @WMI_10_4_EXT_PEER_TID_CONFIGS_SUPPORT: Firmware supports Extended Peer
+ * TID configuration for QoS related settings
+ * @WMI_10_4_REPORT_AIRTIME: Firmware supports transmit airtime reporting
+ */
+enum wmi_10_4_feature_mask {
+ WMI_10_4_LTEU_SUPPORT = BIT(0),
+ WMI_10_4_COEX_GPIO_SUPPORT = BIT(1),
+ WMI_10_4_AUX_RADIO_SPECTRAL_INTF = BIT(2),
+ WMI_10_4_AUX_RADIO_CHAN_LOAD_INTF = BIT(3),
+ WMI_10_4_BSS_CHANNEL_INFO_64 = BIT(4),
+ WMI_10_4_PEER_STATS = BIT(5),
+ WMI_10_4_VDEV_STATS = BIT(6),
+ WMI_10_4_TDLS = BIT(7),
+ WMI_10_4_TDLS_OFFCHAN = BIT(8),
+ WMI_10_4_TDLS_UAPSD_BUFFER_STA = BIT(9),
+ WMI_10_4_TDLS_UAPSD_SLEEP_STA = BIT(10),
+ WMI_10_4_TDLS_CONN_TRACKER_IN_HOST_MODE = BIT(11),
+ WMI_10_4_TDLS_EXPLICIT_MODE_ONLY = BIT(12),
+ WMI_10_4_TX_DATA_ACK_RSSI = BIT(16),
+ WMI_10_4_EXT_PEER_TID_CONFIGS_SUPPORT = BIT(17),
+ WMI_10_4_REPORT_AIRTIME = BIT(18),
+};
+
+/* WMI_GPIO_CONFIG_CMDID */
+enum {
+ WMI_GPIO_PULL_NONE,
+ WMI_GPIO_PULL_UP,
+ WMI_GPIO_PULL_DOWN,
+};
+
+enum {
+ WMI_GPIO_INTTYPE_DISABLE,
+ WMI_GPIO_INTTYPE_RISING_EDGE,
+ WMI_GPIO_INTTYPE_FALLING_EDGE,
+ WMI_GPIO_INTTYPE_BOTH_EDGE,
+ WMI_GPIO_INTTYPE_LEVEL_LOW,
+ WMI_GPIO_INTTYPE_LEVEL_HIGH
+};
+
+/* WMI_GPIO_CONFIG_CMDID */
+struct wmi_gpio_config_cmd {
+ __le32 gpio_num; /* GPIO number to be setup */
+ __le32 input; /* 0 - Output, 1 - Input */
+ __le32 pull_type; /* Pull type defined above */
+ __le32 intr_mode; /* Interrupt mode defined above (Input) */
+} __packed;
+
+/* WMI_GPIO_OUTPUT_CMDID */
+struct wmi_gpio_output_cmd {
+ __le32 gpio_num; /* GPIO number to be setup */
+ __le32 set; /* Set the GPIO pin */
+} __packed;
+
+/* WMI_GPIO_INPUT_EVENTID */
+struct wmi_gpio_input_event {
+ __le32 gpio_num; /* GPIO number which changed state */
+} __packed;
+
+struct wmi_ext_resource_config_10_4_cmd {
+ /* contains enum wmi_host_platform_type */
+ __le32 host_platform_config;
+ /* see enum wmi_10_4_feature_mask */
+ __le32 fw_feature_bitmap;
+ /* WLAN priority GPIO number */
+ __le32 wlan_gpio_priority;
+ /* see enum wmi_coex_version */
+ __le32 coex_version;
+ /* COEX GPIO config */
+ __le32 coex_gpio_pin1;
+ __le32 coex_gpio_pin2;
+ __le32 coex_gpio_pin3;
+ /* number of vdevs allowed to perform tdls */
+ __le32 num_tdls_vdevs;
+ /* number of peers to track per TDLS vdev */
+ __le32 num_tdls_conn_table_entries;
+ /* number of tdls sleep sta supported */
+ __le32 max_tdls_concurrent_sleep_sta;
+ /* number of tdls buffer sta supported */
+ __le32 max_tdls_concurrent_buffer_sta;
+};
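+
+/* Minimal fill-in sketch (illustrative only): fw_feature_bitmap takes
+ * ORed enum wmi_10_4_feature_mask bits and coex_version takes one of the
+ * enum wmi_coex_version values defined above, e.g.:
+ *
+ *	cmd->fw_feature_bitmap =
+ *		__cpu_to_le32(WMI_10_4_PEER_STATS | WMI_10_4_TDLS);
+ *	cmd->coex_version = __cpu_to_le32(WMI_COEX_VERSION_1);
+ */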
+
+/* structure describing host memory chunk. */
+struct host_memory_chunk {
+ /* id of the request that is passed up in service ready */
+ __le32 req_id;
+ /* the physical address of the memory chunk */
+ __le32 ptr;
+ /* size of the chunk */
+ __le32 size;
+} __packed;
+
+#define WMI_IRAM_RECOVERY_HOST_MEM_REQ_ID 8
+
+struct wmi_host_mem_chunks {
+ __le32 count;
+ /* some fw revisions require at least 1 chunk regardless of count */
+ union {
+ struct host_memory_chunk item;
+ DECLARE_FLEX_ARRAY(struct host_memory_chunk, items);
+ };
+} __packed;
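+
+/* Sizing sketch (illustrative): the union keeps the one-chunk layout in
+ * sizeof() for firmware that insists on at least one chunk, while items[]
+ * allows normal flexible-array arithmetic. A command carrying 'count'
+ * chunks therefore occupies:
+ *
+ *	len = sizeof(chunks->count) +
+ *	      max_t(u32, count, 1) * sizeof(struct host_memory_chunk);
+ */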
+
+struct wmi_init_cmd {
+ struct wmi_resource_config resource_config;
+ struct wmi_host_mem_chunks mem_chunks;
+} __packed;
+
+/* _10x structure is from 10.X FW API */
+struct wmi_init_cmd_10x {
+ struct wmi_resource_config_10x resource_config;
+ struct wmi_host_mem_chunks mem_chunks;
+} __packed;
+
+struct wmi_init_cmd_10_2 {
+ struct wmi_resource_config_10_2 resource_config;
+ struct wmi_host_mem_chunks mem_chunks;
+} __packed;
+
+struct wmi_init_cmd_10_4 {
+ struct wmi_resource_config_10_4 resource_config;
+ struct wmi_host_mem_chunks mem_chunks;
+} __packed;
+
+struct wmi_chan_list_entry {
+ __le16 freq;
+ u8 phy_mode; /* valid for 10.2 only */
+ u8 reserved;
+} __packed;
+
+/* TLV for channel list */
+struct wmi_chan_list {
+ __le32 tag; /* WMI_CHAN_LIST_TAG */
+ __le32 num_chan;
+ struct wmi_chan_list_entry channel_list[];
+} __packed;
+
+struct wmi_bssid_list {
+ __le32 tag; /* WMI_BSSID_LIST_TAG */
+ __le32 num_bssid;
+ struct wmi_mac_addr bssid_list[];
+} __packed;
+
+struct wmi_ie_data {
+ __le32 tag; /* WMI_IE_TAG */
+ __le32 ie_len;
+ u8 ie_data[];
+} __packed;
+
+struct wmi_ssid {
+ __le32 ssid_len;
+ u8 ssid[32];
+} __packed;
+
+struct wmi_ssid_list {
+ __le32 tag; /* WMI_SSID_LIST_TAG */
+ __le32 num_ssids;
+ struct wmi_ssid ssids[];
+} __packed;
+
+/* prefix used by scan requestor ids on the host */
+#define WMI_HOST_SCAN_REQUESTOR_ID_PREFIX 0xA000
+
+/* prefix used by scan request ids generated on the host */
+/* host cycles through the lower 12 bits to generate ids */
+#define WMI_HOST_SCAN_REQ_ID_PREFIX 0xA000
+
+#define WLAN_SCAN_PARAMS_MAX_SSID 16
+#define WLAN_SCAN_PARAMS_MAX_BSSID 4
+#define WLAN_SCAN_PARAMS_MAX_IE_LEN 256
+
+/* Values lower than this may be refused by some firmware revisions with a
+ * scan completion carrying a timed-out reason.
+ */
+#define WMI_SCAN_CHAN_MIN_TIME_MSEC 40
+
+/* Scan priority numbers must be sequential, starting with 0 */
+enum wmi_scan_priority {
+ WMI_SCAN_PRIORITY_VERY_LOW = 0,
+ WMI_SCAN_PRIORITY_LOW,
+ WMI_SCAN_PRIORITY_MEDIUM,
+ WMI_SCAN_PRIORITY_HIGH,
+ WMI_SCAN_PRIORITY_VERY_HIGH,
+ WMI_SCAN_PRIORITY_COUNT /* number of priorities supported */
+};
+
+struct wmi_start_scan_common {
+ /* Scan ID */
+ __le32 scan_id;
+ /* Scan requestor ID */
+ __le32 scan_req_id;
+ /* VDEV id (interface) that is requesting scan */
+ __le32 vdev_id;
+ /* Scan Priority, input to scan scheduler */
+ __le32 scan_priority;
+ /* Scan events subscription */
+ __le32 notify_scan_events;
+ /* dwell time in msec on active channels */
+ __le32 dwell_time_active;
+ /* dwell time in msec on passive channels */
+ __le32 dwell_time_passive;
+ /*
+ * min time in msec on the BSS channel, only valid if at least one
+ * VDEV is active
+ */
+ __le32 min_rest_time;
+ /*
+ * max rest time in msec on the BSS channel, only valid if at least
+ * one VDEV is active.
+ *
+ * The scanner will rest on the BSS channel for at least
+ * min_rest_time. After min_rest_time the scanner will start checking
+ * for tx/rx activity on all VDEVs. If there is no activity the
+ * scanner will switch to off channel. If there is activity the
+ * scanner will leave the radio on the BSS channel until
+ * max_rest_time expires. At max_rest_time the scanner will switch to
+ * off channel irrespective of activity. Activity is determined by
+ * the idle_time parameter.
+ */
+ __le32 max_rest_time;
+ /*
+ * time before sending next set of probe requests.
+ * The scanner keeps repeating probe requests transmission with
+ * period specified by repeat_probe_time.
+ * The number of probe requests specified depends on the ssid_list
+ * and bssid_list
+ */
+ __le32 repeat_probe_time;
+ /* time in msec between 2 consecutive probe requests within a set. */
+ __le32 probe_spacing_time;
+ /*
+ * data inactivity time in msec on bss channel that will be used by
+ * scanner for measuring the inactivity.
+ */
+ __le32 idle_time;
+ /* maximum time in msec allowed for scan */
+ __le32 max_scan_time;
+ /*
+ * delay in msec before sending first probe request after switching
+ * to a channel
+ */
+ __le32 probe_delay;
+ /* Scan control flags */
+ __le32 scan_ctrl_flags;
+} __packed;
+
+struct wmi_start_scan_cmd {
+ struct wmi_start_scan_common common;
+ __le32 burst_duration_ms;
+ u8 tlvs[];
+} __packed;
+
+/* This is the definition from 10.X firmware branch */
+struct wmi_10x_start_scan_cmd {
+ struct wmi_start_scan_common common;
+ u8 tlvs[];
+} __packed;
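+
+/* TLV layout sketch (illustrative): tlvs[] carries the tagged blocks
+ * defined above (wmi_chan_list, wmi_bssid_list, wmi_ie_data,
+ * wmi_ssid_list) back to back, e.g. for the channel list, using the
+ * wmi_start_scan_arg fields defined below:
+ *
+ *	struct wmi_chan_list *cl = (void *)cmd->tlvs;
+ *
+ *	cl->tag = __cpu_to_le32(WMI_CHAN_LIST_TAG);
+ *	cl->num_chan = __cpu_to_le32(arg->n_channels);
+ *	for (i = 0; i < arg->n_channels; i++)
+ *		cl->channel_list[i].freq = __cpu_to_le16(arg->channels[i]);
+ */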
+
+struct wmi_ssid_arg {
+ int len;
+ const u8 *ssid;
+};
+
+struct wmi_bssid_arg {
+ const u8 *bssid;
+};
+
+struct wmi_start_scan_arg {
+ u32 scan_id;
+ u32 scan_req_id;
+ u32 vdev_id;
+ u32 scan_priority;
+ u32 notify_scan_events;
+ u32 dwell_time_active;
+ u32 dwell_time_passive;
+ u32 min_rest_time;
+ u32 max_rest_time;
+ u32 repeat_probe_time;
+ u32 probe_spacing_time;
+ u32 idle_time;
+ u32 max_scan_time;
+ u32 probe_delay;
+ u32 scan_ctrl_flags;
+ u32 burst_duration_ms;
+
+ u32 ie_len;
+ u32 n_channels;
+ u32 n_ssids;
+ u32 n_bssids;
+
+ u8 ie[WLAN_SCAN_PARAMS_MAX_IE_LEN];
+ u16 channels[64];
+ struct wmi_ssid_arg ssids[WLAN_SCAN_PARAMS_MAX_SSID];
+ struct wmi_bssid_arg bssids[WLAN_SCAN_PARAMS_MAX_BSSID];
+ struct wmi_mac_addr mac_addr;
+ struct wmi_mac_addr mac_mask;
+};
+
+/* scan control flags */
+
+/* passively scan all channels including active channels */
+#define WMI_SCAN_FLAG_PASSIVE 0x1
+/* add wildcard ssid probe request even though ssid_list is specified. */
+#define WMI_SCAN_ADD_BCAST_PROBE_REQ 0x2
+/* add cck rates to rates/xrate ie for the generated probe request */
+#define WMI_SCAN_ADD_CCK_RATES 0x4
+/* add ofdm rates to rates/xrate ie for the generated probe request */
+#define WMI_SCAN_ADD_OFDM_RATES 0x8
+/* To enable indication of Chan load and Noise floor to host */
+#define WMI_SCAN_CHAN_STAT_EVENT 0x10
+/* Filter Probe request frames */
+#define WMI_SCAN_FILTER_PROBE_REQ 0x20
+/* When set, DFS channels will not be scanned */
+#define WMI_SCAN_BYPASS_DFS_CHN 0x40
+/* Different FW scan engines may choose to bail out on errors.
+ * Allow the driver to have influence over that.
+ */
+#define WMI_SCAN_CONTINUE_ON_ERROR 0x80
+
+/* Use random MAC address for TA for Probe Request frame and add
+ * OUI specified by WMI_SCAN_PROB_REQ_OUI_CMDID to the Probe Request frame.
+ * If the OUI is not set by WMI_SCAN_PROB_REQ_OUI_CMDID then the flag is ignored.
+ */
+#define WMI_SCAN_ADD_SPOOFED_MAC_IN_PROBE_REQ 0x1000
+
+/* WMI_SCAN_CLASS_MASK must be the same value as IEEE80211_SCAN_CLASS_MASK */
+#define WMI_SCAN_CLASS_MASK 0xFF000000
+
+enum wmi_stop_scan_type {
+ WMI_SCAN_STOP_ONE = 0x00000000, /* stop by scan_id */
+ WMI_SCAN_STOP_VDEV_ALL = 0x01000000, /* stop by vdev_id */
+ WMI_SCAN_STOP_ALL = 0x04000000, /* stop all scans */
+};
+
+struct wmi_stop_scan_cmd {
+ __le32 scan_req_id;
+ __le32 scan_id;
+ __le32 req_type;
+ __le32 vdev_id;
+} __packed;
+
+struct wmi_stop_scan_arg {
+ u32 req_id;
+ enum wmi_stop_scan_type req_type;
+ union {
+ u32 scan_id;
+ u32 vdev_id;
+ } u;
+};
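+
+/* Fill-in sketch (illustrative): which union member is meaningful follows
+ * from the req_type, mirroring the enum wmi_stop_scan_type comments:
+ *
+ *	cmd->req_type = __cpu_to_le32(arg->req_type);
+ *	if (arg->req_type == WMI_SCAN_STOP_ONE)
+ *		cmd->scan_id = __cpu_to_le32(arg->u.scan_id);
+ *	else if (arg->req_type == WMI_SCAN_STOP_VDEV_ALL)
+ *		cmd->vdev_id = __cpu_to_le32(arg->u.vdev_id);
+ */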
+
+struct wmi_scan_chan_list_cmd {
+ __le32 num_scan_chans;
+ struct wmi_channel chan_info[];
+} __packed;
+
+struct wmi_scan_chan_list_arg {
+ u32 n_channels;
+ struct wmi_channel_arg *channels;
+};
+
+enum wmi_bss_filter {
+ WMI_BSS_FILTER_NONE = 0, /* no beacons forwarded */
+ WMI_BSS_FILTER_ALL, /* all beacons forwarded */
+ WMI_BSS_FILTER_PROFILE, /* only beacons matching profile */
+ WMI_BSS_FILTER_ALL_BUT_PROFILE, /* all but beacons matching profile */
+ WMI_BSS_FILTER_CURRENT_BSS, /* only beacons matching current BSS */
+ WMI_BSS_FILTER_ALL_BUT_BSS, /* all but beacons matching BSS */
+ WMI_BSS_FILTER_PROBED_SSID, /* beacons matching probed ssid */
+ WMI_BSS_FILTER_LAST_BSS, /* marker only */
+};
+
+enum wmi_scan_event_type {
+ WMI_SCAN_EVENT_STARTED = BIT(0),
+ WMI_SCAN_EVENT_COMPLETED = BIT(1),
+ WMI_SCAN_EVENT_BSS_CHANNEL = BIT(2),
+ WMI_SCAN_EVENT_FOREIGN_CHANNEL = BIT(3),
+ WMI_SCAN_EVENT_DEQUEUED = BIT(4),
+ /* possibly by high-prio scan */
+ WMI_SCAN_EVENT_PREEMPTED = BIT(5),
+ WMI_SCAN_EVENT_START_FAILED = BIT(6),
+ WMI_SCAN_EVENT_RESTARTED = BIT(7),
+ WMI_SCAN_EVENT_FOREIGN_CHANNEL_EXIT = BIT(8),
+ WMI_SCAN_EVENT_MAX = BIT(15),
+};
+
+enum wmi_scan_completion_reason {
+ WMI_SCAN_REASON_COMPLETED,
+ WMI_SCAN_REASON_CANCELLED,
+ WMI_SCAN_REASON_PREEMPTED,
+ WMI_SCAN_REASON_TIMEDOUT,
+ WMI_SCAN_REASON_INTERNAL_FAILURE,
+ WMI_SCAN_REASON_MAX,
+};
+
+struct wmi_scan_event {
+ __le32 event_type; /* %WMI_SCAN_EVENT_ */
+ __le32 reason; /* %WMI_SCAN_REASON_ */
+ __le32 channel_freq; /* only valid for WMI_SCAN_EVENT_FOREIGN_CHANNEL */
+ __le32 scan_req_id;
+ __le32 scan_id;
+ __le32 vdev_id;
+} __packed;
+
+/*
+ * This defines how much headroom is kept in the
+ * receive frame between the descriptor and the
+ * payload, in order for the WMI PHY error and
+ * management handler to insert header contents.
+ *
+ * This is in bytes.
+ */
+#define WMI_MGMT_RX_HDR_HEADROOM 52
+
+/*
+ * This event will be used for sending scan results
+ * as well as rx mgmt frames to the host. The rx buffer
+ * will be sent as part of this WMI event. It would be a
+ * good idea to pass all the fields in the RX status
+ * descriptor up to the host.
+ */
+struct wmi_mgmt_rx_hdr_v1 {
+ __le32 channel;
+ __le32 snr;
+ __le32 rate;
+ __le32 phy_mode;
+ __le32 buf_len;
+ __le32 status; /* %WMI_RX_STATUS_ */
+} __packed;
+
+struct wmi_mgmt_rx_hdr_v2 {
+ struct wmi_mgmt_rx_hdr_v1 v1;
+ __le32 rssi_ctl[4];
+} __packed;
+
+struct wmi_mgmt_rx_event_v1 {
+ struct wmi_mgmt_rx_hdr_v1 hdr;
+ u8 buf[];
+} __packed;
+
+struct wmi_mgmt_rx_event_v2 {
+ struct wmi_mgmt_rx_hdr_v2 hdr;
+ u8 buf[];
+} __packed;
+
+struct wmi_10_4_mgmt_rx_hdr {
+ __le32 channel;
+ __le32 snr;
+ u8 rssi_ctl[4];
+ __le32 rate;
+ __le32 phy_mode;
+ __le32 buf_len;
+ __le32 status;
+} __packed;
+
+struct wmi_10_4_mgmt_rx_event {
+ struct wmi_10_4_mgmt_rx_hdr hdr;
+ u8 buf[];
+} __packed;
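+
+/* Parsing sketch (illustrative): in every rx variant above, buf_len gives
+ * the number of frame bytes in buf[], so a v2 event is consumed as:
+ *
+ *	struct wmi_mgmt_rx_event_v2 *ev = (void *)data;
+ *	u32 len = __le32_to_cpu(ev->hdr.v1.buf_len);
+ *
+ * with the 802.11 frame at ev->buf[0..len - 1].
+ */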
+
+struct wmi_mgmt_rx_ext_info {
+ __le64 rx_mac_timestamp;
+} __packed __aligned(4);
+
+#define WMI_RX_STATUS_OK 0x00
+#define WMI_RX_STATUS_ERR_CRC 0x01
+#define WMI_RX_STATUS_ERR_DECRYPT 0x08
+#define WMI_RX_STATUS_ERR_MIC 0x10
+#define WMI_RX_STATUS_ERR_KEY_CACHE_MISS 0x20
+/* Extension data at the end of mgmt frame */
+#define WMI_RX_STATUS_EXT_INFO 0x40
+
+#define PHY_ERROR_GEN_SPECTRAL_SCAN 0x26
+#define PHY_ERROR_GEN_FALSE_RADAR_EXT 0x24
+#define PHY_ERROR_GEN_RADAR 0x05
+
+#define PHY_ERROR_10_4_RADAR_MASK 0x4
+#define PHY_ERROR_10_4_SPECTRAL_SCAN_MASK 0x4000000
+
+enum phy_err_type {
+ PHY_ERROR_UNKNOWN,
+ PHY_ERROR_SPECTRAL_SCAN,
+ PHY_ERROR_FALSE_RADAR_EXT,
+ PHY_ERROR_RADAR
+};
+
+struct wmi_phyerr {
+ __le32 tsf_timestamp;
+ __le16 freq1;
+ __le16 freq2;
+ u8 rssi_combined;
+ u8 chan_width_mhz;
+ u8 phy_err_code;
+ u8 rsvd0;
+ __le32 rssi_chains[4];
+ __le16 nf_chains[4];
+ __le32 buf_len;
+ u8 buf[];
+} __packed;
+
+struct wmi_phyerr_event {
+ __le32 num_phyerrs;
+ __le32 tsf_l32;
+ __le32 tsf_u32;
+
+ /* array of struct wmi_phyerr */
+ u8 phyerrs[];
+} __packed;
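+
+/* Iteration sketch (illustrative): phyerrs[] packs num_phyerrs
+ * variable-length wmi_phyerr records; each record advances by its fixed
+ * header plus buf_len payload bytes:
+ *
+ *	const struct wmi_phyerr *ph = (const void *)ev->phyerrs;
+ *
+ *	for (i = 0; i < __le32_to_cpu(ev->num_phyerrs); i++) {
+ *		u32 len = __le32_to_cpu(ph->buf_len);
+ *		... handle ph and ph->buf[0..len - 1] ...
+ *		ph = (const void *)(ph->buf + len);
+ *	}
+ */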
+
+struct wmi_10_4_phyerr_event {
+ __le32 tsf_l32;
+ __le32 tsf_u32;
+ __le16 freq1;
+ __le16 freq2;
+ u8 rssi_combined;
+ u8 chan_width_mhz;
+ u8 phy_err_code;
+ u8 rsvd0;
+ __le32 rssi_chains[4];
+ __le16 nf_chains[4];
+ __le32 phy_err_mask[2];
+ __le32 tsf_timestamp;
+ __le32 buf_len;
+ u8 buf[];
+} __packed;
+
+struct wmi_radar_found_info {
+ __le32 pri_min;
+ __le32 pri_max;
+ __le32 width_min;
+ __le32 width_max;
+ __le32 sidx_min;
+ __le32 sidx_max;
+} __packed;
+
+enum wmi_radar_confirmation_status {
+ /* Detected radar was due to SW pulses */
+ WMI_SW_RADAR_DETECTED = 0,
+
+ WMI_RADAR_DETECTION_FAIL = 1,
+
+ /* Real radar detected */
+ WMI_HW_RADAR_DETECTED = 2,
+};
+
+#define PHYERR_TLV_SIG 0xBB
+#define PHYERR_TLV_TAG_SEARCH_FFT_REPORT 0xFB
+#define PHYERR_TLV_TAG_RADAR_PULSE_SUMMARY 0xF8
+#define PHYERR_TLV_TAG_SPECTRAL_SUMMARY_REPORT 0xF9
+
+struct phyerr_radar_report {
+ __le32 reg0; /* RADAR_REPORT_REG0_* */
+ __le32 reg1; /* RADAR_REPORT_REG1_* */
+} __packed;
+
+#define RADAR_REPORT_REG0_PULSE_IS_CHIRP_MASK 0x80000000
+#define RADAR_REPORT_REG0_PULSE_IS_CHIRP_LSB 31
+
+#define RADAR_REPORT_REG0_PULSE_IS_MAX_WIDTH_MASK 0x40000000
+#define RADAR_REPORT_REG0_PULSE_IS_MAX_WIDTH_LSB 30
+
+#define RADAR_REPORT_REG0_AGC_TOTAL_GAIN_MASK 0x3FF00000
+#define RADAR_REPORT_REG0_AGC_TOTAL_GAIN_LSB 20
+
+#define RADAR_REPORT_REG0_PULSE_DELTA_DIFF_MASK 0x000F0000
+#define RADAR_REPORT_REG0_PULSE_DELTA_DIFF_LSB 16
+
+#define RADAR_REPORT_REG0_PULSE_DELTA_PEAK_MASK 0x0000FC00
+#define RADAR_REPORT_REG0_PULSE_DELTA_PEAK_LSB 10
+
+#define RADAR_REPORT_REG0_PULSE_SIDX_MASK 0x000003FF
+#define RADAR_REPORT_REG0_PULSE_SIDX_LSB 0
+
+#define RADAR_REPORT_REG1_PULSE_SRCH_FFT_VALID_MASK 0x80000000
+#define RADAR_REPORT_REG1_PULSE_SRCH_FFT_VALID_LSB 31
+
+#define RADAR_REPORT_REG1_PULSE_AGC_MB_GAIN_MASK 0x7F000000
+#define RADAR_REPORT_REG1_PULSE_AGC_MB_GAIN_LSB 24
+
+#define RADAR_REPORT_REG1_PULSE_SUBCHAN_MASK_MASK 0x00FF0000
+#define RADAR_REPORT_REG1_PULSE_SUBCHAN_MASK_LSB 16
+
+#define RADAR_REPORT_REG1_PULSE_TSF_OFFSET_MASK 0x0000FF00
+#define RADAR_REPORT_REG1_PULSE_TSF_OFFSET_LSB 8
+
+#define RADAR_REPORT_REG1_PULSE_DUR_MASK 0x000000FF
+#define RADAR_REPORT_REG1_PULSE_DUR_LSB 0
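+
+/* Field extraction sketch (illustrative): each _MASK/_LSB pair above is
+ * applied as (val & MASK) >> LSB, e.g. for the pulse sidx:
+ *
+ *	u32 reg0 = __le32_to_cpu(rr->reg0);
+ *	u32 sidx = (reg0 & RADAR_REPORT_REG0_PULSE_SIDX_MASK) >>
+ *		   RADAR_REPORT_REG0_PULSE_SIDX_LSB;
+ */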
+
+struct phyerr_fft_report {
+ __le32 reg0; /* SEARCH_FFT_REPORT_REG0_ * */
+ __le32 reg1; /* SEARCH_FFT_REPORT_REG1_ * */
+} __packed;
+
+#define SEARCH_FFT_REPORT_REG0_TOTAL_GAIN_DB_MASK 0xFF800000
+#define SEARCH_FFT_REPORT_REG0_TOTAL_GAIN_DB_LSB 23
+
+#define SEARCH_FFT_REPORT_REG0_BASE_PWR_DB_MASK 0x007FC000
+#define SEARCH_FFT_REPORT_REG0_BASE_PWR_DB_LSB 14
+
+#define SEARCH_FFT_REPORT_REG0_FFT_CHN_IDX_MASK 0x00003000
+#define SEARCH_FFT_REPORT_REG0_FFT_CHN_IDX_LSB 12
+
+#define SEARCH_FFT_REPORT_REG0_PEAK_SIDX_MASK 0x00000FFF
+#define SEARCH_FFT_REPORT_REG0_PEAK_SIDX_LSB 0
+
+#define SEARCH_FFT_REPORT_REG1_RELPWR_DB_MASK 0xFC000000
+#define SEARCH_FFT_REPORT_REG1_RELPWR_DB_LSB 26
+
+#define SEARCH_FFT_REPORT_REG1_AVGPWR_DB_MASK 0x03FC0000
+#define SEARCH_FFT_REPORT_REG1_AVGPWR_DB_LSB 18
+
+#define SEARCH_FFT_REPORT_REG1_PEAK_MAG_MASK 0x0003FF00
+#define SEARCH_FFT_REPORT_REG1_PEAK_MAG_LSB 8
+
+#define SEARCH_FFT_REPORT_REG1_NUM_STR_BINS_IB_MASK 0x000000FF
+#define SEARCH_FFT_REPORT_REG1_NUM_STR_BINS_IB_LSB 0
+
+struct phyerr_tlv {
+ __le16 len;
+ u8 tag;
+ u8 sig;
+} __packed;
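+
+/* TLV walk sketch (illustrative): a phyerr buffer holds a sequence of
+ * phyerr_tlv headers, each followed by len payload bytes; sig must match
+ * PHYERR_TLV_SIG and tag selects the payload type:
+ *
+ *	const struct phyerr_tlv *tlv = (const void *)buf;
+ *
+ *	if (tlv->sig == PHYERR_TLV_SIG &&
+ *	    tlv->tag == PHYERR_TLV_TAG_RADAR_PULSE_SUMMARY)
+ *		rr = (const struct phyerr_radar_report *)(tlv + 1);
+ */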
+
+#define DFS_RSSI_POSSIBLY_FALSE 50
+#define DFS_PEAK_MAG_THOLD_POSSIBLY_FALSE 40
+
+struct wmi_mgmt_tx_hdr {
+ __le32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ __le32 tx_rate;
+ __le32 tx_power;
+ __le32 buf_len;
+} __packed;
+
+struct wmi_mgmt_tx_cmd {
+ struct wmi_mgmt_tx_hdr hdr;
+ u8 buf[];
+} __packed;
+
+struct wmi_echo_event {
+ __le32 value;
+} __packed;
+
+struct wmi_echo_cmd {
+ __le32 value;
+} __packed;
+
+struct wmi_pdev_set_regdomain_cmd {
+ __le32 reg_domain;
+ __le32 reg_domain_2G;
+ __le32 reg_domain_5G;
+ __le32 conformance_test_limit_2G;
+ __le32 conformance_test_limit_5G;
+} __packed;
+
+enum wmi_dfs_region {
+ /* Uninitialized dfs domain */
+ WMI_UNINIT_DFS_DOMAIN = 0,
+
+ /* FCC3 dfs domain */
+ WMI_FCC_DFS_DOMAIN = 1,
+
+ /* ETSI dfs domain */
+ WMI_ETSI_DFS_DOMAIN = 2,
+
+ /* Japan dfs domain */
+ WMI_MKK4_DFS_DOMAIN = 3,
+};
+
+struct wmi_pdev_set_regdomain_cmd_10x {
+ __le32 reg_domain;
+ __le32 reg_domain_2G;
+ __le32 reg_domain_5G;
+ __le32 conformance_test_limit_2G;
+ __le32 conformance_test_limit_5G;
+
+ /* dfs domain from wmi_dfs_region */
+ __le32 dfs_domain;
+} __packed;
+
+/* Command to set/unset chip in quiet mode */
+struct wmi_pdev_set_quiet_cmd {
+ /* period in TUs */
+ __le32 period;
+
+ /* duration in TUs */
+ __le32 duration;
+
+ /* offset in TUs */
+ __le32 next_start;
+
+ /* enable/disable */
+ __le32 enabled;
+} __packed;
+
+/*
+ * 802.11g protection mode.
+ */
+enum ath10k_protmode {
+ ATH10K_PROT_NONE = 0, /* no protection */
+ ATH10K_PROT_CTSONLY = 1, /* CTS to self */
+ ATH10K_PROT_RTSCTS = 2, /* RTS-CTS */
+};
+
+enum wmi_rtscts_profile {
+ WMI_RTSCTS_FOR_NO_RATESERIES = 0,
+ WMI_RTSCTS_FOR_SECOND_RATESERIES,
+ WMI_RTSCTS_ACROSS_SW_RETRIES
+};
+
+#define WMI_RTSCTS_ENABLED 1
+#define WMI_RTSCTS_SET_MASK 0x0f
+#define WMI_RTSCTS_SET_LSB 0
+
+#define WMI_RTSCTS_PROFILE_MASK 0xf0
+#define WMI_RTSCTS_PROFILE_LSB 4
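+
+/* Packing sketch (illustrative): the RTS/CTS vdev parameter carries the
+ * enable value in the low nibble and a wmi_rtscts_profile value in the
+ * high nibble:
+ *
+ *	u32 val = WMI_RTSCTS_ENABLED << WMI_RTSCTS_SET_LSB;
+ *
+ *	val |= WMI_RTSCTS_ACROSS_SW_RETRIES << WMI_RTSCTS_PROFILE_LSB;
+ */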
+
+enum wmi_beacon_gen_mode {
+ WMI_BEACON_STAGGERED_MODE = 0,
+ WMI_BEACON_BURST_MODE = 1
+};
+
+enum wmi_csa_event_ies_present_flag {
+ WMI_CSA_IE_PRESENT = 0x00000001,
+ WMI_XCSA_IE_PRESENT = 0x00000002,
+ WMI_WBW_IE_PRESENT = 0x00000004,
+ WMI_CSWARP_IE_PRESENT = 0x00000008,
+};
+
+/* wmi CSA receive event from beacon frame */
+struct wmi_csa_event {
+ __le32 i_fc_dur;
+ /* Bit 0-15: FC */
+ /* Bit 16-31: DUR */
+ struct wmi_mac_addr i_addr1;
+ struct wmi_mac_addr i_addr2;
+ __le32 csa_ie[2];
+ __le32 xcsa_ie[2];
+ __le32 wb_ie[2];
+ __le32 cswarp_ie;
+ __le32 ies_present_flag; /* wmi_csa_event_ies_present_flag */
+} __packed;
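+
+/* Validity-check sketch (illustrative): each ie field above is only
+ * meaningful when the matching wmi_csa_event_ies_present_flag bit is set:
+ *
+ *	u32 present = __le32_to_cpu(ev->ies_present_flag);
+ *
+ *	if (present & WMI_CSA_IE_PRESENT)
+ *		... parse ev->csa_ie ...
+ */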
+
+/* the definition of different PDEV parameters */
+#define PDEV_DEFAULT_STATS_UPDATE_PERIOD 500
+#define VDEV_DEFAULT_STATS_UPDATE_PERIOD 500
+#define PEER_DEFAULT_STATS_UPDATE_PERIOD 500
+
+struct wmi_pdev_param_map {
+ u32 tx_chain_mask;
+ u32 rx_chain_mask;
+ u32 txpower_limit2g;
+ u32 txpower_limit5g;
+ u32 txpower_scale;
+ u32 beacon_gen_mode;
+ u32 beacon_tx_mode;
+ u32 resmgr_offchan_mode;
+ u32 protection_mode;
+ u32 dynamic_bw;
+ u32 non_agg_sw_retry_th;
+ u32 agg_sw_retry_th;
+ u32 sta_kickout_th;
+ u32 ac_aggrsize_scaling;
+ u32 ltr_enable;
+ u32 ltr_ac_latency_be;
+ u32 ltr_ac_latency_bk;
+ u32 ltr_ac_latency_vi;
+ u32 ltr_ac_latency_vo;
+ u32 ltr_ac_latency_timeout;
+ u32 ltr_sleep_override;
+ u32 ltr_rx_override;
+ u32 ltr_tx_activity_timeout;
+ u32 l1ss_enable;
+ u32 dsleep_enable;
+ u32 pcielp_txbuf_flush;
+ u32 pcielp_txbuf_watermark;
+ u32 pcielp_txbuf_tmo_en;
+ u32 pcielp_txbuf_tmo_value;
+ u32 pdev_stats_update_period;
+ u32 vdev_stats_update_period;
+ u32 peer_stats_update_period;
+ u32 bcnflt_stats_update_period;
+ u32 pmf_qos;
+ u32 arp_ac_override;
+ u32 dcs;
+ u32 ani_enable;
+ u32 ani_poll_period;
+ u32 ani_listen_period;
+ u32 ani_ofdm_level;
+ u32 ani_cck_level;
+ u32 dyntxchain;
+ u32 proxy_sta;
+ u32 idle_ps_config;
+ u32 power_gating_sleep;
+ u32 fast_channel_reset;
+ u32 burst_dur;
+ u32 burst_enable;
+ u32 cal_period;
+ u32 aggr_burst;
+ u32 rx_decap_mode;
+ u32 smart_antenna_default_antenna;
+ u32 igmpmld_override;
+ u32 igmpmld_tid;
+ u32 antenna_gain;
+ u32 rx_filter;
+ u32 set_mcast_to_ucast_tid;
+ u32 proxy_sta_mode;
+ u32 set_mcast2ucast_mode;
+ u32 set_mcast2ucast_buffer;
+ u32 remove_mcast2ucast_buffer;
+ u32 peer_sta_ps_statechg_enable;
+ u32 igmpmld_ac_override;
+ u32 block_interbss;
+ u32 set_disable_reset_cmdid;
+ u32 set_msdu_ttl_cmdid;
+ u32 set_ppdu_duration_cmdid;
+ u32 txbf_sound_period_cmdid;
+ u32 set_promisc_mode_cmdid;
+ u32 set_burst_mode_cmdid;
+ u32 en_stats;
+ u32 mu_group_policy;
+ u32 noise_detection;
+ u32 noise_threshold;
+ u32 dpd_enable;
+ u32 set_mcast_bcast_echo;
+ u32 atf_strict_sch;
+ u32 atf_sched_duration;
+ u32 ant_plzn;
+ u32 mgmt_retry_limit;
+ u32 sensitivity_level;
+ u32 signed_txpower_2g;
+ u32 signed_txpower_5g;
+ u32 enable_per_tid_amsdu;
+ u32 enable_per_tid_ampdu;
+ u32 cca_threshold;
+ u32 rts_fixed_rate;
+ u32 pdev_reset;
+ u32 wapi_mbssid_offset;
+ u32 arp_srcaddr;
+ u32 arp_dstaddr;
+ u32 enable_btcoex;
+ u32 rfkill_config;
+ u32 rfkill_enable;
+ u32 peer_stats_info_enable;
+};
+
+#define WMI_PDEV_PARAM_UNSUPPORTED 0
+
+enum wmi_pdev_param {
+ /* TX chain mask */
+ WMI_PDEV_PARAM_TX_CHAIN_MASK = 0x1,
+ /* RX chain mask */
+ WMI_PDEV_PARAM_RX_CHAIN_MASK,
+ /* TX power limit for 2G Radio */
+ WMI_PDEV_PARAM_TXPOWER_LIMIT2G,
+ /* TX power limit for 5G Radio */
+ WMI_PDEV_PARAM_TXPOWER_LIMIT5G,
+ /* TX power scale */
+ WMI_PDEV_PARAM_TXPOWER_SCALE,
+ /* Beacon generation mode. 0: host, 1: target */
+ WMI_PDEV_PARAM_BEACON_GEN_MODE,
+ /* Beacon transmission mode. 0: staggered, 1: bursted */
+ WMI_PDEV_PARAM_BEACON_TX_MODE,
+ /*
+ * Resource manager off chan mode.
+ * 0: turn off offchan mode. 1: turn on offchan mode
+ */
+ WMI_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
+ /*
+ * Protection mode:
+ * 0: no protection 1:use CTS-to-self 2: use RTS/CTS
+ */
+ WMI_PDEV_PARAM_PROTECTION_MODE,
+ /*
+ * Dynamic bandwidth - 0: disable, 1: enable
+ *
+ * When enabled HW rate control tries different bandwidths when
+ * retransmitting frames.
+ */
+ WMI_PDEV_PARAM_DYNAMIC_BW,
+ /* Non-aggregate / 11g sw retry threshold. 0 - disable */
+ WMI_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
+ /* aggregate sw retry threshold. 0 - disable */
+ WMI_PDEV_PARAM_AGG_SW_RETRY_TH,
+ /* Station kickout threshold (number of consecutive failures). 0 - disable */
+ WMI_PDEV_PARAM_STA_KICKOUT_TH,
+ /* Aggregate size scaling configuration per AC */
+ WMI_PDEV_PARAM_AC_AGGRSIZE_SCALING,
+ /* LTR enable */
+ WMI_PDEV_PARAM_LTR_ENABLE,
+ /* LTR latency for BE, in us */
+ WMI_PDEV_PARAM_LTR_AC_LATENCY_BE,
+ /* LTR latency for BK, in us */
+ WMI_PDEV_PARAM_LTR_AC_LATENCY_BK,
+ /* LTR latency for VI, in us */
+ WMI_PDEV_PARAM_LTR_AC_LATENCY_VI,
+ /* LTR latency for VO, in us */
+ WMI_PDEV_PARAM_LTR_AC_LATENCY_VO,
+ /* LTR AC latency timeout, in ms */
+ WMI_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
+ /* LTR platform latency override, in us */
+ WMI_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
+ /* LTR-RX override, in us */
+ WMI_PDEV_PARAM_LTR_RX_OVERRIDE,
+ /* Tx activity timeout for LTR, in us */
+ WMI_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
+ /* L1SS state machine enable */
+ WMI_PDEV_PARAM_L1SS_ENABLE,
+ /* Deep sleep state machine enable */
+ WMI_PDEV_PARAM_DSLEEP_ENABLE,
+ /* RX buffering flush enable */
+ WMI_PDEV_PARAM_PCIELP_TXBUF_FLUSH,
+ /* RX buffering watermark */
+ WMI_PDEV_PARAM_PCIELP_TXBUF_WATERMARK,
+ /* RX buffering timeout enable */
+ WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
+ /* RX buffering timeout value */
+ WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE,
+ /* pdev level stats update period in ms */
+ WMI_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
+ /* vdev level stats update period in ms */
+ WMI_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
+ /* peer level stats update period in ms */
+ WMI_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
+ /* beacon filter status update period */
+ WMI_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
+ /* QOS Mgmt frame protection MFP/PMF 0: disable, 1: enable */
+ WMI_PDEV_PARAM_PMF_QOS,
+ /* Access category on which ARP frames are sent */
+ WMI_PDEV_PARAM_ARP_AC_OVERRIDE,
+ /* DCS configuration */
+ WMI_PDEV_PARAM_DCS,
+ /* Enable/Disable ANI on target */
+ WMI_PDEV_PARAM_ANI_ENABLE,
+ /* configure the ANI polling period */
+ WMI_PDEV_PARAM_ANI_POLL_PERIOD,
+ /* configure the ANI listening period */
+ WMI_PDEV_PARAM_ANI_LISTEN_PERIOD,
+ /* configure OFDM immunity level */
+ WMI_PDEV_PARAM_ANI_OFDM_LEVEL,
+ /* configure CCK immunity level */
+ WMI_PDEV_PARAM_ANI_CCK_LEVEL,
+ /* Enable/Disable CDD for 1x1 STAs in rate control module */
+ WMI_PDEV_PARAM_DYNTXCHAIN,
+ /* Enable/Disable proxy STA */
+ WMI_PDEV_PARAM_PROXY_STA,
+ /* Enable/Disable low power state when all VDEVs are inactive/idle. */
+ WMI_PDEV_PARAM_IDLE_PS_CONFIG,
+ /* Enable/Disable power gating sleep */
+ WMI_PDEV_PARAM_POWER_GATING_SLEEP,
+};
+
+enum wmi_10x_pdev_param {
+ /* TX chain mask */
+ WMI_10X_PDEV_PARAM_TX_CHAIN_MASK = 0x1,
+ /* RX chain mask */
+ WMI_10X_PDEV_PARAM_RX_CHAIN_MASK,
+ /* TX power limit for 2G Radio */
+ WMI_10X_PDEV_PARAM_TXPOWER_LIMIT2G,
+ /* TX power limit for 5G Radio */
+ WMI_10X_PDEV_PARAM_TXPOWER_LIMIT5G,
+ /* TX power scale */
+ WMI_10X_PDEV_PARAM_TXPOWER_SCALE,
+ /* Beacon generation mode. 0: host, 1: target */
+ WMI_10X_PDEV_PARAM_BEACON_GEN_MODE,
+ /* Beacon transmission mode. 0: staggered, 1: bursted */
+ WMI_10X_PDEV_PARAM_BEACON_TX_MODE,
+ /*
+ * Resource manager off chan mode.
+ * 0: turn off offchan mode. 1: turn on offchan mode
+ */
+ WMI_10X_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
+ /*
+ * Protection mode:
+ * 0: no protection 1:use CTS-to-self 2: use RTS/CTS
+ */
+ WMI_10X_PDEV_PARAM_PROTECTION_MODE,
+ /* Dynamic bandwidth 0: disable 1: enable */
+ WMI_10X_PDEV_PARAM_DYNAMIC_BW,
+ /* Non-aggregate / 11g sw retry threshold. 0 - disable */
+ WMI_10X_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
+ /* aggregate sw retry threshold. 0 - disable */
+ WMI_10X_PDEV_PARAM_AGG_SW_RETRY_TH,
+ /* Station kickout threshold (number of consecutive failures). 0 - disable */
+ WMI_10X_PDEV_PARAM_STA_KICKOUT_TH,
+ /* Aggregate size scaling configuration per AC */
+ WMI_10X_PDEV_PARAM_AC_AGGRSIZE_SCALING,
+ /* LTR enable */
+ WMI_10X_PDEV_PARAM_LTR_ENABLE,
+ /* LTR latency for BE, in us */
+ WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BE,
+ /* LTR latency for BK, in us */
+ WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BK,
+ /* LTR latency for VI, in us */
+ WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VI,
+ /* LTR latency for VO, in us */
+ WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VO,
+ /* LTR AC latency timeout, in ms */
+ WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
+ /* LTR platform latency override, in us */
+ WMI_10X_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
+ /* LTR-RX override, in us */
+ WMI_10X_PDEV_PARAM_LTR_RX_OVERRIDE,
+ /* Tx activity timeout for LTR, in us */
+ WMI_10X_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
+ /* L1SS state machine enable */
+ WMI_10X_PDEV_PARAM_L1SS_ENABLE,
+ /* Deep sleep state machine enable */
+ WMI_10X_PDEV_PARAM_DSLEEP_ENABLE,
+ /* pdev level stats update period in ms */
+ WMI_10X_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
+ /* vdev level stats update period in ms */
+ WMI_10X_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
+ /* peer level stats update period in ms */
+ WMI_10X_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
+ /* beacon filter status update period */
+ WMI_10X_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
+ /* QOS Mgmt frame protection MFP/PMF 0: disable, 1: enable */
+ WMI_10X_PDEV_PARAM_PMF_QOS,
+ /* Access category on which ARP and DHCP frames are sent */
+ WMI_10X_PDEV_PARAM_ARPDHCP_AC_OVERRIDE,
+ /* DCS configuration */
+ WMI_10X_PDEV_PARAM_DCS,
+ /* Enable/Disable ANI on target */
+ WMI_10X_PDEV_PARAM_ANI_ENABLE,
+ /* configure the ANI polling period */
+ WMI_10X_PDEV_PARAM_ANI_POLL_PERIOD,
+ /* configure the ANI listening period */
+ WMI_10X_PDEV_PARAM_ANI_LISTEN_PERIOD,
+ /* configure OFDM immunity level */
+ WMI_10X_PDEV_PARAM_ANI_OFDM_LEVEL,
+ /* configure CCK immunity level */
+ WMI_10X_PDEV_PARAM_ANI_CCK_LEVEL,
+ /* Enable/Disable CDD for 1x1 STAs in rate control module */
+ WMI_10X_PDEV_PARAM_DYNTXCHAIN,
+ /* Enable/Disable Fast channel reset */
+ WMI_10X_PDEV_PARAM_FAST_CHANNEL_RESET,
+ /* Set Bursting DUR */
+ WMI_10X_PDEV_PARAM_BURST_DUR,
+ /* Set Bursting Enable */
+ WMI_10X_PDEV_PARAM_BURST_ENABLE,
+
+ /* following are available as of firmware 10.2 */
+ WMI_10X_PDEV_PARAM_SMART_ANTENNA_DEFAULT_ANTENNA,
+ WMI_10X_PDEV_PARAM_IGMPMLD_OVERRIDE,
+ WMI_10X_PDEV_PARAM_IGMPMLD_TID,
+ WMI_10X_PDEV_PARAM_ANTENNA_GAIN,
+ WMI_10X_PDEV_PARAM_RX_DECAP_MODE,
+ WMI_10X_PDEV_PARAM_RX_FILTER,
+ WMI_10X_PDEV_PARAM_SET_MCAST_TO_UCAST_TID,
+ WMI_10X_PDEV_PARAM_PROXY_STA_MODE,
+ WMI_10X_PDEV_PARAM_SET_MCAST2UCAST_MODE,
+ WMI_10X_PDEV_PARAM_SET_MCAST2UCAST_BUFFER,
+ WMI_10X_PDEV_PARAM_REMOVE_MCAST2UCAST_BUFFER,
+ WMI_10X_PDEV_PARAM_PEER_STA_PS_STATECHG_ENABLE,
+ WMI_10X_PDEV_PARAM_RTS_FIXED_RATE,
+ WMI_10X_PDEV_PARAM_CAL_PERIOD,
+ WMI_10X_PDEV_PARAM_ATF_STRICT_SCH,
+ WMI_10X_PDEV_PARAM_ATF_SCHED_DURATION,
+ WMI_10X_PDEV_PARAM_SET_PROMISC_MODE_CMDID,
+ WMI_10X_PDEV_PARAM_PDEV_RESET
+};
+
+enum wmi_10_4_pdev_param {
+ WMI_10_4_PDEV_PARAM_TX_CHAIN_MASK = 0x1,
+ WMI_10_4_PDEV_PARAM_RX_CHAIN_MASK,
+ WMI_10_4_PDEV_PARAM_TXPOWER_LIMIT2G,
+ WMI_10_4_PDEV_PARAM_TXPOWER_LIMIT5G,
+ WMI_10_4_PDEV_PARAM_TXPOWER_SCALE,
+ WMI_10_4_PDEV_PARAM_BEACON_GEN_MODE,
+ WMI_10_4_PDEV_PARAM_BEACON_TX_MODE,
+ WMI_10_4_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
+ WMI_10_4_PDEV_PARAM_PROTECTION_MODE,
+ WMI_10_4_PDEV_PARAM_DYNAMIC_BW,
+ WMI_10_4_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
+ WMI_10_4_PDEV_PARAM_AGG_SW_RETRY_TH,
+ WMI_10_4_PDEV_PARAM_STA_KICKOUT_TH,
+ WMI_10_4_PDEV_PARAM_AC_AGGRSIZE_SCALING,
+ WMI_10_4_PDEV_PARAM_LTR_ENABLE,
+ WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_BE,
+ WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_BK,
+ WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_VI,
+ WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_VO,
+ WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
+ WMI_10_4_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
+ WMI_10_4_PDEV_PARAM_LTR_RX_OVERRIDE,
+ WMI_10_4_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
+ WMI_10_4_PDEV_PARAM_L1SS_ENABLE,
+ WMI_10_4_PDEV_PARAM_DSLEEP_ENABLE,
+ WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_FLUSH,
+ WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_WATERMARK,
+ WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
+ WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE,
+ WMI_10_4_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
+ WMI_10_4_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
+ WMI_10_4_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
+ WMI_10_4_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
+ WMI_10_4_PDEV_PARAM_PMF_QOS,
+ WMI_10_4_PDEV_PARAM_ARP_AC_OVERRIDE,
+ WMI_10_4_PDEV_PARAM_DCS,
+ WMI_10_4_PDEV_PARAM_ANI_ENABLE,
+ WMI_10_4_PDEV_PARAM_ANI_POLL_PERIOD,
+ WMI_10_4_PDEV_PARAM_ANI_LISTEN_PERIOD,
+ WMI_10_4_PDEV_PARAM_ANI_OFDM_LEVEL,
+ WMI_10_4_PDEV_PARAM_ANI_CCK_LEVEL,
+ WMI_10_4_PDEV_PARAM_DYNTXCHAIN,
+ WMI_10_4_PDEV_PARAM_PROXY_STA,
+ WMI_10_4_PDEV_PARAM_IDLE_PS_CONFIG,
+ WMI_10_4_PDEV_PARAM_POWER_GATING_SLEEP,
+ WMI_10_4_PDEV_PARAM_AGGR_BURST,
+ WMI_10_4_PDEV_PARAM_RX_DECAP_MODE,
+ WMI_10_4_PDEV_PARAM_FAST_CHANNEL_RESET,
+ WMI_10_4_PDEV_PARAM_BURST_DUR,
+ WMI_10_4_PDEV_PARAM_BURST_ENABLE,
+ WMI_10_4_PDEV_PARAM_SMART_ANTENNA_DEFAULT_ANTENNA,
+ WMI_10_4_PDEV_PARAM_IGMPMLD_OVERRIDE,
+ WMI_10_4_PDEV_PARAM_IGMPMLD_TID,
+ WMI_10_4_PDEV_PARAM_ANTENNA_GAIN,
+ WMI_10_4_PDEV_PARAM_RX_FILTER,
+ WMI_10_4_PDEV_SET_MCAST_TO_UCAST_TID,
+ WMI_10_4_PDEV_PARAM_PROXY_STA_MODE,
+ WMI_10_4_PDEV_PARAM_SET_MCAST2UCAST_MODE,
+ WMI_10_4_PDEV_PARAM_SET_MCAST2UCAST_BUFFER,
+ WMI_10_4_PDEV_PARAM_REMOVE_MCAST2UCAST_BUFFER,
+ WMI_10_4_PDEV_PEER_STA_PS_STATECHG_ENABLE,
+ WMI_10_4_PDEV_PARAM_IGMPMLD_AC_OVERRIDE,
+ WMI_10_4_PDEV_PARAM_BLOCK_INTERBSS,
+ WMI_10_4_PDEV_PARAM_SET_DISABLE_RESET_CMDID,
+ WMI_10_4_PDEV_PARAM_SET_MSDU_TTL_CMDID,
+ WMI_10_4_PDEV_PARAM_SET_PPDU_DURATION_CMDID,
+ WMI_10_4_PDEV_PARAM_TXBF_SOUND_PERIOD_CMDID,
+ WMI_10_4_PDEV_PARAM_SET_PROMISC_MODE_CMDID,
+ WMI_10_4_PDEV_PARAM_SET_BURST_MODE_CMDID,
+ WMI_10_4_PDEV_PARAM_EN_STATS,
+ WMI_10_4_PDEV_PARAM_MU_GROUP_POLICY,
+ WMI_10_4_PDEV_PARAM_NOISE_DETECTION,
+ WMI_10_4_PDEV_PARAM_NOISE_THRESHOLD,
+ WMI_10_4_PDEV_PARAM_DPD_ENABLE,
+ WMI_10_4_PDEV_PARAM_SET_MCAST_BCAST_ECHO,
+ WMI_10_4_PDEV_PARAM_ATF_STRICT_SCH,
+ WMI_10_4_PDEV_PARAM_ATF_SCHED_DURATION,
+ WMI_10_4_PDEV_PARAM_ANT_PLZN,
+ WMI_10_4_PDEV_PARAM_MGMT_RETRY_LIMIT,
+ WMI_10_4_PDEV_PARAM_SENSITIVITY_LEVEL,
+ WMI_10_4_PDEV_PARAM_SIGNED_TXPOWER_2G,
+ WMI_10_4_PDEV_PARAM_SIGNED_TXPOWER_5G,
+ WMI_10_4_PDEV_PARAM_ENABLE_PER_TID_AMSDU,
+ WMI_10_4_PDEV_PARAM_ENABLE_PER_TID_AMPDU,
+ WMI_10_4_PDEV_PARAM_CCA_THRESHOLD,
+ WMI_10_4_PDEV_PARAM_RTS_FIXED_RATE,
+ WMI_10_4_PDEV_PARAM_CAL_PERIOD,
+ WMI_10_4_PDEV_PARAM_PDEV_RESET,
+ WMI_10_4_PDEV_PARAM_WAPI_MBSSID_OFFSET,
+ WMI_10_4_PDEV_PARAM_ARP_SRCADDR,
+ WMI_10_4_PDEV_PARAM_ARP_DSTADDR,
+ WMI_10_4_PDEV_PARAM_TXPOWER_DECR_DB,
+ WMI_10_4_PDEV_PARAM_RX_BATCHMODE,
+ WMI_10_4_PDEV_PARAM_PACKET_AGGR_DELAY,
+ WMI_10_4_PDEV_PARAM_ATF_OBSS_NOISE_SCH,
+ WMI_10_4_PDEV_PARAM_ATF_OBSS_NOISE_SCALING_FACTOR,
+ WMI_10_4_PDEV_PARAM_CUST_TXPOWER_SCALE,
+ WMI_10_4_PDEV_PARAM_ATF_DYNAMIC_ENABLE,
+ WMI_10_4_PDEV_PARAM_ATF_SSID_GROUP_POLICY,
+ WMI_10_4_PDEV_PARAM_ENABLE_BTCOEX,
+};
+
+struct wmi_pdev_set_param_cmd {
+ __le32 param_id;
+ __le32 param_value;
+} __packed;
+
+struct wmi_pdev_set_base_macaddr_cmd {
+ struct wmi_mac_addr mac_addr;
+} __packed;
+
+/* valid period is 1 - 60000 ms, unit in milliseconds */
+#define WMI_PDEV_PARAM_CAL_PERIOD_MAX 60000
+
+struct wmi_pdev_get_tpc_config_cmd {
+ /* parameter */
+ __le32 param;
+} __packed;
+
+#define WMI_TPC_CONFIG_PARAM 1
+#define WMI_TPC_FINAL_RATE_MAX 240
+#define WMI_TPC_TX_N_CHAIN 4
+#define WMI_TPC_RATE_MAX (WMI_TPC_TX_N_CHAIN * 65)
+#define WMI_TPC_PREAM_TABLE_MAX 10
+#define WMI_TPC_FLAG 3
+#define WMI_TPC_BUF_SIZE 10
+#define WMI_TPC_BEAMFORMING 2
+
+enum wmi_tpc_table_type {
+ WMI_TPC_TABLE_TYPE_CDD = 0,
+ WMI_TPC_TABLE_TYPE_STBC = 1,
+ WMI_TPC_TABLE_TYPE_TXBF = 2,
+};
+
+enum wmi_tpc_config_event_flag {
+ WMI_TPC_CONFIG_EVENT_FLAG_TABLE_CDD = 0x1,
+ WMI_TPC_CONFIG_EVENT_FLAG_TABLE_STBC = 0x2,
+ WMI_TPC_CONFIG_EVENT_FLAG_TABLE_TXBF = 0x4,
+};
+
+struct wmi_pdev_tpc_config_event {
+ __le32 reg_domain;
+ __le32 chan_freq;
+ __le32 phy_mode;
+ __le32 twice_antenna_reduction;
+ __le32 twice_max_rd_power;
+ a_sle32 twice_antenna_gain;
+ __le32 power_limit;
+ __le32 rate_max;
+ __le32 num_tx_chain;
+ __le32 ctl;
+ __le32 flags;
+ s8 max_reg_allow_pow[WMI_TPC_TX_N_CHAIN];
+ s8 max_reg_allow_pow_agcdd[WMI_TPC_TX_N_CHAIN][WMI_TPC_TX_N_CHAIN];
+ s8 max_reg_allow_pow_agstbc[WMI_TPC_TX_N_CHAIN][WMI_TPC_TX_N_CHAIN];
+ s8 max_reg_allow_pow_agtxbf[WMI_TPC_TX_N_CHAIN][WMI_TPC_TX_N_CHAIN];
+ u8 rates_array[WMI_TPC_RATE_MAX];
+} __packed;
+
+/* Transmit power scale factor. */
+enum wmi_tp_scale {
+ WMI_TP_SCALE_MAX = 0, /* no scaling (default) */
+ WMI_TP_SCALE_50 = 1, /* 50% of max (-3 dBm) */
+ WMI_TP_SCALE_25 = 2, /* 25% of max (-6 dBm) */
+ WMI_TP_SCALE_12 = 3, /* 12% of max (-9 dBm) */
+ WMI_TP_SCALE_MIN = 4, /* min, but still on */
+ WMI_TP_SCALE_SIZE = 5, /* max num of enum */
+};
+
+struct wmi_pdev_tpc_final_table_event {
+ __le32 reg_domain;
+ __le32 chan_freq;
+ __le32 phy_mode;
+ __le32 twice_antenna_reduction;
+ __le32 twice_max_rd_power;
+ a_sle32 twice_antenna_gain;
+ __le32 power_limit;
+ __le32 rate_max;
+ __le32 num_tx_chain;
+ __le32 ctl;
+ __le32 flags;
+ s8 max_reg_allow_pow[WMI_TPC_TX_N_CHAIN];
+ s8 max_reg_allow_pow_agcdd[WMI_TPC_TX_N_CHAIN][WMI_TPC_TX_N_CHAIN];
+ s8 max_reg_allow_pow_agstbc[WMI_TPC_TX_N_CHAIN][WMI_TPC_TX_N_CHAIN];
+ s8 max_reg_allow_pow_agtxbf[WMI_TPC_TX_N_CHAIN][WMI_TPC_TX_N_CHAIN];
+ u8 rates_array[WMI_TPC_FINAL_RATE_MAX];
+ u8 ctl_power_table[WMI_TPC_BEAMFORMING][WMI_TPC_TX_N_CHAIN]
+ [WMI_TPC_TX_N_CHAIN];
+} __packed;
+
+struct wmi_pdev_get_tpc_table_cmd {
+ __le32 param;
+} __packed;
+
+enum wmi_tpc_pream_2ghz {
+ WMI_TPC_PREAM_2GHZ_CCK = 0,
+ WMI_TPC_PREAM_2GHZ_OFDM,
+ WMI_TPC_PREAM_2GHZ_HT20,
+ WMI_TPC_PREAM_2GHZ_HT40,
+ WMI_TPC_PREAM_2GHZ_VHT20,
+ WMI_TPC_PREAM_2GHZ_VHT40,
+ WMI_TPC_PREAM_2GHZ_VHT80,
+};
+
+enum wmi_tpc_pream_5ghz {
+ WMI_TPC_PREAM_5GHZ_OFDM = 1,
+ WMI_TPC_PREAM_5GHZ_HT20,
+ WMI_TPC_PREAM_5GHZ_HT40,
+ WMI_TPC_PREAM_5GHZ_VHT20,
+ WMI_TPC_PREAM_5GHZ_VHT40,
+ WMI_TPC_PREAM_5GHZ_VHT80,
+ WMI_TPC_PREAM_5GHZ_HTCUP,
+};
+
+#define WMI_PEER_PS_STATE_DISABLED 2
+
+struct wmi_peer_sta_ps_state_chg_event {
+ struct wmi_mac_addr peer_macaddr;
+ __le32 peer_ps_state;
+} __packed;
+
+#define WMI_MAX_DEBUG_MESG (sizeof(u32) * 32)
+
+struct wmi_debug_mesg_event {
+ /* message buffer, NULL terminated */
+ char bufp[WMI_MAX_DEBUG_MESG];
+} __packed;
+
+enum {
+ /* P2P device */
+ VDEV_SUBTYPE_P2PDEV = 0,
+ /* P2P client */
+ VDEV_SUBTYPE_P2PCLI,
+ /* P2P GO */
+ VDEV_SUBTYPE_P2PGO,
+ /* BT3.0 HS */
+ VDEV_SUBTYPE_BT,
+};
+
+struct wmi_pdev_set_channel_cmd {
+ /* ignore power, only use flags, mode and freq */
+ struct wmi_channel chan;
+} __packed;
+
+struct wmi_pdev_pktlog_enable_cmd {
+ __le32 ev_bitmap;
+} __packed;
+
+/* Customize the DSCP (bit) to TID (0-7) mapping for QOS */
+#define WMI_DSCP_MAP_MAX (64)
+struct wmi_pdev_set_dscp_tid_map_cmd {
+ /* map indicating DSCP to TID conversion */
+ __le32 dscp_to_tid_map[WMI_DSCP_MAP_MAX];
+} __packed;
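+
+/* Fill-in sketch (illustrative, assuming the common "upper three DSCP
+ * bits select the TID" policy; the command itself imposes no particular
+ * mapping):
+ *
+ *	for (i = 0; i < WMI_DSCP_MAP_MAX; i++)
+ *		cmd->dscp_to_tid_map[i] = __cpu_to_le32(i >> 3);
+ */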
+
+enum mcast_bcast_rate_id {
+ WMI_SET_MCAST_RATE,
+ WMI_SET_BCAST_RATE
+};
+
+struct mcast_bcast_rate {
+ enum mcast_bcast_rate_id rate_id;
+ __le32 rate;
+} __packed;
+
+struct wmi_wmm_params {
+ __le32 cwmin;
+ __le32 cwmax;
+ __le32 aifs;
+ __le32 txop;
+ __le32 acm;
+ __le32 no_ack;
+} __packed;
+
+struct wmi_pdev_set_wmm_params {
+ struct wmi_wmm_params ac_be;
+ struct wmi_wmm_params ac_bk;
+ struct wmi_wmm_params ac_vi;
+ struct wmi_wmm_params ac_vo;
+} __packed;
+
+struct wmi_wmm_params_arg {
+ u32 cwmin;
+ u32 cwmax;
+ u32 aifs;
+ u32 txop;
+ u32 acm;
+ u32 no_ack;
+};
+
+struct wmi_wmm_params_all_arg {
+ struct wmi_wmm_params_arg ac_be;
+ struct wmi_wmm_params_arg ac_bk;
+ struct wmi_wmm_params_arg ac_vi;
+ struct wmi_wmm_params_arg ac_vo;
+};
+
+struct wmi_pdev_stats_tx {
+ /* Num HTT cookies queued to dispatch list */
+ __le32 comp_queued;
+
+ /* Num HTT cookies dispatched */
+ __le32 comp_delivered;
+
+ /* Num MSDU queued to WAL */
+ __le32 msdu_enqued;
+
+ /* Num MPDUs queued to WAL */
+ __le32 mpdu_enqued;
+
+ /* Num MSDUs dropped by WMM limit */
+ __le32 wmm_drop;
+
+ /* Num Local frames queued */
+ __le32 local_enqued;
+
+ /* Num Local frames done */
+ __le32 local_freed;
+
+ /* Num queued to HW */
+ __le32 hw_queued;
+
+ /* Num PPDU reaped from HW */
+ __le32 hw_reaped;
+
+ /* Num underruns */
+ __le32 underrun;
+
+ /* Num PPDUs cleaned up in TX abort */
+ __le32 tx_abort;
+
+ /* Num MPDUs requeued by SW */
+ __le32 mpdus_requeued;
+
+ /* excessive retries */
+ __le32 tx_ko;
+
+ /* data hw rate code */
+ __le32 data_rc;
+
+ /* Scheduler self triggers */
+ __le32 self_triggers;
+
+ /* frames dropped due to excessive sw retries */
+ __le32 sw_retry_failure;
+
+ /* illegal rate phy errors */
+ __le32 illgl_rate_phy_err;
+
+ /* wal pdev continuous xretry */
+ __le32 pdev_cont_xretry;
+
+ /* wal pdev continuous xretry */
+ __le32 pdev_tx_timeout;
+
+ /* wal pdev resets */
+ __le32 pdev_resets;
+
+ /* frames dropped due to non-availability of stateless TIDs */
+ __le32 stateless_tid_alloc_failure;
+
+ __le32 phy_underrun;
+
+ /* MPDU is more than txop limit */
+ __le32 txop_ovf;
+} __packed;
+
+struct wmi_10_4_pdev_stats_tx {
+ /* Num HTT cookies queued to dispatch list */
+ __le32 comp_queued;
+
+ /* Num HTT cookies dispatched */
+ __le32 comp_delivered;
+
+ /* Num MSDU queued to WAL */
+ __le32 msdu_enqued;
+
+ /* Num MPDUs queued to WAL */
+ __le32 mpdu_enqued;
+
+ /* Num MSDUs dropped by WMM limit */
+ __le32 wmm_drop;
+
+ /* Num Local frames queued */
+ __le32 local_enqued;
+
+ /* Num Local frames done */
+ __le32 local_freed;
+
+ /* Num queued to HW */
+ __le32 hw_queued;
+
+ /* Num PPDU reaped from HW */
+ __le32 hw_reaped;
+
+ /* Num underruns */
+ __le32 underrun;
+
+ /* HW Paused. */
+ __le32 hw_paused;
+
+ /* Num PPDUs cleaned up in TX abort */
+ __le32 tx_abort;
+
+ /* Num MPDUs requeued by SW */
+ __le32 mpdus_requeued;
+
+ /* excessive retries */
+ __le32 tx_ko;
+
+ /* data hw rate code */
+ __le32 data_rc;
+
+ /* Scheduler self triggers */
+ __le32 self_triggers;
+
+ /* frames dropped due to excessive sw retries */
+ __le32 sw_retry_failure;
+
+ /* illegal rate phy errors */
+ __le32 illgl_rate_phy_err;
+
+ /* wal pdev continuous xretry */
+ __le32 pdev_cont_xretry;
+
+ /* wal pdev tx timeouts */
+ __le32 pdev_tx_timeout;
+
+ /* wal pdev resets */
+ __le32 pdev_resets;
+
+ /* frames dropped due to non-availability of stateless TIDs */
+ __le32 stateless_tid_alloc_failure;
+
+ __le32 phy_underrun;
+
+ /* MPDU is more than txop limit */
+ __le32 txop_ovf;
+
+ /* Number of Sequences posted */
+ __le32 seq_posted;
+
+ /* Number of Sequences failed queueing */
+ __le32 seq_failed_queueing;
+
+ /* Number of Sequences completed */
+ __le32 seq_completed;
+
+ /* Number of Sequences restarted */
+ __le32 seq_restarted;
+
+ /* Number of MU Sequences posted */
+ __le32 mu_seq_posted;
+
+ /* Num MPDUs flushed by SW, HWPAUSED, SW TXABORT (Reset, channel change) */
+ __le32 mpdus_sw_flush;
+
+ /* Num MPDUs filtered by HW, all filter conditions (TTL expired) */
+ __le32 mpdus_hw_filter;
+
+ /* Num MPDUs truncated by PDG
+ * (TXOP, TBTT, PPDU_duration based on rate, dyn_bw)
+ */
+ __le32 mpdus_truncated;
+
+ /* Num MPDUs that were tried but didn't receive ACK or BA */
+ __le32 mpdus_ack_failed;
+
+ /* Num MPDUs that were dropped due to expiry. */
+ __le32 mpdus_expired;
+} __packed;
+
+struct wmi_pdev_stats_rx {
+ /* Counts any change in ring routing mid-ppdu */
+ __le32 mid_ppdu_route_change;
+
+ /* Total number of statuses processed */
+ __le32 status_rcvd;
+
+ /* Extra frags on rings 0-3 */
+ __le32 r0_frags;
+ __le32 r1_frags;
+ __le32 r2_frags;
+ __le32 r3_frags;
+
+ /* MSDUs / MPDUs delivered to HTT */
+ __le32 htt_msdus;
+ __le32 htt_mpdus;
+
+ /* MSDUs / MPDUs delivered to local stack */
+ __le32 loc_msdus;
+ __le32 loc_mpdus;
+
+ /* AMSDUs that have more MSDUs than the status ring size */
+ __le32 oversize_amsdu;
+
+ /* Number of PHY errors */
+ __le32 phy_errs;
+
+ /* Number of PHY errors drops */
+ __le32 phy_err_drop;
+
+ /* Number of mpdu errors - FCS, MIC, ENC etc. */
+ __le32 mpdu_errs;
+} __packed;
+
+struct wmi_pdev_stats_peer {
+ /* REMOVE THIS ONCE REAL PEER STAT COUNTERS ARE ADDED */
+ __le32 dummy;
+} __packed;
+
+enum wmi_stats_id {
+ WMI_STAT_PEER = BIT(0),
+ WMI_STAT_AP = BIT(1),
+ WMI_STAT_PDEV = BIT(2),
+ WMI_STAT_VDEV = BIT(3),
+ WMI_STAT_BCNFLT = BIT(4),
+ WMI_STAT_VDEV_RATE = BIT(5),
+};
+
+enum wmi_10_4_stats_id {
+ WMI_10_4_STAT_PEER = BIT(0),
+ WMI_10_4_STAT_AP = BIT(1),
+ WMI_10_4_STAT_INST = BIT(2),
+ WMI_10_4_STAT_PEER_EXTD = BIT(3),
+ WMI_10_4_STAT_VDEV_EXTD = BIT(4),
+};
+
+enum wmi_tlv_stats_id {
+ WMI_TLV_STAT_PEER = BIT(0),
+ WMI_TLV_STAT_AP = BIT(1),
+ WMI_TLV_STAT_PDEV = BIT(2),
+ WMI_TLV_STAT_VDEV = BIT(3),
+ WMI_TLV_STAT_PEER_EXTD = BIT(10),
+};
+
+struct wlan_inst_rssi_args {
+ __le16 cfg_retry_count;
+ __le16 retry_count;
+};
+
+struct wmi_request_stats_cmd {
+ __le32 stats_id;
+
+ __le32 vdev_id;
+
+ /* peer MAC address */
+ struct wmi_mac_addr peer_macaddr;
+
+ /* Instantaneous RSSI arguments */
+ struct wlan_inst_rssi_args inst_rssi_args;
+} __packed;
+
+enum wmi_peer_stats_info_request_type {
+ /* request stats of one specified peer */
+ WMI_REQUEST_ONE_PEER_STATS_INFO = 0x01,
+ /* request stats of all peers belonging to the specified VDEV */
+ WMI_REQUEST_VDEV_ALL_PEER_STATS_INFO = 0x02,
+};
+
+/* Suspend option */
+enum {
+ /* suspend */
+ WMI_PDEV_SUSPEND,
+
+ /* suspend and disable all interrupts */
+ WMI_PDEV_SUSPEND_AND_DISABLE_INTR,
+};
+
+struct wmi_pdev_suspend_cmd {
+ /* suspend option sent to target */
+ __le32 suspend_opt;
+} __packed;
+
+struct wmi_stats_event {
+ __le32 stats_id; /* WMI_STAT_ */
+ /*
+ * number of pdev stats event structures
+ * (wmi_pdev_stats) 0 or 1
+ */
+ __le32 num_pdev_stats;
+ /*
+ * number of vdev stats event structures
+ * (wmi_vdev_stats) 0 or max vdevs
+ */
+ __le32 num_vdev_stats;
+ /*
+ * number of peer stats event structures
+ * (wmi_peer_stats) 0 or max peers
+ */
+ __le32 num_peer_stats;
+ __le32 num_bcnflt_stats;
+ /*
+ * followed by
+ * num_pdev_stats * sizeof(struct wmi_pdev_stats)
+ * num_vdev_stats * sizeof(struct wmi_vdev_stats)
+ * num_peer_stats * sizeof(struct wmi_peer_stats)
+ *
+ * By having a zero-sized array, the pointer to the data area
+ * becomes available without increasing the struct size
+ */
+ u8 data[];
+} __packed;
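+
+/* Parsing sketch (illustrative): data[] holds the pdev, vdev and peer
+ * records back to back, in the order the counts are declared, so a parser
+ * just advances a cursor:
+ *
+ *	const u8 *p = ev->data;
+ *
+ *	for (i = 0; i < __le32_to_cpu(ev->num_pdev_stats); i++) {
+ *		... handle (const struct wmi_pdev_stats *)p ...
+ *		p += sizeof(struct wmi_pdev_stats);
+ *	}
+ *	... then vdev and peer records likewise ...
+ */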
+
+struct wmi_10_2_stats_event {
+ __le32 stats_id; /* %WMI_REQUEST_ */
+ __le32 num_pdev_stats;
+ __le32 num_pdev_ext_stats;
+ __le32 num_vdev_stats;
+ __le32 num_peer_stats;
+ __le32 num_bcnflt_stats;
+ u8 data[];
+} __packed;
+
+/*
+ * PDEV statistics
+ * TODO: add all PDEV stats here
+ */
+struct wmi_pdev_stats_base {
+ __le32 chan_nf;
+ __le32 tx_frame_count; /* Cycles spent transmitting frames */
+ __le32 rx_frame_count; /* Cycles spent receiving frames */
+ __le32 rx_clear_count; /* Total channel busy time, evidently */
+ __le32 cycle_count; /* Total on-channel time */
+ __le32 phy_err_count;
+ __le32 chan_tx_pwr;
+} __packed;
+
+struct wmi_pdev_stats {
+ struct wmi_pdev_stats_base base;
+ struct wmi_pdev_stats_tx tx;
+ struct wmi_pdev_stats_rx rx;
+ struct wmi_pdev_stats_peer peer;
+} __packed;
+
+struct wmi_pdev_stats_extra {
+ __le32 ack_rx_bad;
+ __le32 rts_bad;
+ __le32 rts_good;
+ __le32 fcs_bad;
+ __le32 no_beacons;
+ __le32 mib_int_count;
+} __packed;
+
+struct wmi_10x_pdev_stats {
+ struct wmi_pdev_stats_base base;
+ struct wmi_pdev_stats_tx tx;
+ struct wmi_pdev_stats_rx rx;
+ struct wmi_pdev_stats_peer peer;
+ struct wmi_pdev_stats_extra extra;
+} __packed;
+
+struct wmi_pdev_stats_mem {
+ __le32 dram_free;
+ __le32 iram_free;
+} __packed;
+
+struct wmi_10_2_pdev_stats {
+ struct wmi_pdev_stats_base base;
+ struct wmi_pdev_stats_tx tx;
+ __le32 mc_drop;
+ struct wmi_pdev_stats_rx rx;
+ __le32 pdev_rx_timeout;
+ struct wmi_pdev_stats_mem mem;
+ struct wmi_pdev_stats_peer peer;
+ struct wmi_pdev_stats_extra extra;
+} __packed;
+
+struct wmi_10_4_pdev_stats {
+ struct wmi_pdev_stats_base base;
+ struct wmi_10_4_pdev_stats_tx tx;
+ struct wmi_pdev_stats_rx rx;
+ __le32 rx_ovfl_errs;
+ struct wmi_pdev_stats_mem mem;
+ __le32 sram_free_size;
+ struct wmi_pdev_stats_extra extra;
+} __packed;
+
+/*
+ * VDEV statistics
+ */
+
+#define WMI_VDEV_STATS_FTM_COUNT_VALID BIT(31)
+#define WMI_VDEV_STATS_FTM_COUNT_LSB 0
+#define WMI_VDEV_STATS_FTM_COUNT_MASK 0x7fffffff
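+
+/* Extraction sketch (illustrative): an FTM counter word is valid only
+ * when the valid bit is set; the count lives in the low 31 bits:
+ *
+ *	if (val & WMI_VDEV_STATS_FTM_COUNT_VALID)
+ *		cnt = (val & WMI_VDEV_STATS_FTM_COUNT_MASK) >>
+ *		      WMI_VDEV_STATS_FTM_COUNT_LSB;
+ */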
+
+struct wmi_vdev_stats {
+ __le32 vdev_id;
+} __packed;
+
+struct wmi_vdev_stats_extd {
+ __le32 vdev_id;
+ __le32 ppdu_aggr_cnt;
+ __le32 ppdu_noack;
+ __le32 mpdu_queued;
+ __le32 ppdu_nonaggr_cnt;
+ __le32 mpdu_sw_requeued;
+ __le32 mpdu_suc_retry;
+ __le32 mpdu_suc_multitry;
+ __le32 mpdu_fail_retry;
+ __le32 tx_ftm_suc;
+ __le32 tx_ftm_suc_retry;
+ __le32 tx_ftm_fail;
+ __le32 rx_ftmr_cnt;
+ __le32 rx_ftmr_dup_cnt;
+ __le32 rx_iftmr_cnt;
+ __le32 rx_iftmr_dup_cnt;
+ __le32 reserved[6];
+} __packed;
+
+/*
+ * peer statistics.
+ * TODO: add more stats
+ */
+struct wmi_peer_stats {
+ struct wmi_mac_addr peer_macaddr;
+ __le32 peer_rssi;
+ __le32 peer_tx_rate;
+} __packed;
+
+struct wmi_10x_peer_stats {
+ struct wmi_peer_stats old;
+ __le32 peer_rx_rate;
+} __packed;
+
+struct wmi_10_2_peer_stats {
+ struct wmi_peer_stats old;
+ __le32 peer_rx_rate;
+ __le32 current_per;
+ __le32 retries;
+ __le32 tx_rate_count;
+ __le32 max_4ms_frame_len;
+ __le32 total_sub_frames;
+ __le32 tx_bytes;
+ __le32 num_pkt_loss_overflow[4];
+ __le32 num_pkt_loss_excess_retry[4];
+} __packed;
+
+struct wmi_10_2_4_peer_stats {
+ struct wmi_10_2_peer_stats common;
+ __le32 peer_rssi_changed;
+} __packed;
+
+struct wmi_10_2_4_ext_peer_stats {
+ struct wmi_10_2_peer_stats common;
+ __le32 peer_rssi_changed;
+ __le32 rx_duration;
+} __packed;
+
+struct wmi_10_4_peer_stats {
+ struct wmi_mac_addr peer_macaddr;
+ __le32 peer_rssi;
+ __le32 peer_rssi_seq_num;
+ __le32 peer_tx_rate;
+ __le32 peer_rx_rate;
+ __le32 current_per;
+ __le32 retries;
+ __le32 tx_rate_count;
+ __le32 max_4ms_frame_len;
+ __le32 total_sub_frames;
+ __le32 tx_bytes;
+ __le32 num_pkt_loss_overflow[4];
+ __le32 num_pkt_loss_excess_retry[4];
+ __le32 peer_rssi_changed;
+} __packed;
+
+struct wmi_10_4_peer_extd_stats {
+ struct wmi_mac_addr peer_macaddr;
+ __le32 inactive_time;
+ __le32 peer_chain_rssi;
+ __le32 rx_duration;
+ __le32 reserved[10];
+} __packed;
+
+struct wmi_10_4_bss_bcn_stats {
+ __le32 vdev_id;
+ __le32 bss_bcns_dropped;
+ __le32 bss_bcn_delivered;
+} __packed;
+
+struct wmi_10_4_bss_bcn_filter_stats {
+ __le32 bcns_dropped;
+ __le32 bcns_delivered;
+ __le32 active_filters;
+ struct wmi_10_4_bss_bcn_stats bss_stats;
+} __packed;
+
+struct wmi_10_2_pdev_ext_stats {
+ __le32 rx_rssi_comb;
+ __le32 rx_rssi[4];
+ __le32 rx_mcs[10];
+ __le32 tx_mcs[10];
+ __le32 ack_rssi;
+} __packed;
+
+struct wmi_vdev_create_cmd {
+ __le32 vdev_id;
+ __le32 vdev_type;
+ __le32 vdev_subtype;
+ struct wmi_mac_addr vdev_macaddr;
+} __packed;
+
+enum wmi_vdev_type {
+ WMI_VDEV_TYPE_AP = 1,
+ WMI_VDEV_TYPE_STA = 2,
+ WMI_VDEV_TYPE_IBSS = 3,
+ WMI_VDEV_TYPE_MONITOR = 4,
+};
+
+enum wmi_vdev_subtype {
+ WMI_VDEV_SUBTYPE_NONE,
+ WMI_VDEV_SUBTYPE_P2P_DEVICE,
+ WMI_VDEV_SUBTYPE_P2P_CLIENT,
+ WMI_VDEV_SUBTYPE_P2P_GO,
+ WMI_VDEV_SUBTYPE_PROXY_STA,
+ WMI_VDEV_SUBTYPE_MESH_11S,
+ WMI_VDEV_SUBTYPE_MESH_NON_11S,
+};
+
+enum wmi_vdev_subtype_legacy {
+ WMI_VDEV_SUBTYPE_LEGACY_NONE = 0,
+ WMI_VDEV_SUBTYPE_LEGACY_P2P_DEV = 1,
+ WMI_VDEV_SUBTYPE_LEGACY_P2P_CLI = 2,
+ WMI_VDEV_SUBTYPE_LEGACY_P2P_GO = 3,
+ WMI_VDEV_SUBTYPE_LEGACY_PROXY_STA = 4,
+};
+
+enum wmi_vdev_subtype_10_2_4 {
+ WMI_VDEV_SUBTYPE_10_2_4_NONE = 0,
+ WMI_VDEV_SUBTYPE_10_2_4_P2P_DEV = 1,
+ WMI_VDEV_SUBTYPE_10_2_4_P2P_CLI = 2,
+ WMI_VDEV_SUBTYPE_10_2_4_P2P_GO = 3,
+ WMI_VDEV_SUBTYPE_10_2_4_PROXY_STA = 4,
+ WMI_VDEV_SUBTYPE_10_2_4_MESH_11S = 5,
+};
+
+enum wmi_vdev_subtype_10_4 {
+ WMI_VDEV_SUBTYPE_10_4_NONE = 0,
+ WMI_VDEV_SUBTYPE_10_4_P2P_DEV = 1,
+ WMI_VDEV_SUBTYPE_10_4_P2P_CLI = 2,
+ WMI_VDEV_SUBTYPE_10_4_P2P_GO = 3,
+ WMI_VDEV_SUBTYPE_10_4_PROXY_STA = 4,
+ WMI_VDEV_SUBTYPE_10_4_MESH_NON_11S = 5,
+ WMI_VDEV_SUBTYPE_10_4_MESH_11S = 6,
+};
+
+/* values for vdev_start_request flags */
+/*
+ * Indicates that the AP VDEV uses a hidden SSID. Only valid for
+ * AP/GO vdevs.
+ */
+#define WMI_VDEV_START_HIDDEN_SSID (1 << 0)
+/*
+ * Indicates whether robust management frame protection (RMF) is
+ * enabled. For GO/AP vdevs, it indicates that the vdev may support
+ * station/client associations with RMF enabled. For STA/client
+ * vdevs, it indicates that the STA will associate with an AP with
+ * RMF enabled.
+ */
+#define WMI_VDEV_START_PMF_ENABLED (1 << 1)
+
+struct wmi_p2p_noa_descriptor {
+ __le32 type_count; /* 255: continuous schedule, 0: reserved */
+ __le32 duration; /* Absent period duration in micro seconds */
+ __le32 interval; /* Absent period interval in micro seconds */
+ __le32 start_time; /* 32 bit TSF time when it starts */
+} __packed;
+
+struct wmi_vdev_start_request_cmd {
+ /* WMI channel */
+ struct wmi_channel chan;
+ /* unique id identifying the VDEV, generated by the caller */
+ __le32 vdev_id;
+ /* requestor id identifying the caller module */
+ __le32 requestor_id;
+ /* beacon interval from received beacon */
+ __le32 beacon_interval;
+ /* DTIM Period from the received beacon */
+ __le32 dtim_period;
+ /* Flags */
+ __le32 flags;
+ /* ssid field. Only valid for AP/GO/IBSS/BTAmp VDEV type. */
+ struct wmi_ssid ssid;
+ /* beacon/probe response xmit rate. Applicable for SoftAP. */
+ __le32 bcn_tx_rate;
+ /* beacon/probe response xmit power. Applicable for SoftAP. */
+ __le32 bcn_tx_power;
+ /* number of p2p NOA descriptor(s) from scan entry */
+ __le32 num_noa_descriptors;
+ /*
+ * Disable H/W ack. This is used by WMI_VDEV_RESTART_REQUEST_CMDID.
+ * During CAC, our HW shouldn't ack directed frames.
+ */
+ __le32 disable_hw_ack;
+ /* actual p2p NOA descriptor from scan entry */
+ struct wmi_p2p_noa_descriptor noa_descriptors[2];
+} __packed;
+
+struct wmi_vdev_restart_request_cmd {
+ struct wmi_vdev_start_request_cmd vdev_start_request_cmd;
+} __packed;
+
+struct wmi_vdev_start_request_arg {
+ u32 vdev_id;
+ struct wmi_channel_arg channel;
+ u32 bcn_intval;
+ u32 dtim_period;
+ u8 *ssid;
+ u32 ssid_len;
+ u32 bcn_tx_rate;
+ u32 bcn_tx_power;
+ bool disable_hw_ack;
+ bool hidden_ssid;
+ bool pmf_enabled;
+};
+
+struct wmi_vdev_delete_cmd {
+ /* unique id identifying the VDEV, generated by the caller */
+ __le32 vdev_id;
+} __packed;
+
+struct wmi_vdev_up_cmd {
+ __le32 vdev_id;
+ __le32 vdev_assoc_id;
+ struct wmi_mac_addr vdev_bssid;
+} __packed;
+
+struct wmi_vdev_stop_cmd {
+ __le32 vdev_id;
+} __packed;
+
+struct wmi_vdev_down_cmd {
+ __le32 vdev_id;
+} __packed;
+
+struct wmi_vdev_standby_response_cmd {
+ /* unique id identifying the VDEV, generated by the caller */
+ __le32 vdev_id;
+} __packed;
+
+struct wmi_vdev_resume_response_cmd {
+ /* unique id identifying the VDEV, generated by the caller */
+ __le32 vdev_id;
+} __packed;
+
+struct wmi_vdev_set_param_cmd {
+ __le32 vdev_id;
+ __le32 param_id;
+ __le32 param_value;
+} __packed;
+
+#define WMI_MAX_KEY_INDEX 3
+#define WMI_MAX_KEY_LEN 32
+
+#define WMI_KEY_PAIRWISE 0x00
+#define WMI_KEY_GROUP 0x01
+#define WMI_KEY_TX_USAGE 0x02 /* default tx key - static wep */
+
+struct wmi_key_seq_counter {
+ __le32 key_seq_counter_l;
+ __le32 key_seq_counter_h;
+} __packed;
+
+enum wmi_cipher_suites {
+ WMI_CIPHER_NONE,
+ WMI_CIPHER_WEP,
+ WMI_CIPHER_TKIP,
+ WMI_CIPHER_AES_OCB,
+ WMI_CIPHER_AES_CCM,
+ WMI_CIPHER_WAPI,
+ WMI_CIPHER_CKIP,
+ WMI_CIPHER_AES_CMAC,
+ WMI_CIPHER_AES_GCM,
+};
+
+enum wmi_tlv_cipher_suites {
+ WMI_TLV_CIPHER_NONE,
+ WMI_TLV_CIPHER_WEP,
+ WMI_TLV_CIPHER_TKIP,
+ WMI_TLV_CIPHER_AES_OCB,
+ WMI_TLV_CIPHER_AES_CCM,
+ WMI_TLV_CIPHER_WAPI,
+ WMI_TLV_CIPHER_CKIP,
+ WMI_TLV_CIPHER_AES_CMAC,
+ WMI_TLV_CIPHER_ANY,
+ WMI_TLV_CIPHER_AES_GCM,
+};
+
+struct wmi_vdev_install_key_cmd {
+ __le32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ __le32 key_idx;
+ __le32 key_flags;
+ __le32 key_cipher; /* %WMI_CIPHER_ */
+ struct wmi_key_seq_counter key_rsc_counter;
+ struct wmi_key_seq_counter key_global_rsc_counter;
+ struct wmi_key_seq_counter key_tsc_counter;
+ u8 wpi_key_rsc_counter[16];
+ u8 wpi_key_tsc_counter[16];
+ __le32 key_len;
+ __le32 key_txmic_len;
+ __le32 key_rxmic_len;
+
+ /* contains key followed by tx mic followed by rx mic */
+ u8 key_data[];
+} __packed;
+
+struct wmi_vdev_install_key_arg {
+ u32 vdev_id;
+ const u8 *macaddr;
+ u32 key_idx;
+ u32 key_flags;
+ u32 key_cipher;
+ u32 key_len;
+ u32 key_txmic_len;
+ u32 key_rxmic_len;
+ const void *key_data;
+};
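+
+/*
+ * A minimal sketch (the helper name is illustrative, not an existing API):
+ * the variable-length key_data blob in wmi_vdev_install_key_cmd carries
+ * the key, TX MIC and RX MIC back to back, so its total size follows from
+ * the three length fields.
+ */
+static inline size_t
+wmi_vdev_install_key_data_len(const struct wmi_vdev_install_key_cmd *cmd)
+{
+ /* key bytes, then tx mic, then rx mic, as noted above key_data[] */
+ return __le32_to_cpu(cmd->key_len) +
+        __le32_to_cpu(cmd->key_txmic_len) +
+        __le32_to_cpu(cmd->key_rxmic_len);
+}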
+
+/*
+ * vdev fixed rate format:
+ * - preamble - b7:b6 - see WMI_RATE_PREAMBLE_
+ * - nss - b5:b4 - ss number (0 means 1ss)
+ * - rate_mcs - b3:b0 - as below
+ * CCK: 0 - 11Mbps, 1 - 5.5Mbps, 2 - 2Mbps, 3 - 1Mbps,
+ * 4 - 11Mbps (s), 5 - 5.5Mbps (s), 6 - 2Mbps (s)
+ * OFDM: 0 - 48Mbps, 1 - 24Mbps, 2 - 12Mbps, 3 - 6Mbps,
+ * 4 - 54Mbps, 5 - 36Mbps, 6 - 18Mbps, 7 - 9Mbps
+ * HT/VHT: MCS index
+ */
+
+/* Preamble types to be used with VDEV fixed rate configuration */
+enum wmi_rate_preamble {
+ WMI_RATE_PREAMBLE_OFDM,
+ WMI_RATE_PREAMBLE_CCK,
+ WMI_RATE_PREAMBLE_HT,
+ WMI_RATE_PREAMBLE_VHT,
+};
+
+#define ATH10K_HW_NSS(rate) (1 + (((rate) >> 4) & 0x3))
+#define ATH10K_HW_PREAMBLE(rate) (((rate) >> 6) & 0x3)
+#define ATH10K_HW_MCS_RATE(rate) ((rate) & 0xf)
+#define ATH10K_HW_LEGACY_RATE(rate) ((rate) & 0x3f)
+#define ATH10K_HW_BW(flags) (((flags) >> 3) & 0x3)
+#define ATH10K_HW_GI(flags) (((flags) >> 5) & 0x1)
+#define ATH10K_HW_RATECODE(rate, nss, preamble) \
+ (((preamble) << 6) | ((nss) << 4) | (rate))
+#define ATH10K_HW_AMPDU(flags) ((flags) & 0x1)
+#define ATH10K_HW_BA_FAIL(flags) (((flags) >> 1) & 0x3)
+#define ATH10K_FW_SKIPPED_RATE_CTRL(flags) (((flags) >> 6) & 0x1)
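+
+/*
+ * Usage sketch for the rate code helpers above (the values are
+ * illustrative, not taken from firmware): an HT rate at MCS 7 on two
+ * spatial streams is encoded and decoded as follows. Note that the nss
+ * field stores (number of streams - 1).
+ *
+ *   u8 rc = ATH10K_HW_RATECODE(7, 1, WMI_RATE_PREAMBLE_HT);
+ *
+ *   ATH10K_HW_PREAMBLE(rc);  // == WMI_RATE_PREAMBLE_HT
+ *   ATH10K_HW_NSS(rc);       // == 2 (spatial streams)
+ *   ATH10K_HW_MCS_RATE(rc);  // == 7
+ */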
+
+#define ATH10K_VHT_MCS_NUM 10
+#define ATH10K_BW_NUM 6
+#define ATH10K_NSS_NUM 4
+#define ATH10K_LEGACY_NUM 12
+#define ATH10K_GI_NUM 2
+#define ATH10K_HT_MCS_NUM 32
+#define ATH10K_RATE_TABLE_NUM 320
+#define ATH10K_RATE_INFO_FLAGS_SGI_BIT 2
+
+/* Value to disable fixed rate setting */
+#define WMI_FIXED_RATE_NONE (0xff)
+
+struct wmi_peer_param_map {
+ u32 smps_state;
+ u32 ampdu;
+ u32 authorize;
+ u32 chan_width;
+ u32 nss;
+ u32 use_4addr;
+ u32 membership;
+ u32 use_fixed_power;
+ u32 user_pos;
+ u32 crit_proto_hint_enabled;
+ u32 tx_fail_cnt_thr;
+ u32 set_hw_retry_cts2s;
+ u32 ibss_atim_win_len;
+ u32 debug;
+ u32 phymode;
+ u32 dummy_var;
+};
+
+struct wmi_vdev_param_map {
+ u32 rts_threshold;
+ u32 fragmentation_threshold;
+ u32 beacon_interval;
+ u32 listen_interval;
+ u32 multicast_rate;
+ u32 mgmt_tx_rate;
+ u32 slot_time;
+ u32 preamble;
+ u32 swba_time;
+ u32 wmi_vdev_stats_update_period;
+ u32 wmi_vdev_pwrsave_ageout_time;
+ u32 wmi_vdev_host_swba_interval;
+ u32 dtim_period;
+ u32 wmi_vdev_oc_scheduler_air_time_limit;
+ u32 wds;
+ u32 atim_window;
+ u32 bmiss_count_max;
+ u32 bmiss_first_bcnt;
+ u32 bmiss_final_bcnt;
+ u32 feature_wmm;
+ u32 chwidth;
+ u32 chextoffset;
+ u32 disable_htprotection;
+ u32 sta_quickkickout;
+ u32 mgmt_rate;
+ u32 protection_mode;
+ u32 fixed_rate;
+ u32 sgi;
+ u32 ldpc;
+ u32 tx_stbc;
+ u32 rx_stbc;
+ u32 intra_bss_fwd;
+ u32 def_keyid;
+ u32 nss;
+ u32 bcast_data_rate;
+ u32 mcast_data_rate;
+ u32 mcast_indicate;
+ u32 dhcp_indicate;
+ u32 unknown_dest_indicate;
+ u32 ap_keepalive_min_idle_inactive_time_secs;
+ u32 ap_keepalive_max_idle_inactive_time_secs;
+ u32 ap_keepalive_max_unresponsive_time_secs;
+ u32 ap_enable_nawds;
+ u32 mcast2ucast_set;
+ u32 enable_rtscts;
+ u32 txbf;
+ u32 packet_powersave;
+ u32 drop_unencry;
+ u32 tx_encap_type;
+ u32 ap_detect_out_of_sync_sleeping_sta_time_secs;
+ u32 rc_num_retries;
+ u32 cabq_maxdur;
+ u32 mfptest_set;
+ u32 rts_fixed_rate;
+ u32 vht_sgimask;
+ u32 vht80_ratemask;
+ u32 early_rx_adjust_enable;
+ u32 early_rx_tgt_bmiss_num;
+ u32 early_rx_bmiss_sample_cycle;
+ u32 early_rx_slop_step;
+ u32 early_rx_init_slop;
+ u32 early_rx_adjust_pause;
+ u32 proxy_sta;
+ u32 meru_vc;
+ u32 rx_decap_type;
+ u32 bw_nss_ratemask;
+ u32 inc_tsf;
+ u32 dec_tsf;
+ u32 disable_4addr_src_lrn;
+ u32 rtt_responder_role;
+};
+
+#define WMI_VDEV_PARAM_UNSUPPORTED 0
+
+/* the definition of different VDEV parameters */
+enum wmi_vdev_param {
+ /* RTS Threshold */
+ WMI_VDEV_PARAM_RTS_THRESHOLD = 0x1,
+ /* Fragmentation threshold */
+ WMI_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
+ /* beacon interval in TUs */
+ WMI_VDEV_PARAM_BEACON_INTERVAL,
+ /* Listen interval in TUs */
+ WMI_VDEV_PARAM_LISTEN_INTERVAL,
+ /* multicast rate in Mbps */
+ WMI_VDEV_PARAM_MULTICAST_RATE,
+ /* management frame rate in Mbps */
+ WMI_VDEV_PARAM_MGMT_TX_RATE,
+ /* slot time (long vs short) */
+ WMI_VDEV_PARAM_SLOT_TIME,
+ /* preamble (long vs short) */
+ WMI_VDEV_PARAM_PREAMBLE,
+ /* SWBA time (time before tbtt in msec) */
+ WMI_VDEV_PARAM_SWBA_TIME,
+ /* time period for updating VDEV stats */
+ WMI_VDEV_STATS_UPDATE_PERIOD,
+ /* age out time in msec for frames queued for station in power save */
+ WMI_VDEV_PWRSAVE_AGEOUT_TIME,
+ /*
+ * Host SWBA interval (time in msec before tbtt for SWBA event
+ * generation).
+ */
+ WMI_VDEV_HOST_SWBA_INTERVAL,
+ /* DTIM period (specified in units of num beacon intervals) */
+ WMI_VDEV_PARAM_DTIM_PERIOD,
+ /*
+ * scheduler air time limit for this VDEV. used by off chan
+ * scheduler.
+ */
+ WMI_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
+ /* enable/disable WDS for this VDEV */
+ WMI_VDEV_PARAM_WDS,
+ /* ATIM Window */
+ WMI_VDEV_PARAM_ATIM_WINDOW,
+ /* BMISS max */
+ WMI_VDEV_PARAM_BMISS_COUNT_MAX,
+ /* BMISS first time */
+ WMI_VDEV_PARAM_BMISS_FIRST_BCNT,
+ /* BMISS final time */
+ WMI_VDEV_PARAM_BMISS_FINAL_BCNT,
+ /* WMM enable/disable */
+ WMI_VDEV_PARAM_FEATURE_WMM,
+ /* Channel width */
+ WMI_VDEV_PARAM_CHWIDTH,
+ /* Channel Offset */
+ WMI_VDEV_PARAM_CHEXTOFFSET,
+ /* Disable HT Protection */
+ WMI_VDEV_PARAM_DISABLE_HTPROTECTION,
+ /* Quick STA Kickout */
+ WMI_VDEV_PARAM_STA_QUICKKICKOUT,
+ /* Rate to be used with Management frames */
+ WMI_VDEV_PARAM_MGMT_RATE,
+ /* Protection Mode */
+ WMI_VDEV_PARAM_PROTECTION_MODE,
+ /* Fixed rate setting */
+ WMI_VDEV_PARAM_FIXED_RATE,
+ /* Short GI Enable/Disable */
+ WMI_VDEV_PARAM_SGI,
+ /* Enable LDPC */
+ WMI_VDEV_PARAM_LDPC,
+ /* Enable Tx STBC */
+ WMI_VDEV_PARAM_TX_STBC,
+ /* Enable Rx STBC */
+ WMI_VDEV_PARAM_RX_STBC,
+ /* Intra BSS forwarding */
+ WMI_VDEV_PARAM_INTRA_BSS_FWD,
+ /* Setting Default xmit key for Vdev */
+ WMI_VDEV_PARAM_DEF_KEYID,
+ /* NSS width */
+ WMI_VDEV_PARAM_NSS,
+ /* Set the custom rate for the broadcast data frames */
+ WMI_VDEV_PARAM_BCAST_DATA_RATE,
+ /* Set the custom rate (rate-code) for multicast data frames */
+ WMI_VDEV_PARAM_MCAST_DATA_RATE,
+ /* Tx multicast packet indicate Enable/Disable */
+ WMI_VDEV_PARAM_MCAST_INDICATE,
+ /* Tx DHCP packet indicate Enable/Disable */
+ WMI_VDEV_PARAM_DHCP_INDICATE,
+ /* Enable host inspection of Tx unicast packet to unknown destination */
+ WMI_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
+
+ /* The minimum idle time after which the AP begins to consider a STA inactive */
+ WMI_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
+
+ /*
+ * An associated STA is considered inactive when there is no recent
+ * TX/RX activity and no downlink frames are buffered for it. Once a
+ * STA exceeds the maximum idle inactive time, the AP will send an
+ * 802.11 data-null as a keep alive to verify the STA is still
+ * associated. If the STA does not ACK the data-null, or if the
+ * data-null is buffered and the STA does not retrieve it, the STA
+ * will be considered unresponsive
+ * (see WMI_VDEV_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS).
+ */
+ WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
+
+ /*
+ * An associated STA is considered unresponsive if there is no recent
+ * TX/RX activity and downlink frames are buffered for it. Once a STA
+ * exceeds the maximum unresponsive time, the AP will send a
+ * WMI_STA_KICKOUT event to the host so the STA can be deleted.
+ */
+ WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
+
+ /* Enable NAWDS : MCAST INSPECT Enable, NAWDS Flag set */
+ WMI_VDEV_PARAM_AP_ENABLE_NAWDS,
+ /* Enable/Disable RTS-CTS */
+ WMI_VDEV_PARAM_ENABLE_RTSCTS,
+ /* Enable TX beamformee/beamformer (TXBF) */
+ WMI_VDEV_PARAM_TXBF,
+
+ /* Set packet power save */
+ WMI_VDEV_PARAM_PACKET_POWERSAVE,
+
+ /*
+ * Drops unencrypted packets if received in an encrypted connection,
+ * otherwise forwards them to the host.
+ */
+ WMI_VDEV_PARAM_DROP_UNENCRY,
+
+ /*
+ * Set the encapsulation type for frames.
+ */
+ WMI_VDEV_PARAM_TX_ENCAP_TYPE,
+};
+
+/* the definition of different VDEV parameters */
+enum wmi_10x_vdev_param {
+ /* RTS Threshold */
+ WMI_10X_VDEV_PARAM_RTS_THRESHOLD = 0x1,
+ /* Fragmentation threshold */
+ WMI_10X_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
+ /* beacon interval in TUs */
+ WMI_10X_VDEV_PARAM_BEACON_INTERVAL,
+ /* Listen interval in TUs */
+ WMI_10X_VDEV_PARAM_LISTEN_INTERVAL,
+ /* multicast rate in Mbps */
+ WMI_10X_VDEV_PARAM_MULTICAST_RATE,
+ /* management frame rate in Mbps */
+ WMI_10X_VDEV_PARAM_MGMT_TX_RATE,
+ /* slot time (long vs short) */
+ WMI_10X_VDEV_PARAM_SLOT_TIME,
+ /* preamble (long vs short) */
+ WMI_10X_VDEV_PARAM_PREAMBLE,
+ /* SWBA time (time before tbtt in msec) */
+ WMI_10X_VDEV_PARAM_SWBA_TIME,
+ /* time period for updating VDEV stats */
+ WMI_10X_VDEV_STATS_UPDATE_PERIOD,
+ /* age out time in msec for frames queued for station in power save */
+ WMI_10X_VDEV_PWRSAVE_AGEOUT_TIME,
+ /*
+ * Host SWBA interval (time in msec before tbtt for SWBA event
+ * generation).
+ */
+ WMI_10X_VDEV_HOST_SWBA_INTERVAL,
+ /* DTIM period (specified in units of num beacon intervals) */
+ WMI_10X_VDEV_PARAM_DTIM_PERIOD,
+ /*
+ * scheduler air time limit for this VDEV. used by off chan
+ * scheduler.
+ */
+ WMI_10X_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
+ /* enable/disable WDS for this VDEV */
+ WMI_10X_VDEV_PARAM_WDS,
+ /* ATIM Window */
+ WMI_10X_VDEV_PARAM_ATIM_WINDOW,
+ /* BMISS max */
+ WMI_10X_VDEV_PARAM_BMISS_COUNT_MAX,
+ /* WMM enable/disable */
+ WMI_10X_VDEV_PARAM_FEATURE_WMM,
+ /* Channel width */
+ WMI_10X_VDEV_PARAM_CHWIDTH,
+ /* Channel Offset */
+ WMI_10X_VDEV_PARAM_CHEXTOFFSET,
+ /* Disable HT Protection */
+ WMI_10X_VDEV_PARAM_DISABLE_HTPROTECTION,
+ /* Quick STA Kickout */
+ WMI_10X_VDEV_PARAM_STA_QUICKKICKOUT,
+ /* Rate to be used with Management frames */
+ WMI_10X_VDEV_PARAM_MGMT_RATE,
+ /* Protection Mode */
+ WMI_10X_VDEV_PARAM_PROTECTION_MODE,
+ /* Fixed rate setting */
+ WMI_10X_VDEV_PARAM_FIXED_RATE,
+ /* Short GI Enable/Disable */
+ WMI_10X_VDEV_PARAM_SGI,
+ /* Enable LDPC */
+ WMI_10X_VDEV_PARAM_LDPC,
+ /* Enable Tx STBC */
+ WMI_10X_VDEV_PARAM_TX_STBC,
+ /* Enable Rx STBC */
+ WMI_10X_VDEV_PARAM_RX_STBC,
+ /* Intra BSS forwarding */
+ WMI_10X_VDEV_PARAM_INTRA_BSS_FWD,
+ /* Setting Default xmit key for Vdev */
+ WMI_10X_VDEV_PARAM_DEF_KEYID,
+ /* NSS width */
+ WMI_10X_VDEV_PARAM_NSS,
+ /* Set the custom rate for the broadcast data frames */
+ WMI_10X_VDEV_PARAM_BCAST_DATA_RATE,
+ /* Set the custom rate (rate-code) for multicast data frames */
+ WMI_10X_VDEV_PARAM_MCAST_DATA_RATE,
+ /* Tx multicast packet indicate Enable/Disable */
+ WMI_10X_VDEV_PARAM_MCAST_INDICATE,
+ /* Tx DHCP packet indicate Enable/Disable */
+ WMI_10X_VDEV_PARAM_DHCP_INDICATE,
+ /* Enable host inspection of Tx unicast packet to unknown destination */
+ WMI_10X_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
+
+ /* The minimum idle time after which the AP begins to consider a STA inactive */
+ WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
+
+ /*
+ * An associated STA is considered inactive when there is no recent
+ * TX/RX activity and no downlink frames are buffered for it. Once a
+ * STA exceeds the maximum idle inactive time, the AP will send an
+ * 802.11 data-null as a keep alive to verify the STA is still
+ * associated. If the STA does not ACK the data-null, or if the
+ * data-null is buffered and the STA does not retrieve it, the STA
+ * will be considered unresponsive
+ * (see WMI_10X_VDEV_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS).
+ */
+ WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
+
+ /*
+ * An associated STA is considered unresponsive if there is no recent
+ * TX/RX activity and downlink frames are buffered for it. Once a STA
+ * exceeds the maximum unresponsive time, the AP will send a
+ * WMI_10X_STA_KICKOUT event to the host so the STA can be deleted.
+ */
+ WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
+
+ /* Enable NAWDS : MCAST INSPECT Enable, NAWDS Flag set */
+ WMI_10X_VDEV_PARAM_AP_ENABLE_NAWDS,
+
+ WMI_10X_VDEV_PARAM_MCAST2UCAST_SET,
+ /* Enable/Disable RTS-CTS */
+ WMI_10X_VDEV_PARAM_ENABLE_RTSCTS,
+
+ WMI_10X_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
+
+ /* following are available as of firmware 10.2 */
+ WMI_10X_VDEV_PARAM_TX_ENCAP_TYPE,
+ WMI_10X_VDEV_PARAM_CABQ_MAXDUR,
+ WMI_10X_VDEV_PARAM_MFPTEST_SET,
+ WMI_10X_VDEV_PARAM_RTS_FIXED_RATE,
+ WMI_10X_VDEV_PARAM_VHT_SGIMASK,
+ WMI_10X_VDEV_PARAM_VHT80_RATEMASK,
+ WMI_10X_VDEV_PARAM_TSF_INCREMENT,
+};
+
+enum wmi_10_4_vdev_param {
+ WMI_10_4_VDEV_PARAM_RTS_THRESHOLD = 0x1,
+ WMI_10_4_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
+ WMI_10_4_VDEV_PARAM_BEACON_INTERVAL,
+ WMI_10_4_VDEV_PARAM_LISTEN_INTERVAL,
+ WMI_10_4_VDEV_PARAM_MULTICAST_RATE,
+ WMI_10_4_VDEV_PARAM_MGMT_TX_RATE,
+ WMI_10_4_VDEV_PARAM_SLOT_TIME,
+ WMI_10_4_VDEV_PARAM_PREAMBLE,
+ WMI_10_4_VDEV_PARAM_SWBA_TIME,
+ WMI_10_4_VDEV_STATS_UPDATE_PERIOD,
+ WMI_10_4_VDEV_PWRSAVE_AGEOUT_TIME,
+ WMI_10_4_VDEV_HOST_SWBA_INTERVAL,
+ WMI_10_4_VDEV_PARAM_DTIM_PERIOD,
+ WMI_10_4_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
+ WMI_10_4_VDEV_PARAM_WDS,
+ WMI_10_4_VDEV_PARAM_ATIM_WINDOW,
+ WMI_10_4_VDEV_PARAM_BMISS_COUNT_MAX,
+ WMI_10_4_VDEV_PARAM_BMISS_FIRST_BCNT,
+ WMI_10_4_VDEV_PARAM_BMISS_FINAL_BCNT,
+ WMI_10_4_VDEV_PARAM_FEATURE_WMM,
+ WMI_10_4_VDEV_PARAM_CHWIDTH,
+ WMI_10_4_VDEV_PARAM_CHEXTOFFSET,
+ WMI_10_4_VDEV_PARAM_DISABLE_HTPROTECTION,
+ WMI_10_4_VDEV_PARAM_STA_QUICKKICKOUT,
+ WMI_10_4_VDEV_PARAM_MGMT_RATE,
+ WMI_10_4_VDEV_PARAM_PROTECTION_MODE,
+ WMI_10_4_VDEV_PARAM_FIXED_RATE,
+ WMI_10_4_VDEV_PARAM_SGI,
+ WMI_10_4_VDEV_PARAM_LDPC,
+ WMI_10_4_VDEV_PARAM_TX_STBC,
+ WMI_10_4_VDEV_PARAM_RX_STBC,
+ WMI_10_4_VDEV_PARAM_INTRA_BSS_FWD,
+ WMI_10_4_VDEV_PARAM_DEF_KEYID,
+ WMI_10_4_VDEV_PARAM_NSS,
+ WMI_10_4_VDEV_PARAM_BCAST_DATA_RATE,
+ WMI_10_4_VDEV_PARAM_MCAST_DATA_RATE,
+ WMI_10_4_VDEV_PARAM_MCAST_INDICATE,
+ WMI_10_4_VDEV_PARAM_DHCP_INDICATE,
+ WMI_10_4_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
+ WMI_10_4_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
+ WMI_10_4_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
+ WMI_10_4_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
+ WMI_10_4_VDEV_PARAM_AP_ENABLE_NAWDS,
+ WMI_10_4_VDEV_PARAM_MCAST2UCAST_SET,
+ WMI_10_4_VDEV_PARAM_ENABLE_RTSCTS,
+ WMI_10_4_VDEV_PARAM_RC_NUM_RETRIES,
+ WMI_10_4_VDEV_PARAM_TXBF,
+ WMI_10_4_VDEV_PARAM_PACKET_POWERSAVE,
+ WMI_10_4_VDEV_PARAM_DROP_UNENCRY,
+ WMI_10_4_VDEV_PARAM_TX_ENCAP_TYPE,
+ WMI_10_4_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
+ WMI_10_4_VDEV_PARAM_CABQ_MAXDUR,
+ WMI_10_4_VDEV_PARAM_MFPTEST_SET,
+ WMI_10_4_VDEV_PARAM_RTS_FIXED_RATE,
+ WMI_10_4_VDEV_PARAM_VHT_SGIMASK,
+ WMI_10_4_VDEV_PARAM_VHT80_RATEMASK,
+ WMI_10_4_VDEV_PARAM_EARLY_RX_ADJUST_ENABLE,
+ WMI_10_4_VDEV_PARAM_EARLY_RX_TGT_BMISS_NUM,
+ WMI_10_4_VDEV_PARAM_EARLY_RX_BMISS_SAMPLE_CYCLE,
+ WMI_10_4_VDEV_PARAM_EARLY_RX_SLOP_STEP,
+ WMI_10_4_VDEV_PARAM_EARLY_RX_INIT_SLOP,
+ WMI_10_4_VDEV_PARAM_EARLY_RX_ADJUST_PAUSE,
+ WMI_10_4_VDEV_PARAM_PROXY_STA,
+ WMI_10_4_VDEV_PARAM_MERU_VC,
+ WMI_10_4_VDEV_PARAM_RX_DECAP_TYPE,
+ WMI_10_4_VDEV_PARAM_BW_NSS_RATEMASK,
+ WMI_10_4_VDEV_PARAM_SENSOR_AP,
+ WMI_10_4_VDEV_PARAM_BEACON_RATE,
+ WMI_10_4_VDEV_PARAM_DTIM_ENABLE_CTS,
+ WMI_10_4_VDEV_PARAM_STA_KICKOUT,
+ WMI_10_4_VDEV_PARAM_CAPABILITIES,
+ WMI_10_4_VDEV_PARAM_TSF_INCREMENT,
+ WMI_10_4_VDEV_PARAM_RX_FILTER,
+ WMI_10_4_VDEV_PARAM_MGMT_TX_POWER,
+ WMI_10_4_VDEV_PARAM_ATF_SSID_SCHED_POLICY,
+ WMI_10_4_VDEV_PARAM_DISABLE_DYN_BW_RTS,
+ WMI_10_4_VDEV_PARAM_TSF_DECREMENT,
+ WMI_10_4_VDEV_PARAM_SELFGEN_FIXED_RATE,
+ WMI_10_4_VDEV_PARAM_AMPDU_SUBFRAME_SIZE_PER_AC,
+ WMI_10_4_VDEV_PARAM_NSS_VHT160,
+ WMI_10_4_VDEV_PARAM_NSS_VHT80_80,
+ WMI_10_4_VDEV_PARAM_AMSDU_SUBFRAME_SIZE_PER_AC,
+ WMI_10_4_VDEV_PARAM_DISABLE_CABQ,
+ WMI_10_4_VDEV_PARAM_SIFS_TRIGGER_RATE,
+ WMI_10_4_VDEV_PARAM_TX_POWER,
+ WMI_10_4_VDEV_PARAM_ENABLE_DISABLE_RTT_RESPONDER_ROLE,
+ WMI_10_4_VDEV_PARAM_DISABLE_4_ADDR_SRC_LRN,
+};
+
+#define WMI_VDEV_DISABLE_4_ADDR_SRC_LRN 1
+
+#define WMI_VDEV_PARAM_TXBF_SU_TX_BFEE BIT(0)
+#define WMI_VDEV_PARAM_TXBF_MU_TX_BFEE BIT(1)
+#define WMI_VDEV_PARAM_TXBF_SU_TX_BFER BIT(2)
+#define WMI_VDEV_PARAM_TXBF_MU_TX_BFER BIT(3)
+
+#define WMI_TXBF_STS_CAP_OFFSET_LSB 4
+#define WMI_TXBF_STS_CAP_OFFSET_MASK 0x70
+#define WMI_TXBF_CONF_IMPLICIT_BF BIT(7)
+#define WMI_BF_SOUND_DIM_OFFSET_LSB 8
+#define WMI_BF_SOUND_DIM_OFFSET_MASK 0xf00
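+
+/*
+ * Sketch of composing a WMI_VDEV_PARAM_TXBF value from the bits above
+ * (the sts and dim variables are illustrative placeholders for the
+ * STS capability and sounding dimension values):
+ *
+ *   u32 txbf = WMI_VDEV_PARAM_TXBF_SU_TX_BFEE |
+ *              ((sts << WMI_TXBF_STS_CAP_OFFSET_LSB) &
+ *               WMI_TXBF_STS_CAP_OFFSET_MASK) |
+ *              ((dim << WMI_BF_SOUND_DIM_OFFSET_LSB) &
+ *               WMI_BF_SOUND_DIM_OFFSET_MASK);
+ */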
+
+/* slot time long */
+#define WMI_VDEV_SLOT_TIME_LONG 0x1
+/* slot time short */
+#define WMI_VDEV_SLOT_TIME_SHORT 0x2
+/* preamble long */
+#define WMI_VDEV_PREAMBLE_LONG 0x1
+/* preamble short */
+#define WMI_VDEV_PREAMBLE_SHORT 0x2
+
+enum wmi_start_event_param {
+ WMI_VDEV_RESP_START_EVENT = 0,
+ WMI_VDEV_RESP_RESTART_EVENT,
+};
+
+struct wmi_vdev_start_response_event {
+ __le32 vdev_id;
+ __le32 req_id;
+ __le32 resp_type; /* %WMI_VDEV_RESP_ */
+ __le32 status;
+} __packed;
+
+struct wmi_vdev_standby_req_event {
+ /* unique id identifying the VDEV, generated by the caller */
+ __le32 vdev_id;
+} __packed;
+
+struct wmi_vdev_resume_req_event {
+ /* unique id identifying the VDEV, generated by the caller */
+ __le32 vdev_id;
+} __packed;
+
+struct wmi_vdev_stopped_event {
+ /* unique id identifying the VDEV, generated by the caller */
+ __le32 vdev_id;
+} __packed;
+
+/*
+ * common structure used for simple events
+ * (stopped, resume_req, standby response)
+ */
+struct wmi_vdev_simple_event {
+ /* unique id identifying the VDEV, generated by the caller */
+ __le32 vdev_id;
+} __packed;
+
+/* VDEV start response status codes */
+/* VDEV successfully started */
+#define WMI_INIFIED_VDEV_START_RESPONSE_STATUS_SUCCESS 0x0
+
+/* requested VDEV not found */
+#define WMI_INIFIED_VDEV_START_RESPONSE_INVALID_VDEVID 0x1
+
+/* unsupported VDEV combination */
+#define WMI_INIFIED_VDEV_START_RESPONSE_NOT_SUPPORTED 0x2
+
+/* TODO: please add more comments if you have in-depth information */
+struct wmi_vdev_spectral_conf_cmd {
+ __le32 vdev_id;
+
+ /* number of fft samples to send (0 for infinite) */
+ __le32 scan_count;
+ __le32 scan_period;
+ __le32 scan_priority;
+
+ /* number of bins in the FFT: 2^(fft_size - bin_scale) */
+ __le32 scan_fft_size;
+ __le32 scan_gc_ena;
+ __le32 scan_restart_ena;
+ __le32 scan_noise_floor_ref;
+ __le32 scan_init_delay;
+ __le32 scan_nb_tone_thr;
+ __le32 scan_str_bin_thr;
+ __le32 scan_wb_rpt_mode;
+ __le32 scan_rssi_rpt_mode;
+ __le32 scan_rssi_thr;
+ __le32 scan_pwr_format;
+
+ /* rpt_mode: Format of FFT report to software for spectral scan
+ * triggered FFTs:
+ * 0: No FFT report (only spectral scan summary report)
+ * 1: 2-dword summary of metrics for each completed FFT + spectral
+ * scan summary report
+ * 2: 2-dword summary of metrics for each completed FFT +
+ * 1x- oversampled bins (in-band) per FFT + spectral scan summary
+ * report
+ * 3: 2-dword summary of metrics for each completed FFT +
+ * 2x- oversampled bins (all) per FFT + spectral scan summary
+ * report
+ */
+ __le32 scan_rpt_mode;
+ __le32 scan_bin_scale;
+ __le32 scan_dbm_adj;
+ __le32 scan_chn_mask;
+} __packed;
+
+struct wmi_vdev_spectral_conf_arg {
+ u32 vdev_id;
+ u32 scan_count;
+ u32 scan_period;
+ u32 scan_priority;
+ u32 scan_fft_size;
+ u32 scan_gc_ena;
+ u32 scan_restart_ena;
+ u32 scan_noise_floor_ref;
+ u32 scan_init_delay;
+ u32 scan_nb_tone_thr;
+ u32 scan_str_bin_thr;
+ u32 scan_wb_rpt_mode;
+ u32 scan_rssi_rpt_mode;
+ u32 scan_rssi_thr;
+ u32 scan_pwr_format;
+ u32 scan_rpt_mode;
+ u32 scan_bin_scale;
+ u32 scan_dbm_adj;
+ u32 scan_chn_mask;
+};
+
+#define WMI_SPECTRAL_ENABLE_DEFAULT 0
+#define WMI_SPECTRAL_COUNT_DEFAULT 0
+#define WMI_SPECTRAL_PERIOD_DEFAULT 35
+#define WMI_SPECTRAL_PRIORITY_DEFAULT 1
+#define WMI_SPECTRAL_FFT_SIZE_DEFAULT 7
+#define WMI_SPECTRAL_GC_ENA_DEFAULT 1
+#define WMI_SPECTRAL_RESTART_ENA_DEFAULT 0
+#define WMI_SPECTRAL_NOISE_FLOOR_REF_DEFAULT -96
+#define WMI_SPECTRAL_INIT_DELAY_DEFAULT 80
+#define WMI_SPECTRAL_NB_TONE_THR_DEFAULT 12
+#define WMI_SPECTRAL_STR_BIN_THR_DEFAULT 8
+#define WMI_SPECTRAL_WB_RPT_MODE_DEFAULT 0
+#define WMI_SPECTRAL_RSSI_RPT_MODE_DEFAULT 0
+#define WMI_SPECTRAL_RSSI_THR_DEFAULT 0xf0
+#define WMI_SPECTRAL_PWR_FORMAT_DEFAULT 0
+#define WMI_SPECTRAL_RPT_MODE_DEFAULT 2
+#define WMI_SPECTRAL_BIN_SCALE_DEFAULT 1
+#define WMI_SPECTRAL_DBM_ADJ_DEFAULT 1
+#define WMI_SPECTRAL_CHN_MASK_DEFAULT 1
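+
+/*
+ * Minimal sketch of seeding a spectral scan configuration from the
+ * defaults above; only a few representative fields are shown and the
+ * vdev_id variable is an illustrative placeholder.
+ *
+ *   struct wmi_vdev_spectral_conf_arg arg = {
+ *           .vdev_id       = vdev_id,
+ *           .scan_count    = WMI_SPECTRAL_COUNT_DEFAULT,
+ *           .scan_period   = WMI_SPECTRAL_PERIOD_DEFAULT,
+ *           .scan_priority = WMI_SPECTRAL_PRIORITY_DEFAULT,
+ *           .scan_fft_size = WMI_SPECTRAL_FFT_SIZE_DEFAULT,
+ *           .scan_rpt_mode = WMI_SPECTRAL_RPT_MODE_DEFAULT,
+ *   };
+ */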
+
+struct wmi_vdev_spectral_enable_cmd {
+ __le32 vdev_id;
+ __le32 trigger_cmd;
+ __le32 enable_cmd;
+} __packed;
+
+#define WMI_SPECTRAL_TRIGGER_CMD_TRIGGER 1
+#define WMI_SPECTRAL_TRIGGER_CMD_CLEAR 2
+#define WMI_SPECTRAL_ENABLE_CMD_ENABLE 1
+#define WMI_SPECTRAL_ENABLE_CMD_DISABLE 2
+
+/* Beacon processing related command and event structures */
+struct wmi_bcn_tx_hdr {
+ __le32 vdev_id;
+ __le32 tx_rate;
+ __le32 tx_power;
+ __le32 bcn_len;
+} __packed;
+
+struct wmi_bcn_tx_cmd {
+ struct wmi_bcn_tx_hdr hdr;
+ u8 *bcn[];
+} __packed;
+
+struct wmi_bcn_tx_arg {
+ u32 vdev_id;
+ u32 tx_rate;
+ u32 tx_power;
+ u32 bcn_len;
+ const void *bcn;
+};
+
+enum wmi_bcn_tx_ref_flags {
+ WMI_BCN_TX_REF_FLAG_DTIM_ZERO = 0x1,
+ WMI_BCN_TX_REF_FLAG_DELIVER_CAB = 0x2,
+};
+
+/* TODO: It is unclear why "no antenna" works while any other seemingly valid
+ * chainmask yields no beacons on the air at all.
+ */
+#define WMI_BCN_TX_REF_DEF_ANTENNA 0
+
+struct wmi_bcn_tx_ref_cmd {
+ __le32 vdev_id;
+ __le32 data_len;
+ /* physical address of the frame - dma pointer */
+ __le32 data_ptr;
+ /* id for host to track */
+ __le32 msdu_id;
+ /* frame ctrl to setup PPDU desc */
+ __le32 frame_control;
+ /* to control CABQ traffic: WMI_BCN_TX_REF_FLAG_ */
+ __le32 flags;
+ /* introduced in 10.2 */
+ __le32 antenna_mask;
+} __packed;
+
+/* Beacon filter */
+#define WMI_BCN_FILTER_ALL 0 /* Filter all beacons */
+#define WMI_BCN_FILTER_NONE 1 /* Pass all beacons */
+#define WMI_BCN_FILTER_RSSI 2 /* Pass Beacons RSSI >= RSSI threshold */
+#define WMI_BCN_FILTER_BSSID 3 /* Pass Beacons with matching BSSID */
+#define WMI_BCN_FILTER_SSID 4 /* Pass Beacons with matching SSID */
+
+struct wmi_bcn_filter_rx_cmd {
+ /* Filter ID */
+ __le32 bcn_filter_id;
+ /* Filter type - wmi_bcn_filter */
+ __le32 bcn_filter;
+ /* Buffer len */
+ __le32 bcn_filter_len;
+ /* Filter info (threshold, BSSID, RSSI) */
+ u8 *bcn_filter_buf;
+} __packed;
+
+/* Capabilities and IEs to be passed to firmware */
+struct wmi_bcn_prb_info {
+ /* Capabilities */
+ __le32 caps;
+ /* ERP info */
+ __le32 erp;
+ /* Advanced capabilities */
+ /* HT capabilities */
+ /* HT Info */
+ /* ibss_dfs */
+ /* wpa Info */
+ /* rsn Info */
+ /* rrm info */
+ /* ath_ext */
+ /* app IE */
+} __packed;
+
+enum wmi_sta_ps_mode {
+ /* disable power save for the given STA VDEV */
+ WMI_STA_PS_MODE_DISABLED = 0,
+ /* enable power save for a given STA VDEV */
+ WMI_STA_PS_MODE_ENABLED = 1,
+};
+
+struct wmi_sta_powersave_mode_cmd {
+ /* unique id identifying the VDEV, generated by the caller */
+ __le32 vdev_id;
+
+ /*
+ * Power save mode
+ * (see enum wmi_sta_ps_mode)
+ */
+ __le32 sta_ps_mode;
+} __packed;
+
+enum wmi_csa_offload_en {
+ WMI_CSA_OFFLOAD_DISABLE = 0,
+ WMI_CSA_OFFLOAD_ENABLE = 1,
+};
+
+struct wmi_csa_offload_enable_cmd {
+ __le32 vdev_id;
+ __le32 csa_offload_enable;
+} __packed;
+
+struct wmi_csa_offload_chanswitch_cmd {
+ __le32 vdev_id;
+ struct wmi_channel chan;
+} __packed;
+
+/*
+ * This parameter controls the policy for retrieving frames from the AP
+ * while the STA is in sleep state.
+ *
+ * Only takes effect if sta_ps_mode is enabled.
+ */
+enum wmi_sta_ps_param_rx_wake_policy {
+ /*
+ * Wake up whenever there is RX activity on the VDEV. In this mode
+ * the power save SM (state machine) will come out of sleep by
+ * sending either a null frame or a data frame (with PS==0) in
+ * response to the TIM bit set in a beacon received from the AP.
+ */
+ WMI_STA_PS_RX_WAKE_POLICY_WAKE = 0,
+
+ /*
+ * Here the power save state machine will not wake up in response to
+ * the TIM bit; instead it will send a PS-Poll or a UAPSD trigger
+ * based on the UAPSD configuration set up by the WMISET_PS_SET_UAPSD
+ * WMI command. When all access categories are delivery-enabled, the
+ * station will send a UAPSD trigger frame, otherwise it will send a
+ * PS-Poll.
+ */
+ WMI_STA_PS_RX_WAKE_POLICY_POLL_UAPSD = 1,
+};
+
+/*
+ * Number of tx frames/beacon that cause the power save SM to wake up.
+ *
+ * Value 1 causes the SM to wake up for every TX. Value 0 has a special
+ * meaning: it causes the SM to never wake up. This is useful if you want
+ * to keep the system asleep all the time for some kind of test mode. The
+ * host can change this parameter at any time; it takes effect at the next
+ * tx frame.
+ */
+enum wmi_sta_ps_param_tx_wake_threshold {
+ WMI_STA_PS_TX_WAKE_THRESHOLD_NEVER = 0,
+ WMI_STA_PS_TX_WAKE_THRESHOLD_ALWAYS = 1,
+
+ /*
+ * Values greater than one indicate the number of TX attempts per
+ * beacon interval before the STA wakes up.
+ */
+};
+
+/*
+ * The maximum number of PS-Poll frames the FW will send in response to
+ * traffic advertised in TIM before waking up (by sending a null frame with PS
+ * = 0). Value 0 has a special meaning: there is no maximum count and the FW
+ * will send as many PS-Polls as are necessary to retrieve the buffered BUs. This
+ * parameter is used when the RX wake policy is
+ * WMI_STA_PS_RX_WAKE_POLICY_POLL_UAPSD and ignored when the RX wake
+ * policy is WMI_STA_PS_RX_WAKE_POLICY_WAKE.
+ */
+enum wmi_sta_ps_param_pspoll_count {
+ WMI_STA_PS_PSPOLL_COUNT_NO_MAX = 0,
+ /*
+ * Values greater than 0 indicate the maximum number of PS-Poll frames
+ * FW will send before waking up.
+ */
+
+ /* When u-APSD is enabled the firmware will be very reluctant to exit
+ * STA PS. This could result in very poor Rx performance with STA doing
+ * PS-Poll for each and every buffered frame. This value is a bit
+ * arbitrary.
+ */
+ WMI_STA_PS_PSPOLL_COUNT_UAPSD = 3,
+};
+
+/*
+ * This will include the delivery and trigger enabled state for every AC.
+ * This is the state negotiated with the AP. The host MLME needs to set
+ * this based on the AP capability and the state set in the association
+ * request by the station MLME. The lower 8 bits of the value specify the
+ * UAPSD configuration.
+ */
+#define WMI_UAPSD_AC_TYPE_DELI 0
+#define WMI_UAPSD_AC_TYPE_TRIG 1
+
+#define WMI_UAPSD_AC_BIT_MASK(ac, type) \
+ (type == WMI_UAPSD_AC_TYPE_DELI ? 1 << (ac << 1) : 1 << ((ac << 1) + 1))
+
+enum wmi_sta_ps_param_uapsd {
+ WMI_STA_PS_UAPSD_AC0_DELIVERY_EN = (1 << 0),
+ WMI_STA_PS_UAPSD_AC0_TRIGGER_EN = (1 << 1),
+ WMI_STA_PS_UAPSD_AC1_DELIVERY_EN = (1 << 2),
+ WMI_STA_PS_UAPSD_AC1_TRIGGER_EN = (1 << 3),
+ WMI_STA_PS_UAPSD_AC2_DELIVERY_EN = (1 << 4),
+ WMI_STA_PS_UAPSD_AC2_TRIGGER_EN = (1 << 5),
+ WMI_STA_PS_UAPSD_AC3_DELIVERY_EN = (1 << 6),
+ WMI_STA_PS_UAPSD_AC3_TRIGGER_EN = (1 << 7),
+};
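+
+/*
+ * The WMI_UAPSD_AC_BIT_MASK() macro above maps (ac, type) onto the per-AC
+ * bits of the enum above: delivery bits land on even positions, trigger
+ * bits on odd ones. For example:
+ *
+ *   WMI_UAPSD_AC_BIT_MASK(1, WMI_UAPSD_AC_TYPE_DELI);
+ *           // == 1 << 2 == WMI_STA_PS_UAPSD_AC1_DELIVERY_EN
+ *   WMI_UAPSD_AC_BIT_MASK(1, WMI_UAPSD_AC_TYPE_TRIG);
+ *           // == 1 << 3 == WMI_STA_PS_UAPSD_AC1_TRIGGER_EN
+ */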
+
+#define WMI_STA_UAPSD_MAX_INTERVAL_MSEC UINT_MAX
+
+struct wmi_sta_uapsd_auto_trig_param {
+ __le32 wmm_ac;
+ __le32 user_priority;
+ __le32 service_interval;
+ __le32 suspend_interval;
+ __le32 delay_interval;
+};
+
+struct wmi_sta_uapsd_auto_trig_cmd_fixed_param {
+ __le32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ __le32 num_ac;
+};
+
+struct wmi_sta_uapsd_auto_trig_arg {
+ u32 wmm_ac;
+ u32 user_priority;
+ u32 service_interval;
+ u32 suspend_interval;
+ u32 delay_interval;
+};
+
+enum wmi_sta_powersave_param {
+ /*
+ * Controls how frames are retrieved from the AP while the STA is sleeping
+ *
+ * (see enum wmi_sta_ps_param_rx_wake_policy)
+ */
+ WMI_STA_PS_PARAM_RX_WAKE_POLICY = 0,
+
+ /*
+ * The STA will go active after this many TX frames.
+ *
+ * (see enum wmi_sta_ps_param_tx_wake_threshold)
+ */
+ WMI_STA_PS_PARAM_TX_WAKE_THRESHOLD = 1,
+
+ /*
+ * Number of PS-Polls to send before the STA wakes up
+ *
+ * (see enum wmi_sta_ps_param_pspoll_count)
+ */
+ WMI_STA_PS_PARAM_PSPOLL_COUNT = 2,
+
+ /*
+ * TX/RX inactivity time in msec before going to sleep.
+ *
+ * The power save SM will monitor tx/rx activity on the VDEV; if there
+ * is no activity for the specified number of msec, the power save SM
+ * will go to sleep.
+ */
+ WMI_STA_PS_PARAM_INACTIVITY_TIME = 3,
+
+ /*
+ * Set uapsd configuration.
+ *
+ * (see enum wmi_sta_ps_param_uapsd)
+ */
+ WMI_STA_PS_PARAM_UAPSD = 4,
+};
+
+struct wmi_sta_powersave_param_cmd {
+ __le32 vdev_id;
+ __le32 param_id; /* %WMI_STA_PS_PARAM_ */
+ __le32 param_value;
+} __packed;
+
+/* No MIMO power save */
+#define WMI_STA_MIMO_PS_MODE_DISABLE
+/* MIMO powersave mode static */
+#define WMI_STA_MIMO_PS_MODE_STATIC
+/* MIMO powersave mode dynamic */
+#define WMI_STA_MIMO_PS_MODE_DYNAMIC
+
+struct wmi_sta_mimo_ps_mode_cmd {
+ /* unique id identifying the VDEV, generated by the caller */
+ __le32 vdev_id;
+ /* mimo powersave mode as defined above */
+ __le32 mimo_pwrsave_mode;
+} __packed;
+
+/* U-APSD configuration of peer station from (re)assoc request and TSPECs */
+enum wmi_ap_ps_param_uapsd {
+ WMI_AP_PS_UAPSD_AC0_DELIVERY_EN = (1 << 0),
+ WMI_AP_PS_UAPSD_AC0_TRIGGER_EN = (1 << 1),
+ WMI_AP_PS_UAPSD_AC1_DELIVERY_EN = (1 << 2),
+ WMI_AP_PS_UAPSD_AC1_TRIGGER_EN = (1 << 3),
+ WMI_AP_PS_UAPSD_AC2_DELIVERY_EN = (1 << 4),
+ WMI_AP_PS_UAPSD_AC2_TRIGGER_EN = (1 << 5),
+ WMI_AP_PS_UAPSD_AC3_DELIVERY_EN = (1 << 6),
+ WMI_AP_PS_UAPSD_AC3_TRIGGER_EN = (1 << 7),
+};
+
+/* U-APSD maximum service period of peer station */
+enum wmi_ap_ps_peer_param_max_sp {
+ WMI_AP_PS_PEER_PARAM_MAX_SP_UNLIMITED = 0,
+ WMI_AP_PS_PEER_PARAM_MAX_SP_2 = 1,
+ WMI_AP_PS_PEER_PARAM_MAX_SP_4 = 2,
+ WMI_AP_PS_PEER_PARAM_MAX_SP_6 = 3,
+ MAX_WMI_AP_PS_PEER_PARAM_MAX_SP,
+};
+
+/*
+ * AP power save parameter
+ * Set a power save specific parameter for a peer station
+ */
+enum wmi_ap_ps_peer_param {
+ /* Set uapsd configuration for a given peer.
+ *
+ * Include the delivery and trigger enabled state for every AC.
+ * The host MLME needs to set this based on the AP capability and the
+ * station's request set in the association request received from the
+ * station.
+ *
+ * Lower 8 bits of the value specify the UAPSD configuration.
+ *
+ * (see enum wmi_ap_ps_param_uapsd)
+ * The default value is 0.
+ */
+ WMI_AP_PS_PEER_PARAM_UAPSD = 0,
+
+ /*
+ * Set the service period for a UAPSD capable station
+ *
+ * The service period comes from the WME IE in the (re)assoc request frame.
+ *
+ * (see enum wmi_ap_ps_peer_param_max_sp)
+ */
+ WMI_AP_PS_PEER_PARAM_MAX_SP = 1,
+
+ /* Time in seconds for aging out buffered frames for STA in PS */
+ WMI_AP_PS_PEER_PARAM_AGEOUT_TIME = 2,
+};
+
+struct wmi_ap_ps_peer_cmd {
+ /* unique id identifying the VDEV, generated by the caller */
+ __le32 vdev_id;
+
+ /* peer MAC address */
+ struct wmi_mac_addr peer_macaddr;
+
+ /* AP powersave param (see enum wmi_ap_ps_peer_param) */
+ __le32 param_id;
+
+ /* AP powersave param value */
+ __le32 param_value;
+} __packed;
+
+/* 128 clients = 4 words */
+#define WMI_TIM_BITMAP_ARRAY_SIZE 4
+
+struct wmi_tim_info {
+ __le32 tim_len;
+ __le32 tim_mcast;
+ __le32 tim_bitmap[WMI_TIM_BITMAP_ARRAY_SIZE];
+ __le32 tim_changed;
+ __le32 tim_num_ps_pending;
+} __packed;
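+
+/*
+ * Each word of tim_bitmap covers 32 association IDs, so with
+ * WMI_TIM_BITMAP_ARRAY_SIZE == 4 up to 128 clients are tracked. A sketch
+ * of testing whether traffic is buffered for a given AID, assuming the
+ * conventional word/bit split:
+ *
+ *   bool set = __le32_to_cpu(tim->tim_bitmap[aid / 32]) & BIT(aid % 32);
+ */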
+
+struct wmi_tim_info_arg {
+ __le32 tim_len;
+ __le32 tim_mcast;
+ const __le32 *tim_bitmap;
+ __le32 tim_changed;
+ __le32 tim_num_ps_pending;
+} __packed;
+
+/* Maximum number of NOA Descriptors supported */
+#define WMI_P2P_MAX_NOA_DESCRIPTORS 4
+#define WMI_P2P_OPPPS_ENABLE_BIT BIT(0)
+#define WMI_P2P_OPPPS_CTWINDOW_OFFSET 1
+#define WMI_P2P_NOA_CHANGED_BIT BIT(0)
+
+struct wmi_p2p_noa_info {
+ /* Bit 0 - Flag to indicate an update in NOA schedule
+ * Bits 7-1 - Reserved
+ */
+ u8 changed;
+ /* NOA index */
+ u8 index;
+ /* Bit 0 - Opp PS state of the AP
+ * Bits 1-7 - Ctwindow in TUs
+ */
+ u8 ctwindow_oppps;
+ /* Number of NOA descriptors */
+ u8 num_descriptors;
+
+ struct wmi_p2p_noa_descriptor descriptors[WMI_P2P_MAX_NOA_DESCRIPTORS];
+} __packed;
+
+struct wmi_bcn_info {
+ struct wmi_tim_info tim_info;
+ struct wmi_p2p_noa_info p2p_noa_info;
+} __packed;
+
+struct wmi_host_swba_event {
+ __le32 vdev_map;
+ struct wmi_bcn_info bcn_info[];
+} __packed;
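+
+/*
+ * vdev_map is a bitmap of beaconing vdevs, with bcn_info[] carrying one
+ * entry per set bit. A sketch of walking it (the variable names and the
+ * per-bit consumption order are assumptions for illustration):
+ *
+ *   u32 map = __le32_to_cpu(ev->vdev_map);
+ *   int i;
+ *
+ *   for (i = 0; map; map >>= 1, i++) {
+ *           if (!(map & BIT(0)))
+ *                   continue;
+ *           // consume the next bcn_info element for vdev id i
+ *   }
+ */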
+
+struct wmi_10_2_4_bcn_info {
+ struct wmi_tim_info tim_info;
+ /* The 10.2.4 FW doesn't have p2p NOA info */
+} __packed;
+
+struct wmi_10_2_4_host_swba_event {
+ __le32 vdev_map;
+ struct wmi_10_2_4_bcn_info bcn_info[];
+} __packed;
+
+/* 16 words = 512 clients + 1 word for guard */
+#define WMI_10_4_TIM_BITMAP_ARRAY_SIZE 17
+
+struct wmi_10_4_tim_info {
+ __le32 tim_len;
+ __le32 tim_mcast;
+ __le32 tim_bitmap[WMI_10_4_TIM_BITMAP_ARRAY_SIZE];
+ __le32 tim_changed;
+ __le32 tim_num_ps_pending;
+} __packed;
+
+#define WMI_10_4_P2P_MAX_NOA_DESCRIPTORS 1
+
+struct wmi_10_4_p2p_noa_info {
+ /* Bit 0 - Flag to indicate an update in NOA schedule
+ * Bits 7-1 - Reserved
+ */
+ u8 changed;
+ /* NOA index */
+ u8 index;
+ /* Bit 0 - Opp PS state of the AP
+ * Bits 1-7 - Ctwindow in TUs
+ */
+ u8 ctwindow_oppps;
+ /* Number of NOA descriptors */
+ u8 num_descriptors;
+
+ struct wmi_p2p_noa_descriptor
+ noa_descriptors[WMI_10_4_P2P_MAX_NOA_DESCRIPTORS];
+} __packed;
+
+struct wmi_10_4_bcn_info {
+ struct wmi_10_4_tim_info tim_info;
+ struct wmi_10_4_p2p_noa_info p2p_noa_info;
+} __packed;
+
+struct wmi_10_4_host_swba_event {
+ __le32 vdev_map;
+ struct wmi_10_4_bcn_info bcn_info[];
+} __packed;
+
+#define WMI_MAX_AP_VDEV 16
+
+struct wmi_tbtt_offset_event {
+ __le32 vdev_map;
+ __le32 tbttoffset_list[WMI_MAX_AP_VDEV];
+} __packed;
+
+struct wmi_peer_create_cmd {
+ __le32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ __le32 peer_type;
+} __packed;
+
+enum wmi_peer_type {
+ WMI_PEER_TYPE_DEFAULT = 0,
+ WMI_PEER_TYPE_BSS = 1,
+ WMI_PEER_TYPE_TDLS = 2,
+};
+
+struct wmi_peer_delete_cmd {
+ __le32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+} __packed;
+
+struct wmi_peer_flush_tids_cmd {
+ __le32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ __le32 peer_tid_bitmap;
+} __packed;
+
+struct wmi_fixed_rate {
+ /*
+ * rate mode. 0: disable fixed rate (auto rate)
+ * 1: legacy (non 11n) rate specified as ieee rate 2*Mbps
+ * 2: ht20 11n rate specified as mcs index
+ * 3: ht40 11n rate specified as mcs index
+ */
+ __le32 rate_mode;
+ /*
+ * 4 rate values for 4 rate series. series 0 is stored in byte 0 (LSB)
+ * and series 3 is stored at byte 3 (MSB)
+ */
+ __le32 rate_series;
+ /*
+ * 4 retry counts for 4 rate series. retry count for rate 0 is stored
+ * in byte 0 (LSB) and retry count for rate 3 is stored at byte 3
+ * (MSB)
+ */
+ __le32 rate_retries;
+} __packed;
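+
+/*
+ * Sketch of the per-series byte packing described above (the s0..s3 and
+ * r0..r3 values are illustrative placeholders): series n goes into byte n
+ * of the respective word, LSB first.
+ *
+ *   rate_series  = s0 | (s1 << 8) | (s2 << 16) | (s3 << 24);
+ *   rate_retries = r0 | (r1 << 8) | (r2 << 16) | (r3 << 24);
+ */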
+
+struct wmi_peer_fixed_rate_cmd {
+ /* unique id identifying the VDEV, generated by the caller */
+ __le32 vdev_id;
+ /* peer MAC address */
+ struct wmi_mac_addr peer_macaddr;
+ /* fixed rate */
+ struct wmi_fixed_rate peer_fixed_rate;
+} __packed;
+
+#define WMI_MGMT_TID 17
+
+struct wmi_addba_clear_resp_cmd {
+ /* unique id identifying the VDEV, generated by the caller */
+ __le32 vdev_id;
+ /* peer MAC address */
+ struct wmi_mac_addr peer_macaddr;
+} __packed;
+
+struct wmi_addba_send_cmd {
+ /* unique id identifying the VDEV, generated by the caller */
+ __le32 vdev_id;
+ /* peer MAC address */
+ struct wmi_mac_addr peer_macaddr;
+ /* Tid number */
+ __le32 tid;
+ /* Buffer/Window size */
+ __le32 buffersize;
+} __packed;
+
+struct wmi_delba_send_cmd {
+ /* unique id identifying the VDEV, generated by the caller */
+ __le32 vdev_id;
+ /* peer MAC address */
+ struct wmi_mac_addr peer_macaddr;
+ /* Tid number */
+ __le32 tid;
+ /* Is Initiator */
+ __le32 initiator;
+ /* Reason code */
+ __le32 reasoncode;
+} __packed;
+
+struct wmi_addba_setresponse_cmd {
+ /* unique id identifying the vdev, generated by the caller */
+ __le32 vdev_id;
+ /* peer mac address */
+ struct wmi_mac_addr peer_macaddr;
+ /* Tid number */
+ __le32 tid;
+ /* status code */
+ __le32 statuscode;
+} __packed;
+
+struct wmi_send_singleamsdu_cmd {
+ /* unique id identifying the vdev, generated by the caller */
+ __le32 vdev_id;
+ /* peer mac address */
+ struct wmi_mac_addr peer_macaddr;
+ /* Tid number */
+ __le32 tid;
+} __packed;
+
+enum wmi_peer_smps_state {
+ WMI_PEER_SMPS_PS_NONE = 0x0,
+ WMI_PEER_SMPS_STATIC = 0x1,
+ WMI_PEER_SMPS_DYNAMIC = 0x2
+};
+
+enum wmi_peer_chwidth {
+ WMI_PEER_CHWIDTH_20MHZ = 0,
+ WMI_PEER_CHWIDTH_40MHZ = 1,
+ WMI_PEER_CHWIDTH_80MHZ = 2,
+ WMI_PEER_CHWIDTH_160MHZ = 3,
+};
+
+enum wmi_peer_param {
+ WMI_PEER_SMPS_STATE = 0x1, /* see %wmi_peer_smps_state */
+ WMI_PEER_AMPDU = 0x2,
+ WMI_PEER_AUTHORIZE = 0x3,
+ WMI_PEER_CHAN_WIDTH = 0x4,
+ WMI_PEER_NSS = 0x5,
+ WMI_PEER_USE_4ADDR = 0x6,
+ WMI_PEER_USE_FIXED_PWR = 0x8,
+ WMI_PEER_PARAM_FIXED_RATE = 0x9,
+ WMI_PEER_DEBUG = 0xa,
+ WMI_PEER_PHYMODE = 0xd,
+ WMI_PEER_DUMMY_VAR = 0xff, /* dummy parameter for STA PS workaround */
+};
+
+struct wmi_peer_set_param_cmd {
+ __le32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ __le32 param_id;
+ __le32 param_value;
+} __packed;
+
+#define MAX_SUPPORTED_RATES 128
+
+struct wmi_rate_set {
+ /* total number of rates */
+ __le32 num_rates;
+ /*
+ * Rates (each an 8-bit value) packed into 32 bit words.
+ * The rates are filled from least significant byte to most
+ * significant byte.
+ */
+ __le32 rates[(MAX_SUPPORTED_RATES / 4) + 1];
+} __packed;
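+
+/*
+ * Sketch of the byte packing described above: rate i is stored in word
+ * i / 4 at byte offset i % 4, LSB first. For example, writing a rate
+ * value into a host-order copy of the array (illustrative variables):
+ *
+ *   rates[i / 4] |= rate_value << (8 * (i % 4));
+ */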
+
+struct wmi_rate_set_arg {
+ unsigned int num_rates;
+ u8 rates[MAX_SUPPORTED_RATES];
+};
+
+/*
+ * NOTE: It would be a good idea to represent the Tx MCS
+ * info in one word and Rx in another word. This is split
+ * into multiple words for convenience.
+ */
+struct wmi_vht_rate_set {
+ __le32 rx_max_rate; /* Max Rx data rate */
+ __le32 rx_mcs_set; /* Negotiated RX VHT rates */
+ __le32 tx_max_rate; /* Max Tx data rate */
+ __le32 tx_mcs_set; /* Negotiated TX VHT rates */
+} __packed;
+
+struct wmi_vht_rate_set_arg {
+ u32 rx_max_rate;
+ u32 rx_mcs_set;
+ u32 tx_max_rate;
+ u32 tx_mcs_set;
+};
+
+struct wmi_peer_set_rates_cmd {
+ /* peer MAC address */
+ struct wmi_mac_addr peer_macaddr;
+ /* legacy rate set */
+ struct wmi_rate_set peer_legacy_rates;
+ /* ht rate set */
+ struct wmi_rate_set peer_ht_rates;
+} __packed;
+
+struct wmi_peer_set_q_empty_callback_cmd {
+ /* unique id identifying the VDEV, generated by the caller */
+ __le32 vdev_id;
+ /* peer MAC address */
+ struct wmi_mac_addr peer_macaddr;
+ __le32 callback_enable;
+} __packed;
+
+struct wmi_peer_flags_map {
+ u32 auth;
+ u32 qos;
+ u32 need_ptk_4_way;
+ u32 need_gtk_2_way;
+ u32 apsd;
+ u32 ht;
+ u32 bw40;
+ u32 stbc;
+ u32 ldbc;
+ u32 dyn_mimops;
+ u32 static_mimops;
+ u32 spatial_mux;
+ u32 vht;
+ u32 bw80;
+ u32 vht_2g;
+ u32 pmf;
+ u32 bw160;
+};
+
+enum wmi_peer_flags {
+ WMI_PEER_AUTH = 0x00000001,
+ WMI_PEER_QOS = 0x00000002,
+ WMI_PEER_NEED_PTK_4_WAY = 0x00000004,
+ WMI_PEER_NEED_GTK_2_WAY = 0x00000010,
+ WMI_PEER_APSD = 0x00000800,
+ WMI_PEER_HT = 0x00001000,
+ WMI_PEER_40MHZ = 0x00002000,
+ WMI_PEER_STBC = 0x00008000,
+ WMI_PEER_LDPC = 0x00010000,
+ WMI_PEER_DYN_MIMOPS = 0x00020000,
+ WMI_PEER_STATIC_MIMOPS = 0x00040000,
+ WMI_PEER_SPATIAL_MUX = 0x00200000,
+ WMI_PEER_VHT = 0x02000000,
+ WMI_PEER_80MHZ = 0x04000000,
+ WMI_PEER_VHT_2G = 0x08000000,
+ WMI_PEER_PMF = 0x10000000,
+ WMI_PEER_160MHZ = 0x20000000
+};
+
+enum wmi_10x_peer_flags {
+ WMI_10X_PEER_AUTH = 0x00000001,
+ WMI_10X_PEER_QOS = 0x00000002,
+ WMI_10X_PEER_NEED_PTK_4_WAY = 0x00000004,
+ WMI_10X_PEER_NEED_GTK_2_WAY = 0x00000010,
+ WMI_10X_PEER_APSD = 0x00000800,
+ WMI_10X_PEER_HT = 0x00001000,
+ WMI_10X_PEER_40MHZ = 0x00002000,
+ WMI_10X_PEER_STBC = 0x00008000,
+ WMI_10X_PEER_LDPC = 0x00010000,
+ WMI_10X_PEER_DYN_MIMOPS = 0x00020000,
+ WMI_10X_PEER_STATIC_MIMOPS = 0x00040000,
+ WMI_10X_PEER_SPATIAL_MUX = 0x00200000,
+ WMI_10X_PEER_VHT = 0x02000000,
+ WMI_10X_PEER_80MHZ = 0x04000000,
+ WMI_10X_PEER_160MHZ = 0x20000000
+};
+
+enum wmi_10_2_peer_flags {
+ WMI_10_2_PEER_AUTH = 0x00000001,
+ WMI_10_2_PEER_QOS = 0x00000002,
+ WMI_10_2_PEER_NEED_PTK_4_WAY = 0x00000004,
+ WMI_10_2_PEER_NEED_GTK_2_WAY = 0x00000010,
+ WMI_10_2_PEER_APSD = 0x00000800,
+ WMI_10_2_PEER_HT = 0x00001000,
+ WMI_10_2_PEER_40MHZ = 0x00002000,
+ WMI_10_2_PEER_STBC = 0x00008000,
+ WMI_10_2_PEER_LDPC = 0x00010000,
+ WMI_10_2_PEER_DYN_MIMOPS = 0x00020000,
+ WMI_10_2_PEER_STATIC_MIMOPS = 0x00040000,
+ WMI_10_2_PEER_SPATIAL_MUX = 0x00200000,
+ WMI_10_2_PEER_VHT = 0x02000000,
+ WMI_10_2_PEER_80MHZ = 0x04000000,
+ WMI_10_2_PEER_VHT_2G = 0x08000000,
+ WMI_10_2_PEER_PMF = 0x10000000,
+ WMI_10_2_PEER_160MHZ = 0x20000000
+};
+
+/*
+ * Peer rate capabilities.
+ *
+ * This is of interest to the ratecontrol
+ * module which resides in the firmware. The bit definitions are
+ * consistent with those defined in if_athrate.c.
+ */
+#define WMI_RC_DS_FLAG 0x01
+#define WMI_RC_CW40_FLAG 0x02
+#define WMI_RC_SGI_FLAG 0x04
+#define WMI_RC_HT_FLAG 0x08
+#define WMI_RC_RTSCTS_FLAG 0x10
+#define WMI_RC_TX_STBC_FLAG 0x20
+#define WMI_RC_RX_STBC_FLAG 0xC0
+#define WMI_RC_RX_STBC_FLAG_S 6
+#define WMI_RC_WEP_TKIP_FLAG 0x100
+#define WMI_RC_TS_FLAG 0x200
+#define WMI_RC_UAPSD_FLAG 0x400
+
+/* Maximum listen interval supported by hw in units of beacon interval */
+#define ATH10K_MAX_HW_LISTEN_INTERVAL 5
+
+struct wmi_common_peer_assoc_complete_cmd {
+ struct wmi_mac_addr peer_macaddr;
+ __le32 vdev_id;
+ __le32 peer_new_assoc; /* 1=assoc, 0=reassoc */
+ __le32 peer_associd; /* 16 LSBs */
+ __le32 peer_flags;
+ __le32 peer_caps; /* 16 LSBs */
+ __le32 peer_listen_intval;
+ __le32 peer_ht_caps;
+ __le32 peer_max_mpdu;
+ __le32 peer_mpdu_density; /* 0..16 */
+ __le32 peer_rate_caps;
+ struct wmi_rate_set peer_legacy_rates;
+ struct wmi_rate_set peer_ht_rates;
+ __le32 peer_nss; /* num of spatial streams */
+ __le32 peer_vht_caps;
+ __le32 peer_phymode;
+ struct wmi_vht_rate_set peer_vht_rates;
+};
+
+struct wmi_main_peer_assoc_complete_cmd {
+ struct wmi_common_peer_assoc_complete_cmd cmd;
+
+ /* HT Operation Element of the peer. Five bytes packed into a
+ * 2-element INT32 array, filled from LSB to MSB.
+ */
+ __le32 peer_ht_info[2];
+} __packed;
+
+struct wmi_10_1_peer_assoc_complete_cmd {
+ struct wmi_common_peer_assoc_complete_cmd cmd;
+} __packed;
+
+#define WMI_PEER_ASSOC_INFO0_MAX_MCS_IDX_LSB 0
+#define WMI_PEER_ASSOC_INFO0_MAX_MCS_IDX_MASK 0x0f
+#define WMI_PEER_ASSOC_INFO0_MAX_NSS_LSB 4
+#define WMI_PEER_ASSOC_INFO0_MAX_NSS_MASK 0xf0
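+
+/*
+ * Sketch of packing info0 from the masks above (max_mcs and max_nss are
+ * illustrative host-side values):
+ *
+ *   u32 info0 = ((max_mcs << WMI_PEER_ASSOC_INFO0_MAX_MCS_IDX_LSB) &
+ *                WMI_PEER_ASSOC_INFO0_MAX_MCS_IDX_MASK) |
+ *               ((max_nss << WMI_PEER_ASSOC_INFO0_MAX_NSS_LSB) &
+ *                WMI_PEER_ASSOC_INFO0_MAX_NSS_MASK);
+ */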
+
+struct wmi_10_2_peer_assoc_complete_cmd {
+ struct wmi_common_peer_assoc_complete_cmd cmd;
+ __le32 info0; /* WMI_PEER_ASSOC_INFO0_ */
+} __packed;
+
+/* NSS Mapping to FW */
+#define WMI_PEER_NSS_MAP_ENABLE BIT(31)
+#define WMI_PEER_NSS_160MHZ_MASK GENMASK(2, 0)
+#define WMI_PEER_NSS_80_80MHZ_MASK GENMASK(5, 3)
+
+struct wmi_10_4_peer_assoc_complete_cmd {
+ struct wmi_10_2_peer_assoc_complete_cmd cmd;
+ __le32 peer_bw_rxnss_override;
+} __packed;
+
+struct wmi_peer_assoc_complete_arg {
+ u8 addr[ETH_ALEN];
+ u32 vdev_id;
+ bool peer_reassoc;
+ u16 peer_aid;
+ u32 peer_flags; /* see %WMI_PEER_ */
+ u16 peer_caps;
+ u32 peer_listen_intval;
+ u32 peer_ht_caps;
+ u32 peer_max_mpdu;
+ u32 peer_mpdu_density; /* 0..16 */
+ u32 peer_rate_caps; /* see %WMI_RC_ */
+ struct wmi_rate_set_arg peer_legacy_rates;
+ struct wmi_rate_set_arg peer_ht_rates;
+ u32 peer_num_spatial_streams;
+ u32 peer_vht_caps;
+ enum wmi_phy_mode peer_phymode;
+ struct wmi_vht_rate_set_arg peer_vht_rates;
+ u32 peer_bw_rxnss_override;
+};
+
+struct wmi_peer_add_wds_entry_cmd {
+ /* peer MAC address */
+ struct wmi_mac_addr peer_macaddr;
+ /* wds MAC addr */
+ struct wmi_mac_addr wds_macaddr;
+} __packed;
+
+struct wmi_peer_remove_wds_entry_cmd {
+ /* wds MAC addr */
+ struct wmi_mac_addr wds_macaddr;
+} __packed;
+
+struct wmi_peer_q_empty_callback_event {
+ /* peer MAC address */
+ struct wmi_mac_addr peer_macaddr;
+} __packed;
+
+/*
+ * Channel info WMI event
+ */
+struct wmi_chan_info_event {
+ __le32 err_code;
+ __le32 freq;
+ __le32 cmd_flags;
+ __le32 noise_floor;
+ __le32 rx_clear_count;
+ __le32 cycle_count;
+} __packed;
+
+struct wmi_10_4_chan_info_event {
+ __le32 err_code;
+ __le32 freq;
+ __le32 cmd_flags;
+ __le32 noise_floor;
+ __le32 rx_clear_count;
+ __le32 cycle_count;
+ __le32 chan_tx_pwr_range;
+ __le32 chan_tx_pwr_tp;
+ __le32 rx_frame_count;
+} __packed;
+
+struct wmi_peer_sta_kickout_event {
+ struct wmi_mac_addr peer_macaddr;
+} __packed;
+
+#define WMI_CHAN_INFO_FLAG_COMPLETE BIT(0)
+#define WMI_CHAN_INFO_FLAG_PRE_COMPLETE BIT(1)
+
+/* Beacon filter wmi command info */
+#define BCN_FLT_MAX_SUPPORTED_IES 256
+#define BCN_FLT_MAX_ELEMS_IE_LIST (BCN_FLT_MAX_SUPPORTED_IES / 32)
+
+struct bss_bcn_stats {
+ __le32 vdev_id;
+ __le32 bss_bcnsdropped;
+ __le32 bss_bcnsdelivered;
+} __packed;
+
+struct bcn_filter_stats {
+ __le32 bcns_dropped;
+ __le32 bcns_delivered;
+ __le32 activefilters;
+ struct bss_bcn_stats bss_stats;
+} __packed;
+
+struct wmi_add_bcn_filter_cmd {
+ u32 vdev_id;
+ u32 ie_map[BCN_FLT_MAX_ELEMS_IE_LIST];
+} __packed;
+
+enum wmi_sta_keepalive_method {
+ WMI_STA_KEEPALIVE_METHOD_NULL_FRAME = 1,
+ WMI_STA_KEEPALIVE_METHOD_UNSOLICITATED_ARP_RESPONSE = 2,
+};
+
+#define WMI_STA_KEEPALIVE_INTERVAL_DISABLE 0
+
+/* Firmware crashes if keepalive interval exceeds this limit */
+#define WMI_STA_KEEPALIVE_INTERVAL_MAX_SECONDS 0xffff
+
+/* note: ip4 addresses are in network byte order, i.e. big endian */
+struct wmi_sta_keepalive_arp_resp {
+ __be32 src_ip4_addr;
+ __be32 dest_ip4_addr;
+ struct wmi_mac_addr dest_mac_addr;
+} __packed;
+
+struct wmi_sta_keepalive_cmd {
+ __le32 vdev_id;
+ __le32 enabled;
+ __le32 method; /* WMI_STA_KEEPALIVE_METHOD_ */
+ __le32 interval; /* in seconds */
+ struct wmi_sta_keepalive_arp_resp arp_resp;
+} __packed;
+
+struct wmi_sta_keepalive_arg {
+ u32 vdev_id;
+ u32 enabled;
+ u32 method;
+ u32 interval;
+ __be32 src_ip4_addr;
+ __be32 dest_ip4_addr;
+ const u8 dest_mac_addr[ETH_ALEN];
+};
+
+enum wmi_force_fw_hang_type {
+ WMI_FORCE_FW_HANG_ASSERT = 1,
+ WMI_FORCE_FW_HANG_NO_DETECT,
+ WMI_FORCE_FW_HANG_CTRL_EP_FULL,
+ WMI_FORCE_FW_HANG_EMPTY_POINT,
+ WMI_FORCE_FW_HANG_STACK_OVERFLOW,
+ WMI_FORCE_FW_HANG_INFINITE_LOOP,
+};
+
+#define WMI_FORCE_FW_HANG_RANDOM_TIME 0xFFFFFFFF
+
+struct wmi_force_fw_hang_cmd {
+ __le32 type;
+ __le32 delay_ms;
+} __packed;
+
+enum wmi_pdev_reset_mode_type {
+ WMI_RST_MODE_TX_FLUSH = 1,
+ WMI_RST_MODE_WARM_RESET,
+ WMI_RST_MODE_COLD_RESET,
+ WMI_RST_MODE_WARM_RESET_RESTORE_CAL,
+ WMI_RST_MODE_COLD_RESET_RESTORE_CAL,
+ WMI_RST_MODE_MAX,
+};
+
+enum ath10k_dbglog_level {
+ ATH10K_DBGLOG_LEVEL_VERBOSE = 0,
+ ATH10K_DBGLOG_LEVEL_INFO = 1,
+ ATH10K_DBGLOG_LEVEL_WARN = 2,
+ ATH10K_DBGLOG_LEVEL_ERR = 3,
+};
+
+/* VAP ids to enable dbglog */
+#define ATH10K_DBGLOG_CFG_VAP_LOG_LSB 0
+#define ATH10K_DBGLOG_CFG_VAP_LOG_MASK 0x0000ffff
+
+/* to enable dbglog in the firmware */
+#define ATH10K_DBGLOG_CFG_REPORTING_ENABLE_LSB 16
+#define ATH10K_DBGLOG_CFG_REPORTING_ENABLE_MASK 0x00010000
+
+/* timestamp resolution */
+#define ATH10K_DBGLOG_CFG_RESOLUTION_LSB 17
+#define ATH10K_DBGLOG_CFG_RESOLUTION_MASK 0x000E0000
+
+/* number of queued messages before sending them to the host */
+#define ATH10K_DBGLOG_CFG_REPORT_SIZE_LSB 20
+#define ATH10K_DBGLOG_CFG_REPORT_SIZE_MASK 0x0ff00000
+
+/*
+ * Log levels to enable. This defines the minimum level to enable; it is
+ * not a bitmask. See enum ath10k_dbglog_level for the values.
+ */
+#define ATH10K_DBGLOG_CFG_LOG_LVL_LSB 28
+#define ATH10K_DBGLOG_CFG_LOG_LVL_MASK 0x70000000
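+
+/*
+ * Sketch of building a config_enable word from the ATH10K_DBGLOG_CFG_
+ * fields above, enabling reporting at level ATH10K_DBGLOG_LEVEL_WARN
+ * and up (illustrative only):
+ *
+ *   u32 cfg = ATH10K_DBGLOG_CFG_REPORTING_ENABLE_MASK |
+ *             ((ATH10K_DBGLOG_LEVEL_WARN << ATH10K_DBGLOG_CFG_LOG_LVL_LSB) &
+ *              ATH10K_DBGLOG_CFG_LOG_LVL_MASK);
+ */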
+
+/*
+ * Note: this is a cleaned up version of a struct the firmware uses. For
+ * example, config_valid was hidden inside an array.
+ */
+struct wmi_dbglog_cfg_cmd {
+ /* bitmask to hold mod id config*/
+ __le32 module_enable;
+
+ /* see ATH10K_DBGLOG_CFG_ */
+ __le32 config_enable;
+
+ /* mask of module id bits to be changed */
+ __le32 module_valid;
+
+ /* mask of config bits to be changed, see ATH10K_DBGLOG_CFG_ */
+ __le32 config_valid;
+} __packed;
+
+struct wmi_10_4_dbglog_cfg_cmd {
+ /* bitmask to hold mod id config*/
+ __le64 module_enable;
+
+ /* see ATH10K_DBGLOG_CFG_ */
+ __le32 config_enable;
+
+ /* mask of module id bits to be changed */
+ __le64 module_valid;
+
+ /* mask of config bits to be changed, see ATH10K_DBGLOG_CFG_ */
+ __le32 config_valid;
+} __packed;
+
+enum wmi_roam_reason {
+ WMI_ROAM_REASON_BETTER_AP = 1,
+ WMI_ROAM_REASON_BEACON_MISS = 2,
+ WMI_ROAM_REASON_LOW_RSSI = 3,
+ WMI_ROAM_REASON_SUITABLE_AP_FOUND = 4,
+ WMI_ROAM_REASON_HO_FAILED = 5,
+
+ /* keep last */
+ WMI_ROAM_REASON_MAX,
+};
+
+struct wmi_roam_ev {
+ __le32 vdev_id;
+ __le32 reason;
+} __packed;
+
+#define ATH10K_FRAGMT_THRESHOLD_MIN 540
+#define ATH10K_FRAGMT_THRESHOLD_MAX 2346
+
+#define WMI_MAX_EVENT 0x1000
+/* Maximum number of pending TXed WMI packets */
+#define WMI_SKB_HEADROOM sizeof(struct wmi_cmd_hdr)
+
+/* By default disable power save for IBSS */
+#define ATH10K_DEFAULT_ATIM 0
+
+#define WMI_MAX_MEM_REQS 16
+
+struct wmi_scan_ev_arg {
+ __le32 event_type; /* %WMI_SCAN_EVENT_ */
+ __le32 reason; /* %WMI_SCAN_REASON_ */
+ __le32 channel_freq; /* only valid for WMI_SCAN_EVENT_FOREIGN_CHANNEL */
+ __le32 scan_req_id;
+ __le32 scan_id;
+ __le32 vdev_id;
+};
+
+struct mgmt_tx_compl_params {
+ u32 desc_id;
+ u32 status;
+ u32 ppdu_id;
+ int ack_rssi;
+};
+
+struct wmi_tlv_mgmt_tx_compl_ev_arg {
+ __le32 desc_id;
+ __le32 status;
+ __le32 pdev_id;
+ __le32 ppdu_id;
+ __le32 ack_rssi;
+};
+
+struct wmi_tlv_mgmt_tx_bundle_compl_ev_arg {
+ __le32 num_reports;
+ const __le32 *desc_ids;
+ const __le32 *status;
+ const __le32 *ppdu_ids;
+ const __le32 *ack_rssi;
+};
+
+struct wmi_peer_delete_resp_ev_arg {
+ __le32 vdev_id;
+ struct wmi_mac_addr peer_addr;
+};
+
+#define WMI_MGMT_RX_NUM_RSSI 4
+struct wmi_mgmt_rx_ev_arg {
+ __le32 channel;
+ __le32 snr;
+ __le32 rate;
+ __le32 phy_mode;
+ __le32 buf_len;
+ __le32 status; /* %WMI_RX_STATUS_ */
+ struct wmi_mgmt_rx_ext_info ext_info;
+ __le32 rssi[WMI_MGMT_RX_NUM_RSSI];
+};
+
+struct wmi_ch_info_ev_arg {
+ __le32 err_code;
+ __le32 freq;
+ __le32 cmd_flags;
+ __le32 noise_floor;
+ __le32 rx_clear_count;
+ __le32 cycle_count;
+ __le32 chan_tx_pwr_range;
+ __le32 chan_tx_pwr_tp;
+ __le32 rx_frame_count;
+ __le32 my_bss_rx_cycle_count;
+ __le32 rx_11b_mode_data_duration;
+ __le32 tx_frame_cnt;
+ __le32 mac_clk_mhz;
+};
+
+/* From 10.4 firmware, not sure all have the same values. */
+enum wmi_vdev_start_status {
+ WMI_VDEV_START_OK = 0,
+ WMI_VDEV_START_CHAN_INVALID,
+};
+
+struct wmi_vdev_start_ev_arg {
+ __le32 vdev_id;
+ __le32 req_id;
+ __le32 resp_type; /* %WMI_VDEV_RESP_ */
+ __le32 status; /* See wmi_vdev_start_status enum above */
+};
+
+struct wmi_peer_kick_ev_arg {
+ const u8 *mac_addr;
+};
+
+struct wmi_swba_ev_arg {
+ __le32 vdev_map;
+ struct wmi_tim_info_arg tim_info[WMI_MAX_AP_VDEV];
+ const struct wmi_p2p_noa_info *noa_info[WMI_MAX_AP_VDEV];
+};
+
+struct wmi_phyerr_ev_arg {
+ u32 tsf_timestamp;
+ u16 freq1;
+ u16 freq2;
+ u8 rssi_combined;
+ u8 chan_width_mhz;
+ u8 phy_err_code;
+ u16 nf_chains[4];
+ u32 buf_len;
+ const u8 *buf;
+ u8 hdr_len;
+};
+
+struct wmi_phyerr_hdr_arg {
+ u32 num_phyerrs;
+ u32 tsf_l32;
+ u32 tsf_u32;
+ u32 buf_len;
+ const void *phyerrs;
+};
+
+struct wmi_dfs_status_ev_arg {
+ u32 status;
+};
+
+struct wmi_svc_rdy_ev_arg {
+ __le32 min_tx_power;
+ __le32 max_tx_power;
+ __le32 ht_cap;
+ __le32 vht_cap;
+ __le32 vht_supp_mcs;
+ __le32 sw_ver0;
+ __le32 sw_ver1;
+ __le32 fw_build;
+ __le32 phy_capab;
+ __le32 num_rf_chains;
+ __le32 eeprom_rd;
+ __le32 num_mem_reqs;
+ __le32 low_2ghz_chan;
+ __le32 high_2ghz_chan;
+ __le32 low_5ghz_chan;
+ __le32 high_5ghz_chan;
+ __le32 sys_cap_info;
+ const __le32 *service_map;
+ size_t service_map_len;
+ const struct wlan_host_mem_req *mem_reqs[WMI_MAX_MEM_REQS];
+};
+
+struct wmi_svc_avail_ev_arg {
+ bool service_map_ext_valid;
+ __le32 service_map_ext_len;
+ const __le32 *service_map_ext;
+};
+
+struct wmi_rdy_ev_arg {
+ __le32 sw_version;
+ __le32 abi_version;
+ __le32 status;
+ const u8 *mac_addr;
+};
+
+struct wmi_roam_ev_arg {
+ __le32 vdev_id;
+ __le32 reason;
+ __le32 rssi;
+};
+
+struct wmi_echo_ev_arg {
+ __le32 value;
+};
+
+struct wmi_pdev_temperature_event {
+ /* temperature value in degrees Celsius */
+ __le32 temperature;
+} __packed;
+
+struct wmi_pdev_bss_chan_info_event {
+ __le32 freq;
+ __le32 noise_floor;
+ __le64 cycle_busy;
+ __le64 cycle_total;
+ __le64 cycle_tx;
+ __le64 cycle_rx;
+ __le64 cycle_rx_bss;
+ __le32 reserved;
+} __packed;
+
+/* WOW structures */
+enum wmi_wow_wakeup_event {
+ WOW_BMISS_EVENT = 0,
+ WOW_BETTER_AP_EVENT,
+ WOW_DEAUTH_RECVD_EVENT,
+ WOW_MAGIC_PKT_RECVD_EVENT,
+ WOW_GTK_ERR_EVENT,
+ WOW_FOURWAY_HSHAKE_EVENT,
+ WOW_EAPOL_RECVD_EVENT,
+ WOW_NLO_DETECTED_EVENT,
+ WOW_DISASSOC_RECVD_EVENT,
+ WOW_PATTERN_MATCH_EVENT,
+ WOW_CSA_IE_EVENT,
+ WOW_PROBE_REQ_WPS_IE_EVENT,
+ WOW_AUTH_REQ_EVENT,
+ WOW_ASSOC_REQ_EVENT,
+ WOW_HTT_EVENT,
+ WOW_RA_MATCH_EVENT,
+ WOW_HOST_AUTO_SHUTDOWN_EVENT,
+ WOW_IOAC_MAGIC_EVENT,
+ WOW_IOAC_SHORT_EVENT,
+ WOW_IOAC_EXTEND_EVENT,
+ WOW_IOAC_TIMER_EVENT,
+ WOW_DFS_PHYERR_RADAR_EVENT,
+ WOW_BEACON_EVENT,
+ WOW_CLIENT_KICKOUT_EVENT,
+ WOW_EVENT_MAX,
+};
+
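+/* Expand to a switch case that returns the enum constant's name as a
+ * string; used by the lookup helpers below.
+ */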
+#define C2S(x) case x: return #x
+
+static inline const char *wow_wakeup_event(enum wmi_wow_wakeup_event ev)
+{
+ switch (ev) {
+ C2S(WOW_BMISS_EVENT);
+ C2S(WOW_BETTER_AP_EVENT);
+ C2S(WOW_DEAUTH_RECVD_EVENT);
+ C2S(WOW_MAGIC_PKT_RECVD_EVENT);
+ C2S(WOW_GTK_ERR_EVENT);
+ C2S(WOW_FOURWAY_HSHAKE_EVENT);
+ C2S(WOW_EAPOL_RECVD_EVENT);
+ C2S(WOW_NLO_DETECTED_EVENT);
+ C2S(WOW_DISASSOC_RECVD_EVENT);
+ C2S(WOW_PATTERN_MATCH_EVENT);
+ C2S(WOW_CSA_IE_EVENT);
+ C2S(WOW_PROBE_REQ_WPS_IE_EVENT);
+ C2S(WOW_AUTH_REQ_EVENT);
+ C2S(WOW_ASSOC_REQ_EVENT);
+ C2S(WOW_HTT_EVENT);
+ C2S(WOW_RA_MATCH_EVENT);
+ C2S(WOW_HOST_AUTO_SHUTDOWN_EVENT);
+ C2S(WOW_IOAC_MAGIC_EVENT);
+ C2S(WOW_IOAC_SHORT_EVENT);
+ C2S(WOW_IOAC_EXTEND_EVENT);
+ C2S(WOW_IOAC_TIMER_EVENT);
+ C2S(WOW_DFS_PHYERR_RADAR_EVENT);
+ C2S(WOW_BEACON_EVENT);
+ C2S(WOW_CLIENT_KICKOUT_EVENT);
+ C2S(WOW_EVENT_MAX);
+ default:
+ return NULL;
+ }
+}
+
+enum wmi_wow_wake_reason {
+ WOW_REASON_UNSPECIFIED = -1,
+ WOW_REASON_NLOD = 0,
+ WOW_REASON_AP_ASSOC_LOST,
+ WOW_REASON_LOW_RSSI,
+ WOW_REASON_DEAUTH_RECVD,
+ WOW_REASON_DISASSOC_RECVD,
+ WOW_REASON_GTK_HS_ERR,
+ WOW_REASON_EAP_REQ,
+ WOW_REASON_FOURWAY_HS_RECV,
+ WOW_REASON_TIMER_INTR_RECV,
+ WOW_REASON_PATTERN_MATCH_FOUND,
+ WOW_REASON_RECV_MAGIC_PATTERN,
+ WOW_REASON_P2P_DISC,
+ WOW_REASON_WLAN_HB,
+ WOW_REASON_CSA_EVENT,
+ WOW_REASON_PROBE_REQ_WPS_IE_RECV,
+ WOW_REASON_AUTH_REQ_RECV,
+ WOW_REASON_ASSOC_REQ_RECV,
+ WOW_REASON_HTT_EVENT,
+ WOW_REASON_RA_MATCH,
+ WOW_REASON_HOST_AUTO_SHUTDOWN,
+ WOW_REASON_IOAC_MAGIC_EVENT,
+ WOW_REASON_IOAC_SHORT_EVENT,
+ WOW_REASON_IOAC_EXTEND_EVENT,
+ WOW_REASON_IOAC_TIMER_EVENT,
+ WOW_REASON_ROAM_HO,
+ WOW_REASON_DFS_PHYERR_RADADR_EVENT,
+ WOW_REASON_BEACON_RECV,
+ WOW_REASON_CLIENT_KICKOUT_EVENT,
+ WOW_REASON_DEBUG_TEST = 0xFF,
+};
+
+static inline const char *wow_reason(enum wmi_wow_wake_reason reason)
+{
+ switch (reason) {
+ C2S(WOW_REASON_UNSPECIFIED);
+ C2S(WOW_REASON_NLOD);
+ C2S(WOW_REASON_AP_ASSOC_LOST);
+ C2S(WOW_REASON_LOW_RSSI);
+ C2S(WOW_REASON_DEAUTH_RECVD);
+ C2S(WOW_REASON_DISASSOC_RECVD);
+ C2S(WOW_REASON_GTK_HS_ERR);
+ C2S(WOW_REASON_EAP_REQ);
+ C2S(WOW_REASON_FOURWAY_HS_RECV);
+ C2S(WOW_REASON_TIMER_INTR_RECV);
+ C2S(WOW_REASON_PATTERN_MATCH_FOUND);
+ C2S(WOW_REASON_RECV_MAGIC_PATTERN);
+ C2S(WOW_REASON_P2P_DISC);
+ C2S(WOW_REASON_WLAN_HB);
+ C2S(WOW_REASON_CSA_EVENT);
+ C2S(WOW_REASON_PROBE_REQ_WPS_IE_RECV);
+ C2S(WOW_REASON_AUTH_REQ_RECV);
+ C2S(WOW_REASON_ASSOC_REQ_RECV);
+ C2S(WOW_REASON_HTT_EVENT);
+ C2S(WOW_REASON_RA_MATCH);
+ C2S(WOW_REASON_HOST_AUTO_SHUTDOWN);
+ C2S(WOW_REASON_IOAC_MAGIC_EVENT);
+ C2S(WOW_REASON_IOAC_SHORT_EVENT);
+ C2S(WOW_REASON_IOAC_EXTEND_EVENT);
+ C2S(WOW_REASON_IOAC_TIMER_EVENT);
+ C2S(WOW_REASON_ROAM_HO);
+ C2S(WOW_REASON_DFS_PHYERR_RADADR_EVENT);
+ C2S(WOW_REASON_BEACON_RECV);
+ C2S(WOW_REASON_CLIENT_KICKOUT_EVENT);
+ C2S(WOW_REASON_DEBUG_TEST);
+ default:
+ return NULL;
+ }
+}
+
+#undef C2S
+
+struct wmi_wow_ev_arg {
+ u32 vdev_id;
+ u32 flag;
+ enum wmi_wow_wake_reason wake_reason;
+ u32 data_len;
+};
+
+#define WOW_MIN_PATTERN_SIZE 1
+#define WOW_MAX_PATTERN_SIZE 148
+#define WOW_MAX_PKT_OFFSET 128
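+/* Length of the 802.11 3-addr + RFC1042 headers that replace an 802.3
+ * header on conversion, and the worst-case growth that conversion can
+ * add to a pattern; ath10k_wow_init() shrinks the advertised limits by
+ * the latter in native-wifi decap mode.
+ */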
+#define WOW_HDR_LEN (sizeof(struct ieee80211_hdr_3addr) + \
+ sizeof(struct rfc1042_hdr))
+#define WOW_MAX_REDUCE (WOW_HDR_LEN - sizeof(struct ethhdr) - \
+ offsetof(struct ieee80211_hdr_3addr, addr1))
+
+enum wmi_tdls_state {
+ WMI_TDLS_DISABLE,
+ WMI_TDLS_ENABLE_PASSIVE,
+ WMI_TDLS_ENABLE_ACTIVE,
+ WMI_TDLS_ENABLE_ACTIVE_EXTERNAL_CONTROL,
+};
+
+enum wmi_tdls_peer_state {
+ WMI_TDLS_PEER_STATE_PEERING,
+ WMI_TDLS_PEER_STATE_CONNECTED,
+ WMI_TDLS_PEER_STATE_TEARDOWN,
+};
+
+struct wmi_tdls_peer_update_cmd_arg {
+ u32 vdev_id;
+ enum wmi_tdls_peer_state peer_state;
+ u8 addr[ETH_ALEN];
+};
+
+#define WMI_TDLS_MAX_SUPP_OPER_CLASSES 32
+
+#define WMI_TDLS_PEER_SP_MASK 0x60
+#define WMI_TDLS_PEER_SP_LSB 5
+
+enum wmi_tdls_options {
+ WMI_TDLS_OFFCHAN_EN = BIT(0),
+ WMI_TDLS_BUFFER_STA_EN = BIT(1),
+ WMI_TDLS_SLEEP_STA_EN = BIT(2),
+};
+
+enum {
+ WMI_TDLS_PEER_QOS_AC_VO = BIT(0),
+ WMI_TDLS_PEER_QOS_AC_VI = BIT(1),
+ WMI_TDLS_PEER_QOS_AC_BK = BIT(2),
+ WMI_TDLS_PEER_QOS_AC_BE = BIT(3),
+};
+
+struct wmi_tdls_peer_capab_arg {
+ u8 peer_uapsd_queues;
+ u8 peer_max_sp;
+ u32 buff_sta_support;
+ u32 off_chan_support;
+ u32 peer_curr_operclass;
+ u32 self_curr_operclass;
+ u32 peer_chan_len;
+ u32 peer_operclass_len;
+ u8 peer_operclass[WMI_TDLS_MAX_SUPP_OPER_CLASSES];
+ u32 is_peer_responder;
+ u32 pref_offchan_num;
+ u32 pref_offchan_bw;
+};
+
+struct wmi_10_4_tdls_set_state_cmd {
+ __le32 vdev_id;
+ __le32 state;
+ __le32 notification_interval_ms;
+ __le32 tx_discovery_threshold;
+ __le32 tx_teardown_threshold;
+ __le32 rssi_teardown_threshold;
+ __le32 rssi_delta;
+ __le32 tdls_options;
+ __le32 tdls_peer_traffic_ind_window;
+ __le32 tdls_peer_traffic_response_timeout_ms;
+ __le32 tdls_puapsd_mask;
+ __le32 tdls_puapsd_inactivity_time_ms;
+ __le32 tdls_puapsd_rx_frame_threshold;
+ __le32 teardown_notification_ms;
+ __le32 tdls_peer_kickout_threshold;
+} __packed;
+
+struct wmi_tdls_peer_capabilities {
+ __le32 peer_qos;
+ __le32 buff_sta_support;
+ __le32 off_chan_support;
+ __le32 peer_curr_operclass;
+ __le32 self_curr_operclass;
+ __le32 peer_chan_len;
+ __le32 peer_operclass_len;
+ u8 peer_operclass[WMI_TDLS_MAX_SUPP_OPER_CLASSES];
+ __le32 is_peer_responder;
+ __le32 pref_offchan_num;
+ __le32 pref_offchan_bw;
+ union {
+ /* to match legacy implementation allocate room for
+ * at least one record even if peer_chan_len is 0
+ */
+ struct wmi_channel peer_chan_min_allocation;
+ DECLARE_FLEX_ARRAY(struct wmi_channel, peer_chan_list);
+ };
+} __packed;
+
+struct wmi_10_4_tdls_peer_update_cmd {
+ __le32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ __le32 peer_state;
+ __le32 reserved[4];
+ struct wmi_tdls_peer_capabilities peer_capab;
+} __packed;
+
+enum wmi_tdls_peer_reason {
+ WMI_TDLS_TEARDOWN_REASON_TX,
+ WMI_TDLS_TEARDOWN_REASON_RSSI,
+ WMI_TDLS_TEARDOWN_REASON_SCAN,
+ WMI_TDLS_DISCONNECTED_REASON_PEER_DELETE,
+ WMI_TDLS_TEARDOWN_REASON_PTR_TIMEOUT,
+ WMI_TDLS_TEARDOWN_REASON_BAD_PTR,
+ WMI_TDLS_TEARDOWN_REASON_NO_RESPONSE,
+ WMI_TDLS_ENTER_BUF_STA,
+ WMI_TDLS_EXIT_BUF_STA,
+ WMI_TDLS_ENTER_BT_BUSY_MODE,
+ WMI_TDLS_EXIT_BT_BUSY_MODE,
+ WMI_TDLS_SCAN_STARTED_EVENT,
+ WMI_TDLS_SCAN_COMPLETED_EVENT,
+};
+
+enum wmi_tdls_peer_notification {
+ WMI_TDLS_SHOULD_DISCOVER,
+ WMI_TDLS_SHOULD_TEARDOWN,
+ WMI_TDLS_PEER_DISCONNECTED,
+ WMI_TDLS_CONNECTION_TRACKER_NOTIFICATION,
+};
+
+struct wmi_tdls_peer_event {
+ struct wmi_mac_addr peer_macaddr;
+ /* see enum wmi_tdls_peer_notification */
+ __le32 peer_status;
+ /* see enum wmi_tdls_peer_reason */
+ __le32 peer_reason;
+ __le32 vdev_id;
+} __packed;
+
+enum wmi_tid_aggr_control_conf {
+ WMI_TID_CONFIG_AGGR_CONTROL_IGNORE,
+ WMI_TID_CONFIG_AGGR_CONTROL_ENABLE,
+ WMI_TID_CONFIG_AGGR_CONTROL_DISABLE,
+};
+
+enum wmi_noack_tid_conf {
+ WMI_NOACK_TID_CONFIG_IGNORE_ACK_POLICY,
+ WMI_PEER_TID_CONFIG_ACK,
+ WMI_PEER_TID_CONFIG_NOACK,
+};
+
+enum wmi_tid_rate_ctrl_conf {
+ WMI_TID_CONFIG_RATE_CONTROL_IGNORE,
+ WMI_TID_CONFIG_RATE_CONTROL_AUTO,
+ WMI_TID_CONFIG_RATE_CONTROL_FIXED_RATE,
+ WMI_TID_CONFIG_RATE_CONTROL_DEFAULT_LOWEST_RATE,
+ WMI_PEER_TID_CONFIG_RATE_UPPER_CAP,
+};
+
+enum wmi_tid_rtscts_control_conf {
+ WMI_TID_CONFIG_RTSCTS_CONTROL_ENABLE,
+ WMI_TID_CONFIG_RTSCTS_CONTROL_DISABLE,
+};
+
+enum wmi_ext_tid_config_map {
+ WMI_EXT_TID_RTS_CTS_CONFIG = BIT(0),
+};
+
+struct wmi_per_peer_per_tid_cfg_arg {
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ u32 tid;
+ enum wmi_noack_tid_conf ack_policy;
+ enum wmi_tid_aggr_control_conf aggr_control;
+ u8 rate_ctrl;
+ u32 retry_count;
+ u32 rcode_flags;
+ u32 ext_tid_cfg_bitmap;
+ u32 rtscts_ctrl;
+};
+
+struct wmi_peer_per_tid_cfg_cmd {
+ __le32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ __le32 tid;
+
+ /* see enum wmi_noack_tid_conf */
+ __le32 ack_policy;
+
+ /* see enum wmi_tid_aggr_control_conf */
+ __le32 aggr_control;
+
+ /* see enum wmi_tid_rate_ctrl_conf */
+ __le32 rate_control;
+ __le32 rcode_flags;
+ __le32 retry_count;
+
+ /* See enum wmi_ext_tid_config_map */
+ __le32 ext_tid_cfg_bitmap;
+
+ /* see enum wmi_tid_rtscts_control_conf */
+ __le32 rtscts_ctrl;
+} __packed;
+
+enum wmi_txbf_conf {
+ WMI_TXBF_CONF_UNSUPPORTED,
+ WMI_TXBF_CONF_BEFORE_ASSOC,
+ WMI_TXBF_CONF_AFTER_ASSOC,
+};
+
+#define WMI_CCA_DETECT_LEVEL_AUTO 0
+#define WMI_CCA_DETECT_MARGIN_AUTO 0
+
+struct wmi_pdev_set_adaptive_cca_params {
+ __le32 enable;
+ __le32 cca_detect_level;
+ __le32 cca_detect_margin;
+} __packed;
+
+#define WMI_PNO_MAX_SCHED_SCAN_PLANS 2
+#define WMI_PNO_MAX_SCHED_SCAN_PLAN_INT 7200
+#define WMI_PNO_MAX_SCHED_SCAN_PLAN_ITRNS 100
+#define WMI_PNO_MAX_NETW_CHANNELS 26
+#define WMI_PNO_MAX_NETW_CHANNELS_EX 60
+#define WMI_PNO_MAX_SUPP_NETWORKS WLAN_SCAN_PARAMS_MAX_SSID
+#define WMI_PNO_MAX_IE_LENGTH WLAN_SCAN_PARAMS_MAX_IE_LEN
+
+/* Size based on the dot11 declaration without extra IEs, as we will not carry those for PNO. */
+#define WMI_PNO_MAX_PB_REQ_SIZE 450
+
+#define WMI_PNO_24G_DEFAULT_CH 1
+#define WMI_PNO_5G_DEFAULT_CH 36
+
+#define WMI_ACTIVE_MAX_CHANNEL_TIME 40
+#define WMI_PASSIVE_MAX_CHANNEL_TIME 110
+
+/* SSID broadcast type */
+enum wmi_SSID_bcast_type {
+ BCAST_UNKNOWN = 0,
+ BCAST_NORMAL = 1,
+ BCAST_HIDDEN = 2,
+};
+
+struct wmi_network_type {
+ struct wmi_ssid ssid;
+ u32 authentication;
+ u32 encryption;
+ u32 bcast_nw_type;
+ u8 channel_count;
+ u16 channels[WMI_PNO_MAX_NETW_CHANNELS_EX];
+ s32 rssi_threshold;
+} __packed;
+
+struct wmi_pno_scan_req {
+ u8 enable;
+ u8 vdev_id;
+ u8 uc_networks_count;
+ struct wmi_network_type a_networks[WMI_PNO_MAX_SUPP_NETWORKS];
+ u32 fast_scan_period;
+ u32 slow_scan_period;
+ u8 fast_scan_max_cycles;
+
+ bool do_passive_scan;
+
+ u32 delay_start_time;
+ u32 active_min_time;
+ u32 active_max_time;
+ u32 passive_min_time;
+ u32 passive_max_time;
+
+ /* mac address randomization attributes */
+ u32 enable_pno_scan_randomization;
+ u8 mac_addr[ETH_ALEN];
+ u8 mac_addr_mask[ETH_ALEN];
+} __packed;
+
+enum wmi_host_platform_type {
+ WMI_HOST_PLATFORM_HIGH_PERF,
+ WMI_HOST_PLATFORM_LOW_PERF,
+};
+
+enum wmi_bss_survey_req_type {
+ WMI_BSS_SURVEY_REQ_TYPE_READ = 1,
+ WMI_BSS_SURVEY_REQ_TYPE_READ_CLEAR,
+};
+
+struct wmi_pdev_chan_info_req_cmd {
+ __le32 type;
+ __le32 reserved;
+} __packed;
+
+/* bb timing register configurations */
+struct wmi_bb_timing_cfg_arg {
+ /* Tx_end to pa off timing */
+ u32 bb_tx_timing;
+
+ /* Tx_end to external pa off timing */
+ u32 bb_xpa_timing;
+};
+
+struct wmi_pdev_bb_timing_cfg_cmd {
+ /* Tx_end to pa off timing */
+ __le32 bb_tx_timing;
+
+ /* Tx_end to external pa off timing */
+ __le32 bb_xpa_timing;
+} __packed;
+
+struct wmi_ftm_seg_hdr {
+ __le32 len;
+ __le32 msgref;
+ __le32 segmentinfo;
+ __le32 pdev_id;
+} __packed;
+
+struct wmi_ftm_cmd {
+ __le32 tlv_header;
+ struct wmi_ftm_seg_hdr seg_hdr;
+ u8 data[];
+} __packed;
+
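+/* A WMI TLV header word carries the value length in bits 0-15 and the
+ * tag in bits 16-31.
+ */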
+#define WMI_TLV_LEN GENMASK(15, 0)
+#define WMI_TLV_TAG GENMASK(31, 16)
+#define MAX_WMI_UTF_LEN 252
+
+struct ath10k;
+struct ath10k_vif;
+struct ath10k_fw_stats_pdev;
+struct ath10k_fw_stats_peer;
+struct ath10k_fw_stats;
+
+int ath10k_wmi_attach(struct ath10k *ar);
+void ath10k_wmi_detach(struct ath10k *ar);
+void ath10k_wmi_free_host_mem(struct ath10k *ar);
+int ath10k_wmi_wait_for_service_ready(struct ath10k *ar);
+int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar);
+
+struct sk_buff *ath10k_wmi_alloc_skb(struct ath10k *ar, u32 len);
+int ath10k_wmi_connect(struct ath10k *ar);
+
+int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);
+int ath10k_wmi_cmd_send_nowait(struct ath10k *ar, struct sk_buff *skb,
+ u32 cmd_id);
+void ath10k_wmi_start_scan_init(struct ath10k *ar, struct wmi_start_scan_arg *arg);
+
+void ath10k_wmi_pull_pdev_stats_base(const struct wmi_pdev_stats_base *src,
+ struct ath10k_fw_stats_pdev *dst);
+void ath10k_wmi_pull_pdev_stats_tx(const struct wmi_pdev_stats_tx *src,
+ struct ath10k_fw_stats_pdev *dst);
+void ath10k_wmi_pull_pdev_stats_rx(const struct wmi_pdev_stats_rx *src,
+ struct ath10k_fw_stats_pdev *dst);
+void ath10k_wmi_pull_pdev_stats_extra(const struct wmi_pdev_stats_extra *src,
+ struct ath10k_fw_stats_pdev *dst);
+void ath10k_wmi_pull_peer_stats(const struct wmi_peer_stats *src,
+ struct ath10k_fw_stats_peer *dst);
+void ath10k_wmi_put_host_mem_chunks(struct ath10k *ar,
+ struct wmi_host_mem_chunks *chunks);
+void ath10k_wmi_put_start_scan_common(struct wmi_start_scan_common *cmn,
+ const struct wmi_start_scan_arg *arg);
+void ath10k_wmi_set_wmm_param(struct wmi_wmm_params *params,
+ const struct wmi_wmm_params_arg *arg);
+void ath10k_wmi_put_wmi_channel(struct ath10k *ar, struct wmi_channel *ch,
+ const struct wmi_channel_arg *arg);
+int ath10k_wmi_start_scan_verify(const struct wmi_start_scan_arg *arg);
+
+int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb);
+int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb);
+int ath10k_wmi_event_mgmt_tx_compl(struct ath10k *ar, struct sk_buff *skb);
+int ath10k_wmi_event_mgmt_tx_bundle_compl(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_echo(struct ath10k *ar, struct sk_buff *skb);
+int ath10k_wmi_event_debug_mesg(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_update_stats(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_vdev_start_resp(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_vdev_stopped(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_peer_sta_kickout(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_tbttoffset_update(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_dfs(struct ath10k *ar,
+ struct wmi_phyerr_ev_arg *phyerr, u64 tsf);
+void ath10k_wmi_event_spectral_scan(struct ath10k *ar,
+ struct wmi_phyerr_ev_arg *phyerr,
+ u64 tsf);
+void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_roam(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_profile_match(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_debug_print(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_pdev_qvit(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_wlan_profile_data(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_rtt_measurement_report(struct ath10k *ar,
+ struct sk_buff *skb);
+void ath10k_wmi_event_tsf_measurement_report(struct ath10k *ar,
+ struct sk_buff *skb);
+void ath10k_wmi_event_rtt_error_report(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_wow_wakeup_host(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_dcs_interference(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_pdev_ftm_intg(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_gtk_offload_status(struct ath10k *ar,
+ struct sk_buff *skb);
+void ath10k_wmi_event_gtk_rekey_fail(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_delba_complete(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_addba_complete(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_vdev_install_key_complete(struct ath10k *ar,
+ struct sk_buff *skb);
+void ath10k_wmi_event_inst_rssi_stats(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_vdev_standby_req(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_vdev_resume_req(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_service_ready(struct ath10k *ar, struct sk_buff *skb);
+int ath10k_wmi_event_ready(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_service_available(struct ath10k *ar, struct sk_buff *skb);
+int ath10k_wmi_op_pull_phyerr_ev(struct ath10k *ar, const void *phyerr_buf,
+ int left_len, struct wmi_phyerr_ev_arg *arg);
+void ath10k_wmi_main_op_fw_stats_fill(struct ath10k *ar,
+ struct ath10k_fw_stats *fw_stats,
+ char *buf);
+void ath10k_wmi_10x_op_fw_stats_fill(struct ath10k *ar,
+ struct ath10k_fw_stats *fw_stats,
+ char *buf);
+void ath10k_wmi_10_4_op_fw_stats_fill(struct ath10k *ar,
+ struct ath10k_fw_stats *fw_stats,
+ char *buf);
+int ath10k_wmi_op_get_vdev_subtype(struct ath10k *ar,
+ enum wmi_vdev_subtype subtype);
+int ath10k_wmi_barrier(struct ath10k *ar);
+void ath10k_wmi_tpc_config_get_rate_code(u8 *rate_code, u16 *pream_table,
+ u32 num_tx_chain);
+void ath10k_wmi_event_tpc_final_table(struct ath10k *ar, struct sk_buff *skb);
+
+#endif /* _WMI_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/wow.c b/drivers/net/wireless/ath/ath10k/wow.c
new file mode 100644
index 000000000000..aa7b2e703f3d
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/wow.c
@@ -0,0 +1,642 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (c) 2015-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include "mac.h"
+
+#include <net/mac80211.h>
+#include "hif.h"
+#include "core.h"
+#include "debug.h"
+#include "wmi.h"
+#include "wmi-ops.h"
+
+static const struct wiphy_wowlan_support ath10k_wowlan_support = {
+ .flags = WIPHY_WOWLAN_DISCONNECT |
+ WIPHY_WOWLAN_MAGIC_PKT,
+ .pattern_min_len = WOW_MIN_PATTERN_SIZE,
+ .pattern_max_len = WOW_MAX_PATTERN_SIZE,
+ .max_pkt_offset = WOW_MAX_PKT_OFFSET,
+};
+
+static int ath10k_wow_vif_cleanup(struct ath10k_vif *arvif)
+{
+ struct ath10k *ar = arvif->ar;
+ int i, ret;
+
+ for (i = 0; i < WOW_EVENT_MAX; i++) {
+ ret = ath10k_wmi_wow_add_wakeup_event(ar, arvif->vdev_id, i, 0);
+ if (ret) {
+ ath10k_warn(ar, "failed to issue wow wakeup for event %s on vdev %i: %d\n",
+ wow_wakeup_event(i), arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ for (i = 0; i < ar->wow.max_num_patterns; i++) {
+ ret = ath10k_wmi_wow_del_pattern(ar, arvif->vdev_id, i);
+ if (ret) {
+ ath10k_warn(ar, "failed to delete wow pattern %d for vdev %i: %d\n",
+ i, arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ath10k_wow_cleanup(struct ath10k *ar)
+{
+ struct ath10k_vif *arvif;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ ret = ath10k_wow_vif_cleanup(arvif);
+ if (ret) {
+ ath10k_warn(ar, "failed to clean wow wakeups on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Convert an 802.3 format to an 802.11 format.
+ *          +------------+-----------+--------+----------------+
+ * 802.3:   |dest mac(6B)|src mac(6B)|type(2B)|     body...    |
+ *          +------------+-----------+--------+----------------+
+ *                 |__          |_______       |____________     |_________
+ *                    |                 |                   |             |
+ *          +--+------------+----+-----------+--------+--------+-----------+
+ * 802.11:  |4B|dest mac(6B)| 6B |src mac(6B)|   8B   |type(2B)|  body...  |
+ *          +--+------------+----+-----------+--------+--------+-----------+
+ */
+static void ath10k_wow_convert_8023_to_80211(struct cfg80211_pkt_pattern *new,
+ const struct cfg80211_pkt_pattern *old)
+{
+ u8 hdr_8023_pattern[ETH_HLEN] = {};
+ u8 hdr_8023_bit_mask[ETH_HLEN] = {};
+ u8 hdr_80211_pattern[WOW_HDR_LEN] = {};
+ u8 hdr_80211_bit_mask[WOW_HDR_LEN] = {};
+
+ int total_len = old->pkt_offset + old->pattern_len;
+ int hdr_80211_end_offset;
+
+ struct ieee80211_hdr_3addr *new_hdr_pattern =
+ (struct ieee80211_hdr_3addr *)hdr_80211_pattern;
+ struct ieee80211_hdr_3addr *new_hdr_mask =
+ (struct ieee80211_hdr_3addr *)hdr_80211_bit_mask;
+ struct ethhdr *old_hdr_pattern = (struct ethhdr *)hdr_8023_pattern;
+ struct ethhdr *old_hdr_mask = (struct ethhdr *)hdr_8023_bit_mask;
+ int hdr_len = sizeof(*new_hdr_pattern);
+
+ struct rfc1042_hdr *new_rfc_pattern =
+ (struct rfc1042_hdr *)(hdr_80211_pattern + hdr_len);
+ struct rfc1042_hdr *new_rfc_mask =
+ (struct rfc1042_hdr *)(hdr_80211_bit_mask + hdr_len);
+ int rfc_len = sizeof(*new_rfc_pattern);
+
+ memcpy(hdr_8023_pattern + old->pkt_offset,
+ old->pattern, ETH_HLEN - old->pkt_offset);
+ memcpy(hdr_8023_bit_mask + old->pkt_offset,
+ old->mask, ETH_HLEN - old->pkt_offset);
+
+ /* Copy destination address */
+ memcpy(new_hdr_pattern->addr1, old_hdr_pattern->h_dest, ETH_ALEN);
+ memcpy(new_hdr_mask->addr1, old_hdr_mask->h_dest, ETH_ALEN);
+
+ /* Copy source address */
+ memcpy(new_hdr_pattern->addr3, old_hdr_pattern->h_source, ETH_ALEN);
+ memcpy(new_hdr_mask->addr3, old_hdr_mask->h_source, ETH_ALEN);
+
+ /* Copy the logical link (SNAP) type */
+ memcpy(&new_rfc_pattern->snap_type,
+ &old_hdr_pattern->h_proto,
+ sizeof(old_hdr_pattern->h_proto));
+ memcpy(&new_rfc_mask->snap_type,
+ &old_hdr_mask->h_proto,
+ sizeof(old_hdr_mask->h_proto));
+
+ /* Calculate the new pkt_offset: offsets inside the destination mac
+  * land in addr1, offsets inside the source mac land in addr3, and
+  * offsets at or past the type field land after the 802.11 and
+  * RFC1042 headers.
+  */
+ if (old->pkt_offset < ETH_ALEN)
+ new->pkt_offset = old->pkt_offset +
+ offsetof(struct ieee80211_hdr_3addr, addr1);
+ else if (old->pkt_offset < offsetof(struct ethhdr, h_proto))
+ new->pkt_offset = old->pkt_offset +
+ offsetof(struct ieee80211_hdr_3addr, addr3) -
+ offsetof(struct ethhdr, h_source);
+ else
+ new->pkt_offset = old->pkt_offset + hdr_len + rfc_len - ETH_HLEN;
+
+ /* Calculate new hdr end offset */
+ if (total_len > ETH_HLEN)
+ hdr_80211_end_offset = hdr_len + rfc_len;
+ else if (total_len > offsetof(struct ethhdr, h_proto))
+ hdr_80211_end_offset = hdr_len + rfc_len + total_len - ETH_HLEN;
+ else if (total_len > ETH_ALEN)
+ hdr_80211_end_offset = total_len - ETH_ALEN +
+ offsetof(struct ieee80211_hdr_3addr, addr3);
+ else
+ hdr_80211_end_offset = total_len +
+ offsetof(struct ieee80211_hdr_3addr, addr1);
+
+ new->pattern_len = hdr_80211_end_offset - new->pkt_offset;
+
+ memcpy((u8 *)new->pattern,
+ hdr_80211_pattern + new->pkt_offset,
+ new->pattern_len);
+ memcpy((u8 *)new->mask,
+ hdr_80211_bit_mask + new->pkt_offset,
+ new->pattern_len);
+
+ if (total_len > ETH_HLEN) {
+ /* Copy frame body */
+ memcpy((u8 *)new->pattern + new->pattern_len,
+ (void *)old->pattern + ETH_HLEN - old->pkt_offset,
+ total_len - ETH_HLEN);
+ memcpy((u8 *)new->mask + new->pattern_len,
+ (void *)old->mask + ETH_HLEN - old->pkt_offset,
+ total_len - ETH_HLEN);
+
+ new->pattern_len += total_len - ETH_HLEN;
+ }
+}
+
+static int ath10k_wmi_pno_check(struct ath10k *ar, u32 vdev_id,
+ struct cfg80211_sched_scan_request *nd_config,
+ struct wmi_pno_scan_req *pno)
+{
+ int i, j, ret = 0;
+ u8 ssid_len;
+
+ pno->enable = 1;
+ pno->vdev_id = vdev_id;
+ pno->uc_networks_count = nd_config->n_match_sets;
+
+ if (!pno->uc_networks_count ||
+ pno->uc_networks_count > WMI_PNO_MAX_SUPP_NETWORKS)
+ return -EINVAL;
+
+ if (nd_config->n_channels > WMI_PNO_MAX_NETW_CHANNELS_EX)
+ return -EINVAL;
+
+ /* Filling per profile params */
+ for (i = 0; i < pno->uc_networks_count; i++) {
+ ssid_len = nd_config->match_sets[i].ssid.ssid_len;
+
+ if (ssid_len == 0 || ssid_len > 32)
+ return -EINVAL;
+
+ pno->a_networks[i].ssid.ssid_len = __cpu_to_le32(ssid_len);
+
+ memcpy(pno->a_networks[i].ssid.ssid,
+ nd_config->match_sets[i].ssid.ssid,
+ nd_config->match_sets[i].ssid.ssid_len);
+ pno->a_networks[i].authentication = 0;
+ pno->a_networks[i].encryption = 0;
+ pno->a_networks[i].bcast_nw_type = 0;
+
+ /* Copy the list of valid channels into the request */
+ pno->a_networks[i].channel_count = nd_config->n_channels;
+ pno->a_networks[i].rssi_threshold = nd_config->match_sets[i].rssi_thold;
+
+ for (j = 0; j < nd_config->n_channels; j++) {
+ pno->a_networks[i].channels[j] =
+ nd_config->channels[j]->center_freq;
+ }
+ }
+
+ /* set scan to passive if no SSIDs are specified in the request */
+ if (nd_config->n_ssids == 0)
+ pno->do_passive_scan = true;
+ else
+ pno->do_passive_scan = false;
+
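+ /* An SSID requested for active probing is treated as a hidden
+  * network: flag the matching profile so the firmware probes for it.
+  */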
+ for (i = 0; i < nd_config->n_ssids; i++) {
+ j = 0;
+ while (j < pno->uc_networks_count) {
+ if (__le32_to_cpu(pno->a_networks[j].ssid.ssid_len) ==
+ nd_config->ssids[i].ssid_len &&
+ (memcmp(pno->a_networks[j].ssid.ssid,
+ nd_config->ssids[i].ssid,
+ __le32_to_cpu(pno->a_networks[j].ssid.ssid_len)) == 0)) {
+ pno->a_networks[j].bcast_nw_type = BCAST_HIDDEN;
+ break;
+ }
+ j++;
+ }
+ }
+
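+ /* With two scan plans the first supplies the fast period and cycle
+  * count and the second the slow period; with one plan the same
+  * interval serves as both fast and slow period.
+  */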
+ if (nd_config->n_scan_plans == 2) {
+ pno->fast_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC;
+ pno->fast_scan_max_cycles = nd_config->scan_plans[0].iterations;
+ pno->slow_scan_period =
+ nd_config->scan_plans[1].interval * MSEC_PER_SEC;
+ } else if (nd_config->n_scan_plans == 1) {
+ pno->fast_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC;
+ pno->fast_scan_max_cycles = 1;
+ pno->slow_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC;
+ } else {
+ ath10k_warn(ar, "Invalid number of scan plans %d !!",
+ nd_config->n_scan_plans);
+ }
+
+ if (nd_config->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
+ /* enable mac randomization */
+ pno->enable_pno_scan_randomization = 1;
+ memcpy(pno->mac_addr, nd_config->mac_addr, ETH_ALEN);
+ memcpy(pno->mac_addr_mask, nd_config->mac_addr_mask, ETH_ALEN);
+ }
+
+ pno->delay_start_time = nd_config->delay;
+
+ /* Current FW does not support min-max range for dwell time */
+ pno->active_max_time = WMI_ACTIVE_MAX_CHANNEL_TIME;
+ pno->passive_max_time = WMI_PASSIVE_MAX_CHANNEL_TIME;
+ return ret;
+}
+
+static int ath10k_vif_wow_set_wakeups(struct ath10k_vif *arvif,
+ struct cfg80211_wowlan *wowlan)
+{
+ int ret, i;
+ unsigned long wow_mask = 0;
+ struct ath10k *ar = arvif->ar;
+ const struct cfg80211_pkt_pattern *patterns = wowlan->patterns;
+ int pattern_id = 0;
+
+ /* Setup requested WOW features */
+ switch (arvif->vdev_type) {
+ case WMI_VDEV_TYPE_IBSS:
+ __set_bit(WOW_BEACON_EVENT, &wow_mask);
+ fallthrough;
+ case WMI_VDEV_TYPE_AP:
+ __set_bit(WOW_DEAUTH_RECVD_EVENT, &wow_mask);
+ __set_bit(WOW_DISASSOC_RECVD_EVENT, &wow_mask);
+ __set_bit(WOW_PROBE_REQ_WPS_IE_EVENT, &wow_mask);
+ __set_bit(WOW_AUTH_REQ_EVENT, &wow_mask);
+ __set_bit(WOW_ASSOC_REQ_EVENT, &wow_mask);
+ __set_bit(WOW_HTT_EVENT, &wow_mask);
+ __set_bit(WOW_RA_MATCH_EVENT, &wow_mask);
+ break;
+ case WMI_VDEV_TYPE_STA:
+ if (wowlan->disconnect) {
+ __set_bit(WOW_DEAUTH_RECVD_EVENT, &wow_mask);
+ __set_bit(WOW_DISASSOC_RECVD_EVENT, &wow_mask);
+ __set_bit(WOW_BMISS_EVENT, &wow_mask);
+ __set_bit(WOW_CSA_IE_EVENT, &wow_mask);
+ }
+
+ if (wowlan->magic_pkt)
+ __set_bit(WOW_MAGIC_PKT_RECVD_EVENT, &wow_mask);
+
+ if (wowlan->nd_config) {
+ struct wmi_pno_scan_req *pno;
+ int ret;
+
+ pno = kzalloc(sizeof(*pno), GFP_KERNEL);
+ if (!pno)
+ return -ENOMEM;
+
+ ar->nlo_enabled = true;
+
+ ret = ath10k_wmi_pno_check(ar, arvif->vdev_id,
+ wowlan->nd_config, pno);
+ if (!ret) {
+ ath10k_wmi_wow_config_pno(ar, arvif->vdev_id, pno);
+ __set_bit(WOW_NLO_DETECTED_EVENT, &wow_mask);
+ }
+
+ kfree(pno);
+ }
+ break;
+ default:
+ break;
+ }
+
+ for (i = 0; i < wowlan->n_patterns; i++) {
+ u8 bitmask[WOW_MAX_PATTERN_SIZE] = {};
+ u8 ath_pattern[WOW_MAX_PATTERN_SIZE] = {};
+ u8 ath_bitmask[WOW_MAX_PATTERN_SIZE] = {};
+ struct cfg80211_pkt_pattern new_pattern = {};
+ struct cfg80211_pkt_pattern old_pattern = patterns[i];
+ int j;
+
+ new_pattern.pattern = ath_pattern;
+ new_pattern.mask = ath_bitmask;
+ if (patterns[i].pattern_len > WOW_MAX_PATTERN_SIZE)
+ continue;
+ /* convert bytemask to bitmask: cfg80211 packs one mask bit per
+  * pattern byte, so expand each set bit into a full 0xff byte
+  * for the firmware interface
+  */
+ for (j = 0; j < patterns[i].pattern_len; j++)
+ if (patterns[i].mask[j / 8] & BIT(j % 8))
+ bitmask[j] = 0xff;
+ old_pattern.mask = bitmask;
+
+ if (ar->wmi.rx_decap_mode == ATH10K_HW_TXRX_NATIVE_WIFI) {
+ if (patterns[i].pkt_offset < ETH_HLEN) {
+ ath10k_wow_convert_8023_to_80211(&new_pattern,
+ &old_pattern);
+ } else {
+ new_pattern = old_pattern;
+ new_pattern.pkt_offset += WOW_HDR_LEN - ETH_HLEN;
+ }
+ }
+
+ if (WARN_ON(new_pattern.pattern_len > WOW_MAX_PATTERN_SIZE))
+ return -EINVAL;
+
+ ret = ath10k_wmi_wow_add_pattern(ar, arvif->vdev_id,
+ pattern_id,
+ new_pattern.pattern,
+ new_pattern.mask,
+ new_pattern.pattern_len,
+ new_pattern.pkt_offset);
+ if (ret) {
+ ath10k_warn(ar, "failed to add pattern %i to vdev %i: %d\n",
+ pattern_id,
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ pattern_id++;
+ __set_bit(WOW_PATTERN_MATCH_EVENT, &wow_mask);
+ }
+
+ for (i = 0; i < WOW_EVENT_MAX; i++) {
+ if (!test_bit(i, &wow_mask))
+ continue;
+ ret = ath10k_wmi_wow_add_wakeup_event(ar, arvif->vdev_id, i, 1);
+ if (ret) {
+ ath10k_warn(ar, "failed to enable wakeup event %s on vdev %i: %d\n",
+ wow_wakeup_event(i), arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ath10k_wow_set_wakeups(struct ath10k *ar,
+ struct cfg80211_wowlan *wowlan)
+{
+ struct ath10k_vif *arvif;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ ret = ath10k_vif_wow_set_wakeups(arvif, wowlan);
+ if (ret) {
+ ath10k_warn(ar, "failed to set wow wakeups on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ath10k_vif_wow_clean_nlo(struct ath10k_vif *arvif)
+{
+ int ret = 0;
+ struct ath10k *ar = arvif->ar;
+
+ switch (arvif->vdev_type) {
+ case WMI_VDEV_TYPE_STA:
+ if (ar->nlo_enabled) {
+ struct wmi_pno_scan_req *pno;
+
+ pno = kzalloc(sizeof(*pno), GFP_KERNEL);
+ if (!pno)
+ return -ENOMEM;
+
+ pno->enable = 0;
+ ar->nlo_enabled = false;
+ ret = ath10k_wmi_wow_config_pno(ar, arvif->vdev_id, pno);
+ kfree(pno);
+ }
+ break;
+ default:
+ break;
+ }
+ return ret;
+}
+
+static int ath10k_wow_nlo_cleanup(struct ath10k *ar)
+{
+ struct ath10k_vif *arvif;
+ int ret = 0;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ ret = ath10k_vif_wow_clean_nlo(arvif);
+ if (ret) {
+ ath10k_warn(ar, "failed to clean nlo settings on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ath10k_wow_enable(struct ath10k *ar)
+{
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
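+ /* Firmware acknowledges WOW enable asynchronously with a target
+  * suspend indication, so wait for the completion rather than
+  * trusting the command send alone.
+  */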
+ reinit_completion(&ar->target_suspend);
+
+ ret = ath10k_wmi_wow_enable(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to issue wow enable: %d\n", ret);
+ return ret;
+ }
+
+ ret = wait_for_completion_timeout(&ar->target_suspend, 3 * HZ);
+ if (ret == 0) {
+ ath10k_warn(ar, "timed out while waiting for suspend completion\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int ath10k_wow_wakeup(struct ath10k *ar)
+{
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ reinit_completion(&ar->wow.wakeup_completed);
+
+ ret = ath10k_wmi_wow_host_wakeup_ind(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to send wow wakeup indication: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = wait_for_completion_timeout(&ar->wow.wakeup_completed, 3 * HZ);
+ if (ret == 0) {
+ ath10k_warn(ar, "timed out while waiting for wow wakeup completion\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+int ath10k_wow_op_suspend(struct ieee80211_hw *hw,
+ struct cfg80211_wowlan *wowlan)
+{
+ struct ath10k *ar = hw->priv;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (WARN_ON(!test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT,
+ ar->running_fw->fw_file.fw_features))) {
+ ret = 1;
+ goto exit;
+ }
+
+ ret = ath10k_wow_cleanup(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to clear wow wakeup events: %d\n",
+ ret);
+ goto exit;
+ }
+
+ ret = ath10k_wow_set_wakeups(ar, wowlan);
+ if (ret) {
+ ath10k_warn(ar, "failed to set wow wakeup events: %d\n",
+ ret);
+ goto cleanup;
+ }
+
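+ /* Let in-flight tx drain before the firmware is put into WOW. */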
+ ath10k_mac_wait_tx_complete(ar);
+
+ ret = ath10k_wow_enable(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to start wow: %d\n", ret);
+ goto cleanup;
+ }
+
+ ret = ath10k_hif_suspend(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to suspend hif: %d\n", ret);
+ goto wakeup;
+ }
+
+ goto exit;
+
+wakeup:
+ ath10k_wow_wakeup(ar);
+
+cleanup:
+ ath10k_wow_cleanup(ar);
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret ? 1 : 0;
+}
+
+void ath10k_wow_op_set_wakeup(struct ieee80211_hw *hw, bool enabled)
+{
+ struct ath10k *ar = hw->priv;
+
+ mutex_lock(&ar->conf_mutex);
+ if (test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT,
+ ar->running_fw->fw_file.fw_features)) {
+ device_set_wakeup_enable(ar->dev, enabled);
+ }
+ mutex_unlock(&ar->conf_mutex);
+}
+
+int ath10k_wow_op_resume(struct ieee80211_hw *hw)
+{
+ struct ath10k *ar = hw->priv;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (WARN_ON(!test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT,
+ ar->running_fw->fw_file.fw_features))) {
+ ret = 1;
+ goto exit;
+ }
+
+ ret = ath10k_hif_resume(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to resume hif: %d\n", ret);
+ goto exit;
+ }
+
+ ret = ath10k_wow_wakeup(ar);
+ if (ret)
+ ath10k_warn(ar, "failed to wakeup from wow: %d\n", ret);
+
+ ret = ath10k_wow_nlo_cleanup(ar);
+ if (ret)
+ ath10k_warn(ar, "failed to cleanup nlo: %d\n", ret);
+
+exit:
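+ /* Returning 1 asks mac80211 to reconfigure the device (as after a
+  * firmware restart) instead of failing resume outright.
+  */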
+ if (ret) {
+ switch (ar->state) {
+ case ATH10K_STATE_ON:
+ ar->state = ATH10K_STATE_RESTARTING;
+ ret = 1;
+ break;
+ case ATH10K_STATE_OFF:
+ case ATH10K_STATE_RESTARTING:
+ case ATH10K_STATE_RESTARTED:
+ case ATH10K_STATE_UTF:
+ case ATH10K_STATE_WEDGED:
+ ath10k_warn(ar, "encountered unexpected device state %d on resume, cannot recover\n",
+ ar->state);
+ ret = -EIO;
+ break;
+ }
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+int ath10k_wow_init(struct ath10k *ar)
+{
+ if (!test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT,
+ ar->running_fw->fw_file.fw_features))
+ return 0;
+
+ if (WARN_ON(!test_bit(WMI_SERVICE_WOW, ar->wmi.svc_map)))
+ return -EINVAL;
+
+ ar->wow.wowlan_support = ath10k_wowlan_support;
+
+ if (ar->wmi.rx_decap_mode == ATH10K_HW_TXRX_NATIVE_WIFI) {
+ ar->wow.wowlan_support.pattern_max_len -= WOW_MAX_REDUCE;
+ ar->wow.wowlan_support.max_pkt_offset -= WOW_MAX_REDUCE;
+ }
+
+ if (test_bit(WMI_SERVICE_NLO, ar->wmi.svc_map)) {
+ ar->wow.wowlan_support.flags |= WIPHY_WOWLAN_NET_DETECT;
+ ar->wow.wowlan_support.max_nd_match_sets = WMI_PNO_MAX_SUPP_NETWORKS;
+ }
+
+ ar->wow.wowlan_support.n_patterns = ar->wow.max_num_patterns;
+ ar->hw->wiphy->wowlan = &ar->wow.wowlan_support;
+
+ device_set_wakeup_capable(ar->dev, true);
+
+ return 0;
+}
diff --git a/drivers/net/wireless/ath/ath10k/wow.h b/drivers/net/wireless/ath/ath10k/wow.h
new file mode 100644
index 000000000000..14ea4e1e925e
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/wow.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: ISC */
+/*
+ * Copyright (c) 2015,2017 Qualcomm Atheros, Inc.
+ */
+#ifndef _WOW_H_
+#define _WOW_H_
+
+struct ath10k_wow {
+ u32 max_num_patterns;
+ struct completion wakeup_completed;
+ struct wiphy_wowlan_support wowlan_support;
+};
+
+#ifdef CONFIG_PM
+
+int ath10k_wow_init(struct ath10k *ar);
+int ath10k_wow_op_suspend(struct ieee80211_hw *hw,
+ struct cfg80211_wowlan *wowlan);
+int ath10k_wow_op_resume(struct ieee80211_hw *hw);
+void ath10k_wow_op_set_wakeup(struct ieee80211_hw *hw, bool enabled);
+
+#else
+
+static inline int ath10k_wow_init(struct ath10k *ar)
+{
+ return 0;
+}
+
+#endif /* CONFIG_PM */
+#endif /* _WOW_H_ */
diff --git a/drivers/net/wireless/ath/ath11k/Kconfig b/drivers/net/wireless/ath/ath11k/Kconfig
new file mode 100644
index 000000000000..659ef134ef16
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/Kconfig
@@ -0,0 +1,60 @@
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+config ATH11K
+ tristate "Qualcomm Technologies 802.11ax chipset support"
+ depends on MAC80211 && HAS_DMA
+ select CRYPTO_MICHAEL_MIC
+ select ATH_COMMON
+ select QCOM_QMI_HELPERS
+ help
+ This module adds support for Qualcomm Technologies 802.11ax family of
+ chipsets.
+
+ If you choose to build a module, it will be called ath11k.
+
+config ATH11K_AHB
+ tristate "Atheros ath11k AHB support"
+ depends on ATH11K
+ depends on REMOTEPROC
+ help
+ This module adds support for the AHB bus.
+
+config ATH11K_PCI
+ tristate "Atheros ath11k PCI support"
+ depends on ATH11K && PCI
+ select MHI_BUS
+ select QRTR
+ select QRTR_MHI
+ select PCI_PWRCTRL_PWRSEQ if HAVE_PWRCTRL
+ help
+ This module adds support for the PCIe bus.
+
+config ATH11K_DEBUG
+ bool "QCA ath11k debugging"
+ depends on ATH11K
+ help
+ Enables debug support
+
+ If unsure, say Y to make it easier to debug problems.
+
+config ATH11K_DEBUGFS
+ bool "QCA ath11k debugfs support"
+ depends on ATH11K && DEBUG_FS && MAC80211_DEBUGFS
+ help
+ Enable ath11k debugfs support
+
+ If unsure, say Y to make it easier to debug problems.
+
+config ATH11K_TRACING
+ bool "ath11k tracing support"
+ depends on ATH11K && EVENT_TRACING
+ help
+ Select this to use ath11k tracing infrastructure.
+
+config ATH11K_SPECTRAL
+ bool "QCA ath11k spectral scan support"
+ depends on ATH11K_DEBUGFS
+ depends on RELAY
+ help
+ Enable ath11k spectral scan support
+
+ Say Y to enable access to the FFT/spectral data via debugfs.
diff --git a/drivers/net/wireless/ath/ath11k/Makefile b/drivers/net/wireless/ath/ath11k/Makefile
new file mode 100644
index 000000000000..d9092414b362
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/Makefile
@@ -0,0 +1,39 @@
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+obj-$(CONFIG_ATH11K) += ath11k.o
+ath11k-y += core.o \
+ hal.o \
+ hal_tx.o \
+ hal_rx.o \
+ wmi.o \
+ mac.o \
+ reg.o \
+ htc.o \
+ qmi.o \
+ dp.o \
+ dp_tx.o \
+ dp_rx.o \
+ debug.o \
+ ce.o \
+ peer.o \
+ dbring.o \
+ hw.o \
+ pcic.o \
+ fw.o \
+ p2p.o
+
+ath11k-$(CONFIG_ATH11K_DEBUGFS) += debugfs.o debugfs_htt_stats.o debugfs_sta.o
+ath11k-$(CONFIG_NL80211_TESTMODE) += testmode.o
+ath11k-$(CONFIG_ATH11K_TRACING) += trace.o
+ath11k-$(CONFIG_THERMAL) += thermal.o
+ath11k-$(CONFIG_ATH11K_SPECTRAL) += spectral.o
+ath11k-$(CONFIG_PM) += wow.o
+ath11k-$(CONFIG_DEV_COREDUMP) += coredump.o
+
+obj-$(CONFIG_ATH11K_AHB) += ath11k_ahb.o
+ath11k_ahb-y += ahb.o
+
+obj-$(CONFIG_ATH11K_PCI) += ath11k_pci.o
+ath11k_pci-y += mhi.o pci.o
+
+# for tracing framework to find trace.h
+CFLAGS_trace.o := -I$(src)
diff --git a/drivers/net/wireless/ath/ath11k/ahb.c b/drivers/net/wireless/ath/ath11k/ahb.c
new file mode 100644
index 000000000000..8dfe9b40c126
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/ahb.c
@@ -0,0 +1,1319 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/dma-mapping.h>
+#include <linux/iommu.h>
+#include "ahb.h"
+#include "debug.h"
+#include "hif.h"
+#include "qmi.h"
+#include <linux/remoteproc.h>
+#include "pcic.h"
+#include <linux/soc/qcom/smem.h>
+#include <linux/soc/qcom/smem_state.h>
+
+static const struct of_device_id ath11k_ahb_of_match[] = {
+ /* TODO: Should we change the compatible string to something similar
+ * to one that ath10k uses?
+ */
+ { .compatible = "qcom,ipq8074-wifi",
+ .data = (void *)ATH11K_HW_IPQ8074,
+ },
+ { .compatible = "qcom,ipq6018-wifi",
+ .data = (void *)ATH11K_HW_IPQ6018_HW10,
+ },
+ { .compatible = "qcom,wcn6750-wifi",
+ .data = (void *)ATH11K_HW_WCN6750_HW10,
+ },
+ { .compatible = "qcom,ipq5018-wifi",
+ .data = (void *)ATH11K_HW_IPQ5018_HW10,
+ },
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, ath11k_ahb_of_match);
+
+#define ATH11K_IRQ_CE0_OFFSET 4
+
+static const char *irq_name[ATH11K_IRQ_NUM_MAX] = {
+ "misc-pulse1",
+ "misc-latch",
+ "sw-exception",
+ "watchdog",
+ "ce0",
+ "ce1",
+ "ce2",
+ "ce3",
+ "ce4",
+ "ce5",
+ "ce6",
+ "ce7",
+ "ce8",
+ "ce9",
+ "ce10",
+ "ce11",
+ "host2wbm-desc-feed",
+ "host2reo-re-injection",
+ "host2reo-command",
+ "host2rxdma-monitor-ring3",
+ "host2rxdma-monitor-ring2",
+ "host2rxdma-monitor-ring1",
+ "reo2ost-exception",
+ "wbm2host-rx-release",
+ "reo2host-status",
+ "reo2host-destination-ring4",
+ "reo2host-destination-ring3",
+ "reo2host-destination-ring2",
+ "reo2host-destination-ring1",
+ "rxdma2host-monitor-destination-mac3",
+ "rxdma2host-monitor-destination-mac2",
+ "rxdma2host-monitor-destination-mac1",
+ "ppdu-end-interrupts-mac3",
+ "ppdu-end-interrupts-mac2",
+ "ppdu-end-interrupts-mac1",
+ "rxdma2host-monitor-status-ring-mac3",
+ "rxdma2host-monitor-status-ring-mac2",
+ "rxdma2host-monitor-status-ring-mac1",
+ "host2rxdma-host-buf-ring-mac3",
+ "host2rxdma-host-buf-ring-mac2",
+ "host2rxdma-host-buf-ring-mac1",
+ "rxdma2host-destination-ring-mac3",
+ "rxdma2host-destination-ring-mac2",
+ "rxdma2host-destination-ring-mac1",
+ "host2tcl-input-ring4",
+ "host2tcl-input-ring3",
+ "host2tcl-input-ring2",
+ "host2tcl-input-ring1",
+ "wbm2host-tx-completions-ring3",
+ "wbm2host-tx-completions-ring2",
+ "wbm2host-tx-completions-ring1",
+ "tcl2host-status-ring",
+};
+
+/* enum ext_irq_num - irq numbers that can be used by external modules
+ * like datapath
+ */
+enum ext_irq_num {
+ host2wbm_desc_feed = 16,
+ host2reo_re_injection,
+ host2reo_command,
+ host2rxdma_monitor_ring3,
+ host2rxdma_monitor_ring2,
+ host2rxdma_monitor_ring1,
+ reo2host_exception,
+ wbm2host_rx_release,
+ reo2host_status,
+ reo2host_destination_ring4,
+ reo2host_destination_ring3,
+ reo2host_destination_ring2,
+ reo2host_destination_ring1,
+ rxdma2host_monitor_destination_mac3,
+ rxdma2host_monitor_destination_mac2,
+ rxdma2host_monitor_destination_mac1,
+ ppdu_end_interrupts_mac3,
+ ppdu_end_interrupts_mac2,
+ ppdu_end_interrupts_mac1,
+ rxdma2host_monitor_status_ring_mac3,
+ rxdma2host_monitor_status_ring_mac2,
+ rxdma2host_monitor_status_ring_mac1,
+ host2rxdma_host_buf_ring_mac3,
+ host2rxdma_host_buf_ring_mac2,
+ host2rxdma_host_buf_ring_mac1,
+ rxdma2host_destination_ring_mac3,
+ rxdma2host_destination_ring_mac2,
+ rxdma2host_destination_ring_mac1,
+ host2tcl_input_ring4,
+ host2tcl_input_ring3,
+ host2tcl_input_ring2,
+ host2tcl_input_ring1,
+ wbm2host_tx_completions_ring3,
+ wbm2host_tx_completions_ring2,
+ wbm2host_tx_completions_ring1,
+ tcl2host_status_ring,
+};
+
+static int
+ath11k_ahb_get_msi_irq_wcn6750(struct ath11k_base *ab, unsigned int vector)
+{
+ return ab->pci.msi.irqs[vector];
+}
+
+static inline u32
+ath11k_ahb_get_window_start_wcn6750(struct ath11k_base *ab, u32 offset)
+{
+ u32 window_start = 0;
+
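+ /* XOR with the block base clears the base bits for offsets inside
+  * that block, so a result below the window mask identifies the range.
+  */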
+ /* If offset lies within DP register range, use 1st window */
+ if ((offset ^ HAL_SEQ_WCSS_UMAC_OFFSET) < ATH11K_PCI_WINDOW_RANGE_MASK)
+ window_start = ATH11K_PCI_WINDOW_START;
+ /* If offset lies within CE register range, use 2nd window */
+ else if ((offset ^ HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab)) <
+ ATH11K_PCI_WINDOW_RANGE_MASK)
+ window_start = 2 * ATH11K_PCI_WINDOW_START;
+
+ return window_start;
+}
+
+static void
+ath11k_ahb_window_write32_wcn6750(struct ath11k_base *ab, u32 offset, u32 value)
+{
+ u32 window_start;
+
+ /* WCN6750 uses static window based register access */
+ window_start = ath11k_ahb_get_window_start_wcn6750(ab, offset);
+
+ iowrite32(value, ab->mem + window_start +
+ (offset & ATH11K_PCI_WINDOW_RANGE_MASK));
+}
+
+static u32 ath11k_ahb_window_read32_wcn6750(struct ath11k_base *ab, u32 offset)
+{
+ u32 window_start;
+ u32 val;
+
+ /* WCN6750 uses static window based register access */
+ window_start = ath11k_ahb_get_window_start_wcn6750(ab, offset);
+
+ val = ioread32(ab->mem + window_start +
+ (offset & ATH11K_PCI_WINDOW_RANGE_MASK));
+ return val;
+}
+
+static const struct ath11k_pci_ops ath11k_ahb_pci_ops_wcn6750 = {
+ .wakeup = NULL,
+ .release = NULL,
+ .get_msi_irq = ath11k_ahb_get_msi_irq_wcn6750,
+ .window_write32 = ath11k_ahb_window_write32_wcn6750,
+ .window_read32 = ath11k_ahb_window_read32_wcn6750,
+};
+
+static inline u32 ath11k_ahb_read32(struct ath11k_base *ab, u32 offset)
+{
+ return ioread32(ab->mem + offset);
+}
+
+static inline void ath11k_ahb_write32(struct ath11k_base *ab, u32 offset, u32 value)
+{
+ iowrite32(value, ab->mem + offset);
+}
+
+static void ath11k_ahb_kill_tasklets(struct ath11k_base *ab)
+{
+ int i;
+
+ for (i = 0; i < ab->hw_params.ce_count; i++) {
+ struct ath11k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i];
+
+ if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
+ continue;
+
+ tasklet_kill(&ce_pipe->intr_tq);
+ }
+}
+
+static void ath11k_ahb_ext_grp_disable(struct ath11k_ext_irq_grp *irq_grp)
+{
+ int i;
+
+ for (i = 0; i < irq_grp->num_irq; i++)
+ disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
+}
+
+static void __ath11k_ahb_ext_irq_disable(struct ath11k_base *ab)
+{
+ int i;
+
+ for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
+ struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
+
+ ath11k_ahb_ext_grp_disable(irq_grp);
+
+ if (irq_grp->napi_enabled) {
+ napi_synchronize(&irq_grp->napi);
+ napi_disable(&irq_grp->napi);
+ irq_grp->napi_enabled = false;
+ }
+ }
+}
+
+static void ath11k_ahb_ext_grp_enable(struct ath11k_ext_irq_grp *irq_grp)
+{
+ int i;
+
+ for (i = 0; i < irq_grp->num_irq; i++)
+ enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
+}
+
+static void ath11k_ahb_setbit32(struct ath11k_base *ab, u8 bit, u32 offset)
+{
+ u32 val;
+
+ val = ath11k_ahb_read32(ab, offset);
+ ath11k_ahb_write32(ab, offset, val | BIT(bit));
+}
+
+static void ath11k_ahb_clearbit32(struct ath11k_base *ab, u8 bit, u32 offset)
+{
+ u32 val;
+
+ val = ath11k_ahb_read32(ab, offset);
+ ath11k_ahb_write32(ab, offset, val & ~BIT(bit));
+}
+
+static void ath11k_ahb_ce_irq_enable(struct ath11k_base *ab, u16 ce_id)
+{
+ const struct ce_attr *ce_attr;
+ const struct ce_ie_addr *ce_ie_addr = ab->hw_params.ce_ie_addr;
+ u32 ie1_reg_addr, ie2_reg_addr, ie3_reg_addr;
+
+ ie1_reg_addr = ce_ie_addr->ie1_reg_addr + ATH11K_CE_OFFSET(ab);
+ ie2_reg_addr = ce_ie_addr->ie2_reg_addr + ATH11K_CE_OFFSET(ab);
+ ie3_reg_addr = ce_ie_addr->ie3_reg_addr + ATH11K_CE_OFFSET(ab);
+
+ ce_attr = &ab->hw_params.host_ce_config[ce_id];
+ if (ce_attr->src_nentries)
+ ath11k_ahb_setbit32(ab, ce_id, ie1_reg_addr);
+
+ if (ce_attr->dest_nentries) {
+ ath11k_ahb_setbit32(ab, ce_id, ie2_reg_addr);
+ ath11k_ahb_setbit32(ab, ce_id + CE_HOST_IE_3_SHIFT,
+ ie3_reg_addr);
+ }
+}
+
+static void ath11k_ahb_ce_irq_disable(struct ath11k_base *ab, u16 ce_id)
+{
+ const struct ce_attr *ce_attr;
+ const struct ce_ie_addr *ce_ie_addr = ab->hw_params.ce_ie_addr;
+ u32 ie1_reg_addr, ie2_reg_addr, ie3_reg_addr;
+
+ ie1_reg_addr = ce_ie_addr->ie1_reg_addr + ATH11K_CE_OFFSET(ab);
+ ie2_reg_addr = ce_ie_addr->ie2_reg_addr + ATH11K_CE_OFFSET(ab);
+ ie3_reg_addr = ce_ie_addr->ie3_reg_addr + ATH11K_CE_OFFSET(ab);
+
+ ce_attr = &ab->hw_params.host_ce_config[ce_id];
+ if (ce_attr->src_nentries)
+ ath11k_ahb_clearbit32(ab, ce_id, ie1_reg_addr);
+
+ if (ce_attr->dest_nentries) {
+ ath11k_ahb_clearbit32(ab, ce_id, ie2_reg_addr);
+ ath11k_ahb_clearbit32(ab, ce_id + CE_HOST_IE_3_SHIFT,
+ ie3_reg_addr);
+ }
+}
+
+static void ath11k_ahb_sync_ce_irqs(struct ath11k_base *ab)
+{
+ int i;
+ int irq_idx;
+
+ for (i = 0; i < ab->hw_params.ce_count; i++) {
+ if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
+ continue;
+
+ irq_idx = ATH11K_IRQ_CE0_OFFSET + i;
+ synchronize_irq(ab->irq_num[irq_idx]);
+ }
+}
+
+static void ath11k_ahb_sync_ext_irqs(struct ath11k_base *ab)
+{
+ int i, j;
+ int irq_idx;
+
+ for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
+ struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
+
+ for (j = 0; j < irq_grp->num_irq; j++) {
+ irq_idx = irq_grp->irqs[j];
+ synchronize_irq(ab->irq_num[irq_idx]);
+ }
+ }
+}
+
+static void ath11k_ahb_ce_irqs_enable(struct ath11k_base *ab)
+{
+ int i;
+
+ for (i = 0; i < ab->hw_params.ce_count; i++) {
+ if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
+ continue;
+ ath11k_ahb_ce_irq_enable(ab, i);
+ }
+}
+
+static void ath11k_ahb_ce_irqs_disable(struct ath11k_base *ab)
+{
+ int i;
+
+ for (i = 0; i < ab->hw_params.ce_count; i++) {
+ if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
+ continue;
+ ath11k_ahb_ce_irq_disable(ab, i);
+ }
+}
+
+static int ath11k_ahb_start(struct ath11k_base *ab)
+{
+ ath11k_ahb_ce_irqs_enable(ab);
+ ath11k_ce_rx_post_buf(ab);
+
+ return 0;
+}
+
+static void ath11k_ahb_ext_irq_enable(struct ath11k_base *ab)
+{
+ int i;
+
+ for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
+ struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
+
+ if (!irq_grp->napi_enabled) {
+ napi_enable(&irq_grp->napi);
+ irq_grp->napi_enabled = true;
+ }
+ ath11k_ahb_ext_grp_enable(irq_grp);
+ }
+}
+
+static void ath11k_ahb_ext_irq_disable(struct ath11k_base *ab)
+{
+ __ath11k_ahb_ext_irq_disable(ab);
+ ath11k_ahb_sync_ext_irqs(ab);
+}
+
+static void ath11k_ahb_stop(struct ath11k_base *ab)
+{
+ if (!test_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags))
+ ath11k_ahb_ce_irqs_disable(ab);
+ ath11k_ahb_sync_ce_irqs(ab);
+ ath11k_ahb_kill_tasklets(ab);
+ timer_delete_sync(&ab->rx_replenish_retry);
+ ath11k_ce_cleanup_pipes(ab);
+}
+
+static int ath11k_ahb_power_up(struct ath11k_base *ab)
+{
+ struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
+ int ret;
+
+ ret = rproc_boot(ab_ahb->tgt_rproc);
+ if (ret)
+ ath11k_err(ab, "failed to boot the remote processor Q6\n");
+
+ return ret;
+}
+
+static void ath11k_ahb_power_down(struct ath11k_base *ab, bool is_suspend)
+{
+ struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
+
+ rproc_shutdown(ab_ahb->tgt_rproc);
+}
+
+static void ath11k_ahb_init_qmi_ce_config(struct ath11k_base *ab)
+{
+ struct ath11k_qmi_ce_cfg *cfg = &ab->qmi.ce_cfg;
+
+ cfg->tgt_ce_len = ab->hw_params.target_ce_count;
+ cfg->tgt_ce = ab->hw_params.target_ce_config;
+ cfg->svc_to_ce_map_len = ab->hw_params.svc_to_ce_map_len;
+ cfg->svc_to_ce_map = ab->hw_params.svc_to_ce_map;
+ ab->qmi.service_ins_id = ab->hw_params.qmi_service_ins_id;
+}
+
+static void ath11k_ahb_free_ext_irq(struct ath11k_base *ab)
+{
+ int i, j;
+
+ for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
+ struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
+
+ for (j = 0; j < irq_grp->num_irq; j++)
+ free_irq(ab->irq_num[irq_grp->irqs[j]], irq_grp);
+
+ netif_napi_del(&irq_grp->napi);
+ free_netdev(irq_grp->napi_ndev);
+ }
+}
+
+static void ath11k_ahb_free_irq(struct ath11k_base *ab)
+{
+ int irq_idx;
+ int i;
+
+ if (ab->hw_params.hybrid_bus_type)
+ return ath11k_pcic_free_irq(ab);
+
+ for (i = 0; i < ab->hw_params.ce_count; i++) {
+ if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
+ continue;
+ irq_idx = ATH11K_IRQ_CE0_OFFSET + i;
+ free_irq(ab->irq_num[irq_idx], &ab->ce.ce_pipe[i]);
+ }
+
+ ath11k_ahb_free_ext_irq(ab);
+}
+
+static void ath11k_ahb_ce_tasklet(struct tasklet_struct *t)
+{
+ struct ath11k_ce_pipe *ce_pipe = from_tasklet(ce_pipe, t, intr_tq);
+
+ ath11k_ce_per_engine_service(ce_pipe->ab, ce_pipe->pipe_num);
+
+ ath11k_ahb_ce_irq_enable(ce_pipe->ab, ce_pipe->pipe_num);
+}
+
+static irqreturn_t ath11k_ahb_ce_interrupt_handler(int irq, void *arg)
+{
+ struct ath11k_ce_pipe *ce_pipe = arg;
+
+ /* last interrupt received for this CE */
+ ce_pipe->timestamp = jiffies;
+
+ ath11k_ahb_ce_irq_disable(ce_pipe->ab, ce_pipe->pipe_num);
+
+ tasklet_schedule(&ce_pipe->intr_tq);
+
+ return IRQ_HANDLED;
+}
+
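+/* NAPI poll for an external interrupt group: service the group's rings
+ * and re-arm the group's interrupts once the work done falls under
+ * budget.
+ */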
+static int ath11k_ahb_ext_grp_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct ath11k_ext_irq_grp *irq_grp = container_of(napi,
+ struct ath11k_ext_irq_grp,
+ napi);
+ struct ath11k_base *ab = irq_grp->ab;
+ int work_done;
+
+ work_done = ath11k_dp_service_srng(ab, irq_grp, budget);
+ if (work_done < budget) {
+ napi_complete_done(napi, work_done);
+ ath11k_ahb_ext_grp_enable(irq_grp);
+ }
+
+ if (work_done > budget)
+ work_done = budget;
+
+ return work_done;
+}
+
+static irqreturn_t ath11k_ahb_ext_interrupt_handler(int irq, void *arg)
+{
+ struct ath11k_ext_irq_grp *irq_grp = arg;
+
+ /* last interrupt received for this group */
+ irq_grp->timestamp = jiffies;
+
+ ath11k_ahb_ext_grp_disable(irq_grp);
+
+ napi_schedule(&irq_grp->napi);
+
+ return IRQ_HANDLED;
+}
+
+static int ath11k_ahb_config_ext_irq(struct ath11k_base *ab)
+{
+ struct ath11k_hw_params *hw = &ab->hw_params;
+ int i, j;
+ int irq;
+ int ret;
+
+ for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
+ struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
+ u32 num_irq = 0;
+
+ irq_grp->ab = ab;
+ irq_grp->grp_id = i;
+
+ irq_grp->napi_ndev = alloc_netdev_dummy(0);
+ if (!irq_grp->napi_ndev)
+ return -ENOMEM;
+
+ netif_napi_add(irq_grp->napi_ndev, &irq_grp->napi,
+ ath11k_ahb_ext_grp_napi_poll);
+
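+ /* The per-ring IRQ enums are declared in descending ring order,
+  * so ring index j maps to <block>_ring1 - j.
+  */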
+ for (j = 0; j < ATH11K_EXT_IRQ_NUM_MAX; j++) {
+ if (ab->hw_params.ring_mask->tx[i] & BIT(j)) {
+ irq_grp->irqs[num_irq++] =
+ wbm2host_tx_completions_ring1 - j;
+ }
+
+ if (ab->hw_params.ring_mask->rx[i] & BIT(j)) {
+ irq_grp->irqs[num_irq++] =
+ reo2host_destination_ring1 - j;
+ }
+
+ if (ab->hw_params.ring_mask->rx_err[i] & BIT(j))
+ irq_grp->irqs[num_irq++] = reo2host_exception;
+
+ if (ab->hw_params.ring_mask->rx_wbm_rel[i] & BIT(j))
+ irq_grp->irqs[num_irq++] = wbm2host_rx_release;
+
+ if (ab->hw_params.ring_mask->reo_status[i] & BIT(j))
+ irq_grp->irqs[num_irq++] = reo2host_status;
+
+ if (j < ab->hw_params.max_radios) {
+ if (ab->hw_params.ring_mask->rxdma2host[i] & BIT(j)) {
+ irq_grp->irqs[num_irq++] =
+ rxdma2host_destination_ring_mac1 -
+ ath11k_hw_get_mac_from_pdev_id(hw, j);
+ }
+
+ if (ab->hw_params.ring_mask->host2rxdma[i] & BIT(j)) {
+ irq_grp->irqs[num_irq++] =
+ host2rxdma_host_buf_ring_mac1 -
+ ath11k_hw_get_mac_from_pdev_id(hw, j);
+ }
+
+ if (ab->hw_params.ring_mask->rx_mon_status[i] & BIT(j)) {
+ irq_grp->irqs[num_irq++] =
+ ppdu_end_interrupts_mac1 -
+ ath11k_hw_get_mac_from_pdev_id(hw, j);
+ irq_grp->irqs[num_irq++] =
+ rxdma2host_monitor_status_ring_mac1 -
+ ath11k_hw_get_mac_from_pdev_id(hw, j);
+ }
+ }
+ }
+ irq_grp->num_irq = num_irq;
+
+ for (j = 0; j < irq_grp->num_irq; j++) {
+ int irq_idx = irq_grp->irqs[j];
+
+ irq = platform_get_irq_byname(ab->pdev,
+ irq_name[irq_idx]);
+ ab->irq_num[irq_idx] = irq;
+ irq_set_status_flags(irq, IRQ_NOAUTOEN | IRQ_DISABLE_UNLAZY);
+ ret = request_irq(irq, ath11k_ahb_ext_interrupt_handler,
+ IRQF_TRIGGER_RISING,
+ irq_name[irq_idx], irq_grp);
+ if (ret) {
+ ath11k_err(ab, "failed request_irq for %d\n",
+ irq);
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int ath11k_ahb_config_irq(struct ath11k_base *ab)
+{
+ int irq, irq_idx, i;
+ int ret;
+
+ if (ab->hw_params.hybrid_bus_type)
+ return ath11k_pcic_config_irq(ab);
+
+ /* Configure CE irqs */
+ for (i = 0; i < ab->hw_params.ce_count; i++) {
+ struct ath11k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i];
+
+ if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
+ continue;
+
+ irq_idx = ATH11K_IRQ_CE0_OFFSET + i;
+
+ tasklet_setup(&ce_pipe->intr_tq, ath11k_ahb_ce_tasklet);
+ irq = platform_get_irq_byname(ab->pdev, irq_name[irq_idx]);
+ ret = request_irq(irq, ath11k_ahb_ce_interrupt_handler,
+ IRQF_TRIGGER_RISING, irq_name[irq_idx],
+ ce_pipe);
+ if (ret)
+ return ret;
+
+ ab->irq_num[irq_idx] = irq;
+ }
+
+ /* Configure external interrupts */
+ ret = ath11k_ahb_config_ext_irq(ab);
+
+ return ret;
+}
+
+static int ath11k_ahb_map_service_to_pipe(struct ath11k_base *ab, u16 service_id,
+ u8 *ul_pipe, u8 *dl_pipe)
+{
+ const struct service_to_pipe *entry;
+ bool ul_set = false, dl_set = false;
+ int i;
+
+ for (i = 0; i < ab->hw_params.svc_to_ce_map_len; i++) {
+ entry = &ab->hw_params.svc_to_ce_map[i];
+
+ if (__le32_to_cpu(entry->service_id) != service_id)
+ continue;
+
+ switch (__le32_to_cpu(entry->pipedir)) {
+ case PIPEDIR_NONE:
+ break;
+ case PIPEDIR_IN:
+ WARN_ON(dl_set);
+ *dl_pipe = __le32_to_cpu(entry->pipenum);
+ dl_set = true;
+ break;
+ case PIPEDIR_OUT:
+ WARN_ON(ul_set);
+ *ul_pipe = __le32_to_cpu(entry->pipenum);
+ ul_set = true;
+ break;
+ case PIPEDIR_INOUT:
+ WARN_ON(dl_set);
+ WARN_ON(ul_set);
+ *dl_pipe = __le32_to_cpu(entry->pipenum);
+ *ul_pipe = __le32_to_cpu(entry->pipenum);
+ dl_set = true;
+ ul_set = true;
+ break;
+ }
+ }
+
+ if (WARN_ON(!ul_set || !dl_set))
+ return -ENOENT;
+
+ return 0;
+}
+
+static int ath11k_ahb_hif_suspend(struct ath11k_base *ab)
+{
+ struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
+ u32 wake_irq;
+ u32 value = 0;
+ int ret;
+
+ if (!device_may_wakeup(ab->dev))
+ return -EPERM;
+
+ wake_irq = ab->irq_num[ATH11K_PCI_IRQ_CE0_OFFSET + ATH11K_PCI_CE_WAKE_IRQ];
+
+ ret = enable_irq_wake(wake_irq);
+ if (ret) {
+ ath11k_err(ab, "failed to enable wakeup irq :%d\n", ret);
+ return ret;
+ }
+
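+ /* Ask the Q6 over SMP2P to enter power save; the message carries
+  * a rolling sequence number alongside the command.
+  */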
+ value = u32_encode_bits(ab_ahb->smp2p_info.seq_no++,
+ ATH11K_AHB_SMP2P_SMEM_SEQ_NO);
+ value |= u32_encode_bits(ATH11K_AHB_POWER_SAVE_ENTER,
+ ATH11K_AHB_SMP2P_SMEM_MSG);
+
+ ret = qcom_smem_state_update_bits(ab_ahb->smp2p_info.smem_state,
+ ATH11K_AHB_SMP2P_SMEM_VALUE_MASK, value);
+ if (ret) {
+ ath11k_err(ab, "failed to send smp2p power save enter cmd :%d\n", ret);
+ return ret;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_AHB, "device suspended\n");
+
+ return ret;
+}
+
+static int ath11k_ahb_hif_resume(struct ath11k_base *ab)
+{
+ struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
+ u32 wake_irq;
+ u32 value = 0;
+ int ret;
+
+ if (!device_may_wakeup(ab->dev))
+ return -EPERM;
+
+ wake_irq = ab->irq_num[ATH11K_PCI_IRQ_CE0_OFFSET + ATH11K_PCI_CE_WAKE_IRQ];
+
+ ret = disable_irq_wake(wake_irq);
+ if (ret) {
+ ath11k_err(ab, "failed to disable wakeup irq: %d\n", ret);
+ return ret;
+ }
+
+ reinit_completion(&ab->wow.wakeup_completed);
+
+ value = u32_encode_bits(ab_ahb->smp2p_info.seq_no++,
+ ATH11K_AHB_SMP2P_SMEM_SEQ_NO);
+ value |= u32_encode_bits(ATH11K_AHB_POWER_SAVE_EXIT,
+ ATH11K_AHB_SMP2P_SMEM_MSG);
+
+ ret = qcom_smem_state_update_bits(ab_ahb->smp2p_info.smem_state,
+ ATH11K_AHB_SMP2P_SMEM_VALUE_MASK, value);
+ if (ret) {
+ ath11k_err(ab, "failed to send smp2p power save enter cmd :%d\n", ret);
+ return ret;
+ }
+
+ ret = wait_for_completion_timeout(&ab->wow.wakeup_completed, 3 * HZ);
+ if (ret == 0) {
+ ath11k_warn(ab, "timed out while waiting for wow wakeup completion\n");
+ return -ETIMEDOUT;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_AHB, "device resumed\n");
+
+ return 0;
+}
+
+static const struct ath11k_hif_ops ath11k_ahb_hif_ops_ipq8074 = {
+ .start = ath11k_ahb_start,
+ .stop = ath11k_ahb_stop,
+ .read32 = ath11k_ahb_read32,
+ .write32 = ath11k_ahb_write32,
+ .read = NULL,
+ .irq_enable = ath11k_ahb_ext_irq_enable,
+ .irq_disable = ath11k_ahb_ext_irq_disable,
+ .map_service_to_pipe = ath11k_ahb_map_service_to_pipe,
+ .power_down = ath11k_ahb_power_down,
+ .power_up = ath11k_ahb_power_up,
+};
+
+static const struct ath11k_hif_ops ath11k_ahb_hif_ops_wcn6750 = {
+ .start = ath11k_pcic_start,
+ .stop = ath11k_pcic_stop,
+ .read32 = ath11k_pcic_read32,
+ .write32 = ath11k_pcic_write32,
+ .read = NULL,
+ .irq_enable = ath11k_pcic_ext_irq_enable,
+ .irq_disable = ath11k_pcic_ext_irq_disable,
+ .get_msi_address = ath11k_pcic_get_msi_address,
+ .get_user_msi_vector = ath11k_pcic_get_user_msi_assignment,
+ .map_service_to_pipe = ath11k_pcic_map_service_to_pipe,
+ .power_down = ath11k_ahb_power_down,
+ .power_up = ath11k_ahb_power_up,
+ .suspend = ath11k_ahb_hif_suspend,
+ .resume = ath11k_ahb_hif_resume,
+ .ce_irq_enable = ath11k_pci_enable_ce_irqs_except_wake_irq,
+ .ce_irq_disable = ath11k_pci_disable_ce_irqs_except_wake_irq,
+};
+
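+/* Resolve the "qcom,rproc" DT phandle to the remoteproc instance that
+ * runs the WLAN firmware; defer probing if it is not available yet.
+ */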
+static int ath11k_core_get_rproc(struct ath11k_base *ab)
+{
+ struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
+ struct device *dev = ab->dev;
+ struct rproc *prproc;
+ phandle rproc_phandle;
+
+ if (of_property_read_u32(dev->of_node, "qcom,rproc", &rproc_phandle)) {
+ ath11k_err(ab, "failed to get q6_rproc handle\n");
+ return -ENOENT;
+ }
+
+ prproc = rproc_get_by_phandle(rproc_phandle);
+ if (!prproc) {
+ ath11k_dbg(ab, ATH11K_DBG_AHB, "failed to get rproc, deferring\n");
+ return -EPROBE_DEFER;
+ }
+ ab_ahb->tgt_rproc = prproc;
+
+ return 0;
+}
+
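+/* For hybrid bus chipsets the MSI target address is taken from the
+ * first MEM resource (mapped for DMA) and the base MSI data from the
+ * second cell of the DT "interrupts" property (offset by 32); the
+ * per-vector IRQ numbers come from the platform device.
+ */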
+static int ath11k_ahb_setup_msi_resources(struct ath11k_base *ab)
+{
+ struct platform_device *pdev = ab->pdev;
+ phys_addr_t msi_addr_pa;
+ dma_addr_t msi_addr_iova;
+ struct resource *res;
+ int int_prop;
+ int ret;
+ int i;
+
+ ret = ath11k_pcic_init_msi_config(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to init msi config: %d\n", ret);
+ return ret;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ ath11k_err(ab, "failed to fetch msi_addr\n");
+ return -ENOENT;
+ }
+
+ msi_addr_pa = res->start;
+ msi_addr_iova = dma_map_resource(ab->dev, msi_addr_pa, PAGE_SIZE,
+ DMA_FROM_DEVICE, 0);
+ if (dma_mapping_error(ab->dev, msi_addr_iova))
+ return -ENOMEM;
+
+ ab->pci.msi.addr_lo = lower_32_bits(msi_addr_iova);
+ ab->pci.msi.addr_hi = upper_32_bits(msi_addr_iova);
+
+ ret = of_property_read_u32_index(ab->dev->of_node, "interrupts", 1, &int_prop);
+ if (ret)
+ return ret;
+
+ ab->pci.msi.ep_base_data = int_prop + 32;
+
+ for (i = 0; i < ab->pci.msi.config->total_vectors; i++) {
+ ret = platform_get_irq(pdev, i);
+ if (ret < 0)
+ return ret;
+
+ ab->pci.msi.irqs[i] = ret;
+ }
+
+ set_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags);
+
+ return 0;
+}
+
+static int ath11k_ahb_setup_smp2p_handle(struct ath11k_base *ab)
+{
+ struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
+
+ if (!ab->hw_params.smp2p_wow_exit)
+ return 0;
+
+ ab_ahb->smp2p_info.smem_state = qcom_smem_state_get(ab->dev, "wlan-smp2p-out",
+ &ab_ahb->smp2p_info.smem_bit);
+ if (IS_ERR(ab_ahb->smp2p_info.smem_state)) {
+ ath11k_err(ab, "failed to fetch smem state: %ld\n",
+ PTR_ERR(ab_ahb->smp2p_info.smem_state));
+ return PTR_ERR(ab_ahb->smp2p_info.smem_state);
+ }
+
+ return 0;
+}
+
+static void ath11k_ahb_release_smp2p_handle(struct ath11k_base *ab)
+{
+ struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
+
+ if (!ab->hw_params.smp2p_wow_exit)
+ return;
+
+ qcom_smem_state_put(ab_ahb->smp2p_info.smem_state);
+}
+
+static int ath11k_ahb_setup_resources(struct ath11k_base *ab)
+{
+ struct platform_device *pdev = ab->pdev;
+ struct resource *mem_res;
+ void __iomem *mem;
+
+ if (ab->hw_params.hybrid_bus_type)
+ return ath11k_ahb_setup_msi_resources(ab);
+
+ mem = devm_platform_get_and_ioremap_resource(pdev, 0, &mem_res);
+ if (IS_ERR(mem)) {
+ dev_err(&pdev->dev, "ioremap error\n");
+ return PTR_ERR(mem);
+ }
+
+ ab->mem = mem;
+ ab->mem_len = resource_size(mem_res);
+
+ return 0;
+}
+
+static int ath11k_ahb_setup_msa_resources(struct ath11k_base *ab)
+{
+ struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
+ struct device *dev = ab->dev;
+ struct resource r;
+ int ret;
+
+ ret = of_reserved_mem_region_to_resource(dev->of_node, 0, &r);
+ if (ret) {
+ dev_err(dev, "failed to resolve msa fixed region\n");
+ return ret;
+ }
+
+ ab_ahb->fw.msa_paddr = r.start;
+ ab_ahb->fw.msa_size = resource_size(&r);
+
+ ret = of_reserved_mem_region_to_resource(dev->of_node, 1, &r);
+ if (ret) {
+ dev_err(dev, "failed to resolve ce fixed region\n");
+ return ret;
+ }
+
+ ab_ahb->fw.ce_paddr = r.start;
+ ab_ahb->fw.ce_size = resource_size(&r);
+
+ return 0;
+}
+
+static int ath11k_ahb_ce_remap(struct ath11k_base *ab)
+{
+ const struct ce_remap *ce_remap = ab->hw_params.ce_remap;
+ struct platform_device *pdev = ab->pdev;
+
+ if (!ce_remap) {
+ /* no separate CE register space */
+ ab->mem_ce = ab->mem;
+ return 0;
+ }
+
+ /* Unlike IPQ8074 or IPQ6018, the CE register space is moved out of
+ * WCSS and is not contiguous, hence the CE registers are remapped
+ * to a new space for access.
+ */
+ ab->mem_ce = ioremap(ce_remap->base, ce_remap->size);
+ if (!ab->mem_ce) {
+ dev_err(&pdev->dev, "ce ioremap error\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void ath11k_ahb_ce_unmap(struct ath11k_base *ab)
+{
+ if (ab->hw_params.ce_remap)
+ iounmap(ab->mem_ce);
+}
+
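+/* For chipsets with fixed firmware memory, firmware runs either in a
+ * TrustZone-protected region (no "wifi-firmware" DT node present) or
+ * behind an IOMMU: in the latter case a child platform device is
+ * created and the MSA and CE regions are identity-mapped into its
+ * IOMMU domain.
+ */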
+static int ath11k_ahb_fw_resources_init(struct ath11k_base *ab)
+{
+ struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
+ struct device *host_dev = ab->dev;
+ struct platform_device_info info = {};
+ struct iommu_domain *iommu_dom;
+ struct platform_device *pdev;
+ struct device_node *node;
+ int ret;
+
+ /* Chipsets that do not require MSA need not initialize
+ * MSA resources; return success in such cases.
+ */
+ if (!ab->hw_params.fixed_fw_mem)
+ return 0;
+
+ node = of_get_child_by_name(host_dev->of_node, "wifi-firmware");
+ if (!node) {
+ ab_ahb->fw.use_tz = true;
+ return 0;
+ }
+
+ ret = ath11k_ahb_setup_msa_resources(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to setup msa resources\n");
+ return ret;
+ }
+
+ info.fwnode = &node->fwnode;
+ info.parent = host_dev;
+ info.name = node->name;
+ info.dma_mask = DMA_BIT_MASK(32);
+
+ pdev = platform_device_register_full(&info);
+ if (IS_ERR(pdev)) {
+ of_node_put(node);
+ return PTR_ERR(pdev);
+ }
+
+ ret = of_dma_configure(&pdev->dev, node, true);
+ if (ret) {
+ ath11k_err(ab, "dma configure fail: %d\n", ret);
+ goto err_unregister;
+ }
+
+ ab_ahb->fw.dev = &pdev->dev;
+
+ iommu_dom = iommu_paging_domain_alloc(ab_ahb->fw.dev);
+ if (IS_ERR(iommu_dom)) {
+ ath11k_err(ab, "failed to allocate iommu domain\n");
+ ret = PTR_ERR(iommu_dom);
+ goto err_unregister;
+ }
+
+ ret = iommu_attach_device(iommu_dom, ab_ahb->fw.dev);
+ if (ret) {
+ ath11k_err(ab, "could not attach device: %d\n", ret);
+ goto err_iommu_free;
+ }
+
+ ret = iommu_map(iommu_dom, ab_ahb->fw.msa_paddr,
+ ab_ahb->fw.msa_paddr, ab_ahb->fw.msa_size,
+ IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);
+ if (ret) {
+ ath11k_err(ab, "failed to map firmware region: %d\n", ret);
+ goto err_iommu_detach;
+ }
+
+ ret = iommu_map(iommu_dom, ab_ahb->fw.ce_paddr,
+ ab_ahb->fw.ce_paddr, ab_ahb->fw.ce_size,
+ IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);
+ if (ret) {
+ ath11k_err(ab, "failed to map firmware CE region: %d\n", ret);
+ goto err_iommu_unmap;
+ }
+
+ ab_ahb->fw.use_tz = false;
+ ab_ahb->fw.iommu_domain = iommu_dom;
+ of_node_put(node);
+
+ return 0;
+
+err_iommu_unmap:
+ iommu_unmap(iommu_dom, ab_ahb->fw.msa_paddr, ab_ahb->fw.msa_size);
+
+err_iommu_detach:
+ iommu_detach_device(iommu_dom, ab_ahb->fw.dev);
+
+err_iommu_free:
+ iommu_domain_free(iommu_dom);
+
+err_unregister:
+ platform_device_unregister(pdev);
+ of_node_put(node);
+
+ return ret;
+}
+
+static int ath11k_ahb_fw_resource_deinit(struct ath11k_base *ab)
+{
+ struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
+ struct iommu_domain *iommu;
+ size_t unmapped_size;
+
+ /* Chipsets that do not require MSA will not have initialized
+ * MSA resources; return success in such cases.
+ */
+ if (!ab->hw_params.fixed_fw_mem)
+ return 0;
+
+ if (ab_ahb->fw.use_tz)
+ return 0;
+
+ iommu = ab_ahb->fw.iommu_domain;
+
+ unmapped_size = iommu_unmap(iommu, ab_ahb->fw.msa_paddr, ab_ahb->fw.msa_size);
+ if (unmapped_size != ab_ahb->fw.msa_size)
+ ath11k_err(ab, "failed to unmap firmware: %zu\n",
+ unmapped_size);
+
+ unmapped_size = iommu_unmap(iommu, ab_ahb->fw.ce_paddr, ab_ahb->fw.ce_size);
+ if (unmapped_size != ab_ahb->fw.ce_size)
+ ath11k_err(ab, "failed to unmap firmware CE memory: %zu\n",
+ unmapped_size);
+
+ iommu_detach_device(iommu, ab_ahb->fw.dev);
+ iommu_domain_free(iommu);
+
+ platform_device_unregister(to_platform_device(ab_ahb->fw.dev));
+
+ return 0;
+}
+
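+/* Probe: pick the hif/pci ops for the detected hw revision, allocate
+ * the ath11k core, set up bus resources (memory or MSI), firmware
+ * memory and SMP2P, bring up HAL/CE, and finally initialize the core
+ * and its interrupts.
+ */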
+static int ath11k_ahb_probe(struct platform_device *pdev)
+{
+ struct ath11k_base *ab;
+ const struct ath11k_hif_ops *hif_ops;
+ const struct ath11k_pci_ops *pci_ops;
+ enum ath11k_hw_rev hw_rev;
+ int ret;
+
+ hw_rev = (uintptr_t)device_get_match_data(&pdev->dev);
+
+ switch (hw_rev) {
+ case ATH11K_HW_IPQ8074:
+ case ATH11K_HW_IPQ6018_HW10:
+ case ATH11K_HW_IPQ5018_HW10:
+ hif_ops = &ath11k_ahb_hif_ops_ipq8074;
+ pci_ops = NULL;
+ break;
+ case ATH11K_HW_WCN6750_HW10:
+ hif_ops = &ath11k_ahb_hif_ops_wcn6750;
+ pci_ops = &ath11k_ahb_pci_ops_wcn6750;
+ break;
+ default:
+ dev_err(&pdev->dev, "unsupported device type %d\n", hw_rev);
+ return -EOPNOTSUPP;
+ }
+
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(&pdev->dev, "failed to set 32-bit consistent dma\n");
+ return ret;
+ }
+
+ ab = ath11k_core_alloc(&pdev->dev, sizeof(struct ath11k_ahb),
+ ATH11K_BUS_AHB);
+ if (!ab) {
+ dev_err(&pdev->dev, "failed to allocate ath11k base\n");
+ return -ENOMEM;
+ }
+
+ ab->hif.ops = hif_ops;
+ ab->pdev = pdev;
+ ab->hw_rev = hw_rev;
+ ab->fw_mode = ATH11K_FIRMWARE_MODE_NORMAL;
+ platform_set_drvdata(pdev, ab);
+
+ ret = ath11k_pcic_register_pci_ops(ab, pci_ops);
+ if (ret) {
+ ath11k_err(ab, "failed to register PCI ops: %d\n", ret);
+ goto err_core_free;
+ }
+
+ ret = ath11k_core_pre_init(ab);
+ if (ret)
+ goto err_core_free;
+
+ ret = ath11k_ahb_setup_resources(ab);
+ if (ret)
+ goto err_core_free;
+
+ ret = ath11k_ahb_ce_remap(ab);
+ if (ret)
+ goto err_core_free;
+
+ ret = ath11k_ahb_fw_resources_init(ab);
+ if (ret)
+ goto err_ce_unmap;
+
+ ret = ath11k_ahb_setup_smp2p_handle(ab);
+ if (ret)
+ goto err_fw_deinit;
+
+ ret = ath11k_hal_srng_init(ab);
+ if (ret)
+ goto err_release_smp2p_handle;
+
+ ret = ath11k_ce_alloc_pipes(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to allocate ce pipes: %d\n", ret);
+ goto err_hal_srng_deinit;
+ }
+
+ ath11k_ahb_init_qmi_ce_config(ab);
+
+ ret = ath11k_core_get_rproc(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to get rproc: %d\n", ret);
+ goto err_ce_free;
+ }
+
+ ret = ath11k_core_init(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to init core: %d\n", ret);
+ goto err_ce_free;
+ }
+
+ ret = ath11k_ahb_config_irq(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to configure irq: %d\n", ret);
+ goto err_ce_free;
+ }
+
+ ath11k_qmi_fwreset_from_cold_boot(ab);
+
+ return 0;
+
+err_ce_free:
+ ath11k_ce_free_pipes(ab);
+
+err_hal_srng_deinit:
+ ath11k_hal_srng_deinit(ab);
+
+err_release_smp2p_handle:
+ ath11k_ahb_release_smp2p_handle(ab);
+
+err_fw_deinit:
+ ath11k_ahb_fw_resource_deinit(ab);
+
+err_ce_unmap:
+ ath11k_ahb_ce_unmap(ab);
+
+err_core_free:
+ ath11k_core_free(ab);
+ platform_set_drvdata(pdev, NULL);
+
+ return ret;
+}
+
+static void ath11k_ahb_remove_prepare(struct ath11k_base *ab)
+{
+ unsigned long left;
+
+ if (test_bit(ATH11K_FLAG_RECOVERY, &ab->dev_flags)) {
+ left = wait_for_completion_timeout(&ab->driver_recovery,
+ ATH11K_AHB_RECOVERY_TIMEOUT);
+ if (!left)
+ ath11k_warn(ab, "failed to receive recovery response completion\n");
+ }
+
+ set_bit(ATH11K_FLAG_UNREGISTERING, &ab->dev_flags);
+ cancel_work_sync(&ab->restart_work);
+ cancel_work_sync(&ab->qmi.event_work);
+}
+
+static void ath11k_ahb_free_resources(struct ath11k_base *ab)
+{
+ struct platform_device *pdev = ab->pdev;
+
+ ath11k_ahb_free_irq(ab);
+ ath11k_hal_srng_deinit(ab);
+ ath11k_ahb_release_smp2p_handle(ab);
+ ath11k_ahb_fw_resource_deinit(ab);
+ ath11k_ce_free_pipes(ab);
+ ath11k_ahb_ce_unmap(ab);
+
+ ath11k_core_free(ab);
+ platform_set_drvdata(pdev, NULL);
+}
+
+static void ath11k_ahb_remove(struct platform_device *pdev)
+{
+ struct ath11k_base *ab = platform_get_drvdata(pdev);
+
+ if (test_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags)) {
+ ath11k_ahb_power_down(ab, false);
+ ath11k_debugfs_soc_destroy(ab);
+ ath11k_qmi_deinit_service(ab);
+ goto qmi_fail;
+ }
+
+ ath11k_ahb_remove_prepare(ab);
+ ath11k_core_deinit(ab);
+
+qmi_fail:
+ ath11k_fw_destroy(ab);
+ ath11k_ahb_free_resources(ab);
+}
+
+static void ath11k_ahb_shutdown(struct platform_device *pdev)
+{
+ struct ath11k_base *ab = platform_get_drvdata(pdev);
+
+ /* platform shutdown() & remove() are mutually exclusive.
+ * remove() is invoked during rmmod & shutdown() during
+ * system reboot/shutdown.
+ */
+ ath11k_ahb_remove_prepare(ab);
+
+ if (!(test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags)))
+ goto free_resources;
+
+ ath11k_core_deinit(ab);
+
+free_resources:
+ ath11k_fw_destroy(ab);
+ ath11k_ahb_free_resources(ab);
+}
+
+static struct platform_driver ath11k_ahb_driver = {
+ .driver = {
+ .name = "ath11k",
+ .of_match_table = ath11k_ahb_of_match,
+ },
+ .probe = ath11k_ahb_probe,
+ .remove = ath11k_ahb_remove,
+ .shutdown = ath11k_ahb_shutdown,
+};
+
+module_platform_driver(ath11k_ahb_driver);
+
+MODULE_DESCRIPTION("Driver support for Qualcomm Technologies 802.11ax WLAN AHB devices");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/ath/ath11k/ahb.h b/drivers/net/wireless/ath/ath11k/ahb.h
new file mode 100644
index 000000000000..415ddfd26654
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/ahb.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+#ifndef ATH11K_AHB_H
+#define ATH11K_AHB_H
+
+#include "core.h"
+
+#define ATH11K_AHB_RECOVERY_TIMEOUT (3 * HZ)
+
+#define ATH11K_AHB_SMP2P_SMEM_MSG GENMASK(15, 0)
+#define ATH11K_AHB_SMP2P_SMEM_SEQ_NO GENMASK(31, 16)
+#define ATH11K_AHB_SMP2P_SMEM_VALUE_MASK 0xFFFFFFFF
+
+enum ath11k_ahb_smp2p_msg_id {
+ ATH11K_AHB_POWER_SAVE_ENTER = 1,
+ ATH11K_AHB_POWER_SAVE_EXIT,
+};
+
+struct ath11k_base;
+
+struct ath11k_ahb {
+ struct rproc *tgt_rproc;
+ struct {
+ struct device *dev;
+ struct iommu_domain *iommu_domain;
+ dma_addr_t msa_paddr;
+ u32 msa_size;
+ dma_addr_t ce_paddr;
+ u32 ce_size;
+ bool use_tz;
+ } fw;
+ struct {
+ unsigned short seq_no;
+ unsigned int smem_bit;
+ struct qcom_smem_state *smem_state;
+ } smp2p_info;
+};
+
+static inline struct ath11k_ahb *ath11k_ahb_priv(struct ath11k_base *ab)
+{
+ return (struct ath11k_ahb *)ab->drv_priv;
+}
+#endif
diff --git a/drivers/net/wireless/ath/ath11k/ce.c b/drivers/net/wireless/ath/ath11k/ce.c
new file mode 100644
index 000000000000..a7a163621b21
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/ce.c
@@ -0,0 +1,1080 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#include <linux/export.h>
+#include "dp_rx.h"
+#include "debug.h"
+#include "hif.h"
+
+const struct ce_attr ath11k_host_ce_config_ipq8074[] = {
+ /* CE0: host->target HTC control and raw streams */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 16,
+ .src_sz_max = 2048,
+ .dest_nentries = 0,
+ .send_cb = ath11k_htc_tx_completion_handler,
+ },
+
+ /* CE1: target->host HTT + HTC control */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 512,
+ .recv_cb = ath11k_htc_rx_completion_handler,
+ },
+
+ /* CE2: target->host WMI */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 512,
+ .recv_cb = ath11k_htc_rx_completion_handler,
+ },
+
+ /* CE3: host->target WMI (mac0) */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 32,
+ .src_sz_max = 2048,
+ .dest_nentries = 0,
+ .send_cb = ath11k_htc_tx_completion_handler,
+ },
+
+ /* CE4: host->target HTT */
+ {
+ .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+ .src_nentries = 2048,
+ .src_sz_max = 256,
+ .dest_nentries = 0,
+ },
+
+ /* CE5: target->host pktlog */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 512,
+ .recv_cb = ath11k_dp_htt_htc_t2h_msg_handler,
+ },
+
+ /* CE6: target autonomous hif_memcpy */
+ {
+ .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+ .src_nentries = 0,
+ .src_sz_max = 0,
+ .dest_nentries = 0,
+ },
+
+ /* CE7: host->target WMI (mac1) */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 32,
+ .src_sz_max = 2048,
+ .dest_nentries = 0,
+ .send_cb = ath11k_htc_tx_completion_handler,
+ },
+
+ /* CE8: target autonomous hif_memcpy */
+ {
+ .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+ .src_nentries = 0,
+ .src_sz_max = 0,
+ .dest_nentries = 0,
+ },
+
+ /* CE9: host->target WMI (mac2) */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 32,
+ .src_sz_max = 2048,
+ .dest_nentries = 0,
+ .send_cb = ath11k_htc_tx_completion_handler,
+ },
+
+ /* CE10: target->host HTT */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 512,
+ .recv_cb = ath11k_htc_rx_completion_handler,
+ },
+
+ /* CE11: Not used */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 0,
+ .dest_nentries = 0,
+ },
+};
+
+const struct ce_attr ath11k_host_ce_config_qca6390[] = {
+ /* CE0: host->target HTC control and raw streams */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 16,
+ .src_sz_max = 2048,
+ .dest_nentries = 0,
+ },
+
+ /* CE1: target->host HTT + HTC control */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 512,
+ .recv_cb = ath11k_htc_rx_completion_handler,
+ },
+
+ /* CE2: target->host WMI */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 512,
+ .recv_cb = ath11k_htc_rx_completion_handler,
+ },
+
+ /* CE3: host->target WMI (mac0) */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 32,
+ .src_sz_max = 2048,
+ .dest_nentries = 0,
+ .send_cb = ath11k_htc_tx_completion_handler,
+ },
+
+ /* CE4: host->target HTT */
+ {
+ .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+ .src_nentries = 2048,
+ .src_sz_max = 256,
+ .dest_nentries = 0,
+ },
+
+ /* CE5: target->host pktlog */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 512,
+ .recv_cb = ath11k_dp_htt_htc_t2h_msg_handler,
+ },
+
+ /* CE6: target autonomous hif_memcpy */
+ {
+ .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+ .src_nentries = 0,
+ .src_sz_max = 0,
+ .dest_nentries = 0,
+ },
+
+ /* CE7: host->target WMI (mac1) */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 32,
+ .src_sz_max = 2048,
+ .dest_nentries = 0,
+ .send_cb = ath11k_htc_tx_completion_handler,
+ },
+
+ /* CE8: target autonomous hif_memcpy */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 0,
+ .dest_nentries = 0,
+ },
+};
+
+const struct ce_attr ath11k_host_ce_config_qcn9074[] = {
+ /* CE0: host->target HTC control and raw streams */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 16,
+ .src_sz_max = 2048,
+ .dest_nentries = 0,
+ },
+
+ /* CE1: target->host HTT + HTC control */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 512,
+ .recv_cb = ath11k_htc_rx_completion_handler,
+ },
+
+ /* CE2: target->host WMI */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 32,
+ .recv_cb = ath11k_htc_rx_completion_handler,
+ },
+
+ /* CE3: host->target WMI (mac0) */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 32,
+ .src_sz_max = 2048,
+ .dest_nentries = 0,
+ .send_cb = ath11k_htc_tx_completion_handler,
+ },
+
+ /* CE4: host->target HTT */
+ {
+ .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+ .src_nentries = 2048,
+ .src_sz_max = 256,
+ .dest_nentries = 0,
+ },
+
+ /* CE5: target->host pktlog */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 512,
+ .recv_cb = ath11k_dp_htt_htc_t2h_msg_handler,
+ },
+};
+
+static bool ath11k_ce_need_shadow_fix(int ce_id)
+{
+ /* only ce4 needs shadow workaround */
+ if (ce_id == 4)
+ return true;
+ return false;
+}
+
+void ath11k_ce_stop_shadow_timers(struct ath11k_base *ab)
+{
+ int i;
+
+ if (!ab->hw_params.supports_shadow_regs)
+ return;
+
+ for (i = 0; i < ab->hw_params.ce_count; i++)
+ if (ath11k_ce_need_shadow_fix(i))
+ ath11k_dp_shadow_stop_timer(ab, &ab->ce.hp_timer[i]);
+}
+
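+/* Post a single rx buffer to the pipe's destination SRNG: write the
+ * DMA address into the next free descriptor and remember the skb at
+ * the ring's write index. Called with ce_lock held.
+ */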
+static int ath11k_ce_rx_buf_enqueue_pipe(struct ath11k_ce_pipe *pipe,
+ struct sk_buff *skb, dma_addr_t paddr)
+{
+ struct ath11k_base *ab = pipe->ab;
+ struct ath11k_ce_ring *ring = pipe->dest_ring;
+ struct hal_srng *srng;
+ unsigned int write_index;
+ unsigned int nentries_mask = ring->nentries_mask;
+ u32 *desc;
+ int ret;
+
+ lockdep_assert_held(&ab->ce.ce_lock);
+
+ write_index = ring->write_index;
+
+ srng = &ab->hal.srng_list[ring->hal_ring_id];
+
+ spin_lock_bh(&srng->lock);
+
+ ath11k_hal_srng_access_begin(ab, srng);
+
+ if (unlikely(ath11k_hal_srng_src_num_free(ab, srng, false) < 1)) {
+ ret = -ENOSPC;
+ goto exit;
+ }
+
+ desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
+ if (!desc) {
+ ret = -ENOSPC;
+ goto exit;
+ }
+
+ ath11k_hal_ce_dst_set_desc(desc, paddr);
+
+ ring->skb[write_index] = skb;
+ write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
+ ring->write_index = write_index;
+
+ pipe->rx_buf_needed--;
+
+ ret = 0;
+exit:
+ ath11k_hal_srng_access_end(ab, srng);
+
+ spin_unlock_bh(&srng->lock);
+
+ return ret;
+}
+
+static int ath11k_ce_rx_post_pipe(struct ath11k_ce_pipe *pipe)
+{
+ struct ath11k_base *ab = pipe->ab;
+ struct sk_buff *skb;
+ dma_addr_t paddr;
+ int ret = 0;
+
+ if (!(pipe->dest_ring || pipe->status_ring))
+ return 0;
+
+ spin_lock_bh(&ab->ce.ce_lock);
+ while (pipe->rx_buf_needed) {
+ skb = dev_alloc_skb(pipe->buf_sz);
+ if (!skb) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ WARN_ON_ONCE(!IS_ALIGNED((unsigned long)skb->data, 4));
+
+ paddr = dma_map_single(ab->dev, skb->data,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(ab->dev, paddr))) {
+ ath11k_warn(ab, "failed to dma map ce rx buf\n");
+ dev_kfree_skb_any(skb);
+ ret = -EIO;
+ goto exit;
+ }
+
+ ATH11K_SKB_RXCB(skb)->paddr = paddr;
+
+ ret = ath11k_ce_rx_buf_enqueue_pipe(pipe, skb, paddr);
+
+ if (ret) {
+ ath11k_dbg(ab, ATH11K_DBG_CE, "failed to enqueue rx buf: %d\n",
+ ret);
+ dma_unmap_single(ab->dev, paddr,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
+ goto exit;
+ }
+ }
+
+exit:
+ spin_unlock_bh(&ab->ce.ce_lock);
+ return ret;
+}
+
+static int ath11k_ce_completed_recv_next(struct ath11k_ce_pipe *pipe,
+ struct sk_buff **skb, int *nbytes)
+{
+ struct ath11k_base *ab = pipe->ab;
+ struct hal_srng *srng;
+ unsigned int sw_index;
+ unsigned int nentries_mask;
+ u32 *desc;
+ int ret = 0;
+
+ spin_lock_bh(&ab->ce.ce_lock);
+
+ sw_index = pipe->dest_ring->sw_index;
+ nentries_mask = pipe->dest_ring->nentries_mask;
+
+ srng = &ab->hal.srng_list[pipe->status_ring->hal_ring_id];
+
+ spin_lock_bh(&srng->lock);
+
+ ath11k_hal_srng_access_begin(ab, srng);
+
+ desc = ath11k_hal_srng_dst_get_next_entry(ab, srng);
+ if (!desc) {
+ ret = -EIO;
+ goto err;
+ }
+
+ *nbytes = ath11k_hal_ce_dst_status_get_length(desc);
+
+ *skb = pipe->dest_ring->skb[sw_index];
+ pipe->dest_ring->skb[sw_index] = NULL;
+
+ sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
+ pipe->dest_ring->sw_index = sw_index;
+
+ pipe->rx_buf_needed++;
+err:
+ ath11k_hal_srng_access_end(ab, srng);
+
+ spin_unlock_bh(&srng->lock);
+
+ spin_unlock_bh(&ab->ce.ce_lock);
+
+ return ret;
+}
+
+static void ath11k_ce_recv_process_cb(struct ath11k_ce_pipe *pipe)
+{
+ struct ath11k_base *ab = pipe->ab;
+ struct sk_buff *skb;
+ struct sk_buff_head list;
+ unsigned int nbytes, max_nbytes;
+ int ret;
+
+ __skb_queue_head_init(&list);
+ while (ath11k_ce_completed_recv_next(pipe, &skb, &nbytes) == 0) {
+ max_nbytes = skb->len + skb_tailroom(skb);
+ dma_unmap_single(ab->dev, ATH11K_SKB_RXCB(skb)->paddr,
+ max_nbytes, DMA_FROM_DEVICE);
+
+ if (unlikely(max_nbytes < nbytes || nbytes == 0)) {
+ ath11k_warn(ab, "unexpected rx length (nbytes %d, max %d)",
+ nbytes, max_nbytes);
+ dev_kfree_skb_any(skb);
+ continue;
+ }
+
+ skb_put(skb, nbytes);
+ __skb_queue_tail(&list, skb);
+ }
+
+ while ((skb = __skb_dequeue(&list))) {
+ ath11k_dbg(ab, ATH11K_DBG_CE, "rx ce pipe %d len %d\n",
+ pipe->pipe_num, skb->len);
+ pipe->recv_cb(ab, skb);
+ }
+
+ ret = ath11k_ce_rx_post_pipe(pipe);
+ if (ret && ret != -ENOSPC) {
+ ath11k_warn(ab, "failed to post rx buf to pipe: %d err: %d\n",
+ pipe->pipe_num, ret);
+ mod_timer(&ab->rx_replenish_retry,
+ jiffies + ATH11K_CE_RX_POST_RETRY_JIFFIES);
+ }
+}
+
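+/* Reap the next completed descriptor from the pipe's source ring and
+ * return the skb that was queued at that index, or an ERR_PTR if
+ * nothing has completed.
+ */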
+static struct sk_buff *ath11k_ce_completed_send_next(struct ath11k_ce_pipe *pipe)
+{
+ struct ath11k_base *ab = pipe->ab;
+ struct hal_srng *srng;
+ unsigned int sw_index;
+ unsigned int nentries_mask;
+ struct sk_buff *skb;
+ u32 *desc;
+
+ spin_lock_bh(&ab->ce.ce_lock);
+
+ sw_index = pipe->src_ring->sw_index;
+ nentries_mask = pipe->src_ring->nentries_mask;
+
+ srng = &ab->hal.srng_list[pipe->src_ring->hal_ring_id];
+
+ spin_lock_bh(&srng->lock);
+
+ ath11k_hal_srng_access_begin(ab, srng);
+
+ desc = ath11k_hal_srng_src_reap_next(ab, srng);
+ if (!desc) {
+ skb = ERR_PTR(-EIO);
+ goto err_unlock;
+ }
+
+ skb = pipe->src_ring->skb[sw_index];
+
+ pipe->src_ring->skb[sw_index] = NULL;
+
+ sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
+ pipe->src_ring->sw_index = sw_index;
+
+err_unlock:
+ spin_unlock_bh(&srng->lock);
+
+ spin_unlock_bh(&ab->ce.ce_lock);
+
+ return skb;
+}
+
+static void ath11k_ce_tx_process_cb(struct ath11k_ce_pipe *pipe)
+{
+ struct ath11k_base *ab = pipe->ab;
+ struct sk_buff *skb;
+ struct sk_buff_head list;
+
+ __skb_queue_head_init(&list);
+ while (!IS_ERR(skb = ath11k_ce_completed_send_next(pipe))) {
+ if (!skb)
+ continue;
+
+ dma_unmap_single(ab->dev, ATH11K_SKB_CB(skb)->paddr, skb->len,
+ DMA_TO_DEVICE);
+
+ if ((!pipe->send_cb) || ab->hw_params.credit_flow) {
+ dev_kfree_skb_any(skb);
+ continue;
+ }
+
+ __skb_queue_tail(&list, skb);
+ }
+
+ while ((skb = __skb_dequeue(&list))) {
+ ath11k_dbg(ab, ATH11K_DBG_CE, "tx ce pipe %d len %d\n",
+ pipe->pipe_num, skb->len);
+ pipe->send_cb(ab, skb);
+ }
+}
+
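+/* Fill in the MSI address and data for this CE's SRNG so the ring can
+ * generate MSI interrupts directly; a no-op if no MSI vectors were
+ * assigned for "CE".
+ */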
+static void ath11k_ce_srng_msi_ring_params_setup(struct ath11k_base *ab, u32 ce_id,
+ struct hal_srng_params *ring_params)
+{
+ u32 msi_data_start;
+ u32 msi_data_count, msi_data_idx;
+ u32 msi_irq_start;
+ u32 addr_lo;
+ u32 addr_hi;
+ int ret;
+
+ ret = ath11k_get_user_msi_vector(ab, "CE",
+ &msi_data_count, &msi_data_start,
+ &msi_irq_start);
+
+ if (ret)
+ return;
+
+ ath11k_get_msi_address(ab, &addr_lo, &addr_hi);
+ ath11k_get_ce_msi_idx(ab, ce_id, &msi_data_idx);
+
+ ring_params->msi_addr = addr_lo;
+ ring_params->msi_addr |= (dma_addr_t)(((uint64_t)addr_hi) << 32);
+ ring_params->msi_data = (msi_data_idx % msi_data_count) + msi_data_start;
+ ring_params->flags |= HAL_SRNG_FLAGS_MSI_INTR;
+}
+
+static int ath11k_ce_init_ring(struct ath11k_base *ab,
+ struct ath11k_ce_ring *ce_ring,
+ int ce_id, enum hal_ring_type type)
+{
+ struct hal_srng_params params = {};
+ int ret;
+
+ params.ring_base_paddr = ce_ring->base_addr_ce_space;
+ params.ring_base_vaddr = ce_ring->base_addr_owner_space;
+ params.num_entries = ce_ring->nentries;
+
+ if (!(CE_ATTR_DIS_INTR & ab->hw_params.host_ce_config[ce_id].flags))
+ ath11k_ce_srng_msi_ring_params_setup(ab, ce_id, &params);
+
+ switch (type) {
+ case HAL_CE_SRC:
+ if (!(CE_ATTR_DIS_INTR & ab->hw_params.host_ce_config[ce_id].flags))
+ params.intr_batch_cntr_thres_entries = 1;
+ break;
+ case HAL_CE_DST:
+ params.max_buffer_len = ab->hw_params.host_ce_config[ce_id].src_sz_max;
+ if (!(ab->hw_params.host_ce_config[ce_id].flags & CE_ATTR_DIS_INTR)) {
+ params.intr_timer_thres_us = 1024;
+ params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
+ params.low_threshold = ce_ring->nentries - 3;
+ }
+ break;
+ case HAL_CE_DST_STATUS:
+ if (!(ab->hw_params.host_ce_config[ce_id].flags & CE_ATTR_DIS_INTR)) {
+ params.intr_batch_cntr_thres_entries = 1;
+ params.intr_timer_thres_us = 0x1000;
+ }
+ break;
+ default:
+ ath11k_warn(ab, "Invalid CE ring type %d\n", type);
+ return -EINVAL;
+ }
+
+ /* TODO: Init other params needed by HAL to init the ring */
+
+ ret = ath11k_hal_srng_setup(ab, type, ce_id, 0, &params);
+ if (ret < 0) {
+ ath11k_warn(ab, "failed to setup srng: %d ring_id %d\n",
+ ret, ce_id);
+ return ret;
+ }
+
+ ce_ring->hal_ring_id = ret;
+
+ if (ab->hw_params.supports_shadow_regs &&
+ ath11k_ce_need_shadow_fix(ce_id))
+ ath11k_dp_shadow_init_timer(ab, &ab->ce.hp_timer[ce_id],
+ ATH11K_SHADOW_CTRL_TIMER_INTERVAL,
+ ce_ring->hal_ring_id);
+
+ return 0;
+}
+
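+/* Allocate a CE ring: the tracking struct (with one skb pointer per
+ * entry) plus a DMA-coherent descriptor area aligned to
+ * CE_DESC_RING_ALIGN.
+ */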
+static struct ath11k_ce_ring *
+ath11k_ce_alloc_ring(struct ath11k_base *ab, int nentries, int desc_sz)
+{
+ struct ath11k_ce_ring *ce_ring;
+ dma_addr_t base_addr;
+
+ ce_ring = kzalloc(struct_size(ce_ring, skb, nentries), GFP_KERNEL);
+ if (ce_ring == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ ce_ring->nentries = nentries;
+ ce_ring->nentries_mask = nentries - 1;
+
+ /* Legacy platforms that lack cache-coherent DMA
+ * are not supported.
+ */
+ ce_ring->base_addr_owner_space_unaligned =
+ dma_alloc_coherent(ab->dev,
+ nentries * desc_sz + CE_DESC_RING_ALIGN,
+ &base_addr, GFP_KERNEL);
+ if (!ce_ring->base_addr_owner_space_unaligned) {
+ kfree(ce_ring);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ ce_ring->base_addr_ce_space_unaligned = base_addr;
+
+ ce_ring->base_addr_owner_space = PTR_ALIGN(
+ ce_ring->base_addr_owner_space_unaligned,
+ CE_DESC_RING_ALIGN);
+ ce_ring->base_addr_ce_space = ALIGN(
+ ce_ring->base_addr_ce_space_unaligned,
+ CE_DESC_RING_ALIGN);
+
+ return ce_ring;
+}
+
+static int ath11k_ce_alloc_pipe(struct ath11k_base *ab, int ce_id)
+{
+ struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[ce_id];
+ const struct ce_attr *attr = &ab->hw_params.host_ce_config[ce_id];
+ struct ath11k_ce_ring *ring;
+ int nentries;
+ int desc_sz;
+
+ pipe->attr_flags = attr->flags;
+
+ if (attr->src_nentries) {
+ pipe->send_cb = attr->send_cb;
+ nentries = roundup_pow_of_two(attr->src_nentries);
+ desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_SRC);
+ ring = ath11k_ce_alloc_ring(ab, nentries, desc_sz);
+ if (IS_ERR(ring))
+ return PTR_ERR(ring);
+ pipe->src_ring = ring;
+ }
+
+ if (attr->dest_nentries) {
+ pipe->recv_cb = attr->recv_cb;
+ nentries = roundup_pow_of_two(attr->dest_nentries);
+ desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST);
+ ring = ath11k_ce_alloc_ring(ab, nentries, desc_sz);
+ if (IS_ERR(ring))
+ return PTR_ERR(ring);
+ pipe->dest_ring = ring;
+
+ desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST_STATUS);
+ ring = ath11k_ce_alloc_ring(ab, nentries, desc_sz);
+ if (IS_ERR(ring))
+ return PTR_ERR(ring);
+ pipe->status_ring = ring;
+ }
+
+ return 0;
+}
+
+void ath11k_ce_per_engine_service(struct ath11k_base *ab, u16 ce_id)
+{
+ struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[ce_id];
+ const struct ce_attr *attr = &ab->hw_params.host_ce_config[ce_id];
+
+ if (attr->src_nentries)
+ ath11k_ce_tx_process_cb(pipe);
+
+ if (pipe->recv_cb)
+ ath11k_ce_recv_process_cb(pipe);
+}
+
+void ath11k_ce_poll_send_completed(struct ath11k_base *ab, u8 pipe_id)
+{
+ struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[pipe_id];
+ const struct ce_attr *attr = &ab->hw_params.host_ce_config[pipe_id];
+
+ if ((pipe->attr_flags & CE_ATTR_DIS_INTR) && attr->src_nentries)
+ ath11k_ce_tx_process_cb(pipe);
+}
+EXPORT_SYMBOL(ath11k_ce_per_engine_service);
+
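+/* Queue an already DMA-mapped skb on the pipe's source ring and update
+ * the ring's write index. For interrupt-disabled pipes, completed
+ * sends are polled first once the ring usage crosses the threshold.
+ */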
+int ath11k_ce_send(struct ath11k_base *ab, struct sk_buff *skb, u8 pipe_id,
+ u16 transfer_id)
+{
+ struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[pipe_id];
+ struct hal_srng *srng;
+ u32 *desc;
+ unsigned int write_index, sw_index;
+ unsigned int nentries_mask;
+ int ret = 0;
+ u8 byte_swap_data = 0;
+ int num_used;
+
+ /* Check if some entries can be regained by handling tx completions if
+ * the CE has interrupts disabled and the number of used entries exceeds
+ * the defined usage threshold.
+ */
+ if (pipe->attr_flags & CE_ATTR_DIS_INTR) {
+ spin_lock_bh(&ab->ce.ce_lock);
+ write_index = pipe->src_ring->write_index;
+
+ sw_index = pipe->src_ring->sw_index;
+
+ if (write_index >= sw_index)
+ num_used = write_index - sw_index;
+ else
+ num_used = pipe->src_ring->nentries - sw_index +
+ write_index;
+
+ spin_unlock_bh(&ab->ce.ce_lock);
+
+ if (num_used > ATH11K_CE_USAGE_THRESHOLD)
+ ath11k_ce_poll_send_completed(ab, pipe->pipe_num);
+ }
+
+ if (test_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags))
+ return -ESHUTDOWN;
+
+ spin_lock_bh(&ab->ce.ce_lock);
+
+ write_index = pipe->src_ring->write_index;
+ nentries_mask = pipe->src_ring->nentries_mask;
+
+ srng = &ab->hal.srng_list[pipe->src_ring->hal_ring_id];
+
+ spin_lock_bh(&srng->lock);
+
+ ath11k_hal_srng_access_begin(ab, srng);
+
+ if (unlikely(ath11k_hal_srng_src_num_free(ab, srng, false) < 1)) {
+ ath11k_hal_srng_access_end(ab, srng);
+ ret = -ENOBUFS;
+ goto err_unlock;
+ }
+
+ desc = ath11k_hal_srng_src_get_next_reaped(ab, srng);
+ if (!desc) {
+ ath11k_hal_srng_access_end(ab, srng);
+ ret = -ENOBUFS;
+ goto err_unlock;
+ }
+
+ if (pipe->attr_flags & CE_ATTR_BYTE_SWAP_DATA)
+ byte_swap_data = 1;
+
+ ath11k_hal_ce_src_set_desc(desc, ATH11K_SKB_CB(skb)->paddr,
+ skb->len, transfer_id, byte_swap_data);
+
+ pipe->src_ring->skb[write_index] = skb;
+ pipe->src_ring->write_index = CE_RING_IDX_INCR(nentries_mask,
+ write_index);
+
+ ath11k_hal_srng_access_end(ab, srng);
+
+ if (ath11k_ce_need_shadow_fix(pipe_id))
+ ath11k_dp_shadow_start_timer(ab, srng, &ab->ce.hp_timer[pipe_id]);
+
+ spin_unlock_bh(&srng->lock);
+
+ spin_unlock_bh(&ab->ce.ce_lock);
+
+ return 0;
+
+err_unlock:
+ spin_unlock_bh(&srng->lock);
+
+ spin_unlock_bh(&ab->ce.ce_lock);
+
+ return ret;
+}
+
+static void ath11k_ce_rx_pipe_cleanup(struct ath11k_ce_pipe *pipe)
+{
+ struct ath11k_base *ab = pipe->ab;
+ struct ath11k_ce_ring *ring = pipe->dest_ring;
+ struct sk_buff *skb;
+ int i;
+
+ if (!(ring && pipe->buf_sz))
+ return;
+
+ for (i = 0; i < ring->nentries; i++) {
+ skb = ring->skb[i];
+ if (!skb)
+ continue;
+
+ ring->skb[i] = NULL;
+ dma_unmap_single(ab->dev, ATH11K_SKB_RXCB(skb)->paddr,
+ skb->len + skb_tailroom(skb), DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
+ }
+}
+
+static void ath11k_ce_shadow_config(struct ath11k_base *ab)
+{
+ int i;
+
+ for (i = 0; i < ab->hw_params.ce_count; i++) {
+ if (ab->hw_params.host_ce_config[i].src_nentries)
+ ath11k_hal_srng_update_shadow_config(ab,
+ HAL_CE_SRC, i);
+
+ if (ab->hw_params.host_ce_config[i].dest_nentries) {
+ ath11k_hal_srng_update_shadow_config(ab,
+ HAL_CE_DST, i);
+
+ ath11k_hal_srng_update_shadow_config(ab,
+ HAL_CE_DST_STATUS, i);
+ }
+ }
+}
+
+void ath11k_ce_get_shadow_config(struct ath11k_base *ab,
+ u32 **shadow_cfg, u32 *shadow_cfg_len)
+{
+ if (!ab->hw_params.supports_shadow_regs)
+ return;
+
+ ath11k_hal_srng_get_shadow_config(ab, shadow_cfg, shadow_cfg_len);
+
+ /* shadow is already configured */
+ if (*shadow_cfg_len)
+ return;
+
+ /* shadow isn't configured yet, configure now:
+ * non-CE srngs are configured first, then
+ * all CE srngs.
+ */
+ ath11k_hal_srng_shadow_config(ab);
+ ath11k_ce_shadow_config(ab);
+
+ /* get the shadow configuration */
+ ath11k_hal_srng_get_shadow_config(ab, shadow_cfg, shadow_cfg_len);
+}
+EXPORT_SYMBOL(ath11k_ce_get_shadow_config);
+
+void ath11k_ce_cleanup_pipes(struct ath11k_base *ab)
+{
+ struct ath11k_ce_pipe *pipe;
+ int pipe_num;
+
+ ath11k_ce_stop_shadow_timers(ab);
+
+ for (pipe_num = 0; pipe_num < ab->hw_params.ce_count; pipe_num++) {
+ pipe = &ab->ce.ce_pipe[pipe_num];
+ ath11k_ce_rx_pipe_cleanup(pipe);
+
+ /* Clean up any src CEs which have interrupts disabled */
+ ath11k_ce_poll_send_completed(ab, pipe_num);
+
+ /* NOTE: Should we also clean up tx buffer in all pipes? */
+ }
+}
+EXPORT_SYMBOL(ath11k_ce_cleanup_pipes);
+
+void ath11k_ce_rx_post_buf(struct ath11k_base *ab)
+{
+ struct ath11k_ce_pipe *pipe;
+ int i;
+ int ret;
+
+ for (i = 0; i < ab->hw_params.ce_count; i++) {
+ pipe = &ab->ce.ce_pipe[i];
+ ret = ath11k_ce_rx_post_pipe(pipe);
+ if (ret) {
+ if (ret == -ENOSPC)
+ continue;
+
+ ath11k_warn(ab, "failed to post rx buf to pipe: %d err: %d\n",
+ i, ret);
+ mod_timer(&ab->rx_replenish_retry,
+ jiffies + ATH11K_CE_RX_POST_RETRY_JIFFIES);
+
+ return;
+ }
+ }
+}
+EXPORT_SYMBOL(ath11k_ce_rx_post_buf);
+
+void ath11k_ce_rx_replenish_retry(struct timer_list *t)
+{
+ struct ath11k_base *ab = timer_container_of(ab, t, rx_replenish_retry);
+
+ ath11k_ce_rx_post_buf(ab);
+}
+
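+/* Initialize the HAL SRNG backing each allocated pipe ring and reset
+ * the software and write indices.
+ */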
+int ath11k_ce_init_pipes(struct ath11k_base *ab)
+{
+ struct ath11k_ce_pipe *pipe;
+ int i;
+ int ret;
+
+ for (i = 0; i < ab->hw_params.ce_count; i++) {
+ pipe = &ab->ce.ce_pipe[i];
+
+ if (pipe->src_ring) {
+ ret = ath11k_ce_init_ring(ab, pipe->src_ring, i,
+ HAL_CE_SRC);
+ if (ret) {
+ ath11k_warn(ab, "failed to init src ring: %d\n",
+ ret);
+ /* Should we clear any partial init? */
+ return ret;
+ }
+
+ pipe->src_ring->write_index = 0;
+ pipe->src_ring->sw_index = 0;
+ }
+
+ if (pipe->dest_ring) {
+ ret = ath11k_ce_init_ring(ab, pipe->dest_ring, i,
+ HAL_CE_DST);
+ if (ret) {
+ ath11k_warn(ab, "failed to init dest ring: %d\n",
+ ret);
+ /* Should we clear any partial init? */
+ return ret;
+ }
+
+ pipe->rx_buf_needed = pipe->dest_ring->nentries ?
+ pipe->dest_ring->nentries - 2 : 0;
+
+ pipe->dest_ring->write_index = 0;
+ pipe->dest_ring->sw_index = 0;
+ }
+
+ if (pipe->status_ring) {
+ ret = ath11k_ce_init_ring(ab, pipe->status_ring, i,
+ HAL_CE_DST_STATUS);
+ if (ret) {
+ ath11k_warn(ab, "failed to init dest status ing: %d\n",
+ ret);
+ /* Should we clear any partial init */
+ return ret;
+ }
+
+ pipe->status_ring->write_index = 0;
+ pipe->status_ring->sw_index = 0;
+ }
+ }
+
+ return 0;
+}
+
+void ath11k_ce_free_pipes(struct ath11k_base *ab)
+{
+ struct ath11k_ce_pipe *pipe;
+ struct ath11k_ce_ring *ce_ring;
+ int desc_sz;
+ int i;
+
+ for (i = 0; i < ab->hw_params.ce_count; i++) {
+ pipe = &ab->ce.ce_pipe[i];
+
+ if (ath11k_ce_need_shadow_fix(i))
+ ath11k_dp_shadow_stop_timer(ab, &ab->ce.hp_timer[i]);
+
+ if (pipe->src_ring) {
+ desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_SRC);
+ ce_ring = pipe->src_ring;
+ dma_free_coherent(ab->dev,
+ pipe->src_ring->nentries * desc_sz +
+ CE_DESC_RING_ALIGN,
+ ce_ring->base_addr_owner_space_unaligned,
+ ce_ring->base_addr_ce_space_unaligned);
+ kfree(pipe->src_ring);
+ pipe->src_ring = NULL;
+ }
+
+ if (pipe->dest_ring) {
+ desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST);
+ ce_ring = pipe->dest_ring;
+ dma_free_coherent(ab->dev,
+ pipe->dest_ring->nentries * desc_sz +
+ CE_DESC_RING_ALIGN,
+ ce_ring->base_addr_owner_space_unaligned,
+ ce_ring->base_addr_ce_space_unaligned);
+ kfree(pipe->dest_ring);
+ pipe->dest_ring = NULL;
+ }
+
+ if (pipe->status_ring) {
+ desc_sz =
+ ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST_STATUS);
+ ce_ring = pipe->status_ring;
+ dma_free_coherent(ab->dev,
+ pipe->status_ring->nentries * desc_sz +
+ CE_DESC_RING_ALIGN,
+ ce_ring->base_addr_owner_space_unaligned,
+ ce_ring->base_addr_ce_space_unaligned);
+ kfree(pipe->status_ring);
+ pipe->status_ring = NULL;
+ }
+ }
+}
+EXPORT_SYMBOL(ath11k_ce_free_pipes);
+
+int ath11k_ce_alloc_pipes(struct ath11k_base *ab)
+{
+ struct ath11k_ce_pipe *pipe;
+ int i;
+ int ret;
+ const struct ce_attr *attr;
+
+ spin_lock_init(&ab->ce.ce_lock);
+
+ for (i = 0; i < ab->hw_params.ce_count; i++) {
+ attr = &ab->hw_params.host_ce_config[i];
+ pipe = &ab->ce.ce_pipe[i];
+ pipe->pipe_num = i;
+ pipe->ab = ab;
+ pipe->buf_sz = attr->src_sz_max;
+
+ ret = ath11k_ce_alloc_pipe(ab, i);
+ if (ret) {
+ /* Free any partial successful allocation */
+ ath11k_ce_free_pipes(ab);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(ath11k_ce_alloc_pipes);
+
+/* For a Big Endian Host, Copy Engine byte_swap is enabled.
+ * When the Copy Engine does byte_swap, the Host needs to byte swap
+ * again to get/put the buffer content in the correct byte order.
+ */
+void ath11k_ce_byte_swap(void *mem, u32 len)
+{
+ int i;
+
+ if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) {
+ if (!mem)
+ return;
+
+ for (i = 0; i < (len / 4); i++) {
+ *(u32 *)mem = swab32(*(u32 *)mem);
+ mem += 4;
+ }
+ }
+}
+
+int ath11k_ce_get_attr_flags(struct ath11k_base *ab, int ce_id)
+{
+ if (ce_id >= ab->hw_params.ce_count)
+ return -EINVAL;
+
+ return ab->hw_params.host_ce_config[ce_id].flags;
+}
+EXPORT_SYMBOL(ath11k_ce_get_attr_flags);
diff --git a/drivers/net/wireless/ath/ath11k/ce.h b/drivers/net/wireless/ath/ath11k/ce.h
new file mode 100644
index 000000000000..bcde2fcf02cf
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/ce.h
@@ -0,0 +1,211 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef ATH11K_CE_H
+#define ATH11K_CE_H
+
+#define CE_COUNT_MAX 12
+
+/* Byte swap data words */
+#define CE_ATTR_BYTE_SWAP_DATA 2
+
+/* no interrupt on copy completion */
+#define CE_ATTR_DIS_INTR 8
+
+/* Host software's Copy Engine configuration. */
+#ifdef __BIG_ENDIAN
+#define CE_ATTR_FLAGS CE_ATTR_BYTE_SWAP_DATA
+#else
+#define CE_ATTR_FLAGS 0
+#endif
+
+/* Threshold to poll for tx completion in case of Interrupt disabled CE's */
+#define ATH11K_CE_USAGE_THRESHOLD 32
+
+void ath11k_ce_byte_swap(void *mem, u32 len);
+
+/*
+ * Directions for interconnect pipe configuration.
+ * These definitions may be used during configuration and are shared
+ * between Host and Target.
+ *
+ * Pipe Directions are relative to the Host, so PIPEDIR_IN means
+ * "coming IN over air through Target to Host" as with a WiFi Rx operation.
+ * Conversely, PIPEDIR_OUT means "going OUT from Host through Target over air"
+ * as with a WiFi Tx operation. This is somewhat awkward for the "middle-man"
+ * Target since things that are "PIPEDIR_OUT" are coming IN to the Target
+ * over the interconnect.
+ */
+#define PIPEDIR_NONE 0
+#define PIPEDIR_IN 1 /* Target-->Host, WiFi Rx direction */
+#define PIPEDIR_OUT 2 /* Host->Target, WiFi Tx direction */
+#define PIPEDIR_INOUT 3 /* bidirectional */
+#define PIPEDIR_INOUT_H2H 4 /* bidirectional, host to host */
+
+/* CE address/mask */
+#define CE_HOST_IE_ADDRESS 0x00A1803C
+#define CE_HOST_IE_2_ADDRESS 0x00A18040
+#define CE_HOST_IE_3_ADDRESS CE_HOST_IE_ADDRESS
+
+/* CE IE registers are different for IPQ5018 */
+#define CE_HOST_IPQ5018_IE_ADDRESS 0x0841804C
+#define CE_HOST_IPQ5018_IE_2_ADDRESS 0x08418050
+#define CE_HOST_IPQ5018_IE_3_ADDRESS CE_HOST_IPQ5018_IE_ADDRESS
+
+#define CE_HOST_IE_3_SHIFT 0xC
+
+#define CE_RING_IDX_INCR(nentries_mask, idx) (((idx) + 1) & (nentries_mask))
+
+#define ATH11K_CE_RX_POST_RETRY_JIFFIES 50
+
+struct ath11k_base;
+
+/*
+ * Establish a mapping between a service/direction and a pipe.
+ * Configuration information for a Copy Engine pipe and services.
+ * Passed from Host to Target through QMI message and must be in
+ * little endian format.
+ */
+struct service_to_pipe {
+ __le32 service_id;
+ __le32 pipedir;
+ __le32 pipenum;
+};
+
+/*
+ * Configuration information for a Copy Engine pipe.
+ * Passed from Host to Target through QMI message during startup (one per CE).
+ *
+ * NOTE: Structure is shared between Host software and Target firmware!
+ */
+struct ce_pipe_config {
+ __le32 pipenum;
+ __le32 pipedir;
+ __le32 nentries;
+ __le32 nbytes_max;
+ __le32 flags;
+ __le32 reserved;
+};
+
+struct ce_ie_addr {
+ u32 ie1_reg_addr;
+ u32 ie2_reg_addr;
+ u32 ie3_reg_addr;
+};
+
+struct ce_remap {
+ u32 base;
+ u32 size;
+};
+
+struct ce_attr {
+ /* CE_ATTR_* values */
+ unsigned int flags;
+
+ /* #entries in source ring - Must be a power of 2 */
+ unsigned int src_nentries;
+
+ /*
+ * Max source send size for this CE.
+ * This is also the minimum size of a destination buffer.
+ */
+ unsigned int src_sz_max;
+
+ /* #entries in destination ring - Must be a power of 2 */
+ unsigned int dest_nentries;
+
+ void (*recv_cb)(struct ath11k_base *, struct sk_buff *);
+ void (*send_cb)(struct ath11k_base *, struct sk_buff *);
+};
+
+#define CE_DESC_RING_ALIGN 8
+
+struct ath11k_ce_ring {
+ /* Number of entries in this ring; must be power of 2 */
+ unsigned int nentries;
+ unsigned int nentries_mask;
+
+ /* For dest ring, this is the next index to be processed
+ * by software after it was/is received into.
+ *
+ * For src ring, this is the last descriptor that was sent
+ * and completion processed by software.
+ *
+ * Regardless of src or dest ring, this is an invariant
+ * (modulo ring size):
+ * write index >= read index >= sw_index
+ */
+ unsigned int sw_index;
+ /* cached copy */
+ unsigned int write_index;
+
+ /* Start of DMA-coherent area reserved for descriptors */
+ /* Host address space */
+ void *base_addr_owner_space_unaligned;
+ /* CE address space */
+ dma_addr_t base_addr_ce_space_unaligned;
+
+ /* Actual start of descriptors.
+ * Aligned to descriptor-size boundary.
+ * Points into reserved DMA-coherent area, above.
+ */
+ /* Host address space */
+ void *base_addr_owner_space;
+
+ /* CE address space */
+ dma_addr_t base_addr_ce_space;
+
+ /* HAL ring id */
+ u32 hal_ring_id;
+
+ /* keep last */
+ struct sk_buff *skb[];
+};
+
+struct ath11k_ce_pipe {
+ struct ath11k_base *ab;
+ u16 pipe_num;
+ unsigned int attr_flags;
+ unsigned int buf_sz;
+ unsigned int rx_buf_needed;
+
+ void (*send_cb)(struct ath11k_base *, struct sk_buff *);
+ void (*recv_cb)(struct ath11k_base *, struct sk_buff *);
+
+ struct tasklet_struct intr_tq;
+ struct ath11k_ce_ring *src_ring;
+ struct ath11k_ce_ring *dest_ring;
+ struct ath11k_ce_ring *status_ring;
+ u64 timestamp;
+};
+
+struct ath11k_ce {
+ struct ath11k_ce_pipe ce_pipe[CE_COUNT_MAX];
+ /* Protects rings of all ce pipes */
+ spinlock_t ce_lock;
+ struct ath11k_hp_update_timer hp_timer[CE_COUNT_MAX];
+};
+
+extern const struct ce_attr ath11k_host_ce_config_ipq8074[];
+extern const struct ce_attr ath11k_host_ce_config_qca6390[];
+extern const struct ce_attr ath11k_host_ce_config_qcn9074[];
+
+void ath11k_ce_cleanup_pipes(struct ath11k_base *ab);
+void ath11k_ce_rx_replenish_retry(struct timer_list *t);
+void ath11k_ce_per_engine_service(struct ath11k_base *ab, u16 ce_id);
+int ath11k_ce_send(struct ath11k_base *ab, struct sk_buff *skb, u8 pipe_id,
+ u16 transfer_id);
+void ath11k_ce_rx_post_buf(struct ath11k_base *ab);
+int ath11k_ce_init_pipes(struct ath11k_base *ab);
+int ath11k_ce_alloc_pipes(struct ath11k_base *ab);
+void ath11k_ce_free_pipes(struct ath11k_base *ab);
+int ath11k_ce_get_attr_flags(struct ath11k_base *ab, int ce_id);
+void ath11k_ce_poll_send_completed(struct ath11k_base *ab, u8 pipe_id);
+void ath11k_ce_get_shadow_config(struct ath11k_base *ab,
+ u32 **shadow_cfg, u32 *shadow_cfg_len);
+void ath11k_ce_stop_shadow_timers(struct ath11k_base *ab);
+
+#endif
diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c
new file mode 100644
index 000000000000..812686173ac8
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/core.c
@@ -0,0 +1,2715 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#include <linux/export.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/remoteproc.h>
+#include <linux/firmware.h>
+#include <linux/of.h>
+
+#include "core.h"
+#include "dp_tx.h"
+#include "dp_rx.h"
+#include "debug.h"
+#include "hif.h"
+#include "wow.h"
+#include "fw.h"
+
+unsigned int ath11k_debug_mask;
+EXPORT_SYMBOL(ath11k_debug_mask);
+module_param_named(debug_mask, ath11k_debug_mask, uint, 0644);
+MODULE_PARM_DESC(debug_mask, "Debugging mask");
+
+static unsigned int ath11k_crypto_mode;
+module_param_named(crypto_mode, ath11k_crypto_mode, uint, 0644);
+MODULE_PARM_DESC(crypto_mode, "crypto mode: 0-hardware, 1-software");
+
+/* frame mode values are mapped as per enum ath11k_hw_txrx_mode */
+unsigned int ath11k_frame_mode = ATH11K_HW_TXRX_NATIVE_WIFI;
+module_param_named(frame_mode, ath11k_frame_mode, uint, 0644);
+MODULE_PARM_DESC(frame_mode,
+ "Datapath frame mode (0: raw, 1: native wifi (default), 2: ethernet)");
+
+bool ath11k_ftm_mode;
+module_param_named(ftm_mode, ath11k_ftm_mode, bool, 0444);
+MODULE_PARM_DESC(ftm_mode, "Boots up in factory test mode");
+
+static const struct ath11k_hw_params ath11k_hw_params[] = {
+ {
+ .hw_rev = ATH11K_HW_IPQ8074,
+ .name = "ipq8074 hw2.0",
+ .fw = {
+ .dir = "IPQ8074/hw2.0",
+ .board_size = 256 * 1024,
+ .cal_offset = 128 * 1024,
+ },
+ .max_radios = 3,
+ .bdf_addr = 0x4B0C0000,
+ .hw_ops = &ipq8074_ops,
+ .ring_mask = &ath11k_hw_ring_mask_ipq8074,
+ .internal_sleep_clock = false,
+ .regs = &ipq8074_regs,
+ .qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_IPQ8074,
+ .host_ce_config = ath11k_host_ce_config_ipq8074,
+ .ce_count = 12,
+ .target_ce_config = ath11k_target_ce_config_wlan_ipq8074,
+ .target_ce_count = 11,
+ .svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_ipq8074,
+ .svc_to_ce_map_len = 21,
+ .ce_ie_addr = &ath11k_ce_ie_addr_ipq8074,
+ .single_pdev_only = false,
+ .rxdma1_enable = true,
+ .num_rxdma_per_pdev = 1,
+ .rx_mac_buf_ring = false,
+ .vdev_start_delay = false,
+ .htt_peer_map_v2 = true,
+
+ .spectral = {
+ .fft_sz = 2,
+ /* HW bug: the expected BIN size is 2 bytes, but HW reports it as
+ * 4 bytes, so a pad size of 2 bytes is added to compensate.
+ */
+ .fft_pad_sz = 2,
+ .summary_pad_sz = 0,
+ .fft_hdr_len = 16,
+ .max_fft_bins = 512,
+ .fragment_160mhz = true,
+ },
+
+ .interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_MESH_POINT),
+ .supports_monitor = true,
+ .full_monitor_mode = false,
+ .supports_shadow_regs = false,
+ .idle_ps = false,
+ .supports_sta_ps = false,
+ .coldboot_cal_mm = true,
+ .coldboot_cal_ftm = true,
+ .cbcal_restart_fw = true,
+ .fw_mem_mode = 0,
+ .num_vdevs = 16 + 1,
+ .num_peers = 512,
+ .supports_suspend = false,
+ .hal_desc_sz = sizeof(struct hal_rx_desc_ipq8074),
+ .supports_regdb = false,
+ .fix_l1ss = true,
+ .credit_flow = false,
+ .max_tx_ring = DP_TCL_NUM_RING_MAX,
+ .hal_params = &ath11k_hw_hal_params_ipq8074,
+ .supports_dynamic_smps_6ghz = false,
+ .alloc_cacheable_memory = true,
+ .supports_rssi_stats = false,
+ .fw_wmi_diag_event = false,
+ .current_cc_support = false,
+ .dbr_debug_support = true,
+ .global_reset = false,
+ .bios_sar_capa = NULL,
+ .m3_fw_support = false,
+ .fixed_bdf_addr = true,
+ .fixed_mem_region = true,
+ .static_window_map = false,
+ .hybrid_bus_type = false,
+ .fixed_fw_mem = false,
+ .support_off_channel_tx = false,
+ .supports_multi_bssid = false,
+
+ .sram_dump = {},
+
+ .tcl_ring_retry = true,
+ .tx_ring_size = DP_TCL_DATA_RING_SIZE,
+ .smp2p_wow_exit = false,
+ .support_dual_stations = false,
+ .pdev_suspend = false,
+ },
+ {
+ .hw_rev = ATH11K_HW_IPQ6018_HW10,
+ .name = "ipq6018 hw1.0",
+ .fw = {
+ .dir = "IPQ6018/hw1.0",
+ .board_size = 256 * 1024,
+ .cal_offset = 128 * 1024,
+ },
+ .max_radios = 2,
+ .bdf_addr = 0x4ABC0000,
+ .hw_ops = &ipq6018_ops,
+ .ring_mask = &ath11k_hw_ring_mask_ipq8074,
+ .internal_sleep_clock = false,
+ .regs = &ipq8074_regs,
+ .qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_IPQ8074,
+ .host_ce_config = ath11k_host_ce_config_ipq8074,
+ .ce_count = 12,
+ .target_ce_config = ath11k_target_ce_config_wlan_ipq8074,
+ .target_ce_count = 11,
+ .svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_ipq6018,
+ .svc_to_ce_map_len = 19,
+ .ce_ie_addr = &ath11k_ce_ie_addr_ipq8074,
+ .single_pdev_only = false,
+ .rxdma1_enable = true,
+ .num_rxdma_per_pdev = 1,
+ .rx_mac_buf_ring = false,
+ .vdev_start_delay = false,
+ .htt_peer_map_v2 = true,
+
+ .spectral = {
+ .fft_sz = 4,
+ .fft_pad_sz = 0,
+ .summary_pad_sz = 0,
+ .fft_hdr_len = 16,
+ .max_fft_bins = 512,
+ .fragment_160mhz = true,
+ },
+
+ .interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_MESH_POINT),
+ .supports_monitor = true,
+ .full_monitor_mode = false,
+ .supports_shadow_regs = false,
+ .idle_ps = false,
+ .supports_sta_ps = false,
+ .coldboot_cal_mm = true,
+ .coldboot_cal_ftm = true,
+ .cbcal_restart_fw = true,
+ .fw_mem_mode = 0,
+ .num_vdevs = 16 + 1,
+ .num_peers = 512,
+ .supports_suspend = false,
+ .hal_desc_sz = sizeof(struct hal_rx_desc_ipq8074),
+ .supports_regdb = false,
+ .fix_l1ss = true,
+ .credit_flow = false,
+ .max_tx_ring = DP_TCL_NUM_RING_MAX,
+ .hal_params = &ath11k_hw_hal_params_ipq8074,
+ .supports_dynamic_smps_6ghz = false,
+ .alloc_cacheable_memory = true,
+ .supports_rssi_stats = false,
+ .fw_wmi_diag_event = false,
+ .current_cc_support = false,
+ .dbr_debug_support = true,
+ .global_reset = false,
+ .bios_sar_capa = NULL,
+ .m3_fw_support = false,
+ .fixed_bdf_addr = true,
+ .fixed_mem_region = true,
+ .static_window_map = false,
+ .hybrid_bus_type = false,
+ .fixed_fw_mem = false,
+ .support_off_channel_tx = false,
+ .supports_multi_bssid = false,
+
+ .sram_dump = {},
+
+ .tcl_ring_retry = true,
+ .tx_ring_size = DP_TCL_DATA_RING_SIZE,
+ .smp2p_wow_exit = false,
+ .support_fw_mac_sequence = false,
+ .support_dual_stations = false,
+ .pdev_suspend = false,
+ },
+ {
+ .name = "qca6390 hw2.0",
+ .hw_rev = ATH11K_HW_QCA6390_HW20,
+ .fw = {
+ .dir = "QCA6390/hw2.0",
+ .board_size = 256 * 1024,
+ .cal_offset = 128 * 1024,
+ },
+ .max_radios = 3,
+ .bdf_addr = 0x4B0C0000,
+ .hw_ops = &qca6390_ops,
+ .ring_mask = &ath11k_hw_ring_mask_qca6390,
+ .internal_sleep_clock = true,
+ .regs = &qca6390_regs,
+ .qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_QCA6390,
+ .host_ce_config = ath11k_host_ce_config_qca6390,
+ .ce_count = 9,
+ .target_ce_config = ath11k_target_ce_config_wlan_qca6390,
+ .target_ce_count = 9,
+ .svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qca6390,
+ .svc_to_ce_map_len = 14,
+ .ce_ie_addr = &ath11k_ce_ie_addr_ipq8074,
+ .single_pdev_only = true,
+ .rxdma1_enable = false,
+ .num_rxdma_per_pdev = 2,
+ .rx_mac_buf_ring = true,
+ .vdev_start_delay = true,
+ .htt_peer_map_v2 = false,
+
+ .spectral = {
+ .fft_sz = 0,
+ .fft_pad_sz = 0,
+ .summary_pad_sz = 0,
+ .fft_hdr_len = 0,
+ .max_fft_bins = 0,
+ .fragment_160mhz = false,
+ },
+
+ .interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_P2P_DEVICE) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_P2P_GO),
+ .supports_monitor = false,
+ .full_monitor_mode = false,
+ .supports_shadow_regs = true,
+ .idle_ps = true,
+ .supports_sta_ps = true,
+ .coldboot_cal_mm = false,
+ .coldboot_cal_ftm = false,
+ .cbcal_restart_fw = false,
+ .fw_mem_mode = 0,
+ .num_vdevs = 2 + 1,
+ .num_peers = 512,
+ .supports_suspend = true,
+ .hal_desc_sz = sizeof(struct hal_rx_desc_ipq8074),
+ .supports_regdb = false,
+ .fix_l1ss = true,
+ .credit_flow = true,
+ .max_tx_ring = DP_TCL_NUM_RING_MAX_QCA6390,
+ .hal_params = &ath11k_hw_hal_params_qca6390,
+ .supports_dynamic_smps_6ghz = false,
+ .alloc_cacheable_memory = false,
+ .supports_rssi_stats = true,
+ .fw_wmi_diag_event = true,
+ .current_cc_support = true,
+ .dbr_debug_support = false,
+ .global_reset = true,
+ .bios_sar_capa = NULL,
+ .m3_fw_support = true,
+ .fixed_bdf_addr = false,
+ .fixed_mem_region = false,
+ .static_window_map = false,
+ .hybrid_bus_type = false,
+ .fixed_fw_mem = false,
+ .support_off_channel_tx = true,
+ .supports_multi_bssid = true,
+
+ .sram_dump = {
+ .start = 0x01400000,
+ .end = 0x0171ffff,
+ },
+
+ .tcl_ring_retry = true,
+ .tx_ring_size = DP_TCL_DATA_RING_SIZE,
+ .smp2p_wow_exit = false,
+ .support_fw_mac_sequence = true,
+ .support_dual_stations = true,
+ .pdev_suspend = false,
+ },
+ {
+ .name = "qcn9074 hw1.0",
+ .hw_rev = ATH11K_HW_QCN9074_HW10,
+ .fw = {
+ .dir = "QCN9074/hw1.0",
+ .board_size = 256 * 1024,
+ .cal_offset = 128 * 1024,
+ },
+ .max_radios = 1,
+ .single_pdev_only = false,
+ .qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_QCN9074,
+ .hw_ops = &qcn9074_ops,
+ .ring_mask = &ath11k_hw_ring_mask_qcn9074,
+ .internal_sleep_clock = false,
+ .regs = &qcn9074_regs,
+ .host_ce_config = ath11k_host_ce_config_qcn9074,
+ .ce_count = 6,
+ .target_ce_config = ath11k_target_ce_config_wlan_qcn9074,
+ .target_ce_count = 9,
+ .svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qcn9074,
+ .svc_to_ce_map_len = 18,
+ .ce_ie_addr = &ath11k_ce_ie_addr_ipq8074,
+ .rxdma1_enable = true,
+ .num_rxdma_per_pdev = 1,
+ .rx_mac_buf_ring = false,
+ .vdev_start_delay = false,
+ .htt_peer_map_v2 = true,
+
+ .spectral = {
+ .fft_sz = 2,
+ .fft_pad_sz = 0,
+ .summary_pad_sz = 16,
+ .fft_hdr_len = 24,
+ .max_fft_bins = 1024,
+ .fragment_160mhz = false,
+ },
+
+ .interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_MESH_POINT),
+ .supports_monitor = true,
+ .full_monitor_mode = true,
+ .supports_shadow_regs = false,
+ .idle_ps = false,
+ .supports_sta_ps = false,
+ .coldboot_cal_mm = false,
+ .coldboot_cal_ftm = true,
+ .cbcal_restart_fw = true,
+ .fw_mem_mode = 2,
+ .num_vdevs = 8,
+ .num_peers = 128,
+ .supports_suspend = false,
+ .hal_desc_sz = sizeof(struct hal_rx_desc_qcn9074),
+ .supports_regdb = false,
+ .fix_l1ss = true,
+ .credit_flow = false,
+ .max_tx_ring = DP_TCL_NUM_RING_MAX,
+ .hal_params = &ath11k_hw_hal_params_ipq8074,
+ .supports_dynamic_smps_6ghz = true,
+ .alloc_cacheable_memory = true,
+ .supports_rssi_stats = false,
+ .fw_wmi_diag_event = false,
+ .current_cc_support = false,
+ .dbr_debug_support = true,
+ .global_reset = false,
+ .bios_sar_capa = NULL,
+ .m3_fw_support = true,
+ .fixed_bdf_addr = false,
+ .fixed_mem_region = false,
+ .static_window_map = true,
+ .hybrid_bus_type = false,
+ .fixed_fw_mem = false,
+ .support_off_channel_tx = false,
+ .supports_multi_bssid = false,
+
+ .sram_dump = {},
+
+ .tcl_ring_retry = true,
+ .tx_ring_size = DP_TCL_DATA_RING_SIZE,
+ .smp2p_wow_exit = false,
+ .support_fw_mac_sequence = false,
+ .support_dual_stations = false,
+ .pdev_suspend = false,
+ },
+ {
+ .name = "wcn6855 hw2.0",
+ .hw_rev = ATH11K_HW_WCN6855_HW20,
+ .fw = {
+ .dir = "WCN6855/hw2.0",
+ .board_size = 256 * 1024,
+ .cal_offset = 128 * 1024,
+ },
+ .max_radios = 3,
+ .bdf_addr = 0x4B0C0000,
+ .hw_ops = &wcn6855_ops,
+ .ring_mask = &ath11k_hw_ring_mask_qca6390,
+ .internal_sleep_clock = true,
+ .regs = &wcn6855_regs,
+ .qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_QCA6390,
+ .host_ce_config = ath11k_host_ce_config_qca6390,
+ .ce_count = 9,
+ .target_ce_config = ath11k_target_ce_config_wlan_qca6390,
+ .target_ce_count = 9,
+ .svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qca6390,
+ .svc_to_ce_map_len = 14,
+ .ce_ie_addr = &ath11k_ce_ie_addr_ipq8074,
+ .single_pdev_only = true,
+ .rxdma1_enable = false,
+ .num_rxdma_per_pdev = 2,
+ .rx_mac_buf_ring = true,
+ .vdev_start_delay = true,
+ .htt_peer_map_v2 = false,
+
+ .spectral = {
+ .fft_sz = 0,
+ .fft_pad_sz = 0,
+ .summary_pad_sz = 0,
+ .fft_hdr_len = 0,
+ .max_fft_bins = 0,
+ .fragment_160mhz = false,
+ },
+
+ .interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_P2P_DEVICE) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_P2P_GO),
+ .supports_monitor = false,
+ .full_monitor_mode = false,
+ .supports_shadow_regs = true,
+ .idle_ps = true,
+ .supports_sta_ps = true,
+ .coldboot_cal_mm = false,
+ .coldboot_cal_ftm = false,
+ .cbcal_restart_fw = false,
+ .fw_mem_mode = 0,
+ .num_vdevs = 2 + 1,
+ .num_peers = 512,
+ .supports_suspend = true,
+ .hal_desc_sz = sizeof(struct hal_rx_desc_wcn6855),
+ .supports_regdb = true,
+ .fix_l1ss = false,
+ .credit_flow = true,
+ .max_tx_ring = DP_TCL_NUM_RING_MAX_QCA6390,
+ .hal_params = &ath11k_hw_hal_params_qca6390,
+ .supports_dynamic_smps_6ghz = false,
+ .alloc_cacheable_memory = false,
+ .supports_rssi_stats = true,
+ .fw_wmi_diag_event = true,
+ .current_cc_support = true,
+ .dbr_debug_support = false,
+ .global_reset = true,
+ .bios_sar_capa = &ath11k_hw_sar_capa_wcn6855,
+ .m3_fw_support = true,
+ .fixed_bdf_addr = false,
+ .fixed_mem_region = false,
+ .static_window_map = false,
+ .hybrid_bus_type = false,
+ .fixed_fw_mem = false,
+ .support_off_channel_tx = true,
+ .supports_multi_bssid = true,
+
+ .sram_dump = {
+ .start = 0x01400000,
+ .end = 0x0177ffff,
+ },
+
+ .tcl_ring_retry = true,
+ .tx_ring_size = DP_TCL_DATA_RING_SIZE,
+ .smp2p_wow_exit = false,
+ .support_fw_mac_sequence = true,
+ .support_dual_stations = true,
+ .pdev_suspend = false,
+ },
+ {
+ .name = "wcn6855 hw2.1",
+ .hw_rev = ATH11K_HW_WCN6855_HW21,
+ .fw = {
+ .dir = "WCN6855/hw2.1",
+ .board_size = 256 * 1024,
+ .cal_offset = 128 * 1024,
+ },
+ .max_radios = 3,
+ .bdf_addr = 0x4B0C0000,
+ .hw_ops = &wcn6855_ops,
+ .ring_mask = &ath11k_hw_ring_mask_qca6390,
+ .internal_sleep_clock = true,
+ .regs = &wcn6855_regs,
+ .qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_QCA6390,
+ .host_ce_config = ath11k_host_ce_config_qca6390,
+ .ce_count = 9,
+ .target_ce_config = ath11k_target_ce_config_wlan_qca6390,
+ .target_ce_count = 9,
+ .svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qca6390,
+ .svc_to_ce_map_len = 14,
+ .single_pdev_only = true,
+ .rxdma1_enable = false,
+ .num_rxdma_per_pdev = 2,
+ .rx_mac_buf_ring = true,
+ .vdev_start_delay = true,
+ .htt_peer_map_v2 = false,
+
+ .spectral = {
+ .fft_sz = 0,
+ .fft_pad_sz = 0,
+ .summary_pad_sz = 0,
+ .fft_hdr_len = 0,
+ .max_fft_bins = 0,
+ .fragment_160mhz = false,
+ },
+
+ .interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_P2P_DEVICE) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_P2P_GO),
+ .supports_monitor = false,
+ .supports_shadow_regs = true,
+ .idle_ps = true,
+ .supports_sta_ps = true,
+ .coldboot_cal_mm = false,
+ .coldboot_cal_ftm = false,
+ .cbcal_restart_fw = false,
+ .fw_mem_mode = 0,
+ .num_vdevs = 2 + 1,
+ .num_peers = 512,
+ .supports_suspend = true,
+ .hal_desc_sz = sizeof(struct hal_rx_desc_wcn6855),
+ .supports_regdb = true,
+ .fix_l1ss = false,
+ .credit_flow = true,
+ .max_tx_ring = DP_TCL_NUM_RING_MAX_QCA6390,
+ .hal_params = &ath11k_hw_hal_params_qca6390,
+ .supports_dynamic_smps_6ghz = false,
+ .alloc_cacheable_memory = false,
+ .supports_rssi_stats = true,
+ .fw_wmi_diag_event = true,
+ .current_cc_support = true,
+ .dbr_debug_support = false,
+ .global_reset = true,
+ .bios_sar_capa = &ath11k_hw_sar_capa_wcn6855,
+ .m3_fw_support = true,
+ .fixed_bdf_addr = false,
+ .fixed_mem_region = false,
+ .static_window_map = false,
+ .hybrid_bus_type = false,
+ .fixed_fw_mem = false,
+ .support_off_channel_tx = true,
+ .supports_multi_bssid = true,
+
+ .sram_dump = {
+ .start = 0x01400000,
+ .end = 0x0177ffff,
+ },
+
+ .tcl_ring_retry = true,
+ .tx_ring_size = DP_TCL_DATA_RING_SIZE,
+ .smp2p_wow_exit = false,
+ .support_fw_mac_sequence = true,
+ .support_dual_stations = true,
+ .pdev_suspend = false,
+ },
+ {
+ .name = "wcn6750 hw1.0",
+ .hw_rev = ATH11K_HW_WCN6750_HW10,
+ .fw = {
+ .dir = "WCN6750/hw1.0",
+ .board_size = 256 * 1024,
+ .cal_offset = 128 * 1024,
+ },
+ .max_radios = 1,
+ .bdf_addr = 0x4B0C0000,
+ .hw_ops = &wcn6750_ops,
+ .ring_mask = &ath11k_hw_ring_mask_wcn6750,
+ .internal_sleep_clock = false,
+ .regs = &wcn6750_regs,
+ .qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_WCN6750,
+ .host_ce_config = ath11k_host_ce_config_qca6390,
+ .ce_count = 9,
+ .target_ce_config = ath11k_target_ce_config_wlan_qca6390,
+ .target_ce_count = 9,
+ .svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qca6390,
+ .svc_to_ce_map_len = 14,
+ .ce_ie_addr = &ath11k_ce_ie_addr_ipq8074,
+ .single_pdev_only = true,
+ .rxdma1_enable = false,
+ .num_rxdma_per_pdev = 1,
+ .rx_mac_buf_ring = true,
+ .vdev_start_delay = true,
+ .htt_peer_map_v2 = false,
+
+ .spectral = {
+ .fft_sz = 0,
+ .fft_pad_sz = 0,
+ .summary_pad_sz = 0,
+ .fft_hdr_len = 0,
+ .max_fft_bins = 0,
+ .fragment_160mhz = false,
+ },
+
+ .interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP),
+ .supports_monitor = false,
+ .supports_shadow_regs = true,
+ .idle_ps = true,
+ .supports_sta_ps = true,
+ .coldboot_cal_mm = true,
+ .coldboot_cal_ftm = true,
+ .cbcal_restart_fw = false,
+ .fw_mem_mode = 0,
+ .num_vdevs = 3,
+ .num_peers = 512,
+ .supports_suspend = false,
+ .hal_desc_sz = sizeof(struct hal_rx_desc_qcn9074),
+ .supports_regdb = true,
+ .fix_l1ss = false,
+ .credit_flow = true,
+ .max_tx_ring = DP_TCL_NUM_RING_MAX,
+ .hal_params = &ath11k_hw_hal_params_wcn6750,
+ .supports_dynamic_smps_6ghz = false,
+ .alloc_cacheable_memory = false,
+ .supports_rssi_stats = true,
+ .fw_wmi_diag_event = true,
+ .current_cc_support = true,
+ .dbr_debug_support = false,
+ .global_reset = false,
+ .bios_sar_capa = &ath11k_hw_sar_capa_wcn6855,
+ .m3_fw_support = false,
+ .fixed_bdf_addr = false,
+ .fixed_mem_region = false,
+ .static_window_map = true,
+ .hybrid_bus_type = true,
+ .fixed_fw_mem = true,
+ .support_off_channel_tx = true,
+ .supports_multi_bssid = true,
+
+ .sram_dump = {},
+
+ .tcl_ring_retry = false,
+ .tx_ring_size = DP_TCL_DATA_RING_SIZE_WCN6750,
+ .smp2p_wow_exit = true,
+ .support_fw_mac_sequence = true,
+ .support_dual_stations = false,
+ .pdev_suspend = true,
+ },
+ {
+ .hw_rev = ATH11K_HW_IPQ5018_HW10,
+ .name = "ipq5018 hw1.0",
+ .fw = {
+ .dir = "IPQ5018/hw1.0",
+ .board_size = 256 * 1024,
+ .cal_offset = 128 * 1024,
+ },
+ .max_radios = MAX_RADIOS_5018,
+ .bdf_addr = 0x4BA00000,
+ /* hal_desc_sz and hw ops are similar to qcn9074 */
+ .hal_desc_sz = sizeof(struct hal_rx_desc_qcn9074),
+ .qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_IPQ8074,
+ .ring_mask = &ath11k_hw_ring_mask_ipq8074,
+ .credit_flow = false,
+ .max_tx_ring = 1,
+ .spectral = {
+ .fft_sz = 2,
+ .fft_pad_sz = 0,
+ .summary_pad_sz = 16,
+ .fft_hdr_len = 24,
+ .max_fft_bins = 1024,
+ },
+ .internal_sleep_clock = false,
+ .regs = &ipq5018_regs,
+ .hw_ops = &ipq5018_ops,
+ .host_ce_config = ath11k_host_ce_config_qcn9074,
+ .ce_count = CE_CNT_5018,
+ .target_ce_config = ath11k_target_ce_config_wlan_ipq5018,
+ .target_ce_count = TARGET_CE_CNT_5018,
+ .svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_ipq5018,
+ .svc_to_ce_map_len = SVC_CE_MAP_LEN_5018,
+ .ce_ie_addr = &ath11k_ce_ie_addr_ipq5018,
+ .ce_remap = &ath11k_ce_remap_ipq5018,
+ .rxdma1_enable = true,
+ .num_rxdma_per_pdev = RXDMA_PER_PDEV_5018,
+ .rx_mac_buf_ring = false,
+ .vdev_start_delay = false,
+ .htt_peer_map_v2 = true,
+ .interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_MESH_POINT),
+ .supports_monitor = false,
+ .supports_sta_ps = false,
+ .supports_shadow_regs = false,
+ .fw_mem_mode = 0,
+ .num_vdevs = 16 + 1,
+ .num_peers = 512,
+ .supports_regdb = false,
+ .idle_ps = false,
+ .supports_suspend = false,
+ .hal_params = &ath11k_hw_hal_params_ipq8074,
+ .single_pdev_only = false,
+ .coldboot_cal_mm = true,
+ .coldboot_cal_ftm = true,
+ .cbcal_restart_fw = true,
+ .fix_l1ss = true,
+ .supports_dynamic_smps_6ghz = false,
+ .alloc_cacheable_memory = true,
+ .supports_rssi_stats = false,
+ .fw_wmi_diag_event = false,
+ .current_cc_support = false,
+ .dbr_debug_support = true,
+ .global_reset = false,
+ .bios_sar_capa = NULL,
+ .m3_fw_support = false,
+ .fixed_bdf_addr = true,
+ .fixed_mem_region = true,
+ .static_window_map = false,
+ .hybrid_bus_type = false,
+ .fixed_fw_mem = false,
+ .support_off_channel_tx = false,
+ .supports_multi_bssid = false,
+
+ .sram_dump = {},
+
+ .tcl_ring_retry = true,
+ .tx_ring_size = DP_TCL_DATA_RING_SIZE,
+ .smp2p_wow_exit = false,
+ .support_fw_mac_sequence = false,
+ .support_dual_stations = false,
+ .pdev_suspend = false,
+ },
+ {
+ .name = "qca2066 hw2.1",
+ .hw_rev = ATH11K_HW_QCA2066_HW21,
+ .fw = {
+ .dir = "QCA2066/hw2.1",
+ .board_size = 256 * 1024,
+ .cal_offset = 128 * 1024,
+ },
+ .max_radios = 3,
+ .bdf_addr = 0x4B0C0000,
+ .hw_ops = &wcn6855_ops,
+ .ring_mask = &ath11k_hw_ring_mask_qca6390,
+ .internal_sleep_clock = true,
+ .regs = &wcn6855_regs,
+ .qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_QCA6390,
+ .host_ce_config = ath11k_host_ce_config_qca6390,
+ .ce_count = 9,
+ .target_ce_config = ath11k_target_ce_config_wlan_qca6390,
+ .target_ce_count = 9,
+ .svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qca6390,
+ .svc_to_ce_map_len = 14,
+ .ce_ie_addr = &ath11k_ce_ie_addr_ipq8074,
+ .single_pdev_only = true,
+ .rxdma1_enable = false,
+ .num_rxdma_per_pdev = 2,
+ .rx_mac_buf_ring = true,
+ .vdev_start_delay = true,
+ .htt_peer_map_v2 = false,
+
+ .spectral = {
+ .fft_sz = 0,
+ .fft_pad_sz = 0,
+ .summary_pad_sz = 0,
+ .fft_hdr_len = 0,
+ .max_fft_bins = 0,
+ .fragment_160mhz = false,
+ },
+
+ .interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_P2P_DEVICE) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_P2P_GO),
+ .supports_monitor = false,
+ .full_monitor_mode = false,
+ .supports_shadow_regs = true,
+ .idle_ps = true,
+ .supports_sta_ps = true,
+ .coldboot_cal_mm = false,
+ .coldboot_cal_ftm = false,
+ .cbcal_restart_fw = false,
+ .fw_mem_mode = 0,
+ .num_vdevs = 2 + 1,
+ .num_peers = 512,
+ .supports_suspend = true,
+ .hal_desc_sz = sizeof(struct hal_rx_desc_wcn6855),
+ .supports_regdb = true,
+ .fix_l1ss = false,
+ .credit_flow = true,
+ .max_tx_ring = DP_TCL_NUM_RING_MAX_QCA6390,
+ .hal_params = &ath11k_hw_hal_params_qca6390,
+ .supports_dynamic_smps_6ghz = false,
+ .alloc_cacheable_memory = false,
+ .supports_rssi_stats = true,
+ .fw_wmi_diag_event = true,
+ .current_cc_support = true,
+ .dbr_debug_support = false,
+ .global_reset = true,
+ .bios_sar_capa = &ath11k_hw_sar_capa_wcn6855,
+ .m3_fw_support = true,
+ .fixed_bdf_addr = false,
+ .fixed_mem_region = false,
+ .static_window_map = false,
+ .hybrid_bus_type = false,
+ .fixed_fw_mem = false,
+ .support_off_channel_tx = true,
+ .supports_multi_bssid = true,
+
+ .sram_dump = {
+ .start = 0x01400000,
+ .end = 0x0177ffff,
+ },
+
+ .tcl_ring_retry = true,
+ .tx_ring_size = DP_TCL_DATA_RING_SIZE,
+ .smp2p_wow_exit = false,
+ .support_fw_mac_sequence = true,
+ .support_dual_stations = true,
+ },
+ {
+ .name = "qca6698aq hw2.1",
+ .hw_rev = ATH11K_HW_QCA6698AQ_HW21,
+ .fw = {
+ .dir = "QCA6698AQ/hw2.1",
+ .board_size = 256 * 1024,
+ .cal_offset = 128 * 1024,
+ },
+ .max_radios = 3,
+ .bdf_addr = 0x4B0C0000,
+ .hw_ops = &wcn6855_ops,
+ .ring_mask = &ath11k_hw_ring_mask_qca6390,
+ .internal_sleep_clock = true,
+ .regs = &wcn6855_regs,
+ .qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_QCA6390,
+ .host_ce_config = ath11k_host_ce_config_qca6390,
+ .ce_count = 9,
+ .target_ce_config = ath11k_target_ce_config_wlan_qca6390,
+ .target_ce_count = 9,
+ .svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qca6390,
+ .svc_to_ce_map_len = 14,
+ .single_pdev_only = true,
+ .rxdma1_enable = false,
+ .num_rxdma_per_pdev = 2,
+ .rx_mac_buf_ring = true,
+ .vdev_start_delay = true,
+ .htt_peer_map_v2 = false,
+
+ .spectral = {
+ .fft_sz = 0,
+ .fft_pad_sz = 0,
+ .summary_pad_sz = 0,
+ .fft_hdr_len = 0,
+ .max_fft_bins = 0,
+ .fragment_160mhz = false,
+ },
+
+ .interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_P2P_DEVICE) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_P2P_GO),
+ .supports_monitor = false,
+ .supports_shadow_regs = true,
+ .idle_ps = true,
+ .supports_sta_ps = true,
+ .coldboot_cal_mm = false,
+ .coldboot_cal_ftm = false,
+ .cbcal_restart_fw = false,
+ .fw_mem_mode = 0,
+ .num_vdevs = 2 + 1,
+ .num_peers = 512,
+ .supports_suspend = true,
+ .hal_desc_sz = sizeof(struct hal_rx_desc_wcn6855),
+ .supports_regdb = true,
+ .fix_l1ss = false,
+ .credit_flow = true,
+ .max_tx_ring = DP_TCL_NUM_RING_MAX_QCA6390,
+ .hal_params = &ath11k_hw_hal_params_qca6390,
+ .supports_dynamic_smps_6ghz = false,
+ .alloc_cacheable_memory = false,
+ .supports_rssi_stats = true,
+ .fw_wmi_diag_event = true,
+ .current_cc_support = true,
+ .dbr_debug_support = false,
+ .global_reset = true,
+ .bios_sar_capa = &ath11k_hw_sar_capa_wcn6855,
+ .m3_fw_support = true,
+ .fixed_bdf_addr = false,
+ .fixed_mem_region = false,
+ .static_window_map = false,
+ .hybrid_bus_type = false,
+ .fixed_fw_mem = false,
+ .support_off_channel_tx = true,
+ .supports_multi_bssid = true,
+
+ .sram_dump = {
+ .start = 0x01400000,
+ .end = 0x0177ffff,
+ },
+
+ .tcl_ring_retry = true,
+ .tx_ring_size = DP_TCL_DATA_RING_SIZE,
+ .smp2p_wow_exit = false,
+ .support_fw_mac_sequence = true,
+ .support_dual_stations = true,
+ .pdev_suspend = false,
+ },
+};
+
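+/* PM policy quirks: the platforms below use the WOW-based suspend path
+ * instead of the default one. Entries are matched by DMI board vendor
+ * and product name and carry the chosen policy in driver_data.
+ */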
+static const struct dmi_system_id ath11k_pm_quirk_table[] = {
+ {
+ .driver_data = (void *)ATH11K_PM_WOW,
+ .matches = { /* X13 G4 AMD #1 */
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "21J3"),
+ },
+ },
+ {
+ .driver_data = (void *)ATH11K_PM_WOW,
+ .matches = { /* X13 G4 AMD #2 */
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "21J4"),
+ },
+ },
+ {
+ .driver_data = (void *)ATH11K_PM_WOW,
+ .matches = { /* T14 G4 AMD #1 */
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "21K3"),
+ },
+ },
+ {
+ .driver_data = (void *)ATH11K_PM_WOW,
+ .matches = { /* T14 G4 AMD #2 */
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "21K4"),
+ },
+ },
+ {
+ .driver_data = (void *)ATH11K_PM_WOW,
+ .matches = { /* P14s G4 AMD #1 */
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "21K5"),
+ },
+ },
+ {
+ .driver_data = (void *)ATH11K_PM_WOW,
+ .matches = { /* P14s G4 AMD #2 */
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "21K6"),
+ },
+ },
+ {
+ .driver_data = (void *)ATH11K_PM_WOW,
+ .matches = { /* T16 G2 AMD #1 */
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "21K7"),
+ },
+ },
+ {
+ .driver_data = (void *)ATH11K_PM_WOW,
+ .matches = { /* T16 G2 AMD #2 */
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "21K8"),
+ },
+ },
+ {
+ .driver_data = (void *)ATH11K_PM_WOW,
+ .matches = { /* P16s G2 AMD #1 */
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "21K9"),
+ },
+ },
+ {
+ .driver_data = (void *)ATH11K_PM_WOW,
+ .matches = { /* P16s G2 AMD #2 */
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "21KA"),
+ },
+ },
+ {
+ .driver_data = (void *)ATH11K_PM_WOW,
+ .matches = { /* T14s G4 AMD #1 */
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "21F8"),
+ },
+ },
+ {
+ .driver_data = (void *)ATH11K_PM_WOW,
+ .matches = { /* T14s G4 AMD #2 */
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "21F9"),
+ },
+ },
+ {}
+};
+
+void ath11k_fw_stats_pdevs_free(struct list_head *head)
+{
+ struct ath11k_fw_stats_pdev *i, *tmp;
+
+ list_for_each_entry_safe(i, tmp, head, list) {
+ list_del(&i->list);
+ kfree(i);
+ }
+}
+
+void ath11k_fw_stats_vdevs_free(struct list_head *head)
+{
+ struct ath11k_fw_stats_vdev *i, *tmp;
+
+ list_for_each_entry_safe(i, tmp, head, list) {
+ list_del(&i->list);
+ kfree(i);
+ }
+}
+
+void ath11k_fw_stats_bcn_free(struct list_head *head)
+{
+ struct ath11k_fw_stats_bcn *i, *tmp;
+
+ list_for_each_entry_safe(i, tmp, head, list) {
+ list_del(&i->list);
+ kfree(i);
+ }
+}
+
+void ath11k_fw_stats_init(struct ath11k *ar)
+{
+ INIT_LIST_HEAD(&ar->fw_stats.pdevs);
+ INIT_LIST_HEAD(&ar->fw_stats.vdevs);
+ INIT_LIST_HEAD(&ar->fw_stats.bcn);
+
+ init_completion(&ar->fw_stats_complete);
+ init_completion(&ar->fw_stats_done);
+}
+
+void ath11k_fw_stats_free(struct ath11k_fw_stats *stats)
+{
+ ath11k_fw_stats_pdevs_free(&stats->pdevs);
+ ath11k_fw_stats_vdevs_free(&stats->vdevs);
+ ath11k_fw_stats_bcn_free(&stats->bcn);
+}
+
+bool ath11k_core_coldboot_cal_support(struct ath11k_base *ab)
+{
+ if (!ath11k_cold_boot_cal)
+ return false;
+
+ if (ath11k_ftm_mode)
+ return ab->hw_params.coldboot_cal_ftm;
+ else
+ return ab->hw_params.coldboot_cal_mm;
+}
+
+/* Check whether we need to continue with the suspend/resume operation.
+ * Return:
+ * a negative value: an error occurred, do not continue.
+ * 0: no error, but do not continue.
+ * a positive value: no error, do continue.
+ */
+static int ath11k_core_continue_suspend_resume(struct ath11k_base *ab)
+{
+ struct ath11k *ar;
+
+ if (!ab->hw_params.supports_suspend)
+ return -EOPNOTSUPP;
+
+ /* So far only single_pdev_only chips have supports_suspend set,
+ * so it is valid to use the first, and the only, pdev here.
+ */
+ ar = ab->pdevs[0].ar;
+ if (!ar || ar->state != ATH11K_STATE_OFF)
+ return 0;
+
+ return 1;
+}
+
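+/* WOW suspend sequence: stop pktlog, wait for pending tx to complete,
+ * arm firmware wakeup (WOW), then quiesce the shadow timers and
+ * interrupts before finally suspending the HIF.
+ */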
+static int ath11k_core_suspend_wow(struct ath11k_base *ab)
+{
+ int ret;
+
+ ret = ath11k_dp_rx_pktlog_stop(ab, true);
+ if (ret) {
+ ath11k_warn(ab, "failed to stop dp rx (and timer) pktlog during suspend: %d\n",
+ ret);
+ return ret;
+ }
+
+ /* So far only single_pdev_only devices can reach here,
+ * so it is valid to handle the first, and the only, pdev.
+ */
+ ret = ath11k_mac_wait_tx_complete(ab->pdevs[0].ar);
+ if (ret) {
+ ath11k_warn(ab, "failed to wait tx complete: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath11k_wow_enable(ab);
+ if (ret) {
+ ath11k_warn(ab, "failed to enable wow during suspend: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath11k_dp_rx_pktlog_stop(ab, false);
+ if (ret) {
+ ath11k_warn(ab, "failed to stop dp rx pktlog during suspend: %d\n",
+ ret);
+ return ret;
+ }
+
+ ath11k_ce_stop_shadow_timers(ab);
+ ath11k_dp_stop_shadow_timers(ab);
+
+ ath11k_hif_irq_disable(ab);
+ ath11k_hif_ce_irq_disable(ab);
+
+ ret = ath11k_hif_suspend(ab);
+ if (ret) {
+ ath11k_warn(ab, "failed to suspend hif: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath11k_core_suspend_default(struct ath11k_base *ab)
+{
+ int ret;
+
+ ret = ath11k_dp_rx_pktlog_stop(ab, true);
+ if (ret) {
+ ath11k_warn(ab, "failed to stop dp rx (and timer) pktlog during suspend: %d\n",
+ ret);
+ return ret;
+ }
+
+ /* So far only single_pdev_only devices can reach here,
+ * so it is valid to handle the first, and the only, pdev.
+ */
+ ret = ath11k_mac_wait_tx_complete(ab->pdevs[0].ar);
+ if (ret) {
+ ath11k_warn(ab, "failed to wait tx complete: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath11k_dp_rx_pktlog_stop(ab, false);
+ if (ret) {
+ ath11k_warn(ab, "failed to stop dp rx pktlog during suspend: %d\n",
+ ret);
+ return ret;
+ }
+
+ ath11k_ce_stop_shadow_timers(ab);
+ ath11k_dp_stop_shadow_timers(ab);
+
+ /* The PM framework skips the suspend_late/resume_early callbacks
+ * if another device reports an error in its suspend callback.
+ * However, ath11k_core_resume() is still called, because we return
+ * success here and the kernel therefore puts us on dpm_suspended_list.
+ * Since we then do not go through a power down/up cycle, there is
+ * no chance for ath11k_core_restart() to call
+ * complete(&ab->restart_completed), and ath11k_core_resume() would
+ * time out. Complete it here to avoid that. This is also safe when
+ * no error occurs and suspend_late/resume_early do run, because the
+ * completion is reinitialized in ath11k_core_resume_early().
+ */
+ complete(&ab->restart_completed);
+
+ return 0;
+}
+
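+/* Suspend entry point: dispatch to the WOW or default path according to
+ * the PM policy captured by the PM notifier at suspend-prepare time.
+ */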
+int ath11k_core_suspend(struct ath11k_base *ab)
+{
+ int ret;
+
+ ret = ath11k_core_continue_suspend_resume(ab);
+ if (ret <= 0)
+ return ret;
+
+ if (ab->actual_pm_policy == ATH11K_PM_WOW)
+ return ath11k_core_suspend_wow(ab);
+
+ return ath11k_core_suspend_default(ab);
+}
+EXPORT_SYMBOL(ath11k_core_suspend);
+
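+/* With the default policy the device is fully powered down here and
+ * powered back up in ath11k_core_resume_early(); with the WOW policy
+ * the firmware keeps running, so there is nothing to do in these
+ * callbacks.
+ */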
+int ath11k_core_suspend_late(struct ath11k_base *ab)
+{
+ int ret;
+
+ ret = ath11k_core_continue_suspend_resume(ab);
+ if (ret <= 0)
+ return ret;
+
+ if (ab->actual_pm_policy == ATH11K_PM_WOW)
+ return 0;
+
+ ath11k_hif_irq_disable(ab);
+ ath11k_hif_ce_irq_disable(ab);
+
+ ath11k_hif_power_down(ab, true);
+
+ return 0;
+}
+EXPORT_SYMBOL(ath11k_core_suspend_late);
+
+int ath11k_core_resume_early(struct ath11k_base *ab)
+{
+ int ret;
+
+ ret = ath11k_core_continue_suspend_resume(ab);
+ if (ret <= 0)
+ return ret;
+
+ if (ab->actual_pm_policy == ATH11K_PM_WOW)
+ return 0;
+
+ reinit_completion(&ab->restart_completed);
+ ret = ath11k_hif_power_up(ab);
+ if (ret)
+ ath11k_warn(ab, "failed to power up hif during resume: %d\n", ret);
+
+ return ret;
+}
+EXPORT_SYMBOL(ath11k_core_resume_early);
+
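+/* Default-policy resume: wait for the restart triggered by the power-up
+ * in resume_early to complete, then restore the country code and
+ * restart rx pktlog.
+ */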
+static int ath11k_core_resume_default(struct ath11k_base *ab)
+{
+ struct ath11k *ar;
+ long time_left;
+ int ret;
+
+ time_left = wait_for_completion_timeout(&ab->restart_completed,
+ ATH11K_RESET_TIMEOUT_HZ);
+ if (time_left == 0) {
+ ath11k_warn(ab, "timeout while waiting for restart complete");
+ return -ETIMEDOUT;
+ }
+
+ /* So far only single_pdev_only devices can reach here,
+ * so it is valid to handle the first, and the only, pdev.
+ */
+ ar = ab->pdevs[0].ar;
+ if (ab->hw_params.current_cc_support &&
+ ar->alpha2[0] != 0 && ar->alpha2[1] != 0) {
+ ret = ath11k_reg_set_cc(ar);
+ if (ret) {
+ ath11k_warn(ab, "failed to set country code during resume: %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ ret = ath11k_dp_rx_pktlog_start(ab);
+ if (ret)
+ ath11k_warn(ab, "failed to start rx pktlog during resume: %d\n",
+ ret);
+
+ return ret;
+}
+
+static int ath11k_core_resume_wow(struct ath11k_base *ab)
+{
+ int ret;
+
+ ret = ath11k_hif_resume(ab);
+ if (ret) {
+ ath11k_warn(ab, "failed to resume hif during resume: %d\n", ret);
+ return ret;
+ }
+
+ ath11k_hif_ce_irq_enable(ab);
+ ath11k_hif_irq_enable(ab);
+
+ ret = ath11k_dp_rx_pktlog_start(ab);
+ if (ret) {
+ ath11k_warn(ab, "failed to start rx pktlog during resume: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = ath11k_wow_wakeup(ab);
+ if (ret) {
+ ath11k_warn(ab, "failed to wakeup wow during resume: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+int ath11k_core_resume(struct ath11k_base *ab)
+{
+ int ret;
+
+ ret = ath11k_core_continue_suspend_resume(ab);
+ if (ret <= 0)
+ return ret;
+
+ if (ab->actual_pm_policy == ATH11K_PM_WOW)
+ return ath11k_core_resume_wow(ab);
+
+ return ath11k_core_resume_default(ab);
+}
+EXPORT_SYMBOL(ath11k_core_resume);
+
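+/* dmi_walk() callback: pick up the regulatory country code and the BDF
+ * variant name from the vendor SMBIOS entry, validating type, length,
+ * magic prefix and printable-ASCII contents before copying.
+ */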
+static void ath11k_core_check_cc_code_bdfext(const struct dmi_header *hdr, void *data)
+{
+ struct ath11k_base *ab = data;
+ const char *magic = ATH11K_SMBIOS_BDF_EXT_MAGIC;
+ struct ath11k_smbios_bdf *smbios = (struct ath11k_smbios_bdf *)hdr;
+ ssize_t copied;
+ size_t len;
+ int i;
+
+ if (ab->qmi.target.bdf_ext[0] != '\0')
+ return;
+
+ if (hdr->type != ATH11K_SMBIOS_BDF_EXT_TYPE)
+ return;
+
+ if (hdr->length != ATH11K_SMBIOS_BDF_EXT_LENGTH) {
+ ath11k_dbg(ab, ATH11K_DBG_BOOT,
+ "wrong smbios bdf ext type length (%d).\n",
+ hdr->length);
+ return;
+ }
+
+ spin_lock_bh(&ab->base_lock);
+
+ switch (smbios->country_code_flag) {
+ case ATH11K_SMBIOS_CC_ISO:
+ ab->new_alpha2[0] = (smbios->cc_code >> 8) & 0xff;
+ ab->new_alpha2[1] = smbios->cc_code & 0xff;
+ ath11k_dbg(ab, ATH11K_DBG_BOOT, "smbios cc_code %c%c\n",
+ ab->new_alpha2[0], ab->new_alpha2[1]);
+ break;
+ case ATH11K_SMBIOS_CC_WW:
+ ab->new_alpha2[0] = '0';
+ ab->new_alpha2[1] = '0';
+ ath11k_dbg(ab, ATH11K_DBG_BOOT, "smbios worldwide regdomain\n");
+ break;
+ default:
+ ath11k_dbg(ab, ATH11K_DBG_BOOT, "ignore smbios country code setting %d\n",
+ smbios->country_code_flag);
+ break;
+ }
+
+ spin_unlock_bh(&ab->base_lock);
+
+ if (!smbios->bdf_enabled) {
+ ath11k_dbg(ab, ATH11K_DBG_BOOT, "bdf variant name not found.\n");
+ return;
+ }
+
+ /* Only one string exists (per spec) */
+ if (memcmp(smbios->bdf_ext, magic, strlen(magic)) != 0) {
+ ath11k_dbg(ab, ATH11K_DBG_BOOT,
+ "bdf variant magic does not match.\n");
+ return;
+ }
+
+ len = min_t(size_t,
+ strlen(smbios->bdf_ext), sizeof(ab->qmi.target.bdf_ext));
+ for (i = 0; i < len; i++) {
+ if (!isascii(smbios->bdf_ext[i]) || !isprint(smbios->bdf_ext[i])) {
+ ath11k_dbg(ab, ATH11K_DBG_BOOT,
+ "bdf variant name contains non ascii chars.\n");
+ return;
+ }
+ }
+
+ /* Copy extension name without magic prefix */
+ copied = strscpy(ab->qmi.target.bdf_ext, smbios->bdf_ext + strlen(magic),
+ sizeof(ab->qmi.target.bdf_ext));
+ if (copied < 0) {
+ ath11k_dbg(ab, ATH11K_DBG_BOOT,
+ "bdf variant string is longer than the buffer can accommodate\n");
+ return;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_BOOT,
+ "found and validated bdf variant smbios_type 0x%x bdf %s\n",
+ ATH11K_SMBIOS_BDF_EXT_TYPE, ab->qmi.target.bdf_ext);
+}
+
+int ath11k_core_check_smbios(struct ath11k_base *ab)
+{
+ ab->qmi.target.bdf_ext[0] = '\0';
+ dmi_walk(ath11k_core_check_cc_code_bdfext, ab);
+
+ if (ab->qmi.target.bdf_ext[0] == '\0')
+ return -ENODATA;
+
+ return 0;
+}
+
+int ath11k_core_check_dt(struct ath11k_base *ab)
+{
+ size_t max_len = sizeof(ab->qmi.target.bdf_ext);
+ const char *variant = NULL;
+ struct device_node *node;
+
+ node = ab->dev->of_node;
+ if (!node)
+ return -ENOENT;
+
+ of_property_read_string(node, "qcom,calibration-variant",
+ &variant);
+ if (!variant)
+ of_property_read_string(node, "qcom,ath11k-calibration-variant",
+ &variant);
+ if (!variant)
+ return -ENODATA;
+
+ if (strscpy(ab->qmi.target.bdf_ext, variant, max_len) < 0)
+ ath11k_dbg(ab, ATH11K_DBG_BOOT,
+ "bdf variant string is longer than the buffer can accommodate (variant: %s)\n",
+ variant);
+
+ return 0;
+}
+
+enum ath11k_bdf_name_type {
+ ATH11K_BDF_NAME_FULL,
+ ATH11K_BDF_NAME_BUS_NAME,
+ ATH11K_BDF_NAME_CHIP_ID,
+};
+
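+/* Build the board data lookup string from the device identity.
+ * Depending on the search mode and name type this is the full identity
+ * (bus, vendor/device and subsystem IDs, QMI chip and board IDs, plus
+ * an optional ",variant=" suffix) or a reduced bus/chip-id fallback
+ * form.
+ */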
+static int __ath11k_core_create_board_name(struct ath11k_base *ab, char *name,
+ size_t name_len, bool with_variant,
+ enum ath11k_bdf_name_type name_type)
+{
+ /* strlen(',variant=') + strlen(ab->qmi.target.bdf_ext) */
+ char variant[9 + ATH11K_QMI_BDF_EXT_STR_LENGTH] = {};
+
+ if (with_variant && ab->qmi.target.bdf_ext[0] != '\0')
+ scnprintf(variant, sizeof(variant), ",variant=%s",
+ ab->qmi.target.bdf_ext);
+
+ switch (ab->id.bdf_search) {
+ case ATH11K_BDF_SEARCH_BUS_AND_BOARD:
+ switch (name_type) {
+ case ATH11K_BDF_NAME_FULL:
+ scnprintf(name, name_len,
+ "bus=%s,vendor=%04x,device=%04x,subsystem-vendor=%04x,subsystem-device=%04x,qmi-chip-id=%d,qmi-board-id=%d%s",
+ ath11k_bus_str(ab->hif.bus),
+ ab->id.vendor, ab->id.device,
+ ab->id.subsystem_vendor,
+ ab->id.subsystem_device,
+ ab->qmi.target.chip_id,
+ ab->qmi.target.board_id,
+ variant);
+ break;
+ case ATH11K_BDF_NAME_BUS_NAME:
+ scnprintf(name, name_len,
+ "bus=%s",
+ ath11k_bus_str(ab->hif.bus));
+ break;
+ case ATH11K_BDF_NAME_CHIP_ID:
+ scnprintf(name, name_len,
+ "bus=%s,qmi-chip-id=%d",
+ ath11k_bus_str(ab->hif.bus),
+ ab->qmi.target.chip_id);
+ break;
+ }
+ break;
+ default:
+ scnprintf(name, name_len,
+ "bus=%s,qmi-chip-id=%d,qmi-board-id=%d%s",
+ ath11k_bus_str(ab->hif.bus),
+ ab->qmi.target.chip_id,
+ ab->qmi.target.board_id, variant);
+ break;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_BOOT, "using board name '%s'\n", name);
+
+ return 0;
+}
+
+static int ath11k_core_create_board_name(struct ath11k_base *ab, char *name,
+ size_t name_len)
+{
+ return __ath11k_core_create_board_name(ab, name, name_len, true,
+ ATH11K_BDF_NAME_FULL);
+}
+
+static int ath11k_core_create_fallback_board_name(struct ath11k_base *ab, char *name,
+ size_t name_len)
+{
+ return __ath11k_core_create_board_name(ab, name, name_len, false,
+ ATH11K_BDF_NAME_FULL);
+}
+
+static int ath11k_core_create_bus_type_board_name(struct ath11k_base *ab, char *name,
+ size_t name_len)
+{
+ return __ath11k_core_create_board_name(ab, name, name_len, false,
+ ATH11K_BDF_NAME_BUS_NAME);
+}
+
+static int ath11k_core_create_chip_id_board_name(struct ath11k_base *ab, char *name,
+ size_t name_len)
+{
+ return __ath11k_core_create_board_name(ab, name, name_len, false,
+ ATH11K_BDF_NAME_CHIP_ID);
+}
+
+const struct firmware *ath11k_core_firmware_request(struct ath11k_base *ab,
+ const char *file)
+{
+ const struct firmware *fw;
+ char path[100];
+ int ret;
+
+ if (file == NULL)
+ return ERR_PTR(-ENOENT);
+
+ ath11k_core_create_firmware_path(ab, file, path, sizeof(path));
+
+ ret = firmware_request_nowarn(&fw, path, ab->dev);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ath11k_dbg(ab, ATH11K_DBG_BOOT, "firmware request %s size %zu\n",
+ path, fw->size);
+
+ return fw;
+}
+
+void ath11k_core_free_bdf(struct ath11k_base *ab, struct ath11k_board_data *bd)
+{
+ if (!IS_ERR(bd->fw))
+ release_firmware(bd->fw);
+
+ memset(bd, 0, sizeof(*bd));
+}
+
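+/* Walk the (id, len, data) TLV elements inside a board IE: first find a
+ * name element matching @boardname, then return the data element that
+ * follows it. Element lengths are 4-byte aligned, so the padding is
+ * skipped when advancing through the buffer.
+ */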
+static int ath11k_core_parse_bd_ie_board(struct ath11k_base *ab,
+ struct ath11k_board_data *bd,
+ const void *buf, size_t buf_len,
+ const char *boardname,
+ int ie_id,
+ int name_id,
+ int data_id)
+{
+ const struct ath11k_fw_ie *hdr;
+ bool name_match_found;
+ int ret, board_ie_id;
+ size_t board_ie_len;
+ const void *board_ie_data;
+
+ name_match_found = false;
+
+ /* go through ATH11K_BD_IE_BOARD_/ATH11K_BD_IE_REGDB_ elements */
+ while (buf_len > sizeof(struct ath11k_fw_ie)) {
+ hdr = buf;
+ board_ie_id = le32_to_cpu(hdr->id);
+ board_ie_len = le32_to_cpu(hdr->len);
+ board_ie_data = hdr->data;
+
+ buf_len -= sizeof(*hdr);
+ buf += sizeof(*hdr);
+
+ if (buf_len < ALIGN(board_ie_len, 4)) {
+ ath11k_err(ab, "invalid %s length: %zu < %zu\n",
+ ath11k_bd_ie_type_str(ie_id),
+ buf_len, ALIGN(board_ie_len, 4));
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (board_ie_id == name_id) {
+ ath11k_dbg_dump(ab, ATH11K_DBG_BOOT, "board name", "",
+ board_ie_data, board_ie_len);
+
+ if (board_ie_len != strlen(boardname))
+ goto next;
+
+ ret = memcmp(board_ie_data, boardname, strlen(boardname));
+ if (ret)
+ goto next;
+
+ name_match_found = true;
+ ath11k_dbg(ab, ATH11K_DBG_BOOT,
+ "found match %s for name '%s'",
+ ath11k_bd_ie_type_str(ie_id),
+ boardname);
+ } else if (board_ie_id == data_id) {
+ if (!name_match_found)
+ /* no match found */
+ goto next;
+
+ ath11k_dbg(ab, ATH11K_DBG_BOOT,
+ "found %s for '%s'",
+ ath11k_bd_ie_type_str(ie_id),
+ boardname);
+
+ bd->data = board_ie_data;
+ bd->len = board_ie_len;
+
+ ret = 0;
+ goto out;
+ } else {
+ ath11k_warn(ab, "unknown %s id found: %d\n",
+ ath11k_bd_ie_type_str(ie_id),
+ board_ie_id);
+ }
+next:
+ /* jump over the padding */
+ board_ie_len = ALIGN(board_ie_len, 4);
+
+ buf_len -= board_ie_len;
+ buf += board_ie_len;
+ }
+
+ /* no match found */
+ ret = -ENOENT;
+
+out:
+ return ret;
+}
+
+static int ath11k_core_fetch_board_data_api_n(struct ath11k_base *ab,
+ struct ath11k_board_data *bd,
+ const char *boardname,
+ int ie_id_match,
+ int name_id,
+ int data_id)
+{
+ size_t len, magic_len;
+ const u8 *data;
+ char *filename, filepath[100];
+ size_t ie_len;
+ struct ath11k_fw_ie *hdr;
+ int ret, ie_id;
+
+ filename = ATH11K_BOARD_API2_FILE;
+
+ if (!bd->fw)
+ bd->fw = ath11k_core_firmware_request(ab, filename);
+
+ if (IS_ERR(bd->fw))
+ return PTR_ERR(bd->fw);
+
+ data = bd->fw->data;
+ len = bd->fw->size;
+
+ ath11k_core_create_firmware_path(ab, filename,
+ filepath, sizeof(filepath));
+
+ /* the magic string is padded with an extra NUL byte */
+ magic_len = strlen(ATH11K_BOARD_MAGIC) + 1;
+ if (len < magic_len) {
+ ath11k_err(ab, "failed to find magic value in %s, file too short: %zu\n",
+ filepath, len);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ if (memcmp(data, ATH11K_BOARD_MAGIC, magic_len)) {
+ ath11k_err(ab, "found invalid board magic\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /* magic is padded to 4 bytes */
+ magic_len = ALIGN(magic_len, 4);
+ if (len < magic_len) {
+ ath11k_err(ab, "failed: %s too small to contain board data, len: %zu\n",
+ filepath, len);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ data += magic_len;
+ len -= magic_len;
+
+ while (len > sizeof(struct ath11k_fw_ie)) {
+ hdr = (struct ath11k_fw_ie *)data;
+ ie_id = le32_to_cpu(hdr->id);
+ ie_len = le32_to_cpu(hdr->len);
+
+ len -= sizeof(*hdr);
+ data = hdr->data;
+
+ if (len < ALIGN(ie_len, 4)) {
+ ath11k_err(ab, "invalid length for board ie_id %d ie_len %zu len %zu\n",
+ ie_id, ie_len, len);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ if (ie_id == ie_id_match) {
+ ret = ath11k_core_parse_bd_ie_board(ab, bd, data,
+ ie_len,
+ boardname,
+ ie_id_match,
+ name_id,
+ data_id);
+ if (ret == -ENOENT)
+ /* no match found, continue */
+ goto next;
+ else if (ret)
+ /* there was an error, bail out */
+ goto err;
+ /* either found or error, so stop searching */
+ goto out;
+ }
+next:
+ /* jump over the padding */
+ ie_len = ALIGN(ie_len, 4);
+
+ len -= ie_len;
+ data += ie_len;
+ }
+
+out:
+ if (!bd->data || !bd->len) {
+ ath11k_dbg(ab, ATH11K_DBG_BOOT,
+ "failed to fetch %s for %s from %s\n",
+ ath11k_bd_ie_type_str(ie_id_match),
+ boardname, filepath);
+ ret = -ENODATA;
+ goto err;
+ }
+
+ return 0;
+
+err:
+ ath11k_core_free_bdf(ab, bd);
+ return ret;
+}
+
+int ath11k_core_fetch_board_data_api_1(struct ath11k_base *ab,
+ struct ath11k_board_data *bd,
+ const char *name)
+{
+ bd->fw = ath11k_core_firmware_request(ab, name);
+
+ if (IS_ERR(bd->fw))
+ return PTR_ERR(bd->fw);
+
+ bd->data = bd->fw->data;
+ bd->len = bd->fw->size;
+
+ return 0;
+}
+
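+/* Board data lookup order: exact board name, then the name without the
+ * variant suffix, then a bus/chip-id name, and finally the legacy API 1
+ * default board file.
+ */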
+#define BOARD_NAME_SIZE 200
+int ath11k_core_fetch_bdf(struct ath11k_base *ab, struct ath11k_board_data *bd)
+{
+ char *boardname = NULL, *fallback_boardname = NULL, *chip_id_boardname = NULL;
+ char *filename, filepath[100];
+ int bd_api;
+ int ret = 0;
+
+ filename = ATH11K_BOARD_API2_FILE;
+ boardname = kzalloc(BOARD_NAME_SIZE, GFP_KERNEL);
+ if (!boardname) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ ret = ath11k_core_create_board_name(ab, boardname, BOARD_NAME_SIZE);
+ if (ret) {
+ ath11k_err(ab, "failed to create board name: %d", ret);
+ goto exit;
+ }
+
+ bd_api = 2;
+ ret = ath11k_core_fetch_board_data_api_n(ab, bd, boardname,
+ ATH11K_BD_IE_BOARD,
+ ATH11K_BD_IE_BOARD_NAME,
+ ATH11K_BD_IE_BOARD_DATA);
+ if (!ret)
+ goto exit;
+
+ fallback_boardname = kzalloc(BOARD_NAME_SIZE, GFP_KERNEL);
+ if (!fallback_boardname) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ ret = ath11k_core_create_fallback_board_name(ab, fallback_boardname,
+ BOARD_NAME_SIZE);
+ if (ret) {
+ ath11k_err(ab, "failed to create fallback board name: %d", ret);
+ goto exit;
+ }
+
+ ret = ath11k_core_fetch_board_data_api_n(ab, bd, fallback_boardname,
+ ATH11K_BD_IE_BOARD,
+ ATH11K_BD_IE_BOARD_NAME,
+ ATH11K_BD_IE_BOARD_DATA);
+ if (!ret)
+ goto exit;
+
+ chip_id_boardname = kzalloc(BOARD_NAME_SIZE, GFP_KERNEL);
+ if (!chip_id_boardname) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ ret = ath11k_core_create_chip_id_board_name(ab, chip_id_boardname,
+ BOARD_NAME_SIZE);
+ if (ret) {
+ ath11k_err(ab, "failed to create chip id board name: %d", ret);
+ goto exit;
+ }
+
+ ret = ath11k_core_fetch_board_data_api_n(ab, bd, chip_id_boardname,
+ ATH11K_BD_IE_BOARD,
+ ATH11K_BD_IE_BOARD_NAME,
+ ATH11K_BD_IE_BOARD_DATA);
+ if (!ret)
+ goto exit;
+
+ bd_api = 1;
+ ret = ath11k_core_fetch_board_data_api_1(ab, bd, ATH11K_DEFAULT_BOARD_FILE);
+ if (ret) {
+ ath11k_core_create_firmware_path(ab, filename,
+ filepath, sizeof(filepath));
+ ath11k_err(ab, "failed to fetch board data for %s from %s\n",
+ boardname, filepath);
+ if (memcmp(boardname, fallback_boardname, strlen(boardname)))
+ ath11k_err(ab, "failed to fetch board data for %s from %s\n",
+ fallback_boardname, filepath);
+
+ ath11k_err(ab, "failed to fetch board data for %s from %s\n",
+ chip_id_boardname, filepath);
+
+ ath11k_err(ab, "failed to fetch board.bin from %s\n",
+ ab->hw_params.fw.dir);
+ }
+
+exit:
+ kfree(boardname);
+ kfree(fallback_boardname);
+ kfree(chip_id_boardname);
+
+ if (!ret)
+ ath11k_dbg(ab, ATH11K_DBG_BOOT, "using board api %d\n", bd_api);
+
+ return ret;
+}
+
+int ath11k_core_fetch_regdb(struct ath11k_base *ab, struct ath11k_board_data *bd)
+{
+ char boardname[BOARD_NAME_SIZE], default_boardname[BOARD_NAME_SIZE];
+ int ret;
+
+ ret = ath11k_core_create_board_name(ab, boardname, BOARD_NAME_SIZE);
+ if (ret) {
+ ath11k_dbg(ab, ATH11K_DBG_BOOT,
+ "failed to create board name for regdb: %d", ret);
+ goto exit;
+ }
+
+ ret = ath11k_core_fetch_board_data_api_n(ab, bd, boardname,
+ ATH11K_BD_IE_REGDB,
+ ATH11K_BD_IE_REGDB_NAME,
+ ATH11K_BD_IE_REGDB_DATA);
+ if (!ret)
+ goto exit;
+
+ ret = ath11k_core_create_bus_type_board_name(ab, default_boardname,
+ BOARD_NAME_SIZE);
+ if (ret) {
+ ath11k_dbg(ab, ATH11K_DBG_BOOT,
+ "failed to create default board name for regdb: %d", ret);
+ goto exit;
+ }
+
+ ret = ath11k_core_fetch_board_data_api_n(ab, bd, default_boardname,
+ ATH11K_BD_IE_REGDB,
+ ATH11K_BD_IE_REGDB_NAME,
+ ATH11K_BD_IE_REGDB_DATA);
+ if (!ret)
+ goto exit;
+
+ ret = ath11k_core_fetch_board_data_api_1(ab, bd, ATH11K_REGDB_FILE_NAME);
+ if (ret)
+ ath11k_dbg(ab, ATH11K_DBG_BOOT, "failed to fetch %s from %s\n",
+ ATH11K_REGDB_FILE_NAME, ab->hw_params.fw.dir);
+
+exit:
+ if (!ret)
+ ath11k_dbg(ab, ATH11K_DBG_BOOT, "fetched regdb\n");
+
+ return ret;
+}
+
+static void ath11k_core_stop(struct ath11k_base *ab)
+{
+ if (!test_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags))
+ ath11k_qmi_firmware_stop(ab);
+
+ ath11k_hif_stop(ab);
+ ath11k_wmi_detach(ab);
+ ath11k_dp_pdev_reo_cleanup(ab);
+
+ /* De-Init of components as needed */
+}
+
+static int ath11k_core_soc_create(struct ath11k_base *ab)
+{
+ int ret;
+
+ if (ath11k_ftm_mode) {
+ ab->fw_mode = ATH11K_FIRMWARE_MODE_FTM;
+ ath11k_info(ab, "Booting in factory test mode\n");
+ }
+
+ ret = ath11k_qmi_init_service(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to initialize qmi :%d\n", ret);
+ return ret;
+ }
+
+ ret = ath11k_debugfs_soc_create(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to create ath11k debugfs\n");
+ goto err_qmi_deinit;
+ }
+
+ ret = ath11k_hif_power_up(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to power up :%d\n", ret);
+ goto err_debugfs_reg;
+ }
+
+ return 0;
+
+err_debugfs_reg:
+ ath11k_debugfs_soc_destroy(ab);
+err_qmi_deinit:
+ ath11k_qmi_deinit_service(ab);
+ return ret;
+}
+
+static void ath11k_core_soc_destroy(struct ath11k_base *ab)
+{
+ ath11k_debugfs_soc_destroy(ab);
+ ath11k_dp_free(ab);
+ ath11k_reg_free(ab);
+ ath11k_qmi_deinit_service(ab);
+}
+
+static int ath11k_core_pdev_create(struct ath11k_base *ab)
+{
+ int ret;
+
+ ret = ath11k_debugfs_pdev_create(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to create core pdev debugfs: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath11k_dp_pdev_alloc(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to attach DP pdev: %d\n", ret);
+ goto err_pdev_debug;
+ }
+
+ ret = ath11k_mac_register(ab);
+ if (ret) {
+ ath11k_err(ab, "failed register the radio with mac80211: %d\n", ret);
+ goto err_dp_pdev_free;
+ }
+
+ ret = ath11k_thermal_register(ab);
+ if (ret) {
+ ath11k_err(ab, "could not register thermal device: %d\n",
+ ret);
+ goto err_mac_unregister;
+ }
+
+ ret = ath11k_spectral_init(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to init spectral %d\n", ret);
+ goto err_thermal_unregister;
+ }
+
+ return 0;
+
+err_thermal_unregister:
+ ath11k_thermal_unregister(ab);
+err_mac_unregister:
+ ath11k_mac_unregister(ab);
+err_dp_pdev_free:
+ ath11k_dp_pdev_free(ab);
+err_pdev_debug:
+ ath11k_debugfs_pdev_destroy(ab);
+
+ return ret;
+}
+
+static void ath11k_core_pdev_suspend_target(struct ath11k_base *ab)
+{
+ struct ath11k *ar;
+ struct ath11k_pdev *pdev;
+ unsigned long time_left;
+ int ret;
+ int i;
+
+ if (!ab->hw_params.pdev_suspend)
+ return;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = &ab->pdevs[i];
+ ar = pdev->ar;
+
+ reinit_completion(&ab->htc_suspend);
+
+ ret = ath11k_wmi_pdev_suspend(ar, WMI_PDEV_SUSPEND_AND_DISABLE_INTR,
+ pdev->pdev_id);
+ if (ret) {
+ ath11k_warn(ab, "could not suspend target :%d\n", ret);
+ /* pointless to try other pdevs */
+ return;
+ }
+
+ time_left = wait_for_completion_timeout(&ab->htc_suspend, 3 * HZ);
+
+ if (!time_left) {
+ ath11k_warn(ab, "suspend timed out - target pause event never came\n");
+ /* pointless to try other pdevs */
+ return;
+ }
+ }
+}
+
+static void ath11k_core_pdev_destroy(struct ath11k_base *ab)
+{
+ ath11k_spectral_deinit(ab);
+ ath11k_thermal_unregister(ab);
+ ath11k_mac_unregister(ab);
+ ath11k_core_pdev_suspend_target(ab);
+ ath11k_hif_irq_disable(ab);
+ ath11k_dp_pdev_free(ab);
+ ath11k_debugfs_pdev_destroy(ab);
+}
+
+static int ath11k_core_start(struct ath11k_base *ab)
+{
+ int ret;
+
+ ret = ath11k_wmi_attach(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to attach wmi: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath11k_htc_init(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to init htc: %d\n", ret);
+ goto err_wmi_detach;
+ }
+
+ ret = ath11k_hif_start(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to start HIF: %d\n", ret);
+ goto err_wmi_detach;
+ }
+
+ ret = ath11k_htc_wait_target(&ab->htc);
+ if (ret) {
+ ath11k_err(ab, "failed to connect to HTC: %d\n", ret);
+ goto err_hif_stop;
+ }
+
+ ret = ath11k_dp_htt_connect(&ab->dp);
+ if (ret) {
+ ath11k_err(ab, "failed to connect to HTT: %d\n", ret);
+ goto err_hif_stop;
+ }
+
+ ret = ath11k_wmi_connect(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to connect wmi: %d\n", ret);
+ goto err_hif_stop;
+ }
+
+ ret = ath11k_htc_start(&ab->htc);
+ if (ret) {
+ ath11k_err(ab, "failed to start HTC: %d\n", ret);
+ goto err_hif_stop;
+ }
+
+ ret = ath11k_wmi_wait_for_service_ready(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to receive wmi service ready event: %d\n",
+ ret);
+ goto err_hif_stop;
+ }
+
+ ret = ath11k_mac_allocate(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to create new hw device with mac80211 :%d\n",
+ ret);
+ goto err_hif_stop;
+ }
+
+ ath11k_dp_pdev_pre_alloc(ab);
+
+ ret = ath11k_dp_pdev_reo_setup(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to initialize reo destination rings: %d\n", ret);
+ goto err_mac_destroy;
+ }
+
+ ret = ath11k_wmi_cmd_init(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to send wmi init cmd: %d\n", ret);
+ goto err_reo_cleanup;
+ }
+
+ ret = ath11k_wmi_wait_for_unified_ready(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to receive wmi unified ready event: %d\n",
+ ret);
+ goto err_reo_cleanup;
+ }
+
+ /* put the hardware into DBS mode */
+ if (ab->hw_params.single_pdev_only && ab->hw_params.num_rxdma_per_pdev > 1) {
+ ret = ath11k_wmi_set_hw_mode(ab, WMI_HOST_HW_MODE_DBS);
+ if (ret) {
+ ath11k_err(ab, "failed to send dbs mode: %d\n", ret);
+ goto err_hif_stop;
+ }
+ }
+
+ ret = ath11k_dp_tx_htt_h2t_ver_req_msg(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to send htt version request message: %d\n",
+ ret);
+ goto err_reo_cleanup;
+ }
+
+ return 0;
+
+err_reo_cleanup:
+ ath11k_dp_pdev_reo_cleanup(ab);
+err_mac_destroy:
+ ath11k_mac_destroy(ab);
+err_hif_stop:
+ ath11k_hif_stop(ab);
+err_wmi_detach:
+ ath11k_wmi_detach(ab);
+
+ return ret;
+}
+
+static int ath11k_core_start_firmware(struct ath11k_base *ab,
+ enum ath11k_firmware_mode mode)
+{
+ int ret;
+
+ ath11k_ce_get_shadow_config(ab, &ab->qmi.ce_cfg.shadow_reg_v2,
+ &ab->qmi.ce_cfg.shadow_reg_v2_len);
+
+ ret = ath11k_qmi_firmware_start(ab, mode);
+ if (ret) {
+ ath11k_err(ab, "failed to send firmware start: %d\n", ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+int ath11k_core_qmi_firmware_ready(struct ath11k_base *ab)
+{
+ int ret;
+
+ switch (ath11k_crypto_mode) {
+ case ATH11K_CRYPT_MODE_SW:
+ set_bit(ATH11K_FLAG_HW_CRYPTO_DISABLED, &ab->dev_flags);
+ set_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags);
+ break;
+ case ATH11K_CRYPT_MODE_HW:
+ clear_bit(ATH11K_FLAG_HW_CRYPTO_DISABLED, &ab->dev_flags);
+ clear_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags);
+ break;
+ default:
+ ath11k_info(ab, "invalid crypto_mode: %d\n", ath11k_crypto_mode);
+ return -EINVAL;
+ }
+
+ ret = ath11k_core_start_firmware(ab, ab->fw_mode);
+ if (ret) {
+ ath11k_err(ab, "failed to start firmware: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath11k_ce_init_pipes(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to initialize CE: %d\n", ret);
+ goto err_firmware_stop;
+ }
+
+ ret = ath11k_dp_alloc(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to init DP: %d\n", ret);
+ goto err_firmware_stop;
+ }
+
+ if (ath11k_frame_mode == ATH11K_HW_TXRX_RAW)
+ set_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags);
+
+ mutex_lock(&ab->core_lock);
+ ret = ath11k_core_start(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to start core: %d\n", ret);
+ goto err_dp_free;
+ }
+
+ ret = ath11k_core_pdev_create(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to create pdev core: %d\n", ret);
+ goto err_core_stop;
+ }
+ ath11k_hif_irq_enable(ab);
+ mutex_unlock(&ab->core_lock);
+
+ return 0;
+
+err_core_stop:
+ ath11k_core_stop(ab);
+ ath11k_mac_destroy(ab);
+err_dp_free:
+ ath11k_dp_free(ab);
+ mutex_unlock(&ab->core_lock);
+err_firmware_stop:
+ ath11k_qmi_firmware_stop(ab);
+
+ return ret;
+}
+
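+/* Tear down datapath and WMI state after a firmware crash and bring the
+ * device back up via the QMI firmware-ready path.
+ */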
+static int ath11k_core_reconfigure_on_crash(struct ath11k_base *ab)
+{
+ int ret;
+
+ mutex_lock(&ab->core_lock);
+ ath11k_thermal_unregister(ab);
+ ath11k_dp_pdev_free(ab);
+ ath11k_spectral_deinit(ab);
+ ath11k_ce_cleanup_pipes(ab);
+ ath11k_wmi_detach(ab);
+ ath11k_dp_pdev_reo_cleanup(ab);
+ mutex_unlock(&ab->core_lock);
+
+ ath11k_dp_free(ab);
+ ath11k_hal_srng_clear(ab);
+
+ ab->free_vdev_map = (1LL << (ab->num_radios * TARGET_NUM_VDEVS(ab))) - 1;
+
+ clear_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags);
+
+ ret = ath11k_core_qmi_firmware_ready(ab);
+ if (ret)
+ goto err_hal_srng_deinit;
+
+ clear_bit(ATH11K_FLAG_RECOVERY, &ab->dev_flags);
+
+ return 0;
+
+err_hal_srng_deinit:
+ ath11k_hal_srng_deinit(ab);
+ return ret;
+}
+
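+/* Stop all activity on a single radio: finish scans, drop peers, cancel
+ * pending work and unpublish the pdev from RCU readers. Called with
+ * conf_mutex held.
+ */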
+void ath11k_core_halt(struct ath11k *ar)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct list_head *pos, *n;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ar->num_created_vdevs = 0;
+ ar->allocated_vdev_map = 0;
+
+ ath11k_mac_scan_finish(ar);
+ ath11k_mac_peer_cleanup_all(ar);
+ cancel_delayed_work_sync(&ar->scan.timeout);
+ cancel_work_sync(&ar->channel_update_work);
+ cancel_work_sync(&ar->regd_update_work);
+ cancel_work_sync(&ab->update_11d_work);
+
+ rcu_assign_pointer(ab->pdevs_active[ar->pdev_idx], NULL);
+ synchronize_rcu();
+
+ spin_lock_bh(&ar->data_lock);
+ list_for_each_safe(pos, n, &ar->arvifs)
+ list_del_init(pos);
+ spin_unlock_bh(&ar->data_lock);
+
+ idr_init(&ar->txmgmt_idr);
+}
+
+static void ath11k_update_11d(struct work_struct *work)
+{
+ struct ath11k_base *ab = container_of(work, struct ath11k_base, update_11d_work);
+ struct ath11k *ar;
+ struct ath11k_pdev *pdev;
+ int ret, i;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = &ab->pdevs[i];
+ ar = pdev->ar;
+
+ spin_lock_bh(&ab->base_lock);
+ memcpy(&ar->alpha2, &ab->new_alpha2, 2);
+ spin_unlock_bh(&ab->base_lock);
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "update 11d new cc %c%c for pdev %d\n",
+ ar->alpha2[0], ar->alpha2[1], i);
+
+ ret = ath11k_reg_set_cc(ar);
+ if (ret)
+ ath11k_warn(ar->ab,
+ "pdev id %d failed set current country code: %d\n",
+ i, ret);
+ }
+}
+
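+/* Firmware has crashed: bump the crash counter, stop tx on every active
+ * radio and complete all outstanding completions so that no thread
+ * stays blocked across the restart.
+ */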
+void ath11k_core_pre_reconfigure_recovery(struct ath11k_base *ab)
+{
+ struct ath11k *ar;
+ struct ath11k_pdev *pdev;
+ int i;
+
+ spin_lock_bh(&ab->base_lock);
+ ab->stats.fw_crash_counter++;
+ spin_unlock_bh(&ab->base_lock);
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = &ab->pdevs[i];
+ ar = pdev->ar;
+ if (!ar || ar->state == ATH11K_STATE_OFF ||
+ ar->state == ATH11K_STATE_FTM)
+ continue;
+
+ ieee80211_stop_queues(ar->hw);
+ ath11k_mac_drain_tx(ar);
+ ar->state_11d = ATH11K_11D_IDLE;
+ complete(&ar->completed_11d_scan);
+ complete(&ar->scan.started);
+ complete_all(&ar->scan.completed);
+ complete(&ar->scan.on_channel);
+ complete(&ar->peer_assoc_done);
+ complete(&ar->peer_delete_done);
+ complete(&ar->install_key_done);
+ complete(&ar->vdev_setup_done);
+ complete(&ar->vdev_delete_done);
+ complete(&ar->bss_survey_done);
+ complete(&ar->thermal.wmi_sync);
+
+ wake_up(&ar->dp.tx_empty_waitq);
+ idr_for_each(&ar->txmgmt_idr,
+ ath11k_mac_tx_mgmt_pending_free, ar);
+ idr_destroy(&ar->txmgmt_idr);
+ wake_up(&ar->txmgmt_empty_waitq);
+
+ ar->monitor_vdev_id = -1;
+ clear_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags);
+ clear_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags);
+ }
+
+ wake_up(&ab->wmi_ab.tx_credits_wq);
+ wake_up(&ab->peer_mapping_wq);
+
+ reinit_completion(&ab->driver_recovery);
+}
+
+static void ath11k_core_post_reconfigure_recovery(struct ath11k_base *ab)
+{
+ struct ath11k *ar;
+ struct ath11k_pdev *pdev;
+ int i;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = &ab->pdevs[i];
+ ar = pdev->ar;
+ if (!ar || ar->state == ATH11K_STATE_OFF)
+ continue;
+
+ mutex_lock(&ar->conf_mutex);
+
+ switch (ar->state) {
+ case ATH11K_STATE_ON:
+ ar->state = ATH11K_STATE_RESTARTING;
+ ath11k_core_halt(ar);
+ ieee80211_restart_hw(ar->hw);
+ break;
+ case ATH11K_STATE_OFF:
+ ath11k_warn(ab,
+ "cannot restart radio %d that hasn't been started\n",
+ i);
+ break;
+ case ATH11K_STATE_RESTARTING:
+ break;
+ case ATH11K_STATE_RESTARTED:
+ ar->state = ATH11K_STATE_WEDGED;
+ fallthrough;
+ case ATH11K_STATE_WEDGED:
+ ath11k_warn(ab,
+ "device is wedged, will not restart radio %d\n", i);
+ break;
+ case ATH11K_STATE_FTM:
+ ath11k_dbg(ab, ATH11K_DBG_TESTMODE,
+ "fw mode reset done radio %d\n", i);
+ break;
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+ }
+ complete(&ab->driver_recovery);
+}
+
+static void ath11k_core_restart(struct work_struct *work)
+{
+ struct ath11k_base *ab = container_of(work, struct ath11k_base, restart_work);
+ int ret;
+
+ ret = ath11k_core_reconfigure_on_crash(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to reconfigure driver on crash recovery\n");
+ return;
+ }
+
+ if (ab->is_reset)
+ complete_all(&ab->reconfigure_complete);
+
+ if (!ab->is_reset)
+ ath11k_core_post_reconfigure_recovery(ab);
+
+ complete(&ab->restart_completed);
+}
+
+static void ath11k_core_reset(struct work_struct *work)
+{
+ struct ath11k_base *ab = container_of(work, struct ath11k_base, reset_work);
+ int reset_count, fail_cont_count;
+ long time_left;
+
+ if (!(test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags))) {
+ ath11k_warn(ab, "ignore reset dev flags 0x%lx\n", ab->dev_flags);
+ return;
+ }
+
+ /* Sometimes recovery fails and every subsequent attempt fails as
+ * well. Bail out here to avoid looping forever on a recovery that
+ * can never succeed.
+ */
+ fail_cont_count = atomic_read(&ab->fail_cont_count);
+
+ if (fail_cont_count >= ATH11K_RESET_MAX_FAIL_COUNT_FINAL)
+ return;
+
+ if (fail_cont_count >= ATH11K_RESET_MAX_FAIL_COUNT_FIRST &&
+ time_before(jiffies, ab->reset_fail_timeout))
+ return;
+
+ reset_count = atomic_inc_return(&ab->reset_count);
+
+ if (reset_count > 1) {
+ /* Sometimes another reset worker is scheduled before the previous
+ * one has completed; the second worker would then destroy the state
+ * the first one is still using. The wait below avoids that.
+ */
+ ath11k_warn(ab, "already resetting count %d\n", reset_count);
+
+ reinit_completion(&ab->reset_complete);
+ time_left = wait_for_completion_timeout(&ab->reset_complete,
+ ATH11K_RESET_TIMEOUT_HZ);
+
+ if (time_left) {
+ ath11k_dbg(ab, ATH11K_DBG_BOOT, "to skip reset\n");
+ atomic_dec(&ab->reset_count);
+ return;
+ }
+
+ ab->reset_fail_timeout = jiffies + ATH11K_RESET_FAIL_TIMEOUT_HZ;
+ /* Record the continuous recovery failure count when recovery fails */
+ atomic_inc(&ab->fail_cont_count);
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_BOOT, "reset starting\n");
+
+ ab->is_reset = true;
+ atomic_set(&ab->recovery_count, 0);
+ reinit_completion(&ab->recovery_start);
+ atomic_set(&ab->recovery_start_count, 0);
+
+ ath11k_coredump_collect(ab);
+ ath11k_core_pre_reconfigure_recovery(ab);
+
+ reinit_completion(&ab->reconfigure_complete);
+ ath11k_core_post_reconfigure_recovery(ab);
+
+ ath11k_dbg(ab, ATH11K_DBG_BOOT, "waiting recovery start...\n");
+
+ time_left = wait_for_completion_timeout(&ab->recovery_start,
+ ATH11K_RECOVER_START_TIMEOUT_HZ);
+
+ ath11k_hif_irq_disable(ab);
+ ath11k_hif_ce_irq_disable(ab);
+
+ ath11k_hif_power_down(ab, false);
+ ath11k_hif_power_up(ab);
+
+ ath11k_dbg(ab, ATH11K_DBG_BOOT, "reset started\n");
+}
+
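+/* Look up ab->hw_rev in the static ath11k_hw_params table and take a
+ * copy, so per-device fields can later be adjusted without modifying
+ * the shared table.
+ */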
+static int ath11k_init_hw_params(struct ath11k_base *ab)
+{
+ const struct ath11k_hw_params *hw_params = NULL;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ath11k_hw_params); i++) {
+ hw_params = &ath11k_hw_params[i];
+
+ if (hw_params->hw_rev == ab->hw_rev)
+ break;
+ }
+
+ if (i == ARRAY_SIZE(ath11k_hw_params)) {
+ ath11k_err(ab, "Unsupported hardware version: 0x%x\n", ab->hw_rev);
+ return -EINVAL;
+ }
+
+ ab->hw_params = *hw_params;
+
+ ath11k_info(ab, "%s\n", ab->hw_params.name);
+
+ return 0;
+}
+
+int ath11k_core_pre_init(struct ath11k_base *ab)
+{
+ int ret;
+
+ ret = ath11k_init_hw_params(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to get hw params: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath11k_fw_pre_init(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to pre init firmware: %d", ret);
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(ath11k_core_pre_init);
+
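+/* Ordinary suspend uses the policy selected via the DMI quirk table;
+ * hibernation always falls back to the default (full power-down)
+ * policy.
+ */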
+static int ath11k_core_pm_notify(struct notifier_block *nb,
+ unsigned long action, void *nouse)
+{
+ struct ath11k_base *ab = container_of(nb, struct ath11k_base,
+ pm_nb);
+
+ switch (action) {
+ case PM_SUSPEND_PREPARE:
+ ab->actual_pm_policy = ab->pm_policy;
+ break;
+ case PM_HIBERNATION_PREPARE:
+ ab->actual_pm_policy = ATH11K_PM_DEFAULT;
+ break;
+ default:
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static int ath11k_core_pm_notifier_register(struct ath11k_base *ab)
+{
+ ab->pm_nb.notifier_call = ath11k_core_pm_notify;
+ return register_pm_notifier(&ab->pm_nb);
+}
+
+void ath11k_core_pm_notifier_unregister(struct ath11k_base *ab)
+{
+ int ret;
+
+ ret = unregister_pm_notifier(&ab->pm_nb);
+ if (ret)
+ /* just warn here, nothing more can be done on failure */
+ ath11k_warn(ab, "failed to unregister PM notifier %d\n", ret);
+}
+EXPORT_SYMBOL(ath11k_core_pm_notifier_unregister);
+
+int ath11k_core_init(struct ath11k_base *ab)
+{
+ const struct dmi_system_id *dmi_id;
+ int ret;
+
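+ /* A DMI quirk table may force a platform-specific suspend policy;
+ * without a match, fall back to the default policy.
+ */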
+ dmi_id = dmi_first_match(ath11k_pm_quirk_table);
+ if (dmi_id)
+ ab->pm_policy = (kernel_ulong_t)dmi_id->driver_data;
+ else
+ ab->pm_policy = ATH11K_PM_DEFAULT;
+
+ ath11k_dbg(ab, ATH11K_DBG_BOOT, "pm policy %u\n", ab->pm_policy);
+
+ ret = ath11k_core_pm_notifier_register(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to register PM notifier: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath11k_core_soc_create(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to create soc core: %d\n", ret);
+ goto err_unregister_pm_notifier;
+ }
+
+ return 0;
+
+err_unregister_pm_notifier:
+ ath11k_core_pm_notifier_unregister(ab);
+
+ return ret;
+}
+EXPORT_SYMBOL(ath11k_core_init);
+
+void ath11k_core_deinit(struct ath11k_base *ab)
+{
+ mutex_lock(&ab->core_lock);
+
+ ath11k_core_pdev_destroy(ab);
+ ath11k_core_stop(ab);
+
+ mutex_unlock(&ab->core_lock);
+
+ ath11k_hif_power_down(ab, false);
+ ath11k_mac_destroy(ab);
+ ath11k_core_soc_destroy(ab);
+ ath11k_core_pm_notifier_unregister(ab);
+}
+EXPORT_SYMBOL(ath11k_core_deinit);
+
+void ath11k_core_free(struct ath11k_base *ab)
+{
+ destroy_workqueue(ab->workqueue_aux);
+ destroy_workqueue(ab->workqueue);
+
+ kfree(ab);
+}
+EXPORT_SYMBOL(ath11k_core_free);
+
+struct ath11k_base *ath11k_core_alloc(struct device *dev, size_t priv_size,
+ enum ath11k_bus bus)
+{
+ struct ath11k_base *ab;
+
+ ab = kzalloc(sizeof(*ab) + priv_size, GFP_KERNEL);
+ if (!ab)
+ return NULL;
+
+ init_completion(&ab->driver_recovery);
+
+ ab->workqueue = create_singlethread_workqueue("ath11k_wq");
+ if (!ab->workqueue)
+ goto err_sc_free;
+
+ ab->workqueue_aux = create_singlethread_workqueue("ath11k_aux_wq");
+ if (!ab->workqueue_aux)
+ goto err_free_wq;
+
+ mutex_init(&ab->core_lock);
+ mutex_init(&ab->tbl_mtx_lock);
+ spin_lock_init(&ab->base_lock);
+ mutex_init(&ab->vdev_id_11d_lock);
+ init_completion(&ab->reset_complete);
+ init_completion(&ab->reconfigure_complete);
+ init_completion(&ab->recovery_start);
+
+ INIT_LIST_HEAD(&ab->peers);
+ init_waitqueue_head(&ab->peer_mapping_wq);
+ init_waitqueue_head(&ab->wmi_ab.tx_credits_wq);
+ init_waitqueue_head(&ab->qmi.cold_boot_waitq);
+ INIT_WORK(&ab->restart_work, ath11k_core_restart);
+ INIT_WORK(&ab->update_11d_work, ath11k_update_11d);
+ INIT_WORK(&ab->reset_work, ath11k_core_reset);
+ INIT_WORK(&ab->dump_work, ath11k_coredump_upload);
+ timer_setup(&ab->rx_replenish_retry, ath11k_ce_rx_replenish_retry, 0);
+ init_completion(&ab->htc_suspend);
+ init_completion(&ab->wow.wakeup_completed);
+ init_completion(&ab->restart_completed);
+
+ ab->dev = dev;
+ ab->hif.bus = bus;
+
+ return ab;
+
+err_free_wq:
+ destroy_workqueue(ab->workqueue);
+err_sc_free:
+ kfree(ab);
+ return NULL;
+}
+EXPORT_SYMBOL(ath11k_core_alloc);
+
+MODULE_DESCRIPTION("Core module for Qualcomm Atheros 802.11ax wireless LAN cards.");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h
new file mode 100644
index 000000000000..e8780b05ce11
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/core.h
@@ -0,0 +1,1354 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef ATH11K_CORE_H
+#define ATH11K_CORE_H
+
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/bitfield.h>
+#include <linux/dmi.h>
+#include <linux/ctype.h>
+#include <linux/rhashtable.h>
+#include <linux/average.h>
+#include <linux/firmware.h>
+#include <linux/suspend.h>
+#include <linux/of.h>
+
+#include "qmi.h"
+#include "htc.h"
+#include "wmi.h"
+#include "hal.h"
+#include "dp.h"
+#include "ce.h"
+#include "mac.h"
+#include "hw.h"
+#include "hal_rx.h"
+#include "reg.h"
+#include "thermal.h"
+#include "dbring.h"
+#include "spectral.h"
+#include "wow.h"
+#include "fw.h"
+#include "coredump.h"
+
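+/* Pack value _v into register field _f, using the field's _f##_LSB shift
+ * and _f##_MASK bits.
+ */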
+#define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK)
+
+#define ATH11K_TX_MGMT_NUM_PENDING_MAX 512
+
+#define ATH11K_TX_MGMT_TARGET_MAX_SUPPORT_WMI 64
+
+/* Pending management packets threshold for dropping probe responses */
+#define ATH11K_PRB_RSP_DROP_THRESHOLD ((ATH11K_TX_MGMT_TARGET_MAX_SUPPORT_WMI * 3) / 4)
+
+#define ATH11K_INVALID_HW_MAC_ID 0xFF
+#define ATH11K_CONNECTION_LOSS_HZ (3 * HZ)
+
+/* SMBIOS type containing Board Data File Name Extension */
+#define ATH11K_SMBIOS_BDF_EXT_TYPE 0xF8
+
+/* SMBIOS type structure length (excluding strings-set) */
+#define ATH11K_SMBIOS_BDF_EXT_LENGTH 0x9
+
+/* The magic used by QCA spec */
+#define ATH11K_SMBIOS_BDF_EXT_MAGIC "BDF_"
+
+extern unsigned int ath11k_frame_mode;
+extern bool ath11k_ftm_mode;
+
+#define ATH11K_SCAN_TIMEOUT_HZ (20 * HZ)
+
+#define ATH11K_MON_TIMER_INTERVAL 10
+#define ATH11K_RESET_TIMEOUT_HZ (20 * HZ)
+#define ATH11K_RESET_MAX_FAIL_COUNT_FIRST 3
+#define ATH11K_RESET_MAX_FAIL_COUNT_FINAL 5
+#define ATH11K_RESET_FAIL_TIMEOUT_HZ (20 * HZ)
+#define ATH11K_RECONFIGURE_TIMEOUT_HZ (10 * HZ)
+#define ATH11K_RECOVER_START_TIMEOUT_HZ (20 * HZ)
+
+enum ath11k_supported_bw {
+ ATH11K_BW_20 = 0,
+ ATH11K_BW_40 = 1,
+ ATH11K_BW_80 = 2,
+ ATH11K_BW_160 = 3,
+};
+
+enum ath11k_bdf_search {
+ ATH11K_BDF_SEARCH_DEFAULT,
+ ATH11K_BDF_SEARCH_BUS_AND_BOARD,
+};
+
+enum wme_ac {
+ WME_AC_BE,
+ WME_AC_BK,
+ WME_AC_VI,
+ WME_AC_VO,
+ WME_NUM_AC
+};
+
+#define ATH11K_HT_MCS_MAX 7
+#define ATH11K_VHT_MCS_MAX 9
+#define ATH11K_HE_MCS_MAX 11
+
+enum ath11k_crypt_mode {
+ /* Only use hardware crypto engine */
+ ATH11K_CRYPT_MODE_HW,
+ /* Only use software crypto */
+ ATH11K_CRYPT_MODE_SW,
+};
+
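+/* Map an 802.11 TID to its WMM access category */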
+static inline enum wme_ac ath11k_tid_to_ac(u32 tid)
+{
+ return (((tid == 0) || (tid == 3)) ? WME_AC_BE :
+ ((tid == 1) || (tid == 2)) ? WME_AC_BK :
+ ((tid == 4) || (tid == 5)) ? WME_AC_VI :
+ WME_AC_VO);
+}
+
+enum ath11k_skb_flags {
+ ATH11K_SKB_HW_80211_ENCAP = BIT(0),
+ ATH11K_SKB_CIPHER_SET = BIT(1),
+};
+
+struct ath11k_skb_cb {
+ dma_addr_t paddr;
+ u8 eid;
+ u8 flags;
+ u32 cipher;
+ struct ath11k *ar;
+ struct ieee80211_vif *vif;
+} __packed;
+
+struct ath11k_skb_rxcb {
+ dma_addr_t paddr;
+ bool is_first_msdu;
+ bool is_last_msdu;
+ bool is_continuation;
+ bool is_mcbc;
+ bool is_eapol;
+ struct hal_rx_desc *rx_desc;
+ u8 err_rel_src;
+ u8 err_code;
+ u8 mac_id;
+ u8 unmapped;
+ u8 is_frag;
+ u8 tid;
+ u16 peer_id;
+ u16 seq_no;
+};
+
+enum ath11k_hw_rev {
+ ATH11K_HW_IPQ8074,
+ ATH11K_HW_QCA6390_HW20,
+ ATH11K_HW_IPQ6018_HW10,
+ ATH11K_HW_QCN9074_HW10,
+ ATH11K_HW_WCN6855_HW20,
+ ATH11K_HW_WCN6855_HW21,
+ ATH11K_HW_WCN6750_HW10,
+ ATH11K_HW_IPQ5018_HW10,
+ ATH11K_HW_QCA2066_HW21,
+ ATH11K_HW_QCA6698AQ_HW21,
+};
+
+enum ath11k_firmware_mode {
+ /* the default mode, standard 802.11 functionality */
+ ATH11K_FIRMWARE_MODE_NORMAL,
+
+ /* factory tests etc */
+ ATH11K_FIRMWARE_MODE_FTM,
+
+ /* Cold boot calibration */
+ ATH11K_FIRMWARE_MODE_COLD_BOOT = 7,
+};
+
+extern bool ath11k_cold_boot_cal;
+
+#define ATH11K_IRQ_NUM_MAX 52
+#define ATH11K_EXT_IRQ_NUM_MAX 16
+
+struct ath11k_ext_irq_grp {
+ struct ath11k_base *ab;
+ u32 irqs[ATH11K_EXT_IRQ_NUM_MAX];
+ u32 num_irq;
+ u32 grp_id;
+ u64 timestamp;
+ bool napi_enabled;
+ struct napi_struct napi;
+ struct net_device *napi_ndev;
+};
+
+enum ath11k_smbios_cc_type {
+ /* disable country code setting from SMBIOS */
+ ATH11K_SMBIOS_CC_DISABLE = 0,
+
+ /* set country code by ANSI country name, based on ISO3166-1 alpha2 */
+ ATH11K_SMBIOS_CC_ISO = 1,
+
+ /* worldwide regdomain */
+ ATH11K_SMBIOS_CC_WW = 2,
+};
+
+struct ath11k_smbios_bdf {
+ struct dmi_header hdr;
+
+ u8 features_disabled;
+
+ /* enum ath11k_smbios_cc_type */
+ u8 country_code_flag;
+
+ /* To set a specific country, first set country code
+ * flag=ATH11K_SMBIOS_CC_ISO, then set the country code value to the
+ * two ASCII letters: for the United States the value is 0x5553
+ * ("US", 'U' = 0x55, 'S' = 0x53); for Indonesia it is 0x4944
+ * ("ID", 'I' = 0x49, 'D' = 0x44). If the country code flag =
+ * ATH11K_SMBIOS_CC_WW, the worldwide regulatory setting is used.
+ */
+ u16 cc_code;
+
+ u8 bdf_enabled;
+ u8 bdf_ext[];
+} __packed;
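+
+/* Illustrative packing of cc_code for ATH11K_SMBIOS_CC_ISO: the first
+ * alpha2 letter occupies the high byte, e.g. ('U' << 8) | 'S' == 0x5553.
+ */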
+
+#define HEHANDLE_CAP_PHYINFO_SIZE 3
+#define HECAP_PHYINFO_SIZE 9
+#define HECAP_MACINFO_SIZE 5
+#define HECAP_TXRX_MCS_NSS_SIZE 2
+#define HECAP_PPET16_PPET8_MAX_SIZE 25
+
+#define HE_PPET16_PPET8_SIZE 8
+
+/* 802.11ax PPE (PPDU packet Extension) threshold */
+struct he_ppe_threshold {
+ u32 numss_m1;
+ u32 ru_mask;
+ u32 ppet16_ppet8_ru3_ru0[HE_PPET16_PPET8_SIZE];
+};
+
+struct ath11k_he {
+ u8 hecap_macinfo[HECAP_MACINFO_SIZE];
+ u32 hecap_rxmcsnssmap;
+ u32 hecap_txmcsnssmap;
+ u32 hecap_phyinfo[HEHANDLE_CAP_PHYINFO_SIZE];
+ struct he_ppe_threshold hecap_ppet;
+ u32 heop_param;
+};
+
+#define MAX_RADIOS 3
+
+/* ipq5018 hw param macros */
+#define MAX_RADIOS_5018 1
+#define CE_CNT_5018 6
+#define TARGET_CE_CNT_5018 9
+#define SVC_CE_MAP_LEN_5018 17
+#define RXDMA_PER_PDEV_5018 1
+
+enum {
+ WMI_HOST_TP_SCALE_MAX = 0,
+ WMI_HOST_TP_SCALE_50 = 1,
+ WMI_HOST_TP_SCALE_25 = 2,
+ WMI_HOST_TP_SCALE_12 = 3,
+ WMI_HOST_TP_SCALE_MIN = 4,
+ WMI_HOST_TP_SCALE_SIZE = 5,
+};
+
+enum ath11k_scan_state {
+ ATH11K_SCAN_IDLE,
+ ATH11K_SCAN_STARTING,
+ ATH11K_SCAN_RUNNING,
+ ATH11K_SCAN_ABORTING,
+};
+
+enum ath11k_11d_state {
+ ATH11K_11D_IDLE,
+ ATH11K_11D_PREPARING,
+ ATH11K_11D_RUNNING,
+};
+
+enum ath11k_dev_flags {
+ ATH11K_CAC_RUNNING,
+ ATH11K_FLAG_CORE_REGISTERED,
+ ATH11K_FLAG_CRASH_FLUSH,
+ ATH11K_FLAG_RAW_MODE,
+ ATH11K_FLAG_HW_CRYPTO_DISABLED,
+ ATH11K_FLAG_BTCOEX,
+ ATH11K_FLAG_RECOVERY,
+ ATH11K_FLAG_UNREGISTERING,
+ ATH11K_FLAG_REGISTERED,
+ ATH11K_FLAG_QMI_FAIL,
+ ATH11K_FLAG_HTC_SUSPEND_COMPLETE,
+ ATH11K_FLAG_CE_IRQ_ENABLED,
+ ATH11K_FLAG_EXT_IRQ_ENABLED,
+ ATH11K_FLAG_FIXED_MEM_RGN,
+ ATH11K_FLAG_DEVICE_INIT_DONE,
+ ATH11K_FLAG_MULTI_MSI_VECTORS,
+ ATH11K_FLAG_FTM_SEGMENTED,
+};
+
+enum ath11k_monitor_flags {
+ ATH11K_FLAG_MONITOR_CONF_ENABLED,
+ ATH11K_FLAG_MONITOR_STARTED,
+ ATH11K_FLAG_MONITOR_VDEV_CREATED,
+};
+
+#define ATH11K_IPV6_UC_TYPE 0
+#define ATH11K_IPV6_AC_TYPE 1
+
+#define ATH11K_IPV6_MAX_COUNT 16
+#define ATH11K_IPV4_MAX_COUNT 2
+
+struct ath11k_arp_ns_offload {
+ u8 ipv4_addr[ATH11K_IPV4_MAX_COUNT][4];
+ u32 ipv4_count;
+ u32 ipv6_count;
+ u8 ipv6_addr[ATH11K_IPV6_MAX_COUNT][16];
+ u8 self_ipv6_addr[ATH11K_IPV6_MAX_COUNT][16];
+ u8 ipv6_type[ATH11K_IPV6_MAX_COUNT];
+ bool ipv6_valid[ATH11K_IPV6_MAX_COUNT];
+ u8 mac_addr[ETH_ALEN];
+};
+
+struct ath11k_rekey_data {
+ u8 kck[NL80211_KCK_LEN];
+ u8 kek[NL80211_KEK_LEN];
+ u64 replay_ctr;
+ bool enable_offload;
+};
+
+/**
+ * struct ath11k_chan_power_info - TPE containing power info per channel chunk
+ * @chan_cfreq: channel center freq (MHz)
+ * e.g.
+ * channel 37/20 MHz, it is 6135
+ * channel 37/40 MHz, it is 6125
+ * channel 37/80 MHz, it is 6145
+ * channel 37/160 MHz, it is 6185
+ * @tx_power: transmit power (dBm)
+ */
+struct ath11k_chan_power_info {
+ u16 chan_cfreq;
+ s8 tx_power;
+};
+
+/* ath11k only deals with 160 MHz, so 8 subchannels */
+#define ATH11K_NUM_PWR_LEVELS 8
+
+/**
+ * struct ath11k_reg_tpc_power_info - regulatory TPC power info
+ * @is_psd_power: is PSD power or not
+ * @eirp_power: Maximum EIRP power (dBm), valid only if power is PSD
+ * @ap_power_type: type of power (SP/LPI/VLP)
+ * @num_pwr_levels: number of power levels
+ * @reg_max: Array of maximum TX power (dBm) per PSD value
+ * @tpe: TPE values processed from TPE IE
+ * @chan_power_info: power info to send to firmware
+ */
+struct ath11k_reg_tpc_power_info {
+ bool is_psd_power;
+ u8 eirp_power;
+ enum wmi_reg_6ghz_ap_type ap_power_type;
+ u8 num_pwr_levels;
+ u8 reg_max[ATH11K_NUM_PWR_LEVELS];
+ s8 tpe[ATH11K_NUM_PWR_LEVELS];
+ struct ath11k_chan_power_info chan_power_info[ATH11K_NUM_PWR_LEVELS];
+};
+
+struct ath11k_vif {
+ u32 vdev_id;
+ enum wmi_vdev_type vdev_type;
+ enum wmi_vdev_subtype vdev_subtype;
+ u32 beacon_interval;
+ u32 dtim_period;
+ u16 ast_hash;
+ u16 ast_idx;
+ u16 tcl_metadata;
+ u8 hal_addr_search_flags;
+ u8 search_type;
+
+ struct ath11k *ar;
+ struct ieee80211_vif *vif;
+
+ struct wmi_wmm_params_all_arg wmm_params;
+ struct wmi_wmm_params_all_arg muedca_params;
+ struct list_head list;
+ union {
+ struct {
+ u32 uapsd;
+ } sta;
+ struct {
+ /* 127 stations; wmi limit */
+ u8 tim_bitmap[16];
+ u8 tim_len;
+ u32 ssid_len;
+ u8 ssid[IEEE80211_MAX_SSID_LEN];
+ bool hidden_ssid;
+ /* P2P_IE with NoA attribute for P2P_GO case */
+ u32 noa_len;
+ u8 *noa_data;
+ } ap;
+ } u;
+
+ bool is_started;
+ bool is_up;
+ bool ftm_responder;
+ bool spectral_enabled;
+ bool ps;
+ u32 aid;
+ u8 bssid[ETH_ALEN];
+ struct cfg80211_bitrate_mask bitrate_mask;
+ struct delayed_work connection_loss_work;
+ struct work_struct bcn_tx_work;
+ int num_legacy_stations;
+ int rtscts_prot_mode;
+ int txpower;
+ bool rsnie_present;
+ bool wpaie_present;
+ bool bcca_zero_sent;
+ bool do_not_send_tmpl;
+ struct ath11k_arp_ns_offload arp_ns_offload;
+ struct ath11k_rekey_data rekey_data;
+ u32 num_stations;
+ bool reinstall_group_keys;
+
+ struct ath11k_reg_tpc_power_info reg_tpc_info;
+
+ /* Must be last - ends in a flexible-array member.
+ *
+ * FIXME: Driver should not copy struct ieee80211_chanctx_conf,
+ * especially because it has a flexible array. Find a better way.
+ */
+ struct ieee80211_chanctx_conf chanctx;
+};
+
+struct ath11k_vif_iter {
+ u32 vdev_id;
+ struct ath11k_vif *arvif;
+};
+
+struct ath11k_rx_peer_stats {
+ u64 num_msdu;
+ u64 num_mpdu_fcs_ok;
+ u64 num_mpdu_fcs_err;
+ u64 tcp_msdu_count;
+ u64 udp_msdu_count;
+ u64 other_msdu_count;
+ u64 ampdu_msdu_count;
+ u64 non_ampdu_msdu_count;
+ u64 stbc_count;
+ u64 beamformed_count;
+ u64 mcs_count[HAL_RX_MAX_MCS + 1];
+ u64 nss_count[HAL_RX_MAX_NSS];
+ u64 bw_count[HAL_RX_BW_MAX];
+ u64 gi_count[HAL_RX_GI_MAX];
+ u64 coding_count[HAL_RX_SU_MU_CODING_MAX];
+ u64 tid_count[IEEE80211_NUM_TIDS + 1];
+ u64 pream_cnt[HAL_RX_PREAMBLE_MAX];
+ u64 reception_type[HAL_RX_RECEPTION_TYPE_MAX];
+ u64 rx_duration;
+ u64 dcm_count;
+ u64 ru_alloc_cnt[HAL_RX_RU_ALLOC_TYPE_MAX];
+};
+
+#define ATH11K_HE_MCS_NUM 12
+#define ATH11K_VHT_MCS_NUM 10
+#define ATH11K_BW_NUM 4
+#define ATH11K_NSS_NUM 4
+#define ATH11K_LEGACY_NUM 12
+#define ATH11K_GI_NUM 4
+#define ATH11K_HT_MCS_NUM 32
+
+enum ath11k_pkt_rx_err {
+ ATH11K_PKT_RX_ERR_FCS,
+ ATH11K_PKT_RX_ERR_TKIP,
+ ATH11K_PKT_RX_ERR_CRYPT,
+ ATH11K_PKT_RX_ERR_PEER_IDX_INVAL,
+ ATH11K_PKT_RX_ERR_MAX,
+};
+
+enum ath11k_ampdu_subfrm_num {
+ ATH11K_AMPDU_SUBFRM_NUM_10,
+ ATH11K_AMPDU_SUBFRM_NUM_20,
+ ATH11K_AMPDU_SUBFRM_NUM_30,
+ ATH11K_AMPDU_SUBFRM_NUM_40,
+ ATH11K_AMPDU_SUBFRM_NUM_50,
+ ATH11K_AMPDU_SUBFRM_NUM_60,
+ ATH11K_AMPDU_SUBFRM_NUM_MORE,
+ ATH11K_AMPDU_SUBFRM_NUM_MAX,
+};
+
+enum ath11k_amsdu_subfrm_num {
+ ATH11K_AMSDU_SUBFRM_NUM_1,
+ ATH11K_AMSDU_SUBFRM_NUM_2,
+ ATH11K_AMSDU_SUBFRM_NUM_3,
+ ATH11K_AMSDU_SUBFRM_NUM_4,
+ ATH11K_AMSDU_SUBFRM_NUM_MORE,
+ ATH11K_AMSDU_SUBFRM_NUM_MAX,
+};
+
+enum ath11k_counter_type {
+ ATH11K_COUNTER_TYPE_BYTES,
+ ATH11K_COUNTER_TYPE_PKTS,
+ ATH11K_COUNTER_TYPE_MAX,
+};
+
+enum ath11k_stats_type {
+ ATH11K_STATS_TYPE_SUCC,
+ ATH11K_STATS_TYPE_FAIL,
+ ATH11K_STATS_TYPE_RETRY,
+ ATH11K_STATS_TYPE_AMPDU,
+ ATH11K_STATS_TYPE_MAX,
+};
+
+struct ath11k_htt_data_stats {
+ u64 legacy[ATH11K_COUNTER_TYPE_MAX][ATH11K_LEGACY_NUM];
+ u64 ht[ATH11K_COUNTER_TYPE_MAX][ATH11K_HT_MCS_NUM];
+ u64 vht[ATH11K_COUNTER_TYPE_MAX][ATH11K_VHT_MCS_NUM];
+ u64 he[ATH11K_COUNTER_TYPE_MAX][ATH11K_HE_MCS_NUM];
+ u64 bw[ATH11K_COUNTER_TYPE_MAX][ATH11K_BW_NUM];
+ u64 nss[ATH11K_COUNTER_TYPE_MAX][ATH11K_NSS_NUM];
+ u64 gi[ATH11K_COUNTER_TYPE_MAX][ATH11K_GI_NUM];
+};
+
+struct ath11k_htt_tx_stats {
+ struct ath11k_htt_data_stats stats[ATH11K_STATS_TYPE_MAX];
+ u64 tx_duration;
+ u64 ba_fails;
+ u64 ack_fails;
+};
+
+struct ath11k_per_ppdu_tx_stats {
+ u16 succ_pkts;
+ u16 failed_pkts;
+ u16 retry_pkts;
+ u32 succ_bytes;
+ u32 failed_bytes;
+ u32 retry_bytes;
+};
+
+DECLARE_EWMA(avg_rssi, 10, 8)
+
+struct ath11k_sta {
+ struct ath11k_vif *arvif;
+
+ /* the following are protected by ar->data_lock */
+ u32 changed; /* IEEE80211_RC_* */
+ u32 bw;
+ u32 nss;
+ u32 smps;
+ enum hal_pn_type pn_type;
+
+ struct work_struct update_wk;
+ struct work_struct set_4addr_wk;
+ struct rate_info txrate;
+ u32 peer_nss;
+ struct rate_info last_txrate;
+ u64 rx_duration;
+ u64 tx_duration;
+ u8 rssi_comb;
+ struct ewma_avg_rssi avg_rssi;
+ s8 rssi_beacon;
+ s8 chain_signal[IEEE80211_MAX_CHAINS];
+ struct ath11k_htt_tx_stats *tx_stats;
+ struct ath11k_rx_peer_stats *rx_stats;
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+ /* protected by conf_mutex */
+ bool aggr_mode;
+#endif
+
+ bool use_4addr_set;
+ u16 tcl_metadata;
+
+ /* Protected with ar->data_lock */
+ enum ath11k_wmi_peer_ps_state peer_ps_state;
+ u64 ps_start_time;
+ u64 ps_start_jiffies;
+ u64 ps_total_duration;
+ bool peer_current_ps_valid;
+
+ u32 bw_prev;
+};
+
+#define ATH11K_MIN_5G_FREQ 4150
+#define ATH11K_MIN_6G_FREQ 5925
+#define ATH11K_MAX_6G_FREQ 7115
+#define ATH11K_NUM_CHANS 102
+#define ATH11K_MAX_5G_CHAN 177
+
+enum ath11k_state {
+ ATH11K_STATE_OFF,
+ ATH11K_STATE_ON,
+ ATH11K_STATE_RESTARTING,
+ ATH11K_STATE_RESTARTED,
+ ATH11K_STATE_WEDGED,
+ ATH11K_STATE_FTM,
+ /* Add other states as required */
+};
+
+/* Antenna noise floor */
+#define ATH11K_DEFAULT_NOISE_FLOOR -95
+
+#define ATH11K_INVALID_RSSI_FULL -1
+
+#define ATH11K_INVALID_RSSI_EMPTY -128
+
+struct ath11k_fw_stats {
+ struct dentry *debugfs_fwstats;
+ u32 pdev_id;
+ u32 stats_id;
+ struct list_head pdevs;
+ struct list_head vdevs;
+ struct list_head bcn;
+ u32 num_vdev_recvd;
+ u32 num_bcn_recvd;
+};
+
+struct ath11k_dbg_htt_stats {
+ u8 type;
+ u8 reset;
+ struct debug_htt_stats_req *stats_req;
+ /* protects shared stats req buffer */
+ spinlock_t lock;
+};
+
+#define MAX_MODULE_ID_BITMAP_WORDS 16
+
+struct ath11k_debug {
+ struct dentry *debugfs_pdev;
+ struct ath11k_dbg_htt_stats htt_stats;
+ u32 extd_tx_stats;
+ u32 extd_rx_stats;
+ u32 pktlog_filter;
+ u32 pktlog_mode;
+ u32 pktlog_peer_valid;
+ u8 pktlog_peer_addr[ETH_ALEN];
+ u32 rx_filter;
+ u32 mem_offset;
+ u32 module_id_bitmap[MAX_MODULE_ID_BITMAP_WORDS];
+ struct ath11k_debug_dbr *dbr_debug[WMI_DIRECT_BUF_MAX];
+};
+
+struct ath11k_per_peer_tx_stats {
+ u32 succ_bytes;
+ u32 retry_bytes;
+ u32 failed_bytes;
+ u16 succ_pkts;
+ u16 retry_pkts;
+ u16 failed_pkts;
+ u32 duration;
+ u8 ba_fails;
+ bool is_ampdu;
+};
+
+#define ATH11K_FLUSH_TIMEOUT (5 * HZ)
+#define ATH11K_VDEV_DELETE_TIMEOUT_HZ (5 * HZ)
+
+struct ath11k {
+ struct ath11k_base *ab;
+ struct ath11k_pdev *pdev;
+ struct ieee80211_hw *hw;
+ struct ath11k_pdev_wmi *wmi;
+ struct ath11k_pdev_dp dp;
+ u8 mac_addr[ETH_ALEN];
+ struct ath11k_he ar_he;
+ enum ath11k_state state;
+ bool supports_6ghz;
+ struct {
+ struct completion started;
+ struct completion completed;
+ struct completion on_channel;
+ struct delayed_work timeout;
+ enum ath11k_scan_state state;
+ bool is_roc;
+ int vdev_id;
+ int roc_freq;
+ bool roc_notify;
+ } scan;
+
+ struct {
+ struct ieee80211_supported_band sbands[NUM_NL80211_BANDS];
+ struct ieee80211_sband_iftype_data
+ iftype[NUM_NL80211_BANDS][NUM_NL80211_IFTYPES];
+ } mac;
+
+ unsigned long dev_flags;
+ unsigned int filter_flags;
+ unsigned long monitor_flags;
+ u32 min_tx_power;
+ u32 max_tx_power;
+ u32 txpower_limit_2g;
+ u32 txpower_limit_5g;
+ u32 txpower_scale;
+ u32 power_scale;
+ u32 chan_tx_pwr;
+ u32 num_stations;
+ u32 max_num_stations;
+ /* To synchronize concurrent synchronous mac80211 callback operations,
+ * concurrent debugfs configuration and concurrent FW statistics events.
+ */
+ struct mutex conf_mutex;
+ /* protects the radio specific data like debug stats, ppdu_stats_info stats,
+ * vdev_stop_status info, scan data, ath11k_sta info, ath11k_vif info,
+ * channel context data, survey info, test mode data, channel_update_queue.
+ */
+ spinlock_t data_lock;
+
+ struct list_head arvifs;
+ /* should never be NULL; needed for regular htt rx */
+ struct ieee80211_channel *rx_channel;
+
+ /* valid during scan; needed for mgmt rx during scan */
+ struct ieee80211_channel *scan_channel;
+
+ u8 cfg_tx_chainmask;
+ u8 cfg_rx_chainmask;
+ u8 num_rx_chains;
+ u8 num_tx_chains;
+ /* pdev_idx starts from 0 whereas pdev->pdev_id starts with 1 */
+ u8 pdev_idx;
+ u8 lmac_id;
+
+ struct completion peer_assoc_done;
+ struct completion peer_delete_done;
+
+ int install_key_status;
+ struct completion install_key_done;
+
+ int last_wmi_vdev_start_status;
+ struct completion vdev_setup_done;
+ struct completion vdev_delete_done;
+
+ int num_peers;
+ int max_num_peers;
+ u32 num_started_vdevs;
+ u32 num_created_vdevs;
+ unsigned long long allocated_vdev_map;
+
+ struct idr txmgmt_idr;
+ /* protects txmgmt_idr data */
+ spinlock_t txmgmt_idr_lock;
+ atomic_t num_pending_mgmt_tx;
+ wait_queue_head_t txmgmt_empty_waitq;
+
+ /* cycle count is reported twice for each visited channel during scan.
+ * access protected by data_lock
+ */
+ u32 survey_last_rx_clear_count;
+ u32 survey_last_cycle_count;
+
+ /* Channel info events are expected to come in pairs without and with
+ * COMPLETE flag set respectively for each channel visit during scan.
+ *
+ * However there are deviations from this rule. This flag is used to
+ * avoid reporting garbage data.
+ */
+ bool ch_info_can_report_survey;
+ struct survey_info survey[ATH11K_NUM_CHANS];
+ struct completion bss_survey_done;
+
+ struct work_struct regd_update_work;
+ struct work_struct channel_update_work;
+ /* protected with data_lock */
+ struct list_head channel_update_queue;
+
+ struct work_struct wmi_mgmt_tx_work;
+ struct sk_buff_head wmi_mgmt_tx_queue;
+
+ struct ath11k_wow wow;
+ struct completion target_suspend;
+ bool target_suspend_ack;
+ struct ath11k_per_peer_tx_stats peer_tx_stats;
+ struct list_head ppdu_stats_info;
+ u32 ppdu_stat_list_depth;
+
+ struct ath11k_per_peer_tx_stats cached_stats;
+ u32 last_ppdu_id;
+ u32 cached_ppdu_id;
+ int monitor_vdev_id;
+ struct completion fw_mode_reset;
+ u8 ftm_msgref;
+#ifdef CONFIG_ATH11K_DEBUGFS
+ struct ath11k_debug debug;
+#endif
+#ifdef CONFIG_ATH11K_SPECTRAL
+ struct ath11k_spectral spectral;
+#endif
+ bool dfs_block_radar_events;
+ struct ath11k_thermal thermal;
+ u32 vdev_id_11d_scan;
+ struct completion completed_11d_scan;
+ enum ath11k_11d_state state_11d;
+ bool regdom_set_by_user;
+ int hw_rate_code;
+ u8 twt_enabled;
+ bool nlo_enabled;
+ u8 alpha2[REG_ALPHA2_LEN + 1];
+ struct ath11k_fw_stats fw_stats;
+ struct completion fw_stats_complete;
+ struct completion fw_stats_done;
+
+ /* protected by conf_mutex */
+ bool ps_state_enable;
+ bool ps_timekeeper_enable;
+ s8 max_allowed_tx_power;
+};
+
+struct ath11k_band_cap {
+ u32 phy_id;
+ u32 max_bw_supported;
+ u32 ht_cap_info;
+ u32 he_cap_info[2];
+ u32 he_mcs;
+ u32 he_cap_phy_info[PSOC_HOST_MAX_PHY_SIZE];
+ struct ath11k_ppe_threshold he_ppet;
+ u16 he_6ghz_capa;
+};
+
+struct ath11k_pdev_cap {
+ u32 supported_bands;
+ u32 ampdu_density;
+ u32 vht_cap;
+ u32 vht_mcs;
+ u32 he_mcs;
+ u32 tx_chain_mask;
+ u32 rx_chain_mask;
+ u32 tx_chain_mask_shift;
+ u32 rx_chain_mask_shift;
+ struct ath11k_band_cap band[NUM_NL80211_BANDS];
+ bool nss_ratio_enabled;
+ u8 nss_ratio_info;
+};
+
+struct ath11k_pdev {
+ struct ath11k *ar;
+ u32 pdev_id;
+ struct ath11k_pdev_cap cap;
+ u8 mac_addr[ETH_ALEN];
+};
+
+struct ath11k_board_data {
+ const struct firmware *fw;
+ const void *data;
+ size_t len;
+};
+
+struct ath11k_pci_ops {
+ int (*wakeup)(struct ath11k_base *ab);
+ void (*release)(struct ath11k_base *ab);
+ int (*get_msi_irq)(struct ath11k_base *ab, unsigned int vector);
+ void (*window_write32)(struct ath11k_base *ab, u32 offset, u32 value);
+ u32 (*window_read32)(struct ath11k_base *ab, u32 offset);
+};
+
+/* IPQ8074 HW channel counters frequency value in hertz */
+#define IPQ8074_CC_FREQ_HERTZ 320000
+
+struct ath11k_bp_stats {
+ /* Head Pointer reported by the last HTT Backpressure event for the ring */
+ u16 hp;
+
+ /* Tail Pointer reported by the last HTT Backpressure event for the ring */
+ u16 tp;
+
+ /* Number of Backpressure events received for the ring */
+ u32 count;
+
+ /* Last recorded event timestamp */
+ unsigned long jiffies;
+};
+
+struct ath11k_dp_ring_bp_stats {
+ struct ath11k_bp_stats umac_ring_bp_stats[HTT_SW_UMAC_RING_IDX_MAX];
+ struct ath11k_bp_stats lmac_ring_bp_stats[HTT_SW_LMAC_RING_IDX_MAX][MAX_RADIOS];
+};
+
+struct ath11k_soc_dp_tx_err_stats {
+ /* TCL Ring Descriptor unavailable */
+ u32 desc_na[DP_TCL_NUM_RING_MAX];
+ /* Other failures during dp_tx due to mem allocation failure
+ * idr unavailable etc.
+ */
+ atomic_t misc_fail;
+};
+
+struct ath11k_soc_dp_stats {
+ u32 err_ring_pkts;
+ u32 invalid_rbm;
+ u32 rxdma_error[HAL_REO_ENTR_RING_RXDMA_ECODE_MAX];
+ u32 reo_error[HAL_REO_DEST_RING_ERROR_CODE_MAX];
+ u32 hal_reo_error[DP_REO_DST_RING_MAX];
+ struct ath11k_soc_dp_tx_err_stats tx_err;
+ struct ath11k_dp_ring_bp_stats bp_stats;
+};
+
+struct ath11k_msi_user {
+ char *name;
+ int num_vectors;
+ u32 base_vector;
+};
+
+struct ath11k_msi_config {
+ int total_vectors;
+ int total_users;
+ struct ath11k_msi_user *users;
+ u16 hw_rev;
+};
+
+enum ath11k_pm_policy {
+ ATH11K_PM_DEFAULT,
+ ATH11K_PM_WOW,
+};
+
+/* Master structure to hold the hw data which may be used in core module */
+struct ath11k_base {
+ enum ath11k_hw_rev hw_rev;
+ enum ath11k_firmware_mode fw_mode;
+ struct platform_device *pdev;
+ struct device *dev;
+ struct ath11k_qmi qmi;
+ struct ath11k_wmi_base wmi_ab;
+ struct completion fw_ready;
+ int num_radios;
+ /* HW channel counters frequency value in hertz common to all MACs */
+ u32 cc_freq_hz;
+
+ struct ath11k_dump_file_data *dump_data;
+ size_t ath11k_coredump_len;
+ struct work_struct dump_work;
+
+ struct ath11k_htc htc;
+
+ struct ath11k_dp dp;
+
+ void __iomem *mem;
+ void __iomem *mem_ce;
+ unsigned long mem_len;
+
+ struct {
+ enum ath11k_bus bus;
+ const struct ath11k_hif_ops *ops;
+ } hif;
+
+ struct {
+ struct completion wakeup_completed;
+ } wow;
+
+ struct ath11k_ce ce;
+ struct timer_list rx_replenish_retry;
+ struct ath11k_hal hal;
+ /* To synchronize core_start/core_stop */
+ struct mutex core_lock;
+ /* Protects data like peers */
+ spinlock_t base_lock;
+ struct ath11k_pdev pdevs[MAX_RADIOS];
+ struct {
+ enum WMI_HOST_WLAN_BAND supported_bands;
+ u32 pdev_id;
+ } target_pdev_ids[MAX_RADIOS];
+ u8 target_pdev_count;
+ struct ath11k_pdev __rcu *pdevs_active[MAX_RADIOS];
+ struct ath11k_hal_reg_capabilities_ext hal_reg_cap[MAX_RADIOS];
+ unsigned long long free_vdev_map;
+
+ /* To synchronize rhash tbl write operation */
+ struct mutex tbl_mtx_lock;
+
+ /* The rhashtable containing struct ath11k_peer keyed by mac addr */
+ struct rhashtable *rhead_peer_addr;
+ struct rhashtable_params rhash_peer_addr_param;
+
+ /* The rhashtable containing struct ath11k_peer keyed by id */
+ struct rhashtable *rhead_peer_id;
+ struct rhashtable_params rhash_peer_id_param;
+
+ struct list_head peers;
+ wait_queue_head_t peer_mapping_wq;
+ u8 mac_addr[ETH_ALEN];
+ int irq_num[ATH11K_IRQ_NUM_MAX];
+ struct ath11k_ext_irq_grp ext_irq_grp[ATH11K_EXT_IRQ_GRP_NUM_MAX];
+ struct ath11k_targ_cap target_caps;
+ u32 ext_service_bitmap[WMI_SERVICE_EXT_BM_SIZE];
+ bool pdevs_macaddr_valid;
+
+ struct ath11k_hw_params hw_params;
+
+ const struct firmware *cal_file;
+
+ /* Below regd's are protected by ab->data_lock */
+ /* This is the regd set for every radio
+ * by the firmware during initialization
+ */
+ struct ieee80211_regdomain *default_regd[MAX_RADIOS];
+ /* This regd is set during dynamic country setting
+ * This may or may not be used during the runtime
+ */
+ struct ieee80211_regdomain *new_regd[MAX_RADIOS];
+ struct cur_regulatory_info *reg_info_store;
+
+ /* Current DFS Regulatory */
+ enum ath11k_dfs_region dfs_region;
+#ifdef CONFIG_ATH11K_DEBUGFS
+ struct dentry *debugfs_soc;
+#endif
+ struct ath11k_soc_dp_stats soc_stats;
+
+ unsigned long dev_flags;
+ struct completion driver_recovery;
+ struct workqueue_struct *workqueue;
+ struct work_struct restart_work;
+ struct work_struct update_11d_work;
+ u8 new_alpha2[3];
+ struct workqueue_struct *workqueue_aux;
+ struct work_struct reset_work;
+ atomic_t reset_count;
+ atomic_t recovery_count;
+ atomic_t recovery_start_count;
+ bool is_reset;
+ struct completion reset_complete;
+ struct completion reconfigure_complete;
+ struct completion recovery_start;
+ /* continuous recovery fail count */
+ atomic_t fail_cont_count;
+ unsigned long reset_fail_timeout;
+ struct {
+ /* protected by data_lock */
+ u32 fw_crash_counter;
+ } stats;
+ u32 pktlog_defs_checksum;
+
+ struct ath11k_dbring_cap *db_caps;
+ u32 num_db_cap;
+
+ /* To synchronize 11d scan vdev id */
+ struct mutex vdev_id_11d_lock;
+ struct timer_list mon_reap_timer;
+
+ struct completion htc_suspend;
+
+ struct {
+ enum ath11k_bdf_search bdf_search;
+ u32 vendor;
+ u32 device;
+ u32 subsystem_vendor;
+ u32 subsystem_device;
+ } id;
+
+ struct {
+ struct {
+ const struct ath11k_msi_config *config;
+ u32 ep_base_data;
+ u32 irqs[32];
+ u32 addr_lo;
+ u32 addr_hi;
+ } msi;
+
+ const struct ath11k_pci_ops *ops;
+ } pci;
+
+ struct {
+ u32 api_version;
+
+ const struct firmware *fw;
+ const u8 *amss_data;
+ size_t amss_len;
+ const u8 *m3_data;
+ size_t m3_len;
+
+ DECLARE_BITMAP(fw_features, ATH11K_FW_FEATURE_COUNT);
+ } fw;
+
+ struct completion restart_completed;
+
+#ifdef CONFIG_NL80211_TESTMODE
+ struct {
+ u32 data_pos;
+ u32 expected_seq;
+ u8 *eventdata;
+ } testmode;
+#endif
+
+ enum ath11k_pm_policy pm_policy;
+ enum ath11k_pm_policy actual_pm_policy;
+ struct notifier_block pm_nb;
+
+ /* must be last */
+ u8 drv_priv[] __aligned(sizeof(void *));
+};
+
+struct ath11k_fw_stats_pdev {
+ struct list_head list;
+
+ /* PDEV stats */
+ s32 ch_noise_floor;
+ /* Cycles spent transmitting frames */
+ u32 tx_frame_count;
+ /* Cycles spent receiving frames */
+ u32 rx_frame_count;
+ /* Total channel busy time, evidently */
+ u32 rx_clear_count;
+ /* Total on-channel time */
+ u32 cycle_count;
+ u32 phy_err_count;
+ u32 chan_tx_power;
+ u32 ack_rx_bad;
+ u32 rts_bad;
+ u32 rts_good;
+ u32 fcs_bad;
+ u32 no_beacons;
+ u32 mib_int_count;
+
+ /* PDEV TX stats */
+ /* Num HTT cookies queued to dispatch list */
+ s32 comp_queued;
+ /* Num HTT cookies dispatched */
+ s32 comp_delivered;
+ /* Num MSDU queued to WAL */
+ s32 msdu_enqued;
+ /* Num MPDU queue to WAL */
+ s32 mpdu_enqued;
+ /* Num MSDUs dropped by WMM limit */
+ s32 wmm_drop;
+ /* Num Local frames queued */
+ s32 local_enqued;
+ /* Num Local frames done */
+ s32 local_freed;
+ /* Num queued to HW */
+ s32 hw_queued;
+ /* Num PPDU reaped from HW */
+ s32 hw_reaped;
+ /* Num underruns */
+ s32 underrun;
+ /* Num hw paused */
+ u32 hw_paused;
+ /* Num PPDUs cleaned up in TX abort */
+ s32 tx_abort;
+ /* Num MPDUs requeued by SW */
+ s32 mpdus_requeued;
+ /* excessive retries */
+ u32 tx_ko;
+ u32 tx_xretry;
+ /* data hw rate code */
+ u32 data_rc;
+ /* Scheduler self triggers */
+ u32 self_triggers;
+ /* frames dropped due to excessive sw retries */
+ u32 sw_retry_failure;
+ /* illegal rate phy errors */
+ u32 illgl_rate_phy_err;
+ /* wal pdev continuous xretry */
+ u32 pdev_cont_xretry;
+ /* wal pdev tx timeouts */
+ u32 pdev_tx_timeout;
+ /* wal pdev resets */
+ u32 pdev_resets;
+ /* frames dropped due to non-availability of stateless TIDs */
+ u32 stateless_tid_alloc_failure;
+ /* PHY/BB underrun */
+ u32 phy_underrun;
+ /* MPDU is more than txop limit */
+ u32 txop_ovf;
+ /* Num sequences posted */
+ u32 seq_posted;
+ /* Num sequences failed in queueing */
+ u32 seq_failed_queueing;
+ /* Num sequences completed */
+ u32 seq_completed;
+ /* Num sequences restarted */
+ u32 seq_restarted;
+ /* Num of MU sequences posted */
+ u32 mu_seq_posted;
+ /* Num MPDUs flushed by SW, HWPAUSED, SW TXABORT
+ * (Reset,channel change)
+ */
+ s32 mpdus_sw_flush;
+ /* Num MPDUs filtered by HW, all filter condition (TTL expired) */
+ s32 mpdus_hw_filter;
+ /* Num MPDUs truncated by PDG (TXOP, TBTT,
+ * PPDU_duration based on rate, dyn_bw)
+ */
+ s32 mpdus_truncated;
+ /* Num MPDUs that were tried but didn't receive an ACK or BA */
+ s32 mpdus_ack_failed;
+ /* Num MPDUs that were dropped due to expiry */
+ s32 mpdus_expired;
+
+ /* PDEV RX stats */
+ /* Counts any change in ring routing mid-ppdu */
+ s32 mid_ppdu_route_change;
+ /* Total number of statuses processed */
+ s32 status_rcvd;
+ /* Extra frags on rings 0-3 */
+ s32 r0_frags;
+ s32 r1_frags;
+ s32 r2_frags;
+ s32 r3_frags;
+ /* MSDUs / MPDUs delivered to HTT */
+ s32 htt_msdus;
+ s32 htt_mpdus;
+ /* MSDUs / MPDUs delivered to local stack */
+ s32 loc_msdus;
+ s32 loc_mpdus;
+ /* AMSDUs that have more MSDUs than the status ring size */
+ s32 oversize_amsdu;
+ /* Number of PHY errors */
+ s32 phy_errs;
+ /* Number of PHY errors drops */
+ s32 phy_err_drop;
+ /* Number of mpdu errors - FCS, MIC, ENC etc. */
+ s32 mpdu_errs;
+ /* Num overflow errors */
+ s32 rx_ovfl_errs;
+};
+
+struct ath11k_fw_stats_vdev {
+ struct list_head list;
+
+ u32 vdev_id;
+ u32 beacon_snr;
+ u32 data_snr;
+ u32 num_tx_frames[WLAN_MAX_AC];
+ u32 num_rx_frames;
+ u32 num_tx_frames_retries[WLAN_MAX_AC];
+ u32 num_tx_frames_failures[WLAN_MAX_AC];
+ u32 num_rts_fail;
+ u32 num_rts_success;
+ u32 num_rx_err;
+ u32 num_rx_discard;
+ u32 num_tx_not_acked;
+ u32 tx_rate_history[MAX_TX_RATE_VALUES];
+ u32 beacon_rssi_history[MAX_TX_RATE_VALUES];
+};
+
+struct ath11k_fw_stats_bcn {
+ struct list_head list;
+
+ u32 vdev_id;
+ u32 tx_bcn_succ_cnt;
+ u32 tx_bcn_outage_cnt;
+};
+
+void ath11k_fw_stats_init(struct ath11k *ar);
+void ath11k_fw_stats_pdevs_free(struct list_head *head);
+void ath11k_fw_stats_vdevs_free(struct list_head *head);
+void ath11k_fw_stats_bcn_free(struct list_head *head);
+void ath11k_fw_stats_free(struct ath11k_fw_stats *stats);
+
+extern const struct ce_pipe_config ath11k_target_ce_config_wlan_ipq8074[];
+extern const struct service_to_pipe ath11k_target_service_to_ce_map_wlan_ipq8074[];
+extern const struct service_to_pipe ath11k_target_service_to_ce_map_wlan_ipq6018[];
+
+extern const struct ce_pipe_config ath11k_target_ce_config_wlan_qca6390[];
+extern const struct service_to_pipe ath11k_target_service_to_ce_map_wlan_qca6390[];
+
+extern const struct ce_pipe_config ath11k_target_ce_config_wlan_ipq5018[];
+extern const struct service_to_pipe ath11k_target_service_to_ce_map_wlan_ipq5018[];
+
+extern const struct ce_pipe_config ath11k_target_ce_config_wlan_qcn9074[];
+extern const struct service_to_pipe ath11k_target_service_to_ce_map_wlan_qcn9074[];
+int ath11k_core_qmi_firmware_ready(struct ath11k_base *ab);
+int ath11k_core_pre_init(struct ath11k_base *ab);
+int ath11k_core_init(struct ath11k_base *ath11k);
+void ath11k_core_deinit(struct ath11k_base *ath11k);
+struct ath11k_base *ath11k_core_alloc(struct device *dev, size_t priv_size,
+ enum ath11k_bus bus);
+void ath11k_core_free(struct ath11k_base *ath11k);
+int ath11k_core_fetch_bdf(struct ath11k_base *ath11k,
+ struct ath11k_board_data *bd);
+int ath11k_core_fetch_regdb(struct ath11k_base *ab, struct ath11k_board_data *bd);
+int ath11k_core_fetch_board_data_api_1(struct ath11k_base *ab,
+ struct ath11k_board_data *bd,
+ const char *name);
+void ath11k_core_free_bdf(struct ath11k_base *ab, struct ath11k_board_data *bd);
+int ath11k_core_check_dt(struct ath11k_base *ath11k);
+int ath11k_core_check_smbios(struct ath11k_base *ab);
+void ath11k_core_halt(struct ath11k *ar);
+int ath11k_core_resume_early(struct ath11k_base *ab);
+int ath11k_core_resume(struct ath11k_base *ab);
+int ath11k_core_suspend(struct ath11k_base *ab);
+int ath11k_core_suspend_late(struct ath11k_base *ab);
+void ath11k_core_pre_reconfigure_recovery(struct ath11k_base *ab);
+bool ath11k_core_coldboot_cal_support(struct ath11k_base *ab);
+
+const struct firmware *ath11k_core_firmware_request(struct ath11k_base *ab,
+ const char *filename);
+
+static inline const char *ath11k_scan_state_str(enum ath11k_scan_state state)
+{
+ switch (state) {
+ case ATH11K_SCAN_IDLE:
+ return "idle";
+ case ATH11K_SCAN_STARTING:
+ return "starting";
+ case ATH11K_SCAN_RUNNING:
+ return "running";
+ case ATH11K_SCAN_ABORTING:
+ return "aborting";
+ }
+
+ return "unknown";
+}
+
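+/* Per-skb TX state is kept in mac80211's driver_data area; the
+ * BUILD_BUG_ON below ensures it fits.
+ */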
+static inline struct ath11k_skb_cb *ATH11K_SKB_CB(struct sk_buff *skb)
+{
+ BUILD_BUG_ON(sizeof(struct ath11k_skb_cb) >
+ IEEE80211_TX_INFO_DRIVER_DATA_SIZE);
+ return (struct ath11k_skb_cb *)&IEEE80211_SKB_CB(skb)->driver_data;
+}
+
+static inline struct ath11k_skb_rxcb *ATH11K_SKB_RXCB(struct sk_buff *skb)
+{
+ BUILD_BUG_ON(sizeof(struct ath11k_skb_rxcb) > sizeof(skb->cb));
+ return (struct ath11k_skb_rxcb *)skb->cb;
+}
+
+static inline struct ath11k_vif *ath11k_vif_to_arvif(struct ieee80211_vif *vif)
+{
+ return (struct ath11k_vif *)vif->drv_priv;
+}
+
+static inline struct ath11k_sta *ath11k_sta_to_arsta(struct ieee80211_sta *sta)
+{
+ return (struct ath11k_sta *)sta->drv_priv;
+}
+
+static inline struct ath11k *ath11k_ab_to_ar(struct ath11k_base *ab,
+ int mac_id)
+{
+ return ab->pdevs[ath11k_hw_mac_id_to_pdev_id(&ab->hw_params, mac_id)].ar;
+}
+
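+/* Build the full firmware file path; an optional "firmware-name" DT
+ * property selects a vendor subdirectory for all files except board files.
+ */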
+static inline void ath11k_core_create_firmware_path(struct ath11k_base *ab,
+ const char *filename,
+ void *buf, size_t buf_len)
+{
+ const char *fw_name = NULL;
+
+ of_property_read_string(ab->dev->of_node, "firmware-name", &fw_name);
+
+ if (fw_name && strncmp(filename, "board", 5))
+ snprintf(buf, buf_len, "%s/%s/%s/%s", ATH11K_FW_DIR,
+ ab->hw_params.fw.dir, fw_name, filename);
+ else
+ snprintf(buf, buf_len, "%s/%s/%s", ATH11K_FW_DIR,
+ ab->hw_params.fw.dir, filename);
+}
+
+static inline const char *ath11k_bus_str(enum ath11k_bus bus)
+{
+ switch (bus) {
+ case ATH11K_BUS_PCI:
+ return "pci";
+ case ATH11K_BUS_AHB:
+ return "ahb";
+ }
+
+ return "unknown";
+}
+
+void ath11k_core_pm_notifier_unregister(struct ath11k_base *ab);
+
+#endif /* ATH11K_CORE_H */
diff --git a/drivers/net/wireless/ath/ath11k/coredump.c b/drivers/net/wireless/ath/ath11k/coredump.c
new file mode 100644
index 000000000000..1949d57b007a
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/coredump.c
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2020 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+#include <linux/devcoredump.h>
+#include <linux/export.h>
+#include "hif.h"
+#include "coredump.h"
+#include "debug.h"
+
+enum ath11k_fw_crash_dump_type
+ath11k_coredump_get_dump_type(int type)
+{
+ enum ath11k_fw_crash_dump_type dump_type;
+
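+ /* Map a firmware memory region type to the TLV type stored in the
+ * dump file; regions that are not captured map to FW_CRASH_DUMP_NONE
+ * and unknown regions to FW_CRASH_DUMP_TYPE_MAX.
+ */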
+ switch (type) {
+ case HOST_DDR_REGION_TYPE:
+ dump_type = FW_CRASH_DUMP_REMOTE_MEM_DATA;
+ break;
+ case M3_DUMP_REGION_TYPE:
+ dump_type = FW_CRASH_DUMP_M3_DUMP;
+ break;
+ case PAGEABLE_MEM_REGION_TYPE:
+ dump_type = FW_CRASH_DUMP_PAGEABLE_DATA;
+ break;
+ case BDF_MEM_REGION_TYPE:
+ case CALDB_MEM_REGION_TYPE:
+ dump_type = FW_CRASH_DUMP_NONE;
+ break;
+ default:
+ dump_type = FW_CRASH_DUMP_TYPE_MAX;
+ break;
+ }
+
+ return dump_type;
+}
+EXPORT_SYMBOL(ath11k_coredump_get_dump_type);
+
+void ath11k_coredump_upload(struct work_struct *work)
+{
+ struct ath11k_base *ab = container_of(work, struct ath11k_base, dump_work);
+
+ ath11k_info(ab, "Uploading coredump\n");
+ /* dev_coredumpv() takes ownership of the buffer */
+ dev_coredumpv(ab->dev, ab->dump_data, ab->ath11k_coredump_len, GFP_KERNEL);
+ ab->dump_data = NULL;
+}
+
+void ath11k_coredump_collect(struct ath11k_base *ab)
+{
+ ath11k_hif_coredump_download(ab);
+}
diff --git a/drivers/net/wireless/ath/ath11k/coredump.h b/drivers/net/wireless/ath/ath11k/coredump.h
new file mode 100644
index 000000000000..3960d9385261
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/coredump.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2020 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+#ifndef _ATH11K_COREDUMP_H_
+#define _ATH11K_COREDUMP_H_
+
+#define ATH11K_FW_CRASH_DUMP_V2 2
+
+enum ath11k_fw_crash_dump_type {
+ FW_CRASH_DUMP_PAGING_DATA,
+ FW_CRASH_DUMP_RDDM_DATA,
+ FW_CRASH_DUMP_REMOTE_MEM_DATA,
+ FW_CRASH_DUMP_PAGEABLE_DATA,
+ FW_CRASH_DUMP_M3_DUMP,
+ FW_CRASH_DUMP_NONE,
+
+ /* keep last */
+ FW_CRASH_DUMP_TYPE_MAX,
+};
+
+#define COREDUMP_TLV_HDR_SIZE 8
+
+struct ath11k_tlv_dump_data {
+ /* see ath11k_fw_crash_dump_type above */
+ __le32 type;
+
+ /* in bytes */
+ __le32 tlv_len;
+
+ /* pad to 32-bit boundaries as needed */
+ u8 tlv_data[];
+} __packed;
+
+struct ath11k_dump_file_data {
+ /* "ATH11K-FW-DUMP" */
+ char df_magic[16];
+ /* total dump len in bytes */
+ __le32 len;
+ /* file dump version */
+ __le32 version;
+ /* pci device id */
+ __le32 chip_id;
+ /* qrtr instance id */
+ __le32 qrtr_id;
+ /* pci domain id */
+ __le32 bus_id;
+ guid_t guid;
+ /* time-of-day stamp */
+ __le64 tv_sec;
+ /* time-of-day stamp, nano-seconds */
+ __le64 tv_nsec;
+ /* room for growth w/out changing binary format */
+ u8 unused[128];
+ u8 data[];
+} __packed;
+
+#ifdef CONFIG_DEV_COREDUMP
+enum ath11k_fw_crash_dump_type ath11k_coredump_get_dump_type(int type);
+void ath11k_coredump_upload(struct work_struct *work);
+void ath11k_coredump_collect(struct ath11k_base *ab);
+#else
+static inline enum ath11k_fw_crash_dump_type
+ath11k_coredump_get_dump_type(int type)
+{
+ return FW_CRASH_DUMP_TYPE_MAX;
+}
+
+static inline void ath11k_coredump_upload(struct work_struct *work)
+{
+}
+
+static inline void ath11k_coredump_collect(struct ath11k_base *ab)
+{
+}
+#endif
+
+#endif
diff --git a/drivers/net/wireless/ath/ath11k/dbring.c b/drivers/net/wireless/ath/ath11k/dbring.c
new file mode 100644
index 000000000000..520d8b8662a2
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/dbring.c
@@ -0,0 +1,401 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#include "core.h"
+#include "debug.h"
+
+#define ATH11K_DB_MAGIC_VALUE 0xdeadbeaf
+
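+/* A buffer is considered valid only if the hardware overwrote the magic
+ * pattern it was pre-filled with; any magic word left behind means the
+ * buffer was not (fully) written.
+ */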
+int ath11k_dbring_validate_buffer(struct ath11k *ar, void *buffer, u32 size)
+{
+ u32 *temp;
+ int idx;
+
+ size = size >> 2;
+
+ for (idx = 0, temp = buffer; idx < size; idx++, temp++) {
+ if (*temp == ATH11K_DB_MAGIC_VALUE)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void ath11k_dbring_fill_magic_value(struct ath11k *ar,
+ void *buffer, u32 size)
+{
+ /* memset32() takes the number of u32 words to store, not bytes, so
+ * convert size before filling the payload with ATH11K_DB_MAGIC_VALUE.
+ */
+ size = size / sizeof(u32);
+
+ memset32(buffer, ATH11K_DB_MAGIC_VALUE, size);
+}
+
+static int ath11k_dbring_bufs_replenish(struct ath11k *ar,
+ struct ath11k_dbring *ring,
+ struct ath11k_dbring_element *buff,
+ enum wmi_direct_buffer_module id)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct hal_srng *srng;
+ dma_addr_t paddr;
+ void *ptr_aligned, *ptr_unaligned, *desc;
+ int ret;
+ int buf_id;
+ u32 cookie;
+
+ srng = &ab->hal.srng_list[ring->refill_srng.ring_id];
+
+ lockdep_assert_held(&srng->lock);
+
+ ath11k_hal_srng_access_begin(ab, srng);
+
+ ptr_unaligned = buff->payload;
+ ptr_aligned = PTR_ALIGN(ptr_unaligned, ring->buf_align);
+ ath11k_dbring_fill_magic_value(ar, ptr_aligned, ring->buf_sz);
+ paddr = dma_map_single(ab->dev, ptr_aligned, ring->buf_sz,
+ DMA_FROM_DEVICE);
+
+ ret = dma_mapping_error(ab->dev, paddr);
+ if (ret)
+ goto err;
+
+ spin_lock_bh(&ring->idr_lock);
+ buf_id = idr_alloc(&ring->bufs_idr, buff, 0, ring->bufs_max, GFP_ATOMIC);
+ spin_unlock_bh(&ring->idr_lock);
+ if (buf_id < 0) {
+ ret = -ENOBUFS;
+ goto err_dma_unmap;
+ }
+
+ desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
+ if (!desc) {
+ ret = -ENOENT;
+ goto err_idr_remove;
+ }
+
+ buff->paddr = paddr;
+
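+ /* The cookie encodes the pdev index and the IDR id so the buffer can
+ * be looked up again when the firmware releases it.
+ */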
+ cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, ar->pdev_idx) |
+ FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);
+
+ ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, 0);
+
+ ath11k_debugfs_add_dbring_entry(ar, id, ATH11K_DBG_DBR_EVENT_REPLENISH, srng);
+ ath11k_hal_srng_access_end(ab, srng);
+
+ return 0;
+
+err_idr_remove:
+ spin_lock_bh(&ring->idr_lock);
+ idr_remove(&ring->bufs_idr, buf_id);
+ spin_unlock_bh(&ring->idr_lock);
+err_dma_unmap:
+ dma_unmap_single(ab->dev, paddr, ring->buf_sz,
+ DMA_FROM_DEVICE);
+err:
+ ath11k_hal_srng_access_end(ab, srng);
+ return ret;
+}
+
+static int ath11k_dbring_fill_bufs(struct ath11k *ar,
+ struct ath11k_dbring *ring,
+ enum wmi_direct_buffer_module id)
+{
+ struct ath11k_dbring_element *buff;
+ struct hal_srng *srng;
+ int num_remain, req_entries, num_free;
+ u32 align;
+ int size, ret;
+
+ srng = &ar->ab->hal.srng_list[ring->refill_srng.ring_id];
+
+ spin_lock_bh(&srng->lock);
+
+ num_free = ath11k_hal_srng_src_num_free(ar->ab, srng, true);
+ req_entries = min(num_free, ring->bufs_max);
+ num_remain = req_entries;
+ align = ring->buf_align;
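+ /* over-allocate so the payload can be aligned up to buf_align */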
+ size = ring->buf_sz + align - 1;
+
+ while (num_remain > 0) {
+ buff = kzalloc(sizeof(*buff), GFP_ATOMIC);
+ if (!buff)
+ break;
+
+ buff->payload = kzalloc(size, GFP_ATOMIC);
+ if (!buff->payload) {
+ kfree(buff);
+ break;
+ }
+ ret = ath11k_dbring_bufs_replenish(ar, ring, buff, id);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to replenish db ring num_remain %d req_ent %d\n",
+ num_remain, req_entries);
+ kfree(buff->payload);
+ kfree(buff);
+ break;
+ }
+ num_remain--;
+ }
+
+ spin_unlock_bh(&srng->lock);
+
+ return num_remain;
+}
+
+int ath11k_dbring_wmi_cfg_setup(struct ath11k *ar,
+ struct ath11k_dbring *ring,
+ enum wmi_direct_buffer_module id)
+{
+ struct ath11k_wmi_pdev_dma_ring_cfg_req_cmd param = {};
+ int ret;
+
+ if (id >= WMI_DIRECT_BUF_MAX)
+ return -EINVAL;
+
+ param.pdev_id = DP_SW2HW_MACID(ring->pdev_id);
+ param.module_id = id;
+ param.base_paddr_lo = lower_32_bits(ring->refill_srng.paddr);
+ param.base_paddr_hi = upper_32_bits(ring->refill_srng.paddr);
+ param.head_idx_paddr_lo = lower_32_bits(ring->hp_addr);
+ param.head_idx_paddr_hi = upper_32_bits(ring->hp_addr);
+ param.tail_idx_paddr_lo = lower_32_bits(ring->tp_addr);
+ param.tail_idx_paddr_hi = upper_32_bits(ring->tp_addr);
+ param.num_elems = ring->bufs_max;
+ param.buf_size = ring->buf_sz;
+ param.num_resp_per_event = ring->num_resp_per_event;
+ param.event_timeout_ms = ring->event_timeout_ms;
+
+ ret = ath11k_wmi_pdev_dma_ring_cfg(ar, &param);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to setup db ring cfg\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+int ath11k_dbring_set_cfg(struct ath11k *ar, struct ath11k_dbring *ring,
+ u32 num_resp_per_event, u32 event_timeout_ms,
+ int (*handler)(struct ath11k *,
+ struct ath11k_dbring_data *))
+{
+ if (WARN_ON(!ring))
+ return -EINVAL;
+
+ ring->num_resp_per_event = num_resp_per_event;
+ ring->event_timeout_ms = event_timeout_ms;
+ ring->handler = handler;
+
+ return 0;
+}
+
+int ath11k_dbring_buf_setup(struct ath11k *ar,
+ struct ath11k_dbring *ring,
+ struct ath11k_dbring_cap *db_cap)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct hal_srng *srng;
+ int ret;
+
+ srng = &ab->hal.srng_list[ring->refill_srng.ring_id];
+ ring->bufs_max = ring->refill_srng.size /
+ ath11k_hal_srng_get_entrysize(ab, HAL_RXDMA_DIR_BUF);
+
+ ring->buf_sz = db_cap->min_buf_sz;
+ ring->buf_align = db_cap->min_buf_align;
+ ring->pdev_id = db_cap->pdev_id;
+ ring->hp_addr = ath11k_hal_srng_get_hp_addr(ar->ab, srng);
+ ring->tp_addr = ath11k_hal_srng_get_tp_addr(ar->ab, srng);
+
+ ret = ath11k_dbring_fill_bufs(ar, ring, db_cap->id);
+
+ return ret;
+}
+
+int ath11k_dbring_srng_setup(struct ath11k *ar, struct ath11k_dbring *ring,
+ int ring_num, int num_entries)
+{
+ int ret;
+
+ ret = ath11k_dp_srng_setup(ar->ab, &ring->refill_srng, HAL_RXDMA_DIR_BUF,
+ ring_num, ar->pdev_idx, num_entries);
+ if (ret < 0) {
+ ath11k_warn(ar->ab, "failed to setup srng: %d ring_id %d\n",
+ ret, ring_num);
+ goto err;
+ }
+
+ return 0;
+err:
+ ath11k_dp_srng_cleanup(ar->ab, &ring->refill_srng);
+ return ret;
+}
+
+int ath11k_dbring_get_cap(struct ath11k_base *ab,
+ u8 pdev_idx,
+ enum wmi_direct_buffer_module id,
+ struct ath11k_dbring_cap *db_cap)
+{
+ int i;
+
+ if (!ab->num_db_cap || !ab->db_caps)
+ return -ENOENT;
+
+ if (id >= WMI_DIRECT_BUF_MAX)
+ return -EINVAL;
+
+ for (i = 0; i < ab->num_db_cap; i++) {
+ if (pdev_idx == ab->db_caps[i].pdev_id &&
+ id == ab->db_caps[i].id) {
+ *db_cap = ab->db_caps[i];
+
+ return 0;
+ }
+ }
+
+ return -ENOENT;
+}
+
+int ath11k_dbring_buffer_release_event(struct ath11k_base *ab,
+ struct ath11k_dbring_buf_release_event *ev)
+{
+ struct ath11k_dbring *ring;
+ struct hal_srng *srng;
+ struct ath11k *ar;
+ struct ath11k_dbring_element *buff;
+ struct ath11k_dbring_data handler_data;
+ struct ath11k_buffer_addr desc;
+ u8 *vaddr_unalign;
+ u32 num_entry, num_buff_reaped;
+ u8 pdev_idx, rbm, module_id;
+ u32 cookie;
+ int buf_id;
+ int size;
+ dma_addr_t paddr;
+ int ret = 0;
+
+ pdev_idx = ev->fixed.pdev_id;
+ module_id = ev->fixed.module_id;
+
+ if (pdev_idx >= ab->num_radios) {
+ ath11k_warn(ab, "Invalid pdev id %d\n", pdev_idx);
+ return -EINVAL;
+ }
+
+ if (ev->fixed.num_buf_release_entry !=
+ ev->fixed.num_meta_data_entry) {
+ ath11k_warn(ab, "Buffer entry %d mismatch meta entry %d\n",
+ ev->fixed.num_buf_release_entry,
+ ev->fixed.num_meta_data_entry);
+ return -EINVAL;
+ }
+
+ ar = ab->pdevs[pdev_idx].ar;
+
+ rcu_read_lock();
+ if (!rcu_dereference(ab->pdevs_active[pdev_idx])) {
+ ret = -EINVAL;
+ goto rcu_unlock;
+ }
+
+ switch (ev->fixed.module_id) {
+ case WMI_DIRECT_BUF_SPECTRAL:
+ ring = ath11k_spectral_get_dbring(ar);
+ break;
+ default:
+ ring = NULL;
+ ath11k_warn(ab, "Recv dma buffer release ev on unsupp module %d\n",
+ ev->fixed.module_id);
+ break;
+ }
+
+ if (!ring) {
+ ret = -EINVAL;
+ goto rcu_unlock;
+ }
+
+ srng = &ab->hal.srng_list[ring->refill_srng.ring_id];
+ num_entry = ev->fixed.num_buf_release_entry;
+ size = ring->buf_sz + ring->buf_align - 1;
+ num_buff_reaped = 0;
+
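+ /* Reap each released buffer, hand it to the module handler and then
+ * replenish it straight back into the refill ring.
+ */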
+ spin_lock_bh(&srng->lock);
+
+ while (num_buff_reaped < num_entry) {
+ desc.info0 = ev->buf_entry[num_buff_reaped].paddr_lo;
+ desc.info1 = ev->buf_entry[num_buff_reaped].paddr_hi;
+ handler_data.meta = ev->meta_data[num_buff_reaped];
+
+ num_buff_reaped++;
+
+ ath11k_hal_rx_buf_addr_info_get(&desc, &paddr, &cookie, &rbm);
+
+ buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, cookie);
+
+ spin_lock_bh(&ring->idr_lock);
+ buff = idr_find(&ring->bufs_idr, buf_id);
+ if (!buff) {
+ spin_unlock_bh(&ring->idr_lock);
+ continue;
+ }
+ idr_remove(&ring->bufs_idr, buf_id);
+ spin_unlock_bh(&ring->idr_lock);
+
+ dma_unmap_single(ab->dev, buff->paddr, ring->buf_sz,
+ DMA_FROM_DEVICE);
+
+ ath11k_debugfs_add_dbring_entry(ar, module_id,
+ ATH11K_DBG_DBR_EVENT_RX, srng);
+
+ if (ring->handler) {
+ vaddr_unalign = buff->payload;
+ handler_data.data = PTR_ALIGN(vaddr_unalign,
+ ring->buf_align);
+ handler_data.data_sz = ring->buf_sz;
+
+ ring->handler(ar, &handler_data);
+ }
+
+ buff->paddr = 0;
+ memset(buff->payload, 0, size);
+ ath11k_dbring_bufs_replenish(ar, ring, buff, module_id);
+ }
+
+ spin_unlock_bh(&srng->lock);
+
+rcu_unlock:
+ rcu_read_unlock();
+
+ return ret;
+}
+
+void ath11k_dbring_srng_cleanup(struct ath11k *ar, struct ath11k_dbring *ring)
+{
+ ath11k_dp_srng_cleanup(ar->ab, &ring->refill_srng);
+}
+
+void ath11k_dbring_buf_cleanup(struct ath11k *ar, struct ath11k_dbring *ring)
+{
+ struct ath11k_dbring_element *buff;
+ int buf_id;
+
+ spin_lock_bh(&ring->idr_lock);
+ idr_for_each_entry(&ring->bufs_idr, buff, buf_id) {
+ idr_remove(&ring->bufs_idr, buf_id);
+ dma_unmap_single(ar->ab->dev, buff->paddr,
+ ring->buf_sz, DMA_FROM_DEVICE);
+ kfree(buff->payload);
+ kfree(buff);
+ }
+
+ idr_destroy(&ring->bufs_idr);
+ spin_unlock_bh(&ring->idr_lock);
+}
diff --git a/drivers/net/wireless/ath/ath11k/dbring.h b/drivers/net/wireless/ath/ath11k/dbring.h
new file mode 100644
index 000000000000..2f93b78a50df
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/dbring.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef ATH11K_DBRING_H
+#define ATH11K_DBRING_H
+
+#include <linux/types.h>
+#include <linux/idr.h>
+#include <linux/spinlock.h>
+#include "dp.h"
+
+struct ath11k_dbring_element {
+ dma_addr_t paddr;
+ u8 *payload;
+};
+
+struct ath11k_dbring_data {
+ void *data;
+ u32 data_sz;
+ struct wmi_dma_buf_release_meta_data meta;
+};
+
+struct ath11k_dbring_buf_release_event {
+ struct ath11k_wmi_dma_buf_release_fixed_param fixed;
+ struct wmi_dma_buf_release_entry *buf_entry;
+ struct wmi_dma_buf_release_meta_data *meta_data;
+ u32 num_buf_entry;
+ u32 num_meta;
+};
+
+struct ath11k_dbring_cap {
+ u32 pdev_id;
+ enum wmi_direct_buffer_module id;
+ u32 min_elem;
+ u32 min_buf_sz;
+ u32 min_buf_align;
+};
+
+struct ath11k_dbring {
+ struct dp_srng refill_srng;
+ struct idr bufs_idr;
+ /* Protects bufs_idr */
+ spinlock_t idr_lock;
+ dma_addr_t tp_addr;
+ dma_addr_t hp_addr;
+ int bufs_max;
+ u32 pdev_id;
+ u32 buf_sz;
+ u32 buf_align;
+ u32 num_resp_per_event;
+ u32 event_timeout_ms;
+ int (*handler)(struct ath11k *, struct ath11k_dbring_data *);
+};
+
+int ath11k_dbring_set_cfg(struct ath11k *ar,
+ struct ath11k_dbring *ring,
+ u32 num_resp_per_event,
+ u32 event_timeout_ms,
+ int (*handler)(struct ath11k *,
+ struct ath11k_dbring_data *));
+int ath11k_dbring_wmi_cfg_setup(struct ath11k *ar,
+ struct ath11k_dbring *ring,
+ enum wmi_direct_buffer_module id);
+int ath11k_dbring_buf_setup(struct ath11k *ar,
+ struct ath11k_dbring *ring,
+ struct ath11k_dbring_cap *db_cap);
+int ath11k_dbring_srng_setup(struct ath11k *ar, struct ath11k_dbring *ring,
+ int ring_num, int num_entries);
+int ath11k_dbring_buffer_release_event(struct ath11k_base *ab,
+ struct ath11k_dbring_buf_release_event *ev);
+int ath11k_dbring_get_cap(struct ath11k_base *ab,
+ u8 pdev_idx,
+ enum wmi_direct_buffer_module id,
+ struct ath11k_dbring_cap *db_cap);
+void ath11k_dbring_srng_cleanup(struct ath11k *ar, struct ath11k_dbring *ring);
+void ath11k_dbring_buf_cleanup(struct ath11k *ar, struct ath11k_dbring *ring);
+int ath11k_dbring_validate_buffer(struct ath11k *ar, void *data, u32 size);
+
+#endif /* ATH11K_DBRING_H */
diff --git a/drivers/net/wireless/ath/ath11k/debug.c b/drivers/net/wireless/ath/ath11k/debug.c
new file mode 100644
index 000000000000..37d23a559ba3
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/debug.c
@@ -0,0 +1,113 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#include <linux/export.h>
+#include <linux/vmalloc.h>
+#include "core.h"
+#include "debug.h"
+
+void ath11k_info(struct ath11k_base *ab, const char *fmt, ...)
+{
+ struct va_format vaf = {
+ .fmt = fmt,
+ };
+ va_list args;
+
+ va_start(args, fmt);
+ vaf.va = &args;
+ dev_info(ab->dev, "%pV", &vaf);
+ trace_ath11k_log_info(ab, &vaf);
+ va_end(args);
+}
+EXPORT_SYMBOL(ath11k_info);
+
+void ath11k_err(struct ath11k_base *ab, const char *fmt, ...)
+{
+ struct va_format vaf = {
+ .fmt = fmt,
+ };
+ va_list args;
+
+ va_start(args, fmt);
+ vaf.va = &args;
+ dev_err(ab->dev, "%pV", &vaf);
+ trace_ath11k_log_err(ab, &vaf);
+ va_end(args);
+}
+EXPORT_SYMBOL(ath11k_err);
+
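+/* Warnings are ratelimited so that error storms do not flood the log */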
+void ath11k_warn(struct ath11k_base *ab, const char *fmt, ...)
+{
+ struct va_format vaf = {
+ .fmt = fmt,
+ };
+ va_list args;
+
+ va_start(args, fmt);
+ vaf.va = &args;
+ dev_warn_ratelimited(ab->dev, "%pV", &vaf);
+ trace_ath11k_log_warn(ab, &vaf);
+ va_end(args);
+}
+EXPORT_SYMBOL(ath11k_warn);
+
+#ifdef CONFIG_ATH11K_DEBUG
+
+void __ath11k_dbg(struct ath11k_base *ab, enum ath11k_debug_mask mask,
+ const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+
+ va_start(args, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
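+ /* console output is gated by the debug mask; the trace point below
+ * always fires
+ */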
+ if (ath11k_debug_mask & mask)
+ dev_printk(KERN_DEBUG, ab->dev, "%s %pV", ath11k_dbg_str(mask), &vaf);
+
+ trace_ath11k_log_dbg(ab, mask, &vaf);
+
+ va_end(args);
+}
+EXPORT_SYMBOL(__ath11k_dbg);
+
+void ath11k_dbg_dump(struct ath11k_base *ab,
+ enum ath11k_debug_mask mask,
+ const char *msg, const char *prefix,
+ const void *buf, size_t len)
+{
+ char linebuf[256];
+ size_t linebuflen;
+ const void *ptr;
+
+ if (ath11k_debug_mask & mask) {
+ if (msg)
+ __ath11k_dbg(ab, mask, "%s\n", msg);
+
+ for (ptr = buf; (ptr - buf) < len; ptr += 16) {
+ linebuflen = 0;
+ linebuflen += scnprintf(linebuf + linebuflen,
+ sizeof(linebuf) - linebuflen,
+ "%s%08x: ",
+ (prefix ? prefix : ""),
+ (unsigned int)(ptr - buf));
+ hex_dump_to_buffer(ptr, len - (ptr - buf), 16, 1,
+ linebuf + linebuflen,
+ sizeof(linebuf) - linebuflen, true);
+ dev_printk(KERN_DEBUG, ab->dev, "%s\n", linebuf);
+ }
+ }
+
+ /* tracing code doesn't like null strings */
+ trace_ath11k_log_dbg_dump(ab, msg ? msg : "", prefix ? prefix : "",
+ buf, len);
+}
+EXPORT_SYMBOL(ath11k_dbg_dump);
+
+#endif /* CONFIG_ATH11K_DEBUG */
diff --git a/drivers/net/wireless/ath/ath11k/debug.h b/drivers/net/wireless/ath/ath11k/debug.h
new file mode 100644
index 000000000000..cc8934d15697
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/debug.h
@@ -0,0 +1,113 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _ATH11K_DEBUG_H_
+#define _ATH11K_DEBUG_H_
+
+#include "trace.h"
+#include "debugfs.h"
+
+enum ath11k_debug_mask {
+ ATH11K_DBG_AHB = 0x00000001,
+ ATH11K_DBG_WMI = 0x00000002,
+ ATH11K_DBG_HTC = 0x00000004,
+ ATH11K_DBG_DP_HTT = 0x00000008,
+ ATH11K_DBG_MAC = 0x00000010,
+ ATH11K_DBG_BOOT = 0x00000020,
+ ATH11K_DBG_QMI = 0x00000040,
+ ATH11K_DBG_DATA = 0x00000080,
+ ATH11K_DBG_MGMT = 0x00000100,
+ ATH11K_DBG_REG = 0x00000200,
+ ATH11K_DBG_TESTMODE = 0x00000400,
+ ATH11K_DBG_HAL = 0x00000800,
+ ATH11K_DBG_PCI = 0x00001000,
+ ATH11K_DBG_DP_TX = 0x00002000,
+ ATH11K_DBG_DP_RX = 0x00004000,
+ ATH11K_DBG_CE = 0x00008000,
+};
+
+static inline const char *ath11k_dbg_str(enum ath11k_debug_mask mask)
+{
+ switch (mask) {
+ case ATH11K_DBG_AHB:
+ return "ahb";
+ case ATH11K_DBG_WMI:
+ return "wmi";
+ case ATH11K_DBG_HTC:
+ return "htc";
+ case ATH11K_DBG_DP_HTT:
+ return "dp_htt";
+ case ATH11K_DBG_MAC:
+ return "mac";
+ case ATH11K_DBG_BOOT:
+ return "boot";
+ case ATH11K_DBG_QMI:
+ return "qmi";
+ case ATH11K_DBG_DATA:
+ return "data";
+ case ATH11K_DBG_MGMT:
+ return "mgmt";
+ case ATH11K_DBG_REG:
+ return "reg";
+ case ATH11K_DBG_TESTMODE:
+ return "testmode";
+ case ATH11K_DBG_HAL:
+ return "hal";
+ case ATH11K_DBG_PCI:
+ return "pci";
+ case ATH11K_DBG_DP_TX:
+ return "dp_tx";
+ case ATH11K_DBG_DP_RX:
+ return "dp_rx";
+ case ATH11K_DBG_CE:
+ return "ce";
+
+ /* no default handler to allow compiler to check that the
+ * enum is fully handled
+ */
+ }
+
+ return "<?>";
+}
+
+__printf(2, 3) void ath11k_info(struct ath11k_base *ab, const char *fmt, ...);
+__printf(2, 3) void ath11k_err(struct ath11k_base *ab, const char *fmt, ...);
+__printf(2, 3) void ath11k_warn(struct ath11k_base *ab, const char *fmt, ...);
+
+extern unsigned int ath11k_debug_mask;
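+
+/* ath11k_debug_mask is typically wired up to a module parameter; a usage
+ * sketch (parameter name and value illustrative):
+ *
+ * modprobe ath11k debug_mask=0x22   # ATH11K_DBG_WMI | ATH11K_DBG_BOOT
+ */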
+
+#ifdef CONFIG_ATH11K_DEBUG
+__printf(3, 4) void __ath11k_dbg(struct ath11k_base *ab,
+ enum ath11k_debug_mask mask,
+ const char *fmt, ...);
+void ath11k_dbg_dump(struct ath11k_base *ab,
+ enum ath11k_debug_mask mask,
+ const char *msg, const char *prefix,
+ const void *buf, size_t len);
+#else /* CONFIG_ATH11K_DEBUG */
+static inline int __ath11k_dbg(struct ath11k_base *ab,
+ enum ath11k_debug_mask dbg_mask,
+ const char *fmt, ...)
+{
+ return 0;
+}
+
+static inline void ath11k_dbg_dump(struct ath11k_base *ab,
+ enum ath11k_debug_mask mask,
+ const char *msg, const char *prefix,
+ const void *buf, size_t len)
+{
+}
+#endif /* CONFIG_ATH11K_DEBUG */
+
+#define ath11k_dbg(ar, dbg_mask, fmt, ...) \
+do { \
+ if ((ath11k_debug_mask & dbg_mask) || \
+ trace_ath11k_log_dbg_enabled()) \
+ __ath11k_dbg(ar, dbg_mask, fmt, ##__VA_ARGS__); \
+} while (0)
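+
+/* Usage sketch (mask, message and arguments illustrative):
+ *
+ * ath11k_dbg(ab, ATH11K_DBG_WMI, "wmi cmd %d sent\n", cmd_id);
+ */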
+
+#endif /* _ATH11K_DEBUG_H_ */
diff --git a/drivers/net/wireless/ath/ath11k/debugfs.c b/drivers/net/wireless/ath/ath11k/debugfs.c
new file mode 100644
index 000000000000..977f945b6e66
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/debugfs.c
@@ -0,0 +1,1803 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2020 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#include <linux/export.h>
+#include <linux/vmalloc.h>
+
+#include "debugfs.h"
+
+#include "core.h"
+#include "debug.h"
+#include "wmi.h"
+#include "hal_rx.h"
+#include "dp_tx.h"
+#include "debugfs_htt_stats.h"
+#include "peer.h"
+#include "hif.h"
+
+static const char *htt_bp_umac_ring[HTT_SW_UMAC_RING_IDX_MAX] = {
+ "REO2SW1_RING",
+ "REO2SW2_RING",
+ "REO2SW3_RING",
+ "REO2SW4_RING",
+ "WBM2REO_LINK_RING",
+ "REO2TCL_RING",
+ "REO2FW_RING",
+ "RELEASE_RING",
+ "PPE_RELEASE_RING",
+ "TCL2TQM_RING",
+ "TQM_RELEASE_RING",
+ "REO_RELEASE_RING",
+ "WBM2SW0_RELEASE_RING",
+ "WBM2SW1_RELEASE_RING",
+ "WBM2SW2_RELEASE_RING",
+ "WBM2SW3_RELEASE_RING",
+ "REO_CMD_RING",
+ "REO_STATUS_RING",
+};
+
+static const char *htt_bp_lmac_ring[HTT_SW_LMAC_RING_IDX_MAX] = {
+ "FW2RXDMA_BUF_RING",
+ "FW2RXDMA_STATUS_RING",
+ "FW2RXDMA_LINK_RING",
+ "SW2RXDMA_BUF_RING",
+ "WBM2RXDMA_LINK_RING",
+ "RXDMA2FW_RING",
+ "RXDMA2SW_RING",
+ "RXDMA2RELEASE_RING",
+ "RXDMA2REO_RING",
+ "MONITOR_STATUS_RING",
+ "MONITOR_BUF_RING",
+ "MONITOR_DESC_RING",
+ "MONITOR_DEST_RING",
+};
+
+void ath11k_debugfs_add_dbring_entry(struct ath11k *ar,
+ enum wmi_direct_buffer_module id,
+ enum ath11k_dbg_dbr_event event,
+ struct hal_srng *srng)
+{
+ struct ath11k_debug_dbr *dbr_debug;
+ struct ath11k_dbg_dbr_data *dbr_data;
+ struct ath11k_dbg_dbr_entry *entry;
+
+ if (id >= WMI_DIRECT_BUF_MAX || event >= ATH11K_DBG_DBR_EVENT_MAX)
+ return;
+
+ dbr_debug = ar->debug.dbr_debug[id];
+ if (!dbr_debug)
+ return;
+
+ if (!dbr_debug->dbr_debug_enabled)
+ return;
+
+ dbr_data = &dbr_debug->dbr_dbg_data;
+
+ spin_lock_bh(&dbr_data->lock);
+
+ if (dbr_data->entries) {
+ entry = &dbr_data->entries[dbr_data->dbr_debug_idx];
+ entry->hp = srng->u.src_ring.hp;
+ entry->tp = *srng->u.src_ring.tp_addr;
+ entry->timestamp = jiffies;
+ entry->event = event;
+
+ dbr_data->dbr_debug_idx++;
+ if (dbr_data->dbr_debug_idx ==
+ dbr_data->num_ring_debug_entries)
+ dbr_data->dbr_debug_idx = 0;
+ }
+
+ spin_unlock_bh(&dbr_data->lock);
+}
+
+void ath11k_debugfs_fw_stats_process(struct ath11k *ar, struct ath11k_fw_stats *stats)
+{
+ struct ath11k_base *ab = ar->ab;
+ bool is_end = true;
+
+ /* WMI_REQUEST_PDEV_STAT, WMI_REQUEST_RSSI_PER_CHAIN_STAT and
+ * WMI_REQUEST_VDEV_STAT requests have already been processed.
+ */
+ if (stats->stats_id == WMI_REQUEST_BCN_STAT) {
+ if (list_empty(&stats->bcn)) {
+ ath11k_warn(ab, "empty bcn stats");
+ return;
+ }
+ /* Mark the end only once beacon stats have been received
+ * from all started VDEVs within the PDEV
+ */
+ if (ar->num_started_vdevs)
+ is_end = ((++ar->fw_stats.num_bcn_recvd) ==
+ ar->num_started_vdevs);
+
+ list_splice_tail_init(&stats->bcn,
+ &ar->fw_stats.bcn);
+
+ if (is_end)
+ complete(&ar->fw_stats_done);
+ }
+}
+
+static int ath11k_open_pdev_stats(struct inode *inode, struct file *file)
+{
+ struct ath11k *ar = inode->i_private;
+ struct ath11k_base *ab = ar->ab;
+ struct stats_request_params req_param;
+ void *buf = NULL;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH11K_STATE_ON) {
+ ret = -ENETDOWN;
+ goto err_unlock;
+ }
+
+ buf = vmalloc(ATH11K_FW_STATS_BUF_SIZE);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto err_unlock;
+ }
+
+ req_param.pdev_id = ar->pdev->pdev_id;
+ req_param.vdev_id = 0;
+ req_param.stats_id = WMI_REQUEST_PDEV_STAT;
+
+ ret = ath11k_mac_fw_stats_request(ar, &req_param);
+ if (ret) {
+ ath11k_warn(ab, "failed to request fw pdev stats: %d\n", ret);
+ goto err_free;
+ }
+
+ ath11k_wmi_fw_stats_fill(ar, &ar->fw_stats, req_param.stats_id, buf);
+
+ file->private_data = buf;
+
+ mutex_unlock(&ar->conf_mutex);
+ return 0;
+
+err_free:
+ vfree(buf);
+
+err_unlock:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static int ath11k_release_pdev_stats(struct inode *inode, struct file *file)
+{
+ vfree(file->private_data);
+
+ return 0;
+}
+
+static ssize_t ath11k_read_pdev_stats(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ const char *buf = file->private_data;
+ size_t len = strlen(buf);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_pdev_stats = {
+ .open = ath11k_open_pdev_stats,
+ .release = ath11k_release_pdev_stats,
+ .read = ath11k_read_pdev_stats,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static int ath11k_open_vdev_stats(struct inode *inode, struct file *file)
+{
+ struct ath11k *ar = inode->i_private;
+ struct stats_request_params req_param;
+ void *buf = NULL;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH11K_STATE_ON) {
+ ret = -ENETDOWN;
+ goto err_unlock;
+ }
+
+ buf = vmalloc(ATH11K_FW_STATS_BUF_SIZE);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto err_unlock;
+ }
+
+ req_param.pdev_id = ar->pdev->pdev_id;
+ /* VDEV stats are always sent for all active VDEVs by the FW */
+ req_param.vdev_id = 0;
+ req_param.stats_id = WMI_REQUEST_VDEV_STAT;
+
+ ret = ath11k_mac_fw_stats_request(ar, &req_param);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to request fw vdev stats: %d\n", ret);
+ goto err_free;
+ }
+
+ ath11k_wmi_fw_stats_fill(ar, &ar->fw_stats, req_param.stats_id, buf);
+
+ file->private_data = buf;
+
+ mutex_unlock(&ar->conf_mutex);
+ return 0;
+
+err_free:
+ vfree(buf);
+
+err_unlock:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static int ath11k_release_vdev_stats(struct inode *inode, struct file *file)
+{
+ vfree(file->private_data);
+
+ return 0;
+}
+
+static ssize_t ath11k_read_vdev_stats(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ const char *buf = file->private_data;
+ size_t len = strlen(buf);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_vdev_stats = {
+ .open = ath11k_open_vdev_stats,
+ .release = ath11k_release_vdev_stats,
+ .read = ath11k_read_vdev_stats,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static int ath11k_open_bcn_stats(struct inode *inode, struct file *file)
+{
+ struct ath11k *ar = inode->i_private;
+ struct ath11k_vif *arvif;
+ struct stats_request_params req_param;
+ void *buf = NULL;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH11K_STATE_ON) {
+ ret = -ENETDOWN;
+ goto err_unlock;
+ }
+
+ buf = vmalloc(ATH11K_FW_STATS_BUF_SIZE);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto err_unlock;
+ }
+
+ req_param.stats_id = WMI_REQUEST_BCN_STAT;
+ req_param.pdev_id = ar->pdev->pdev_id;
+
+ /* loop over all active VDEVs for bcn stats */
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ if (!arvif->is_up)
+ continue;
+
+ req_param.vdev_id = arvif->vdev_id;
+ ret = ath11k_mac_fw_stats_request(ar, &req_param);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to request fw bcn stats: %d\n", ret);
+ goto err_free;
+ }
+ }
+
+ ath11k_wmi_fw_stats_fill(ar, &ar->fw_stats, req_param.stats_id, buf);
+
+ /* Since the beacon stats request is looped over all active VDEVs,
+ * the saved fw stats are not freed per request, but only once all
+ * active VDEVs have been handled.
+ */
+ spin_lock_bh(&ar->data_lock);
+ ath11k_fw_stats_bcn_free(&ar->fw_stats.bcn);
+ spin_unlock_bh(&ar->data_lock);
+
+ file->private_data = buf;
+
+ mutex_unlock(&ar->conf_mutex);
+ return 0;
+
+err_free:
+ vfree(buf);
+
+err_unlock:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static int ath11k_release_bcn_stats(struct inode *inode, struct file *file)
+{
+ vfree(file->private_data);
+
+ return 0;
+}
+
+static ssize_t ath11k_read_bcn_stats(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ const char *buf = file->private_data;
+ size_t len = strlen(buf);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_bcn_stats = {
+ .open = ath11k_open_bcn_stats,
+ .release = ath11k_release_bcn_stats,
+ .read = ath11k_read_bcn_stats,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath11k_read_simulate_fw_crash(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ const char buf[] =
+ "To simulate firmware crash write one of the keywords to this file:\n"
+ "`assert` - this will send WMI_FORCE_FW_HANG_CMDID to firmware to cause assert.\n"
+ "`hw-restart` - this will simply queue hw restart without fw/hw actually crashing.\n";
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf));
+}
+
+/* Simulate firmware crash:
+ * 'assert': Send a WMI command that makes the firmware hang on an assert.
+ * This hang is recoverable by a warm firmware reset.
+ * 'hw-restart': Simply queue a hw restart without the fw/hw actually
+ * crashing.
+ */
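+/* Example usage (the debugfs path is illustrative; the directory name is
+ * "<bus>-<dev_name>" as built in ath11k_debugfs_soc_create()):
+ *
+ *   echo assert > /sys/kernel/debug/ath11k/pci-0000:06:00.0/simulate_fw_crash
+ */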
+static ssize_t ath11k_write_simulate_fw_crash(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k_base *ab = file->private_data;
+ struct ath11k_pdev *pdev;
+ struct ath11k *ar = ab->pdevs[0].ar;
+ char buf[32] = {};
+ ssize_t rc;
+ int i, ret, radioup = 0;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = &ab->pdevs[i];
+ ar = pdev->ar;
+ if (ar && ar->state == ATH11K_STATE_ON) {
+ radioup = 1;
+ break;
+ }
+ }
+ /* filter partial writes and invalid commands */
+ if (*ppos != 0 || count >= sizeof(buf) || count == 0)
+ return -EINVAL;
+
+ rc = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);
+ if (rc < 0)
+ return rc;
+
+ /* drop the possible '\n' from the end */
+ if (buf[*ppos - 1] == '\n')
+ buf[*ppos - 1] = '\0';
+
+ if (radioup == 0) {
+ ret = -ENETDOWN;
+ goto exit;
+ }
+
+ if (!strcmp(buf, "assert")) {
+ ath11k_info(ab, "simulating firmware assert crash\n");
+ ret = ath11k_wmi_force_fw_hang_cmd(ar,
+ ATH11K_WMI_FW_HANG_ASSERT_TYPE,
+ ATH11K_WMI_FW_HANG_DELAY);
+ } else if (!strcmp(buf, "hw-restart")) {
+ ath11k_info(ab, "user requested hw restart\n");
+ queue_work(ab->workqueue_aux, &ab->reset_work);
+ ret = 0;
+ } else {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ if (ret) {
+ ath11k_warn(ab, "failed to simulate firmware crash: %d\n", ret);
+ goto exit;
+ }
+
+ ret = count;
+
+exit:
+ return ret;
+}
+
+static const struct file_operations fops_simulate_fw_crash = {
+ .read = ath11k_read_simulate_fw_crash,
+ .write = ath11k_write_simulate_fw_crash,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath11k_write_enable_extd_tx_stats(struct file *file,
+ const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ u32 filter;
+ int ret;
+
+ if (kstrtouint_from_user(ubuf, count, 0, &filter))
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH11K_STATE_ON) {
+ ret = -ENETDOWN;
+ goto out;
+ }
+
+ if (filter == ar->debug.extd_tx_stats) {
+ ret = count;
+ goto out;
+ }
+
+ ar->debug.extd_tx_stats = filter;
+ ret = count;
+
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static ssize_t ath11k_read_enable_extd_tx_stats(struct file *file,
+ char __user *ubuf,
+ size_t count, loff_t *ppos)
+
+{
+ char buf[32] = {};
+ struct ath11k *ar = file->private_data;
+ int len = 0;
+
+ mutex_lock(&ar->conf_mutex);
+ len = scnprintf(buf, sizeof(buf) - len, "%08x\n",
+ ar->debug.extd_tx_stats);
+ mutex_unlock(&ar->conf_mutex);
+
+ return simple_read_from_buffer(ubuf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_extd_tx_stats = {
+ .read = ath11k_read_enable_extd_tx_stats,
+ .write = ath11k_write_enable_extd_tx_stats,
+ .open = simple_open
+};
+
+static ssize_t ath11k_write_extd_rx_stats(struct file *file,
+ const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ struct ath11k_base *ab = ar->ab;
+ struct htt_rx_ring_tlv_filter tlv_filter = {};
+ u32 enable, rx_filter = 0, ring_id;
+ int i;
+ int ret;
+
+ if (kstrtouint_from_user(ubuf, count, 0, &enable))
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH11K_STATE_ON) {
+ ret = -ENETDOWN;
+ goto exit;
+ }
+
+ if (enable > 1) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ if (enable == ar->debug.extd_rx_stats) {
+ ret = count;
+ goto exit;
+ }
+
+ if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags)) {
+ ar->debug.extd_rx_stats = enable;
+ ret = count;
+ goto exit;
+ }
+
+ if (enable) {
+ rx_filter = HTT_RX_FILTER_TLV_FLAGS_MPDU_START;
+ rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_START;
+ rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_END;
+ rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS;
+ rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS_EXT;
+ rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE;
+
+ tlv_filter.rx_filter = rx_filter;
+ tlv_filter.pkt_filter_flags0 = HTT_RX_FP_MGMT_FILTER_FLAGS0;
+ tlv_filter.pkt_filter_flags1 = HTT_RX_FP_MGMT_FILTER_FLAGS1;
+ tlv_filter.pkt_filter_flags2 = HTT_RX_FP_CTRL_FILTER_FLASG2;
+ tlv_filter.pkt_filter_flags3 = HTT_RX_FP_CTRL_FILTER_FLASG3 |
+ HTT_RX_FP_DATA_FILTER_FLASG3;
+ } else {
+ tlv_filter = ath11k_mac_mon_status_filter_default;
+ }
+
+ ar->debug.rx_filter = tlv_filter.rx_filter;
+
+ for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
+ ring_id = ar->dp.rx_mon_status_refill_ring[i].refill_buf_ring.ring_id;
+ ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, ar->dp.mac_id,
+ HAL_RXDMA_MONITOR_STATUS,
+ DP_RX_BUFFER_SIZE, &tlv_filter);
+
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set rx filter for monitor status ring\n");
+ goto exit;
+ }
+ }
+
+ ar->debug.extd_rx_stats = enable;
+ ret = count;
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static ssize_t ath11k_read_extd_rx_stats(struct file *file,
+ char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ char buf[32];
+ int len = 0;
+
+ mutex_lock(&ar->conf_mutex);
+ len = scnprintf(buf, sizeof(buf) - len, "%d\n",
+ ar->debug.extd_rx_stats);
+ mutex_unlock(&ar->conf_mutex);
+
+ return simple_read_from_buffer(ubuf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_extd_rx_stats = {
+ .read = ath11k_read_extd_rx_stats,
+ .write = ath11k_write_extd_rx_stats,
+ .open = simple_open,
+};
+
+static int ath11k_fill_bp_stats(struct ath11k_base *ab,
+ struct ath11k_bp_stats *bp_stats,
+ char *buf, int len, int size)
+{
+ lockdep_assert_held(&ab->base_lock);
+
+ len += scnprintf(buf + len, size - len, "count: %u\n",
+ bp_stats->count);
+ len += scnprintf(buf + len, size - len, "hp: %u\n",
+ bp_stats->hp);
+ len += scnprintf(buf + len, size - len, "tp: %u\n",
+ bp_stats->tp);
+ len += scnprintf(buf + len, size - len, "seen before: %ums\n\n",
+ jiffies_to_msecs(jiffies - bp_stats->jiffies));
+ return len;
+}
+
+static ssize_t ath11k_debugfs_dump_soc_ring_bp_stats(struct ath11k_base *ab,
+ char *buf, int size)
+{
+ struct ath11k_bp_stats *bp_stats;
+ bool stats_rxd = false;
+ u8 i, pdev_idx;
+ int len = 0;
+
+ len += scnprintf(buf + len, size - len, "\nBackpressure Stats\n");
+ len += scnprintf(buf + len, size - len, "==================\n");
+
+ spin_lock_bh(&ab->base_lock);
+ for (i = 0; i < HTT_SW_UMAC_RING_IDX_MAX; i++) {
+ bp_stats = &ab->soc_stats.bp_stats.umac_ring_bp_stats[i];
+
+ if (!bp_stats->count)
+ continue;
+
+ len += scnprintf(buf + len, size - len, "Ring: %s\n",
+ htt_bp_umac_ring[i]);
+ len = ath11k_fill_bp_stats(ab, bp_stats, buf, len, size);
+ stats_rxd = true;
+ }
+
+ for (i = 0; i < HTT_SW_LMAC_RING_IDX_MAX; i++) {
+ for (pdev_idx = 0; pdev_idx < MAX_RADIOS; pdev_idx++) {
+ bp_stats =
+ &ab->soc_stats.bp_stats.lmac_ring_bp_stats[i][pdev_idx];
+
+ if (!bp_stats->count)
+ continue;
+
+ len += scnprintf(buf + len, size - len, "Ring: %s\n",
+ htt_bp_lmac_ring[i]);
+ len += scnprintf(buf + len, size - len, "pdev: %d\n",
+ pdev_idx);
+ len = ath11k_fill_bp_stats(ab, bp_stats, buf, len, size);
+ stats_rxd = true;
+ }
+ }
+ spin_unlock_bh(&ab->base_lock);
+
+ if (!stats_rxd)
+ len += scnprintf(buf + len, size - len,
+ "No Ring Backpressure stats received\n\n");
+
+ return len;
+}
+
+static ssize_t ath11k_debugfs_dump_soc_dp_stats(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k_base *ab = file->private_data;
+ struct ath11k_soc_dp_stats *soc_stats = &ab->soc_stats;
+ int len = 0, i, retval;
+ const int size = 4096;
+ static const char *rxdma_err[HAL_REO_ENTR_RING_RXDMA_ECODE_MAX] = {
+ "Overflow", "MPDU len", "FCS", "Decrypt", "TKIP MIC",
+ "Unencrypt", "MSDU len", "MSDU limit", "WiFi parse",
+ "AMSDU parse", "SA timeout", "DA timeout",
+ "Flow timeout", "Flush req"};
+ static const char *reo_err[HAL_REO_DEST_RING_ERROR_CODE_MAX] = {
+ "Desc addr zero", "Desc inval", "AMPDU in non BA",
+ "Non BA dup", "BA dup", "Frame 2k jump", "BAR 2k jump",
+ "Frame OOR", "BAR OOR", "No BA session",
+ "Frame SN equal SSN", "PN check fail", "2k err",
+ "PN err", "Desc blocked"};
+
+ char *buf;
+
+ buf = kzalloc(size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ len += scnprintf(buf + len, size - len, "SOC RX STATS:\n\n");
+ len += scnprintf(buf + len, size - len, "err ring pkts: %u\n",
+ soc_stats->err_ring_pkts);
+ len += scnprintf(buf + len, size - len, "Invalid RBM: %u\n\n",
+ soc_stats->invalid_rbm);
+ len += scnprintf(buf + len, size - len, "RXDMA errors:\n");
+ for (i = 0; i < HAL_REO_ENTR_RING_RXDMA_ECODE_MAX; i++)
+ len += scnprintf(buf + len, size - len, "%s: %u\n",
+ rxdma_err[i], soc_stats->rxdma_error[i]);
+
+ len += scnprintf(buf + len, size - len, "\nREO errors:\n");
+ for (i = 0; i < HAL_REO_DEST_RING_ERROR_CODE_MAX; i++)
+ len += scnprintf(buf + len, size - len, "%s: %u\n",
+ reo_err[i], soc_stats->reo_error[i]);
+
+ len += scnprintf(buf + len, size - len, "\nHAL REO errors:\n");
+ len += scnprintf(buf + len, size - len,
+ "ring0: %u\nring1: %u\nring2: %u\nring3: %u\n",
+ soc_stats->hal_reo_error[0],
+ soc_stats->hal_reo_error[1],
+ soc_stats->hal_reo_error[2],
+ soc_stats->hal_reo_error[3]);
+
+ len += scnprintf(buf + len, size - len, "\nSOC TX STATS:\n");
+ len += scnprintf(buf + len, size - len, "\nTCL Ring Full Failures:\n");
+
+ for (i = 0; i < ab->hw_params.max_tx_ring; i++)
+ len += scnprintf(buf + len, size - len, "ring%d: %u\n",
+ i, soc_stats->tx_err.desc_na[i]);
+
+ len += scnprintf(buf + len, size - len,
+ "\nMisc Transmit Failures: %d\n",
+ atomic_read(&soc_stats->tx_err.misc_fail));
+
+ len += ath11k_debugfs_dump_soc_ring_bp_stats(ab, buf + len, size - len);
+
+ if (len > size)
+ len = size;
+ retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+
+ return retval;
+}
+
+static const struct file_operations fops_soc_dp_stats = {
+ .read = ath11k_debugfs_dump_soc_dp_stats,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath11k_write_fw_dbglog(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ char buf[128] = {};
+ struct ath11k_fw_dbglog dbglog;
+ unsigned int param, mod_id_index, is_end;
+ u64 value;
+ int ret, num;
+
+ ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos,
+ user_buf, count);
+ if (ret <= 0)
+ return ret;
+
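+ /* Expected input: "<param> <value-hex> [<mod_id_index> <is_end>]";
+ * the last two fields apply only to the module enable bitmap params
+ * handled below. Example (values illustrative): "1 0x5" to set a
+ * log level.
+ */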
+ num = sscanf(buf, "%u %llx %u %u", &param, &value, &mod_id_index, &is_end);
+
+ if (num < 2)
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+ if (param == WMI_DEBUG_LOG_PARAM_MOD_ENABLE_BITMAP ||
+ param == WMI_DEBUG_LOG_PARAM_WOW_MOD_ENABLE_BITMAP) {
+ if (num != 4 || mod_id_index > (MAX_MODULE_ID_BITMAP_WORDS - 1)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ ar->debug.module_id_bitmap[mod_id_index] = upper_32_bits(value);
+ if (!is_end) {
+ ret = count;
+ goto out;
+ }
+ } else {
+ if (num != 2) {
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+
+ dbglog.param = param;
+ dbglog.value = lower_32_bits(value);
+ ret = ath11k_wmi_fw_dbglog_cfg(ar, ar->debug.module_id_bitmap, &dbglog);
+ if (ret) {
+ ath11k_warn(ar->ab, "fw dbglog config failed from debugfs: %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = count;
+
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static const struct file_operations fops_fw_dbglog = {
+ .write = ath11k_write_fw_dbglog,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static int ath11k_open_sram_dump(struct inode *inode, struct file *file)
+{
+ struct ath11k_base *ab = inode->i_private;
+ u8 *buf;
+ u32 start, end;
+ int ret;
+
+ start = ab->hw_params.sram_dump.start;
+ end = ab->hw_params.sram_dump.end;
+
+ buf = vmalloc(end - start + 1);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = ath11k_hif_read(ab, buf, start, end);
+ if (ret) {
+ ath11k_warn(ab, "failed to dump sram: %d\n", ret);
+ vfree(buf);
+ return ret;
+ }
+
+ file->private_data = buf;
+ return 0;
+}
+
+static ssize_t ath11k_read_sram_dump(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k_base *ab = file->f_inode->i_private;
+ const char *buf = file->private_data;
+ int len;
+ u32 start, end;
+
+ start = ab->hw_params.sram_dump.start;
+ end = ab->hw_params.sram_dump.end;
+ len = end - start + 1;
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static int ath11k_release_sram_dump(struct inode *inode, struct file *file)
+{
+ vfree(file->private_data);
+ file->private_data = NULL;
+
+ return 0;
+}
+
+static const struct file_operations fops_sram_dump = {
+ .open = ath11k_open_sram_dump,
+ .read = ath11k_read_sram_dump,
+ .release = ath11k_release_sram_dump,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+int ath11k_debugfs_pdev_create(struct ath11k_base *ab)
+{
+ if (test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags))
+ return 0;
+
+ debugfs_create_file("simulate_fw_crash", 0600, ab->debugfs_soc, ab,
+ &fops_simulate_fw_crash);
+
+ debugfs_create_file("soc_dp_stats", 0400, ab->debugfs_soc, ab,
+ &fops_soc_dp_stats);
+
+ if (ab->hw_params.sram_dump.start != 0)
+ debugfs_create_file("sram", 0400, ab->debugfs_soc, ab,
+ &fops_sram_dump);
+
+ return 0;
+}
+
+void ath11k_debugfs_pdev_destroy(struct ath11k_base *ab)
+{
+ debugfs_remove_recursive(ab->debugfs_soc);
+ ab->debugfs_soc = NULL;
+}
+
+int ath11k_debugfs_soc_create(struct ath11k_base *ab)
+{
+ struct dentry *root;
+ bool dput_needed;
+ char name[64];
+ int ret;
+
+ root = debugfs_lookup("ath11k", NULL);
+ if (!root) {
+ root = debugfs_create_dir("ath11k", NULL);
+ if (IS_ERR_OR_NULL(root))
+ return PTR_ERR(root);
+
+ dput_needed = false;
+ } else {
+ /* a dentry from lookup() needs a dput() once we are done with it */
+ dput_needed = true;
+ }
+
+ scnprintf(name, sizeof(name), "%s-%s", ath11k_bus_str(ab->hif.bus),
+ dev_name(ab->dev));
+
+ ab->debugfs_soc = debugfs_create_dir(name, root);
+ if (IS_ERR_OR_NULL(ab->debugfs_soc)) {
+ ret = PTR_ERR(ab->debugfs_soc);
+ goto out;
+ }
+
+ ret = 0;
+
+out:
+ if (dput_needed)
+ dput(root);
+
+ return ret;
+}
+
+void ath11k_debugfs_soc_destroy(struct ath11k_base *ab)
+{
+ debugfs_remove_recursive(ab->debugfs_soc);
+ ab->debugfs_soc = NULL;
+
+ /* We deliberately do not remove the ath11k directory, even if it
+ * would be empty. This simplifies the directory handling, and
+ * leaving an empty ath11k directory in debugfs is only a minor
+ * cosmetic issue.
+ */
+}
+EXPORT_SYMBOL(ath11k_debugfs_soc_destroy);
+
+void ath11k_debugfs_fw_stats_init(struct ath11k *ar)
+{
+ struct dentry *fwstats_dir = debugfs_create_dir("fw_stats",
+ ar->debug.debugfs_pdev);
+
+ ar->fw_stats.debugfs_fwstats = fwstats_dir;
+
+ /* All stats debugfs files are created under the per-PDEV
+ * "fw_stats" directory.
+ */
+ debugfs_create_file("pdev_stats", 0600, fwstats_dir, ar,
+ &fops_pdev_stats);
+ debugfs_create_file("vdev_stats", 0600, fwstats_dir, ar,
+ &fops_vdev_stats);
+ debugfs_create_file("beacon_stats", 0600, fwstats_dir, ar,
+ &fops_bcn_stats);
+}
+
+static ssize_t ath11k_write_pktlog_filter(struct file *file,
+ const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ struct ath11k_base *ab = ar->ab;
+ struct htt_rx_ring_tlv_filter tlv_filter = {};
+ u32 rx_filter = 0, ring_id, filter, mode;
+ u8 buf[128] = {};
+ int i, ret, rx_buf_sz = 0;
+ ssize_t rc;
+
+ mutex_lock(&ar->conf_mutex);
+ if (ar->state != ATH11K_STATE_ON) {
+ ret = -ENETDOWN;
+ goto out;
+ }
+
+ rc = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, ubuf, count);
+ if (rc < 0) {
+ ret = rc;
+ goto out;
+ }
+ buf[rc] = '\0';
+
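+ /* Expected input: "0x<filter> <mode>", where mode is one of
+ * enum ath11k_pktlog_mode. Example (values illustrative):
+ * "0x6f 1" enables lite-mode pktlog with the ATH11K_PKTLOG_ANY
+ * filter bits set.
+ */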
+ ret = sscanf(buf, "0x%x %u", &filter, &mode);
+ if (ret != 2) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (filter) {
+ ret = ath11k_wmi_pdev_pktlog_enable(ar, filter);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to enable pktlog filter %x: %d\n",
+ ar->debug.pktlog_filter, ret);
+ goto out;
+ }
+ } else {
+ ret = ath11k_wmi_pdev_pktlog_disable(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to disable pktlog: %d\n", ret);
+ goto out;
+ }
+ }
+
+ /* Clear rx filter set for monitor mode and rx status */
+ for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
+ ring_id = ar->dp.rx_mon_status_refill_ring[i].refill_buf_ring.ring_id;
+ ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, ar->dp.mac_id,
+ HAL_RXDMA_MONITOR_STATUS,
+ rx_buf_sz, &tlv_filter);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set rx filter for monitor status ring\n");
+ goto out;
+ }
+ }
+#define HTT_RX_FILTER_TLV_LITE_MODE \
+ (HTT_RX_FILTER_TLV_FLAGS_PPDU_START | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS_EXT | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE | \
+ HTT_RX_FILTER_TLV_FLAGS_MPDU_START)
+
+ if (mode == ATH11K_PKTLOG_MODE_FULL) {
+ rx_filter = HTT_RX_FILTER_TLV_LITE_MODE |
+ HTT_RX_FILTER_TLV_FLAGS_MSDU_START |
+ HTT_RX_FILTER_TLV_FLAGS_MSDU_END |
+ HTT_RX_FILTER_TLV_FLAGS_MPDU_END |
+ HTT_RX_FILTER_TLV_FLAGS_PACKET_HEADER |
+ HTT_RX_FILTER_TLV_FLAGS_ATTENTION;
+ rx_buf_sz = DP_RX_BUFFER_SIZE;
+ } else if (mode == ATH11K_PKTLOG_MODE_LITE) {
+ ret = ath11k_dp_tx_htt_h2t_ppdu_stats_req(ar,
+ HTT_PPDU_STATS_TAG_PKTLOG);
+ if (ret) {
+ ath11k_err(ar->ab, "failed to enable pktlog lite: %d\n", ret);
+ goto out;
+ }
+
+ rx_filter = HTT_RX_FILTER_TLV_LITE_MODE;
+ rx_buf_sz = DP_RX_BUFFER_SIZE_LITE;
+ } else {
+ rx_buf_sz = DP_RX_BUFFER_SIZE;
+ tlv_filter = ath11k_mac_mon_status_filter_default;
+ rx_filter = tlv_filter.rx_filter;
+
+ ret = ath11k_dp_tx_htt_h2t_ppdu_stats_req(ar,
+ HTT_PPDU_STATS_TAG_DEFAULT);
+ if (ret) {
+ ath11k_err(ar->ab, "failed to send htt ppdu stats req: %d\n",
+ ret);
+ goto out;
+ }
+ }
+
+ tlv_filter.rx_filter = rx_filter;
+ if (rx_filter) {
+ tlv_filter.pkt_filter_flags0 = HTT_RX_FP_MGMT_FILTER_FLAGS0;
+ tlv_filter.pkt_filter_flags1 = HTT_RX_FP_MGMT_FILTER_FLAGS1;
+ tlv_filter.pkt_filter_flags2 = HTT_RX_FP_CTRL_FILTER_FLASG2;
+ tlv_filter.pkt_filter_flags3 = HTT_RX_FP_CTRL_FILTER_FLASG3 |
+ HTT_RX_FP_DATA_FILTER_FLASG3;
+ }
+
+ for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
+ ring_id = ar->dp.rx_mon_status_refill_ring[i].refill_buf_ring.ring_id;
+ ret = ath11k_dp_tx_htt_rx_filter_setup(ab, ring_id,
+ ar->dp.mac_id + i,
+ HAL_RXDMA_MONITOR_STATUS,
+ rx_buf_sz, &tlv_filter);
+
+ if (ret) {
+ ath11k_warn(ab, "failed to set rx filter for monitor status ring\n");
+ goto out;
+ }
+ }
+
+ ath11k_info(ab, "pktlog mode %s\n",
+ ((mode == ATH11K_PKTLOG_MODE_FULL) ? "full" : "lite"));
+
+ ar->debug.pktlog_filter = filter;
+ ar->debug.pktlog_mode = mode;
+ ret = count;
+
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static ssize_t ath11k_read_pktlog_filter(struct file *file,
+ char __user *ubuf,
+ size_t count, loff_t *ppos)
+
+{
+ char buf[32] = {};
+ struct ath11k *ar = file->private_data;
+ int len = 0;
+
+ mutex_lock(&ar->conf_mutex);
+ len = scnprintf(buf, sizeof(buf) - len, "%08x %08x\n",
+ ar->debug.pktlog_filter,
+ ar->debug.pktlog_mode);
+ mutex_unlock(&ar->conf_mutex);
+
+ return simple_read_from_buffer(ubuf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_pktlog_filter = {
+ .read = ath11k_read_pktlog_filter,
+ .write = ath11k_write_pktlog_filter,
+ .open = simple_open
+};
+
+static ssize_t ath11k_write_simulate_radar(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ int ret;
+
+ ret = ath11k_wmi_simulate_radar(ar);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static const struct file_operations fops_simulate_radar = {
+ .write = ath11k_write_simulate_radar,
+ .open = simple_open
+};
+
+static ssize_t ath11k_debug_dump_dbr_entries(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k_dbg_dbr_data *dbr_dbg_data = file->private_data;
+ static const char * const event_id_to_string[] = {"empty", "Rx", "Replenish"};
+ int size = ATH11K_DEBUG_DBR_ENTRIES_MAX * 100;
+ char *buf;
+ int i, ret;
+ int len = 0;
+
+ buf = kzalloc(size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ len += scnprintf(buf + len, size - len,
+ "-----------------------------------------\n");
+ len += scnprintf(buf + len, size - len,
+ "| idx | hp | tp | timestamp | event |\n");
+ len += scnprintf(buf + len, size - len,
+ "-----------------------------------------\n");
+
+ spin_lock_bh(&dbr_dbg_data->lock);
+
+ for (i = 0; i < dbr_dbg_data->num_ring_debug_entries; i++) {
+ len += scnprintf(buf + len, size - len,
+ "|%4u|%8u|%8u|%11llu|%8s|\n", i,
+ dbr_dbg_data->entries[i].hp,
+ dbr_dbg_data->entries[i].tp,
+ dbr_dbg_data->entries[i].timestamp,
+ event_id_to_string[dbr_dbg_data->entries[i].event]);
+ }
+
+ spin_unlock_bh(&dbr_dbg_data->lock);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+
+ return ret;
+}
+
+static const struct file_operations fops_debug_dump_dbr_entries = {
+ .read = ath11k_debug_dump_dbr_entries,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static void ath11k_debugfs_dbr_dbg_destroy(struct ath11k *ar, int dbr_id)
+{
+ struct ath11k_debug_dbr *dbr_debug;
+ struct ath11k_dbg_dbr_data *dbr_dbg_data;
+
+ if (!ar->debug.dbr_debug[dbr_id])
+ return;
+
+ dbr_debug = ar->debug.dbr_debug[dbr_id];
+ dbr_dbg_data = &dbr_debug->dbr_dbg_data;
+
+ debugfs_remove_recursive(dbr_debug->dbr_debugfs);
+ kfree(dbr_dbg_data->entries);
+ kfree(dbr_debug);
+ ar->debug.dbr_debug[dbr_id] = NULL;
+}
+
+static int ath11k_debugfs_dbr_dbg_init(struct ath11k *ar, int dbr_id)
+{
+ struct ath11k_debug_dbr *dbr_debug;
+ struct ath11k_dbg_dbr_data *dbr_dbg_data;
+ static const char * const dbr_id_to_str[] = {"spectral", "CFR"};
+
+ if (ar->debug.dbr_debug[dbr_id])
+ return 0;
+
+ ar->debug.dbr_debug[dbr_id] = kzalloc(sizeof(*dbr_debug),
+ GFP_KERNEL);
+
+ if (!ar->debug.dbr_debug[dbr_id])
+ return -ENOMEM;
+
+ dbr_debug = ar->debug.dbr_debug[dbr_id];
+ dbr_dbg_data = &dbr_debug->dbr_dbg_data;
+
+ if (dbr_debug->dbr_debugfs)
+ return 0;
+
+ dbr_debug->dbr_debugfs = debugfs_create_dir(dbr_id_to_str[dbr_id],
+ ar->debug.debugfs_pdev);
+ if (IS_ERR_OR_NULL(dbr_debug->dbr_debugfs)) {
+ if (IS_ERR(dbr_debug->dbr_debugfs))
+ return PTR_ERR(dbr_debug->dbr_debugfs);
+ return -ENOMEM;
+ }
+
+ dbr_debug->dbr_debug_enabled = true;
+ dbr_dbg_data->num_ring_debug_entries = ATH11K_DEBUG_DBR_ENTRIES_MAX;
+ dbr_dbg_data->dbr_debug_idx = 0;
+ dbr_dbg_data->entries = kcalloc(ATH11K_DEBUG_DBR_ENTRIES_MAX,
+ sizeof(struct ath11k_dbg_dbr_entry),
+ GFP_KERNEL);
+ if (!dbr_dbg_data->entries)
+ return -ENOMEM;
+
+ spin_lock_init(&dbr_dbg_data->lock);
+
+ debugfs_create_file("dump_dbr_debug", 0444, dbr_debug->dbr_debugfs,
+ dbr_dbg_data, &fops_debug_dump_dbr_entries);
+
+ return 0;
+}
+
+static ssize_t ath11k_debugfs_write_enable_dbr_dbg(struct file *file,
+ const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ char buf[32] = {};
+ u32 dbr_id, enable;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH11K_STATE_ON) {
+ ret = -ENETDOWN;
+ goto out;
+ }
+
+ ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, ubuf, count);
+ if (ret < 0)
+ goto out;
+
+ buf[ret] = '\0';
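+ /* Expected input: "<dbr_id> <enable>" as described by the usage
+ * warning below, e.g. "0 1" to enable spectral DBR debug.
+ */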
+ ret = sscanf(buf, "%u %u", &dbr_id, &enable);
+ if (ret != 2 || dbr_id > 1 || enable > 1) {
+ ret = -EINVAL;
+ ath11k_warn(ar->ab, "usage: echo <dbr_id> <val> dbr_id:0-Spectral 1-CFR val:0-disable 1-enable\n");
+ goto out;
+ }
+
+ if (enable) {
+ ret = ath11k_debugfs_dbr_dbg_init(ar, dbr_id);
+ if (ret) {
+ ath11k_warn(ar->ab, "db ring module debugfs init failed: %d\n",
+ ret);
+ goto out;
+ }
+ } else {
+ ath11k_debugfs_dbr_dbg_destroy(ar, dbr_id);
+ }
+
+ ret = count;
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static const struct file_operations fops_dbr_debug = {
+ .write = ath11k_debugfs_write_enable_dbr_dbg,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath11k_write_ps_timekeeper_enable(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ ssize_t ret;
+ u8 ps_timekeeper_enable;
+
+ if (kstrtou8_from_user(user_buf, count, 0, &ps_timekeeper_enable))
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH11K_STATE_ON) {
+ ret = -ENETDOWN;
+ goto exit;
+ }
+
+ if (!ar->ps_state_enable) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ ar->ps_timekeeper_enable = !!ps_timekeeper_enable;
+ ret = count;
+exit:
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static ssize_t ath11k_read_ps_timekeeper_enable(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ char buf[32];
+ int len;
+
+ mutex_lock(&ar->conf_mutex);
+ len = scnprintf(buf, sizeof(buf), "%d\n", ar->ps_timekeeper_enable);
+ mutex_unlock(&ar->conf_mutex);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_ps_timekeeper_enable = {
+ .read = ath11k_read_ps_timekeeper_enable,
+ .write = ath11k_write_ps_timekeeper_enable,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static void ath11k_reset_peer_ps_duration(void *data,
+ struct ieee80211_sta *sta)
+{
+ struct ath11k *ar = data;
+ struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta);
+
+ spin_lock_bh(&ar->data_lock);
+ arsta->ps_total_duration = 0;
+ spin_unlock_bh(&ar->data_lock);
+}
+
+static ssize_t ath11k_write_reset_ps_duration(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ int ret;
+ u8 reset_ps_duration;
+
+ if (kstrtou8_from_user(user_buf, count, 0, &reset_ps_duration))
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH11K_STATE_ON) {
+ ret = -ENETDOWN;
+ goto exit;
+ }
+
+ if (!ar->ps_state_enable) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ ieee80211_iterate_stations_atomic(ar->hw,
+ ath11k_reset_peer_ps_duration,
+ ar);
+
+ ret = count;
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static const struct file_operations fops_reset_ps_duration = {
+ .write = ath11k_write_reset_ps_duration,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static void ath11k_peer_ps_state_disable(void *data,
+ struct ieee80211_sta *sta)
+{
+ struct ath11k *ar = data;
+ struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta);
+
+ spin_lock_bh(&ar->data_lock);
+ arsta->peer_ps_state = WMI_PEER_PS_STATE_DISABLED;
+ arsta->ps_start_time = 0;
+ arsta->ps_total_duration = 0;
+ spin_unlock_bh(&ar->data_lock);
+}
+
+static ssize_t ath11k_write_ps_state_enable(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ struct ath11k_pdev *pdev = ar->pdev;
+ int ret;
+ u32 param;
+ u8 ps_state_enable;
+
+ if (kstrtou8_from_user(user_buf, count, 0, &ps_state_enable))
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+
+ ps_state_enable = !!ps_state_enable;
+
+ if (ar->ps_state_enable == ps_state_enable) {
+ ret = count;
+ goto exit;
+ }
+
+ param = WMI_PDEV_PEER_STA_PS_STATECHG_ENABLE;
+ ret = ath11k_wmi_pdev_set_param(ar, param, ps_state_enable, pdev->pdev_id);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to enable ps_state_enable: %d\n",
+ ret);
+ goto exit;
+ }
+ ar->ps_state_enable = ps_state_enable;
+
+ if (!ar->ps_state_enable) {
+ ar->ps_timekeeper_enable = false;
+ ieee80211_iterate_stations_atomic(ar->hw,
+ ath11k_peer_ps_state_disable,
+ ar);
+ }
+
+ ret = count;
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static ssize_t ath11k_read_ps_state_enable(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ char buf[32];
+ int len;
+
+ mutex_lock(&ar->conf_mutex);
+ len = scnprintf(buf, sizeof(buf), "%d\n", ar->ps_state_enable);
+ mutex_unlock(&ar->conf_mutex);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_ps_state_enable = {
+ .read = ath11k_read_ps_state_enable,
+ .write = ath11k_write_ps_state_enable,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+int ath11k_debugfs_register(struct ath11k *ar)
+{
+ struct ath11k_base *ab = ar->ab;
+ char pdev_name[10];
+ char buf[100] = {};
+
+ snprintf(pdev_name, sizeof(pdev_name), "%s%u", "mac", ar->pdev_idx);
+
+ ar->debug.debugfs_pdev = debugfs_create_dir(pdev_name, ab->debugfs_soc);
+ if (IS_ERR(ar->debug.debugfs_pdev))
+ return PTR_ERR(ar->debug.debugfs_pdev);
+
+ /* Create a symlink under ieee80211/phy* */
+ snprintf(buf, sizeof(buf), "../../ath11k/%pd2", ar->debug.debugfs_pdev);
+ debugfs_create_symlink("ath11k", ar->hw->wiphy->debugfsdir, buf);
+
+ ath11k_debugfs_htt_stats_init(ar);
+
+ ath11k_debugfs_fw_stats_init(ar);
+
+ debugfs_create_file("ext_tx_stats", 0644,
+ ar->debug.debugfs_pdev, ar,
+ &fops_extd_tx_stats);
+ debugfs_create_file("ext_rx_stats", 0644,
+ ar->debug.debugfs_pdev, ar,
+ &fops_extd_rx_stats);
+ debugfs_create_file("pktlog_filter", 0644,
+ ar->debug.debugfs_pdev, ar,
+ &fops_pktlog_filter);
+ debugfs_create_file("fw_dbglog_config", 0600,
+ ar->debug.debugfs_pdev, ar,
+ &fops_fw_dbglog);
+
+ if (ar->hw->wiphy->bands[NL80211_BAND_5GHZ]) {
+ debugfs_create_file("dfs_simulate_radar", 0200,
+ ar->debug.debugfs_pdev, ar,
+ &fops_simulate_radar);
+ debugfs_create_bool("dfs_block_radar_events", 0200,
+ ar->debug.debugfs_pdev,
+ &ar->dfs_block_radar_events);
+ }
+
+ if (ab->hw_params.dbr_debug_support)
+ debugfs_create_file("enable_dbr_debug", 0200, ar->debug.debugfs_pdev,
+ ar, &fops_dbr_debug);
+
+ debugfs_create_file("ps_state_enable", 0600, ar->debug.debugfs_pdev, ar,
+ &fops_ps_state_enable);
+
+ if (test_bit(WMI_TLV_SERVICE_PEER_POWER_SAVE_DURATION_SUPPORT,
+ ar->ab->wmi_ab.svc_map)) {
+ debugfs_create_file("ps_timekeeper_enable", 0600,
+ ar->debug.debugfs_pdev, ar,
+ &fops_ps_timekeeper_enable);
+
+ debugfs_create_file("reset_ps_duration", 0200,
+ ar->debug.debugfs_pdev, ar,
+ &fops_reset_ps_duration);
+ }
+
+ return 0;
+}
+
+void ath11k_debugfs_unregister(struct ath11k *ar)
+{
+ struct ath11k_debug_dbr *dbr_debug;
+ struct ath11k_dbg_dbr_data *dbr_dbg_data;
+ int i;
+
+ for (i = 0; i < WMI_DIRECT_BUF_MAX; i++) {
+ dbr_debug = ar->debug.dbr_debug[i];
+ if (!dbr_debug)
+ continue;
+
+ dbr_dbg_data = &dbr_debug->dbr_dbg_data;
+ kfree(dbr_dbg_data->entries);
+ debugfs_remove_recursive(dbr_debug->dbr_debugfs);
+ kfree(dbr_debug);
+ ar->debug.dbr_debug[i] = NULL;
+ }
+}
+
+static ssize_t ath11k_write_twt_add_dialog(struct file *file,
+ const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k_vif *arvif = file->private_data;
+ struct wmi_twt_add_dialog_params params = {};
+ struct wmi_twt_enable_params twt_params = {};
+ struct ath11k *ar = arvif->ar;
+ u8 buf[128] = {};
+ int ret;
+
+ if (ar->twt_enabled == 0) {
+ ath11k_err(ar->ab, "twt support is not enabled\n");
+ return -EOPNOTSUPP;
+ }
+
+ ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, ubuf, count);
+ if (ret < 0)
+ return ret;
+
+ buf[ret] = '\0';
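+ /* Expected input: "<peer mac> <dialog_id> <wake_intvl_us>
+ * <wake_intvl_mantis> <wake_dura_us> <sp_offset_us> <twt_cmd>
+ * <flag_bcast> <flag_trigger> <flag_flow_type> <flag_protection>".
+ * Example (all values illustrative):
+ * "00:03:7f:01:02:03 1 102400 100 10240 0 4 0 1 0 0"
+ */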
+ ret = sscanf(buf,
+ "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx %u %u %u %u %u %hhu %hhu %hhu %hhu %hhu",
+ &params.peer_macaddr[0],
+ &params.peer_macaddr[1],
+ &params.peer_macaddr[2],
+ &params.peer_macaddr[3],
+ &params.peer_macaddr[4],
+ &params.peer_macaddr[5],
+ &params.dialog_id,
+ &params.wake_intvl_us,
+ &params.wake_intvl_mantis,
+ &params.wake_dura_us,
+ &params.sp_offset_us,
+ &params.twt_cmd,
+ &params.flag_bcast,
+ &params.flag_trigger,
+ &params.flag_flow_type,
+ &params.flag_protection);
+ if (ret != 16)
+ return -EINVAL;
+
+ /* In the case of a station vif, TWT is entirely handled by
+ * the firmware based on the input parameters in the TWT enable
+ * WMI command that is sent to the target during assoc.
+ * To manually test the TWT feature, we first need to disable
+ * TWT and then send the enable command again with the TWT input
+ * parameter sta_cong_timer_ms set to 0.
+ */
+ if (arvif->vif->type == NL80211_IFTYPE_STATION) {
+ ath11k_wmi_send_twt_disable_cmd(ar, ar->pdev->pdev_id);
+
+ ath11k_wmi_fill_default_twt_params(&twt_params);
+ twt_params.sta_cong_timer_ms = 0;
+
+ ath11k_wmi_send_twt_enable_cmd(ar, ar->pdev->pdev_id, &twt_params);
+ }
+
+ params.vdev_id = arvif->vdev_id;
+
+ ret = ath11k_wmi_send_twt_add_dialog_cmd(arvif->ar, &params);
+ if (ret)
+ goto err_twt_add_dialog;
+
+ return count;
+
+err_twt_add_dialog:
+ if (arvif->vif->type == NL80211_IFTYPE_STATION) {
+ ath11k_wmi_send_twt_disable_cmd(ar, ar->pdev->pdev_id);
+ ath11k_wmi_fill_default_twt_params(&twt_params);
+ ath11k_wmi_send_twt_enable_cmd(ar, ar->pdev->pdev_id, &twt_params);
+ }
+
+ return ret;
+}
+
+static ssize_t ath11k_write_twt_del_dialog(struct file *file,
+ const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k_vif *arvif = file->private_data;
+ struct wmi_twt_del_dialog_params params = {};
+ struct wmi_twt_enable_params twt_params = {};
+ struct ath11k *ar = arvif->ar;
+ u8 buf[64] = {};
+ int ret;
+
+ if (ar->twt_enabled == 0) {
+ ath11k_err(ar->ab, "twt support is not enabled\n");
+ return -EOPNOTSUPP;
+ }
+
+ ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, ubuf, count);
+ if (ret < 0)
+ return ret;
+
+ buf[ret] = '\0';
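+ /* Expected input: "<peer mac> <dialog_id>" matching a dialog
+ * created via add_dialog, e.g. (values illustrative):
+ * "00:03:7f:01:02:03 1"
+ */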
+ ret = sscanf(buf, "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx %u",
+ &params.peer_macaddr[0],
+ &params.peer_macaddr[1],
+ &params.peer_macaddr[2],
+ &params.peer_macaddr[3],
+ &params.peer_macaddr[4],
+ &params.peer_macaddr[5],
+ &params.dialog_id);
+ if (ret != 7)
+ return -EINVAL;
+
+ params.vdev_id = arvif->vdev_id;
+
+ ret = ath11k_wmi_send_twt_del_dialog_cmd(arvif->ar, &params);
+ if (ret)
+ return ret;
+
+ if (arvif->vif->type == NL80211_IFTYPE_STATION) {
+ ath11k_wmi_send_twt_disable_cmd(ar, ar->pdev->pdev_id);
+ ath11k_wmi_fill_default_twt_params(&twt_params);
+ ath11k_wmi_send_twt_enable_cmd(ar, ar->pdev->pdev_id, &twt_params);
+ }
+
+ return count;
+}
+
+static ssize_t ath11k_write_twt_pause_dialog(struct file *file,
+ const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k_vif *arvif = file->private_data;
+ struct wmi_twt_pause_dialog_params params = {};
+ u8 buf[64] = {};
+ int ret;
+
+ if (arvif->ar->twt_enabled == 0) {
+ ath11k_err(arvif->ar->ab, "twt support is not enabled\n");
+ return -EOPNOTSUPP;
+ }
+
+ ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, ubuf, count);
+ if (ret < 0)
+ return ret;
+
+ buf[ret] = '\0';
+ ret = sscanf(buf, "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx %u",
+ &params.peer_macaddr[0],
+ &params.peer_macaddr[1],
+ &params.peer_macaddr[2],
+ &params.peer_macaddr[3],
+ &params.peer_macaddr[4],
+ &params.peer_macaddr[5],
+ &params.dialog_id);
+ if (ret != 7)
+ return -EINVAL;
+
+ params.vdev_id = arvif->vdev_id;
+
+ ret = ath11k_wmi_send_twt_pause_dialog_cmd(arvif->ar, &params);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static ssize_t ath11k_write_twt_resume_dialog(struct file *file,
+ const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k_vif *arvif = file->private_data;
+ struct wmi_twt_resume_dialog_params params = {};
+ u8 buf[64] = {};
+ int ret;
+
+ if (arvif->ar->twt_enabled == 0) {
+ ath11k_err(arvif->ar->ab, "twt support is not enabled\n");
+ return -EOPNOTSUPP;
+ }
+
+ ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, ubuf, count);
+ if (ret < 0)
+ return ret;
+
+ buf[ret] = '\0';
+ ret = sscanf(buf, "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx %u %u %u",
+ &params.peer_macaddr[0],
+ &params.peer_macaddr[1],
+ &params.peer_macaddr[2],
+ &params.peer_macaddr[3],
+ &params.peer_macaddr[4],
+ &params.peer_macaddr[5],
+ &params.dialog_id,
+ &params.sp_offset_us,
+ &params.next_twt_size);
+ if (ret != 9)
+ return -EINVAL;
+
+ params.vdev_id = arvif->vdev_id;
+
+ ret = ath11k_wmi_send_twt_resume_dialog_cmd(arvif->ar, &params);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static const struct file_operations ath11k_fops_twt_add_dialog = {
+ .write = ath11k_write_twt_add_dialog,
+ .open = simple_open
+};
+
+static const struct file_operations ath11k_fops_twt_del_dialog = {
+ .write = ath11k_write_twt_del_dialog,
+ .open = simple_open
+};
+
+static const struct file_operations ath11k_fops_twt_pause_dialog = {
+ .write = ath11k_write_twt_pause_dialog,
+ .open = simple_open
+};
+
+static const struct file_operations ath11k_fops_twt_resume_dialog = {
+ .write = ath11k_write_twt_resume_dialog,
+ .open = simple_open
+};
+
+void ath11k_debugfs_op_vif_add(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct ath11k_base *ab = arvif->ar->ab;
+ struct dentry *debugfs_twt;
+
+ if (arvif->vif->type != NL80211_IFTYPE_AP &&
+ !(arvif->vif->type == NL80211_IFTYPE_STATION &&
+ test_bit(WMI_TLV_SERVICE_STA_TWT, ab->wmi_ab.svc_map)))
+ return;
+
+ debugfs_twt = debugfs_create_dir("twt",
+ arvif->vif->debugfs_dir);
+ debugfs_create_file("add_dialog", 0200, debugfs_twt,
+ arvif, &ath11k_fops_twt_add_dialog);
+
+ debugfs_create_file("del_dialog", 0200, debugfs_twt,
+ arvif, &ath11k_fops_twt_del_dialog);
+
+ debugfs_create_file("pause_dialog", 0200, debugfs_twt,
+ arvif, &ath11k_fops_twt_pause_dialog);
+
+ debugfs_create_file("resume_dialog", 0200, debugfs_twt,
+ arvif, &ath11k_fops_twt_resume_dialog);
+}
diff --git a/drivers/net/wireless/ath/ath11k/debugfs.h b/drivers/net/wireless/ath/ath11k/debugfs.h
new file mode 100644
index 000000000000..ed7fec177588
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/debugfs.h
@@ -0,0 +1,391 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022, 2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _ATH11K_DEBUGFS_H_
+#define _ATH11K_DEBUGFS_H_
+
+#include "hal_tx.h"
+
+#define ATH11K_TX_POWER_MAX_VAL 70
+#define ATH11K_TX_POWER_MIN_VAL 0
+
+/* htt_dbg_ext_stats_type */
+enum ath11k_dbg_htt_ext_stats_type {
+ ATH11K_DBG_HTT_EXT_STATS_RESET = 0,
+ ATH11K_DBG_HTT_EXT_STATS_PDEV_TX = 1,
+ ATH11K_DBG_HTT_EXT_STATS_PDEV_RX = 2,
+ ATH11K_DBG_HTT_EXT_STATS_PDEV_TX_HWQ = 3,
+ ATH11K_DBG_HTT_EXT_STATS_PDEV_TX_SCHED = 4,
+ ATH11K_DBG_HTT_EXT_STATS_PDEV_ERROR = 5,
+ ATH11K_DBG_HTT_EXT_STATS_PDEV_TQM = 6,
+ ATH11K_DBG_HTT_EXT_STATS_TQM_CMDQ = 7,
+ ATH11K_DBG_HTT_EXT_STATS_TX_DE_INFO = 8,
+ ATH11K_DBG_HTT_EXT_STATS_PDEV_TX_RATE = 9,
+ ATH11K_DBG_HTT_EXT_STATS_PDEV_RX_RATE = 10,
+ ATH11K_DBG_HTT_EXT_STATS_PEER_INFO = 11,
+ ATH11K_DBG_HTT_EXT_STATS_TX_SELFGEN_INFO = 12,
+ ATH11K_DBG_HTT_EXT_STATS_TX_MU_HWQ = 13,
+ ATH11K_DBG_HTT_EXT_STATS_RING_IF_INFO = 14,
+ ATH11K_DBG_HTT_EXT_STATS_SRNG_INFO = 15,
+ ATH11K_DBG_HTT_EXT_STATS_SFM_INFO = 16,
+ ATH11K_DBG_HTT_EXT_STATS_PDEV_TX_MU = 17,
+ ATH11K_DBG_HTT_EXT_STATS_ACTIVE_PEERS_LIST = 18,
+ ATH11K_DBG_HTT_EXT_STATS_PDEV_CCA_STATS = 19,
+ ATH11K_DBG_HTT_EXT_STATS_TWT_SESSIONS = 20,
+ ATH11K_DBG_HTT_EXT_STATS_REO_RESOURCE_STATS = 21,
+ ATH11K_DBG_HTT_EXT_STATS_TX_SOUNDING_INFO = 22,
+ ATH11K_DBG_HTT_EXT_STATS_PDEV_OBSS_PD_STATS = 23,
+ ATH11K_DBG_HTT_EXT_STATS_RING_BACKPRESSURE_STATS = 24,
+ ATH11K_DBG_HTT_EXT_STATS_PEER_CTRL_PATH_TXRX_STATS = 29,
+ ATH11K_DBG_HTT_EXT_STATS_PDEV_TX_RATE_TXBF_STATS = 31,
+ ATH11K_DBG_HTT_EXT_STATS_TXBF_OFDMA = 32,
+ ATH11K_DBG_HTT_EXT_PHY_COUNTERS_AND_PHY_STATS = 37,
+
+ /* keep this last */
+ ATH11K_DBG_HTT_NUM_EXT_STATS,
+};
+
+#define ATH11K_DEBUG_DBR_ENTRIES_MAX 512
+
+enum ath11k_dbg_dbr_event {
+ ATH11K_DBG_DBR_EVENT_INVALID,
+ ATH11K_DBG_DBR_EVENT_RX,
+ ATH11K_DBG_DBR_EVENT_REPLENISH,
+ ATH11K_DBG_DBR_EVENT_MAX,
+};
+
+struct ath11k_dbg_dbr_entry {
+ u32 hp;
+ u32 tp;
+ u64 timestamp;
+ enum ath11k_dbg_dbr_event event;
+};
+
+struct ath11k_dbg_dbr_data {
+ /* protects the ath11k_dbg_dbr_data contents */
+ spinlock_t lock;
+ struct ath11k_dbg_dbr_entry *entries;
+ u32 dbr_debug_idx;
+ u32 num_ring_debug_entries;
+};
+
+struct ath11k_debug_dbr {
+ struct ath11k_dbg_dbr_data dbr_dbg_data;
+ struct dentry *dbr_debugfs;
+ bool dbr_debug_enabled;
+};
+
+struct debug_htt_stats_req {
+ bool done;
+ u8 pdev_id;
+ u8 type;
+ u8 peer_addr[ETH_ALEN];
+ struct completion cmpln;
+ u32 buf_len;
+ u8 buf[];
+};
+
+struct ath_pktlog_hdr {
+ u16 flags;
+ u16 missed_cnt;
+ u16 log_type;
+ u16 size;
+ u32 timestamp;
+ u32 type_specific_data;
+ u8 payload[];
+};
+
+#define ATH11K_HTT_PEER_STATS_RESET BIT(16)
+
+#define ATH11K_HTT_STATS_BUF_SIZE (1024 * 512)
+#define ATH11K_FW_STATS_BUF_SIZE (1024 * 1024)
+
+enum ath11k_pktlog_filter {
+ ATH11K_PKTLOG_RX = 0x000000001,
+ ATH11K_PKTLOG_TX = 0x000000002,
+ ATH11K_PKTLOG_RCFIND = 0x000000004,
+ ATH11K_PKTLOG_RCUPDATE = 0x000000008,
+ ATH11K_PKTLOG_EVENT_SMART_ANT = 0x000000020,
+ ATH11K_PKTLOG_EVENT_SW = 0x000000040,
+ ATH11K_PKTLOG_ANY = 0x00000006f,
+};
+
+enum ath11k_pktlog_mode {
+ ATH11K_PKTLOG_MODE_LITE = 1,
+ ATH11K_PKTLOG_MODE_FULL = 2,
+};
+
+enum ath11k_pktlog_enum {
+ ATH11K_PKTLOG_TYPE_INVALID = 0,
+ ATH11K_PKTLOG_TYPE_TX_CTRL = 1,
+ ATH11K_PKTLOG_TYPE_TX_STAT = 2,
+ ATH11K_PKTLOG_TYPE_TX_MSDU_ID = 3,
+ ATH11K_PKTLOG_TYPE_RX_STAT = 5,
+ ATH11K_PKTLOG_TYPE_RC_FIND = 6,
+ ATH11K_PKTLOG_TYPE_RC_UPDATE = 7,
+ ATH11K_PKTLOG_TYPE_TX_VIRT_ADDR = 8,
+ ATH11K_PKTLOG_TYPE_RX_CBF = 10,
+ ATH11K_PKTLOG_TYPE_RX_STATBUF = 22,
+ ATH11K_PKTLOG_TYPE_PPDU_STATS = 23,
+ ATH11K_PKTLOG_TYPE_LITE_RX = 24,
+};
+
+enum ath11k_dbg_aggr_mode {
+ ATH11K_DBG_AGGR_MODE_AUTO,
+ ATH11K_DBG_AGGR_MODE_MANUAL,
+ ATH11K_DBG_AGGR_MODE_MAX,
+};
+
+enum fw_dbglog_wlan_module_id {
+ WLAN_MODULE_ID_MIN = 0,
+ WLAN_MODULE_INF = WLAN_MODULE_ID_MIN,
+ WLAN_MODULE_WMI,
+ WLAN_MODULE_STA_PWRSAVE,
+ WLAN_MODULE_WHAL,
+ WLAN_MODULE_COEX,
+ WLAN_MODULE_ROAM,
+ WLAN_MODULE_RESMGR_CHAN_MANAGER,
+ WLAN_MODULE_RESMGR,
+ WLAN_MODULE_VDEV_MGR,
+ WLAN_MODULE_SCAN,
+ WLAN_MODULE_RATECTRL,
+ WLAN_MODULE_AP_PWRSAVE,
+ WLAN_MODULE_BLOCKACK,
+ WLAN_MODULE_MGMT_TXRX,
+ WLAN_MODULE_DATA_TXRX,
+ WLAN_MODULE_HTT,
+ WLAN_MODULE_HOST,
+ WLAN_MODULE_BEACON,
+ WLAN_MODULE_OFFLOAD,
+ WLAN_MODULE_WAL,
+ WLAN_WAL_MODULE_DE,
+ WLAN_MODULE_PCIELP,
+ WLAN_MODULE_RTT,
+ WLAN_MODULE_RESOURCE,
+ WLAN_MODULE_DCS,
+ WLAN_MODULE_CACHEMGR,
+ WLAN_MODULE_ANI,
+ WLAN_MODULE_P2P,
+ WLAN_MODULE_CSA,
+ WLAN_MODULE_NLO,
+ WLAN_MODULE_CHATTER,
+ WLAN_MODULE_WOW,
+ WLAN_MODULE_WAL_VDEV,
+ WLAN_MODULE_WAL_PDEV,
+ WLAN_MODULE_TEST,
+ WLAN_MODULE_STA_SMPS,
+ WLAN_MODULE_SWBMISS,
+ WLAN_MODULE_WMMAC,
+ WLAN_MODULE_TDLS,
+ WLAN_MODULE_HB,
+ WLAN_MODULE_TXBF,
+ WLAN_MODULE_BATCH_SCAN,
+ WLAN_MODULE_THERMAL_MGR,
+ WLAN_MODULE_PHYERR_DFS,
+ WLAN_MODULE_RMC,
+ WLAN_MODULE_STATS,
+ WLAN_MODULE_NAN,
+ WLAN_MODULE_IBSS_PWRSAVE,
+ WLAN_MODULE_HIF_UART,
+ WLAN_MODULE_LPI,
+ WLAN_MODULE_EXTSCAN,
+ WLAN_MODULE_UNIT_TEST,
+ WLAN_MODULE_MLME,
+ WLAN_MODULE_SUPPL,
+ WLAN_MODULE_ERE,
+ WLAN_MODULE_OCB,
+ WLAN_MODULE_RSSI_MONITOR,
+ WLAN_MODULE_WPM,
+ WLAN_MODULE_CSS,
+ WLAN_MODULE_PPS,
+ WLAN_MODULE_SCAN_CH_PREDICT,
+ WLAN_MODULE_MAWC,
+ WLAN_MODULE_CMC_QMIC,
+ WLAN_MODULE_EGAP,
+ WLAN_MODULE_NAN20,
+ WLAN_MODULE_QBOOST,
+ WLAN_MODULE_P2P_LISTEN_OFFLOAD,
+ WLAN_MODULE_HALPHY,
+ WLAN_WAL_MODULE_ENQ,
+ WLAN_MODULE_GNSS,
+ WLAN_MODULE_WAL_MEM,
+ WLAN_MODULE_SCHED_ALGO,
+ WLAN_MODULE_TX,
+ WLAN_MODULE_RX,
+ WLAN_MODULE_WLM,
+ WLAN_MODULE_RU_ALLOCATOR,
+ WLAN_MODULE_11K_OFFLOAD,
+ WLAN_MODULE_STA_TWT,
+ WLAN_MODULE_AP_TWT,
+ WLAN_MODULE_UL_OFDMA,
+ WLAN_MODULE_HPCS_PULSE,
+ WLAN_MODULE_DTF,
+ WLAN_MODULE_QUIET_IE,
+ WLAN_MODULE_SHMEM_MGR,
+ WLAN_MODULE_CFIR,
+ WLAN_MODULE_CODE_COVER,
+ WLAN_MODULE_SHO,
+ WLAN_MODULE_MLO_MGR,
+ WLAN_MODULE_PEER_INIT,
+ WLAN_MODULE_STA_MLO_PS,
+
+ WLAN_MODULE_ID_MAX,
+ WLAN_MODULE_ID_INVALID = WLAN_MODULE_ID_MAX,
+};
+
+enum fw_dbglog_log_level {
+ ATH11K_FW_DBGLOG_ML = 0,
+ ATH11K_FW_DBGLOG_VERBOSE = 0,
+ ATH11K_FW_DBGLOG_INFO,
+ ATH11K_FW_DBGLOG_INFO_LVL_1,
+ ATH11K_FW_DBGLOG_INFO_LVL_2,
+ ATH11K_FW_DBGLOG_WARN,
+ ATH11K_FW_DBGLOG_ERR,
+ ATH11K_FW_DBGLOG_LVL_MAX
+};
+
+struct ath11k_fw_dbglog {
+ enum wmi_debug_log_param param;
+ union {
+ struct {
+ /* log_level values are given in enum fw_dbglog_log_level */
+ u16 log_level;
+ /* module_id values are given in enum fw_dbglog_wlan_module_id */
+ u16 module_id;
+ };
+ /* Depending on param, value carries either the packed
+ * log_level/module_id pair above, a vdev_id, a vdev_id bitmap,
+ * or a bare log_level.
+ */
+ u32 value;
+ };
+};
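+
+/* A hedged sketch of how the union above behaves: on a little-endian
+ * host, filling the anonymous struct packs both halves into .value,
+ * so the two views are interchangeable:
+ *
+ *	struct ath11k_fw_dbglog dbglog = {};
+ *
+ *	dbglog.log_level = ATH11K_FW_DBGLOG_WARN;
+ *	dbglog.module_id = WLAN_MODULE_WMI;
+ *	// dbglog.value == (u32)dbglog.log_level |
+ *	//		   ((u32)dbglog.module_id << 16)
+ */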
+
+#ifdef CONFIG_ATH11K_DEBUGFS
+int ath11k_debugfs_soc_create(struct ath11k_base *ab);
+void ath11k_debugfs_soc_destroy(struct ath11k_base *ab);
+int ath11k_debugfs_pdev_create(struct ath11k_base *ab);
+void ath11k_debugfs_pdev_destroy(struct ath11k_base *ab);
+int ath11k_debugfs_register(struct ath11k *ar);
+void ath11k_debugfs_unregister(struct ath11k *ar);
+void ath11k_debugfs_fw_stats_process(struct ath11k *ar, struct ath11k_fw_stats *stats);
+
+void ath11k_debugfs_fw_stats_init(struct ath11k *ar);
+
+static inline bool ath11k_debugfs_is_pktlog_lite_mode_enabled(struct ath11k *ar)
+{
+ return (ar->debug.pktlog_mode == ATH11K_PKTLOG_MODE_LITE);
+}
+
+static inline bool ath11k_debugfs_is_pktlog_rx_stats_enabled(struct ath11k *ar)
+{
+ return (!ar->debug.pktlog_peer_valid && ar->debug.pktlog_mode);
+}
+
+static inline bool ath11k_debugfs_is_pktlog_peer_valid(struct ath11k *ar, u8 *addr)
+{
+ return (ar->debug.pktlog_peer_valid && ar->debug.pktlog_mode &&
+ ether_addr_equal(addr, ar->debug.pktlog_peer_addr));
+}
+
+static inline int ath11k_debugfs_is_extd_tx_stats_enabled(struct ath11k *ar)
+{
+ return ar->debug.extd_tx_stats;
+}
+
+static inline int ath11k_debugfs_is_extd_rx_stats_enabled(struct ath11k *ar)
+{
+ return ar->debug.extd_rx_stats;
+}
+
+static inline int ath11k_debugfs_rx_filter(struct ath11k *ar)
+{
+ return ar->debug.rx_filter;
+}
+
+void ath11k_debugfs_op_vif_add(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
+void ath11k_debugfs_add_dbring_entry(struct ath11k *ar,
+ enum wmi_direct_buffer_module id,
+ enum ath11k_dbg_dbr_event event,
+ struct hal_srng *srng);
+
+#else
+static inline int ath11k_debugfs_soc_create(struct ath11k_base *ab)
+{
+ return 0;
+}
+
+static inline void ath11k_debugfs_soc_destroy(struct ath11k_base *ab)
+{
+}
+
+static inline int ath11k_debugfs_pdev_create(struct ath11k_base *ab)
+{
+ return 0;
+}
+
+static inline void ath11k_debugfs_pdev_destroy(struct ath11k_base *ab)
+{
+}
+
+static inline int ath11k_debugfs_register(struct ath11k *ar)
+{
+ return 0;
+}
+
+static inline void ath11k_debugfs_unregister(struct ath11k *ar)
+{
+}
+
+static inline void ath11k_debugfs_fw_stats_process(struct ath11k *ar,
+ struct ath11k_fw_stats *stats)
+{
+}
+
+static inline void ath11k_debugfs_fw_stats_init(struct ath11k *ar)
+{
+}
+
+static inline int ath11k_debugfs_is_extd_tx_stats_enabled(struct ath11k *ar)
+{
+ return 0;
+}
+
+static inline int ath11k_debugfs_is_extd_rx_stats_enabled(struct ath11k *ar)
+{
+ return 0;
+}
+
+static inline bool ath11k_debugfs_is_pktlog_lite_mode_enabled(struct ath11k *ar)
+{
+ return false;
+}
+
+static inline bool ath11k_debugfs_is_pktlog_rx_stats_enabled(struct ath11k *ar)
+{
+ return false;
+}
+
+static inline bool ath11k_debugfs_is_pktlog_peer_valid(struct ath11k *ar, u8 *addr)
+{
+ return false;
+}
+
+static inline int ath11k_debugfs_rx_filter(struct ath11k *ar)
+{
+ return 0;
+}
+
+static inline void
+ath11k_debugfs_add_dbring_entry(struct ath11k *ar,
+ enum wmi_direct_buffer_module id,
+ enum ath11k_dbg_dbr_event event,
+ struct hal_srng *srng)
+{
+}
+#endif /* CONFIG_ATH11K_DEBUGFS */
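+
+/* The block above follows the usual compile-out pattern: with
+ * CONFIG_ATH11K_DEBUGFS disabled every entry point becomes an empty
+ * static inline (or returns 0/false), so callers never need their own
+ * ifdefs and the optimizer drops the calls entirely, e.g.:
+ *
+ *	if (ath11k_debugfs_is_extd_rx_stats_enabled(ar))
+ *		extd_stats_path(ar);	// hypothetical caller
+ *
+ * folds to nothing in !DEBUGFS builds.
+ */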
+
+#endif /* _ATH11K_DEBUGFS_H_ */
diff --git a/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.c b/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.c
new file mode 100644
index 000000000000..11d28c42227e
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.c
@@ -0,0 +1,4905 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#include <linux/vmalloc.h>
+#include "core.h"
+#include "dp_tx.h"
+#include "dp_rx.h"
+#include "debug.h"
+#include "debugfs_htt_stats.h"
+
+#define HTT_MAX_PRINT_CHAR_PER_ELEM 15
+
+#define HTT_TLV_HDR_LEN 4
+
+#define PRINT_ARRAY_TO_BUF(out, buflen, arr, str, len, newline) \
+ do { \
+ int index = 0; u8 i; const char *str_val = str; \
+ const char *new_line = newline; \
+ if (str_val) { \
+ index += scnprintf((out + buflen), \
+ (ATH11K_HTT_STATS_BUF_SIZE - buflen), \
+ "%s = ", str_val); \
+ } \
+ for (i = 0; i < len; i++) { \
+ index += scnprintf((out + buflen) + index, \
+ (ATH11K_HTT_STATS_BUF_SIZE - buflen) - index, \
+ " %u:%u,", i, arr[i]); \
+ } \
+ index += scnprintf((out + buflen) + index, \
+ (ATH11K_HTT_STATS_BUF_SIZE - buflen) - index, \
+ "%s", new_line); \
+ buflen += index; \
+ } while (0)
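+
+/* Example of what the macro above emits for arr = {10, 20, 30},
+ * str = "foo", newline = "\n" (values illustrative):
+ *
+ *	foo =  0:10, 1:20, 2:30,
+ *
+ * scnprintf() returns the number of characters actually written and
+ * never exceeds the space left, so 'index' and 'buflen' stay valid
+ * even once the buffer fills up mid-array.
+ */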
+
+static inline void htt_print_stats_string_tlv(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_stats_string_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ u8 i;
+
+ tag_len = tag_len >> 2;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_STATS_STRING_TLV:\n");
+
+ len += scnprintf(buf + len, buf_len - len,
+ "data = ");
+ for (i = 0; i < tag_len; i++) {
+ len += scnprintf(buf + len,
+ buf_len - len,
+ "%.*s", 4, (char *)&(htt_stats_buf->data[i]));
+ }
+ /* Newlines are added for better display */
+ len += scnprintf(buf + len, buf_len - len, "\n\n");
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
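+
+/* The string TLV payload is a run of 32-bit words carrying four
+ * characters each, so 'tag_len >> 2' converts bytes to words and
+ * "%.*s" with precision 4 prints one word at a time without needing
+ * NUL termination. The same idea in isolation (little-endian):
+ *
+ *	u32 words[2] = { 0x64636261, 0x68676665 };	// "abcdefgh"
+ *	for (i = 0; i < 2; i++)
+ *		printf("%.*s", 4, (char *)&words[i]);
+ */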
+
+static inline void htt_print_tx_pdev_stats_cmn_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_pdev_stats_cmn_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_PDEV_STATS_CMN_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n",
+ FIELD_GET(HTT_STATS_MAC_ID, htt_stats_buf->mac_id__word));
+ len += scnprintf(buf + len, buf_len - len, "hw_queued = %u\n",
+ htt_stats_buf->hw_queued);
+ len += scnprintf(buf + len, buf_len - len, "hw_reaped = %u\n",
+ htt_stats_buf->hw_reaped);
+ len += scnprintf(buf + len, buf_len - len, "underrun = %u\n",
+ htt_stats_buf->underrun);
+ len += scnprintf(buf + len, buf_len - len, "hw_paused = %u\n",
+ htt_stats_buf->hw_paused);
+ len += scnprintf(buf + len, buf_len - len, "hw_flush = %u\n",
+ htt_stats_buf->hw_flush);
+ len += scnprintf(buf + len, buf_len - len, "hw_filt = %u\n",
+ htt_stats_buf->hw_filt);
+ len += scnprintf(buf + len, buf_len - len, "tx_abort = %u\n",
+ htt_stats_buf->tx_abort);
+ len += scnprintf(buf + len, buf_len - len, "mpdu_requeued = %u\n",
+ htt_stats_buf->mpdu_requeued);
+ len += scnprintf(buf + len, buf_len - len, "tx_xretry = %u\n",
+ htt_stats_buf->tx_xretry);
+ len += scnprintf(buf + len, buf_len - len, "data_rc = %u\n",
+ htt_stats_buf->data_rc);
+ len += scnprintf(buf + len, buf_len - len, "mpdu_dropped_xretry = %u\n",
+ htt_stats_buf->mpdu_dropped_xretry);
+ len += scnprintf(buf + len, buf_len - len, "illegal_rate_phy_err = %u\n",
+ htt_stats_buf->illgl_rate_phy_err);
+ len += scnprintf(buf + len, buf_len - len, "cont_xretry = %u\n",
+ htt_stats_buf->cont_xretry);
+ len += scnprintf(buf + len, buf_len - len, "tx_timeout = %u\n",
+ htt_stats_buf->tx_timeout);
+ len += scnprintf(buf + len, buf_len - len, "pdev_resets = %u\n",
+ htt_stats_buf->pdev_resets);
+ len += scnprintf(buf + len, buf_len - len, "phy_underrun = %u\n",
+ htt_stats_buf->phy_underrun);
+ len += scnprintf(buf + len, buf_len - len, "txop_ovf = %u\n",
+ htt_stats_buf->txop_ovf);
+ len += scnprintf(buf + len, buf_len - len, "seq_posted = %u\n",
+ htt_stats_buf->seq_posted);
+ len += scnprintf(buf + len, buf_len - len, "seq_failed_queueing = %u\n",
+ htt_stats_buf->seq_failed_queueing);
+ len += scnprintf(buf + len, buf_len - len, "seq_completed = %u\n",
+ htt_stats_buf->seq_completed);
+ len += scnprintf(buf + len, buf_len - len, "seq_restarted = %u\n",
+ htt_stats_buf->seq_restarted);
+ len += scnprintf(buf + len, buf_len - len, "mu_seq_posted = %u\n",
+ htt_stats_buf->mu_seq_posted);
+ len += scnprintf(buf + len, buf_len - len, "seq_switch_hw_paused = %u\n",
+ htt_stats_buf->seq_switch_hw_paused);
+ len += scnprintf(buf + len, buf_len - len, "next_seq_posted_dsr = %u\n",
+ htt_stats_buf->next_seq_posted_dsr);
+ len += scnprintf(buf + len, buf_len - len, "seq_posted_isr = %u\n",
+ htt_stats_buf->seq_posted_isr);
+ len += scnprintf(buf + len, buf_len - len, "seq_ctrl_cached = %u\n",
+ htt_stats_buf->seq_ctrl_cached);
+ len += scnprintf(buf + len, buf_len - len, "mpdu_count_tqm = %u\n",
+ htt_stats_buf->mpdu_count_tqm);
+ len += scnprintf(buf + len, buf_len - len, "msdu_count_tqm = %u\n",
+ htt_stats_buf->msdu_count_tqm);
+ len += scnprintf(buf + len, buf_len - len, "mpdu_removed_tqm = %u\n",
+ htt_stats_buf->mpdu_removed_tqm);
+ len += scnprintf(buf + len, buf_len - len, "msdu_removed_tqm = %u\n",
+ htt_stats_buf->msdu_removed_tqm);
+ len += scnprintf(buf + len, buf_len - len, "mpdus_sw_flush = %u\n",
+ htt_stats_buf->mpdus_sw_flush);
+ len += scnprintf(buf + len, buf_len - len, "mpdus_hw_filter = %u\n",
+ htt_stats_buf->mpdus_hw_filter);
+ len += scnprintf(buf + len, buf_len - len, "mpdus_truncated = %u\n",
+ htt_stats_buf->mpdus_truncated);
+ len += scnprintf(buf + len, buf_len - len, "mpdus_ack_failed = %u\n",
+ htt_stats_buf->mpdus_ack_failed);
+ len += scnprintf(buf + len, buf_len - len, "mpdus_expired = %u\n",
+ htt_stats_buf->mpdus_expired);
+ len += scnprintf(buf + len, buf_len - len, "mpdus_seq_hw_retry = %u\n",
+ htt_stats_buf->mpdus_seq_hw_retry);
+ len += scnprintf(buf + len, buf_len - len, "ack_tlv_proc = %u\n",
+ htt_stats_buf->ack_tlv_proc);
+ len += scnprintf(buf + len, buf_len - len, "coex_abort_mpdu_cnt_valid = %u\n",
+ htt_stats_buf->coex_abort_mpdu_cnt_valid);
+ len += scnprintf(buf + len, buf_len - len, "coex_abort_mpdu_cnt = %u\n",
+ htt_stats_buf->coex_abort_mpdu_cnt);
+ len += scnprintf(buf + len, buf_len - len, "num_total_ppdus_tried_ota = %u\n",
+ htt_stats_buf->num_total_ppdus_tried_ota);
+ len += scnprintf(buf + len, buf_len - len, "num_data_ppdus_tried_ota = %u\n",
+ htt_stats_buf->num_data_ppdus_tried_ota);
+ len += scnprintf(buf + len, buf_len - len, "local_ctrl_mgmt_enqued = %u\n",
+ htt_stats_buf->local_ctrl_mgmt_enqued);
+ len += scnprintf(buf + len, buf_len - len, "local_ctrl_mgmt_freed = %u\n",
+ htt_stats_buf->local_ctrl_mgmt_freed);
+ len += scnprintf(buf + len, buf_len - len, "local_data_enqued = %u\n",
+ htt_stats_buf->local_data_enqued);
+ len += scnprintf(buf + len, buf_len - len, "local_data_freed = %u\n",
+ htt_stats_buf->local_data_freed);
+ len += scnprintf(buf + len, buf_len - len, "mpdu_tried = %u\n",
+ htt_stats_buf->mpdu_tried);
+ len += scnprintf(buf + len, buf_len - len, "isr_wait_seq_posted = %u\n",
+ htt_stats_buf->isr_wait_seq_posted);
+ len += scnprintf(buf + len, buf_len - len, "tx_active_dur_us_low = %u\n",
+ htt_stats_buf->tx_active_dur_us_low);
+ len += scnprintf(buf + len, buf_len - len, "tx_active_dur_us_high = %u\n\n",
+ htt_stats_buf->tx_active_dur_us_high);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
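+
+/* FIELD_GET() (<linux/bitfield.h>) masks and right-shifts a packed
+ * word, so the mac_id extraction above is roughly:
+ *
+ *	u32 word = htt_stats_buf->mac_id__word;
+ *	u32 mac_id = (word & HTT_STATS_MAC_ID) >> __ffs(HTT_STATS_MAC_ID);
+ *
+ * with the shift count taken from the mask's lowest set bit.
+ */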
+
+static inline void
+htt_print_tx_pdev_stats_urrn_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_pdev_stats_urrn_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ u16 num_elems = min_t(u16, (tag_len >> 2), HTT_TX_PDEV_MAX_URRN_STATS);
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_PDEV_STATS_URRN_TLV_V:\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->urrn_stats, "urrn_stats",
+ num_elems, "\n\n");
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_pdev_stats_flush_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_pdev_stats_flush_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ u16 num_elems = min_t(u16, (tag_len >> 2), HTT_TX_PDEV_MAX_FLUSH_REASON_STATS);
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_PDEV_STATS_FLUSH_TLV_V:\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->flush_errs, "flush_errs",
+ num_elems, "\n\n");
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_pdev_stats_sifs_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_pdev_stats_sifs_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ u16 num_elems = min_t(u16, (tag_len >> 2), HTT_TX_PDEV_MAX_SIFS_BURST_STATS);
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_PDEV_STATS_SIFS_TLV_V:\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->sifs_status, "sifs_status",
+ num_elems, "\n\n");
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_pdev_stats_phy_err_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_pdev_stats_phy_err_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ u16 num_elems = min_t(u16, (tag_len >> 2), HTT_TX_PDEV_MAX_PHY_ERR_STATS);
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_PDEV_STATS_PHY_ERR_TLV_V:\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->phy_errs, "phy_errs",
+ num_elems, "\n\n");
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_pdev_stats_sifs_hist_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_pdev_stats_sifs_hist_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ u16 num_elems = min_t(u16, (tag_len >> 2), HTT_TX_PDEV_MAX_SIFS_BURST_HIST_STATS);
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_PDEV_STATS_SIFS_HIST_TLV_V:\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->sifs_hist_status,
+ "sifs_hist_status", num_elems, "\n\n");
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_pdev_stats_tx_ppdu_stats_tlv_v(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_pdev_stats_tx_ppdu_stats_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_PDEV_STATS_TX_PPDU_STATS_TLV_V:\n");
+
+ len += scnprintf(buf + len, buf_len - len, "num_data_ppdus_legacy_su = %u\n",
+ htt_stats_buf->num_data_ppdus_legacy_su);
+
+ len += scnprintf(buf + len, buf_len - len, "num_data_ppdus_ac_su = %u\n",
+ htt_stats_buf->num_data_ppdus_ac_su);
+
+ len += scnprintf(buf + len, buf_len - len, "num_data_ppdus_ax_su = %u\n",
+ htt_stats_buf->num_data_ppdus_ax_su);
+
+ len += scnprintf(buf + len, buf_len - len, "num_data_ppdus_ac_su_txbf = %u\n",
+ htt_stats_buf->num_data_ppdus_ac_su_txbf);
+
+ len += scnprintf(buf + len, buf_len - len, "num_data_ppdus_ax_su_txbf = %u\n\n",
+ htt_stats_buf->num_data_ppdus_ax_su_txbf);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_pdev_stats_tried_mpdu_cnt_hist_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_pdev_stats_tried_mpdu_cnt_hist_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ u32 num_elements = ((tag_len - sizeof(htt_stats_buf->hist_bin_size)) >> 2);
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_PDEV_STATS_TRIED_MPDU_CNT_HIST_TLV_V:\n");
+ len += scnprintf(buf + len, buf_len - len, "TRIED_MPDU_CNT_HIST_BIN_SIZE : %u\n",
+ htt_stats_buf->hist_bin_size);
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tried_mpdu_cnt_hist,
+ "tried_mpdu_cnt_hist", num_elements, "\n\n");
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
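+
+/* For this variable-length TLV the element count comes from the tag
+ * length itself: subtract the leading hist_bin_size word, then divide
+ * the remaining bytes by 4. E.g. a 44-byte tag yields
+ * (44 - 4) >> 2 == 10 histogram bins.
+ */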
+
+static inline void htt_print_hw_stats_intr_misc_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_hw_stats_intr_misc_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char hw_intr_name[HTT_STATS_MAX_HW_INTR_NAME_LEN + 1] = {};
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_HW_STATS_INTR_MISC_TLV:\n");
+ memcpy(hw_intr_name, &(htt_stats_buf->hw_intr_name[0]),
+ HTT_STATS_MAX_HW_INTR_NAME_LEN);
+ len += scnprintf(buf + len, buf_len - len, "hw_intr_name = %s\n", hw_intr_name);
+ len += scnprintf(buf + len, buf_len - len, "mask = %u\n",
+ htt_stats_buf->mask);
+ len += scnprintf(buf + len, buf_len - len, "count = %u\n\n",
+ htt_stats_buf->count);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
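+
+/* The firmware-supplied hw_intr_name field is a fixed-width byte
+ * array with no guaranteed NUL terminator, hence the copy into a
+ * zero-initialized local buffer that is one byte larger. The same
+ * pattern recurs below for hw_module_name and tid_name.
+ */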
+
+static inline void
+htt_print_hw_stats_wd_timeout_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_hw_stats_wd_timeout_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char hw_module_name[HTT_STATS_MAX_HW_MODULE_NAME_LEN + 1] = {};
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_HW_STATS_WD_TIMEOUT_TLV:\n");
+ memcpy(hw_module_name, &(htt_stats_buf->hw_module_name[0]),
+ HTT_STATS_MAX_HW_MODULE_NAME_LEN);
+ len += scnprintf(buf + len, buf_len - len, "hw_module_name = %s\n",
+ hw_module_name);
+ len += scnprintf(buf + len, buf_len - len, "count = %u\n",
+ htt_stats_buf->count);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_hw_stats_pdev_errs_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_hw_stats_pdev_errs_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_HW_STATS_PDEV_ERRS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n",
+ FIELD_GET(HTT_STATS_MAC_ID, htt_stats_buf->mac_id__word));
+ len += scnprintf(buf + len, buf_len - len, "tx_abort = %u\n",
+ htt_stats_buf->tx_abort);
+ len += scnprintf(buf + len, buf_len - len, "tx_abort_fail_count = %u\n",
+ htt_stats_buf->tx_abort_fail_count);
+ len += scnprintf(buf + len, buf_len - len, "rx_abort = %u\n",
+ htt_stats_buf->rx_abort);
+ len += scnprintf(buf + len, buf_len - len, "rx_abort_fail_count = %u\n",
+ htt_stats_buf->rx_abort_fail_count);
+ len += scnprintf(buf + len, buf_len - len, "warm_reset = %u\n",
+ htt_stats_buf->warm_reset);
+ len += scnprintf(buf + len, buf_len - len, "cold_reset = %u\n",
+ htt_stats_buf->cold_reset);
+ len += scnprintf(buf + len, buf_len - len, "tx_flush = %u\n",
+ htt_stats_buf->tx_flush);
+ len += scnprintf(buf + len, buf_len - len, "tx_glb_reset = %u\n",
+ htt_stats_buf->tx_glb_reset);
+ len += scnprintf(buf + len, buf_len - len, "tx_txq_reset = %u\n",
+ htt_stats_buf->tx_txq_reset);
+ len += scnprintf(buf + len, buf_len - len, "rx_timeout_reset = %u\n\n",
+ htt_stats_buf->rx_timeout_reset);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_msdu_flow_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_msdu_flow_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_MSDU_FLOW_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "last_update_timestamp = %u\n",
+ htt_stats_buf->last_update_timestamp);
+ len += scnprintf(buf + len, buf_len - len, "last_add_timestamp = %u\n",
+ htt_stats_buf->last_add_timestamp);
+ len += scnprintf(buf + len, buf_len - len, "last_remove_timestamp = %u\n",
+ htt_stats_buf->last_remove_timestamp);
+ len += scnprintf(buf + len, buf_len - len, "total_processed_msdu_count = %u\n",
+ htt_stats_buf->total_processed_msdu_count);
+ len += scnprintf(buf + len, buf_len - len, "cur_msdu_count_in_flowq = %u\n",
+ htt_stats_buf->cur_msdu_count_in_flowq);
+ len += scnprintf(buf + len, buf_len - len, "sw_peer_id = %u\n",
+ htt_stats_buf->sw_peer_id);
+ len += scnprintf(buf + len, buf_len - len, "tx_flow_no = %lu\n",
+ FIELD_GET(HTT_MSDU_FLOW_STATS_TX_FLOW_NO,
+ htt_stats_buf->tx_flow_no__tid_num__drop_rule));
+ len += scnprintf(buf + len, buf_len - len, "tid_num = %lu\n",
+ FIELD_GET(HTT_MSDU_FLOW_STATS_TID_NUM,
+ htt_stats_buf->tx_flow_no__tid_num__drop_rule));
+ len += scnprintf(buf + len, buf_len - len, "drop_rule = %lu\n",
+ FIELD_GET(HTT_MSDU_FLOW_STATS_DROP_RULE,
+ htt_stats_buf->tx_flow_no__tid_num__drop_rule));
+ len += scnprintf(buf + len, buf_len - len, "last_cycle_enqueue_count = %u\n",
+ htt_stats_buf->last_cycle_enqueue_count);
+ len += scnprintf(buf + len, buf_len - len, "last_cycle_dequeue_count = %u\n",
+ htt_stats_buf->last_cycle_dequeue_count);
+ len += scnprintf(buf + len, buf_len - len, "last_cycle_drop_count = %u\n",
+ htt_stats_buf->last_cycle_drop_count);
+ len += scnprintf(buf + len, buf_len - len, "current_drop_th = %u\n\n",
+ htt_stats_buf->current_drop_th);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_tx_tid_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_tid_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char tid_name[MAX_HTT_TID_NAME + 1] = {};
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_TID_STATS_TLV:\n");
+ memcpy(tid_name, &(htt_stats_buf->tid_name[0]), MAX_HTT_TID_NAME);
+ len += scnprintf(buf + len, buf_len - len, "tid_name = %s\n", tid_name);
+ len += scnprintf(buf + len, buf_len - len, "sw_peer_id = %lu\n",
+ FIELD_GET(HTT_TX_TID_STATS_SW_PEER_ID,
+ htt_stats_buf->sw_peer_id__tid_num));
+ len += scnprintf(buf + len, buf_len - len, "tid_num = %lu\n",
+ FIELD_GET(HTT_TX_TID_STATS_TID_NUM,
+ htt_stats_buf->sw_peer_id__tid_num));
+ len += scnprintf(buf + len, buf_len - len, "num_sched_pending = %lu\n",
+ FIELD_GET(HTT_TX_TID_STATS_NUM_SCHED_PENDING,
+ htt_stats_buf->num_sched_pending__num_ppdu_in_hwq));
+ len += scnprintf(buf + len, buf_len - len, "num_ppdu_in_hwq = %lu\n",
+ FIELD_GET(HTT_TX_TID_STATS_NUM_PPDU_IN_HWQ,
+ htt_stats_buf->num_sched_pending__num_ppdu_in_hwq));
+ len += scnprintf(buf + len, buf_len - len, "tid_flags = 0x%x\n",
+ htt_stats_buf->tid_flags);
+ len += scnprintf(buf + len, buf_len - len, "hw_queued = %u\n",
+ htt_stats_buf->hw_queued);
+ len += scnprintf(buf + len, buf_len - len, "hw_reaped = %u\n",
+ htt_stats_buf->hw_reaped);
+ len += scnprintf(buf + len, buf_len - len, "mpdus_hw_filter = %u\n",
+ htt_stats_buf->mpdus_hw_filter);
+ len += scnprintf(buf + len, buf_len - len, "qdepth_bytes = %u\n",
+ htt_stats_buf->qdepth_bytes);
+ len += scnprintf(buf + len, buf_len - len, "qdepth_num_msdu = %u\n",
+ htt_stats_buf->qdepth_num_msdu);
+ len += scnprintf(buf + len, buf_len - len, "qdepth_num_mpdu = %u\n",
+ htt_stats_buf->qdepth_num_mpdu);
+ len += scnprintf(buf + len, buf_len - len, "last_scheduled_tsmp = %u\n",
+ htt_stats_buf->last_scheduled_tsmp);
+ len += scnprintf(buf + len, buf_len - len, "pause_module_id = %u\n",
+ htt_stats_buf->pause_module_id);
+ len += scnprintf(buf + len, buf_len - len, "block_module_id = %u\n\n",
+ htt_stats_buf->block_module_id);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_tx_tid_stats_v1_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_tid_stats_v1_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char tid_name[MAX_HTT_TID_NAME + 1] = {};
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_TID_STATS_V1_TLV:\n");
+ memcpy(tid_name, &(htt_stats_buf->tid_name[0]), MAX_HTT_TID_NAME);
+ len += scnprintf(buf + len, buf_len - len, "tid_name = %s\n", tid_name);
+ len += scnprintf(buf + len, buf_len - len, "sw_peer_id = %lu\n",
+ FIELD_GET(HTT_TX_TID_STATS_V1_SW_PEER_ID,
+ htt_stats_buf->sw_peer_id__tid_num));
+ len += scnprintf(buf + len, buf_len - len, "tid_num = %lu\n",
+ FIELD_GET(HTT_TX_TID_STATS_V1_TID_NUM,
+ htt_stats_buf->sw_peer_id__tid_num));
+ len += scnprintf(buf + len, buf_len - len, "num_sched_pending = %lu\n",
+ FIELD_GET(HTT_TX_TID_STATS_V1_NUM_SCHED_PENDING,
+ htt_stats_buf->num_sched_pending__num_ppdu_in_hwq));
+ len += scnprintf(buf + len, buf_len - len, "num_ppdu_in_hwq = %lu\n",
+ FIELD_GET(HTT_TX_TID_STATS_V1_NUM_PPDU_IN_HWQ,
+ htt_stats_buf->num_sched_pending__num_ppdu_in_hwq));
+ len += scnprintf(buf + len, buf_len - len, "tid_flags = 0x%x\n",
+ htt_stats_buf->tid_flags);
+ len += scnprintf(buf + len, buf_len - len, "max_qdepth_bytes = %u\n",
+ htt_stats_buf->max_qdepth_bytes);
+ len += scnprintf(buf + len, buf_len - len, "max_qdepth_n_msdus = %u\n",
+ htt_stats_buf->max_qdepth_n_msdus);
+ len += scnprintf(buf + len, buf_len - len, "rsvd = %u\n",
+ htt_stats_buf->rsvd);
+ len += scnprintf(buf + len, buf_len - len, "qdepth_bytes = %u\n",
+ htt_stats_buf->qdepth_bytes);
+ len += scnprintf(buf + len, buf_len - len, "qdepth_num_msdu = %u\n",
+ htt_stats_buf->qdepth_num_msdu);
+ len += scnprintf(buf + len, buf_len - len, "qdepth_num_mpdu = %u\n",
+ htt_stats_buf->qdepth_num_mpdu);
+ len += scnprintf(buf + len, buf_len - len, "last_scheduled_tsmp = %u\n",
+ htt_stats_buf->last_scheduled_tsmp);
+ len += scnprintf(buf + len, buf_len - len, "pause_module_id = %u\n",
+ htt_stats_buf->pause_module_id);
+ len += scnprintf(buf + len, buf_len - len, "block_module_id = %u\n",
+ htt_stats_buf->block_module_id);
+ len += scnprintf(buf + len, buf_len - len, "allow_n_flags = 0x%x\n",
+ htt_stats_buf->allow_n_flags);
+ len += scnprintf(buf + len, buf_len - len, "sendn_frms_allowed = %u\n\n",
+ htt_stats_buf->sendn_frms_allowed);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_rx_tid_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_rx_tid_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char tid_name[MAX_HTT_TID_NAME + 1] = {};
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_RX_TID_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "sw_peer_id = %lu\n",
+ FIELD_GET(HTT_RX_TID_STATS_SW_PEER_ID,
+ htt_stats_buf->sw_peer_id__tid_num));
+ len += scnprintf(buf + len, buf_len - len, "tid_num = %lu\n",
+ FIELD_GET(HTT_RX_TID_STATS_TID_NUM,
+ htt_stats_buf->sw_peer_id__tid_num));
+ memcpy(tid_name, &(htt_stats_buf->tid_name[0]), MAX_HTT_TID_NAME);
+ len += scnprintf(buf + len, buf_len - len, "tid_name = %s\n", tid_name);
+ len += scnprintf(buf + len, buf_len - len, "dup_in_reorder = %u\n",
+ htt_stats_buf->dup_in_reorder);
+ len += scnprintf(buf + len, buf_len - len, "dup_past_outside_window = %u\n",
+ htt_stats_buf->dup_past_outside_window);
+ len += scnprintf(buf + len, buf_len - len, "dup_past_within_window = %u\n",
+ htt_stats_buf->dup_past_within_window);
+ len += scnprintf(buf + len, buf_len - len, "rxdesc_err_decrypt = %u\n\n",
+ htt_stats_buf->rxdesc_err_decrypt);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_counter_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_counter_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_COUNTER_TLV:\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->counter_name,
+ "counter_name",
+ HTT_MAX_COUNTER_NAME, "\n");
+ len += scnprintf(buf + len, buf_len - len, "count = %u\n\n",
+ htt_stats_buf->count);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_peer_stats_cmn_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_peer_stats_cmn_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_PEER_STATS_CMN_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "ppdu_cnt = %u\n",
+ htt_stats_buf->ppdu_cnt);
+ len += scnprintf(buf + len, buf_len - len, "mpdu_cnt = %u\n",
+ htt_stats_buf->mpdu_cnt);
+ len += scnprintf(buf + len, buf_len - len, "msdu_cnt = %u\n",
+ htt_stats_buf->msdu_cnt);
+ len += scnprintf(buf + len, buf_len - len, "pause_bitmap = %u\n",
+ htt_stats_buf->pause_bitmap);
+ len += scnprintf(buf + len, buf_len - len, "block_bitmap = %u\n",
+ htt_stats_buf->block_bitmap);
+ len += scnprintf(buf + len, buf_len - len, "last_rssi = %d\n",
+ htt_stats_buf->rssi);
+ len += scnprintf(buf + len, buf_len - len, "enqueued_count = %llu\n",
+ htt_stats_buf->peer_enqueued_count_low |
+ ((u64)htt_stats_buf->peer_enqueued_count_high << 32));
+ len += scnprintf(buf + len, buf_len - len, "dequeued_count = %llu\n",
+ htt_stats_buf->peer_dequeued_count_low |
+ ((u64)htt_stats_buf->peer_dequeued_count_high << 32));
+ len += scnprintf(buf + len, buf_len - len, "dropped_count = %llu\n",
+ htt_stats_buf->peer_dropped_count_low |
+ ((u64)htt_stats_buf->peer_dropped_count_high << 32));
+ len += scnprintf(buf + len, buf_len - len, "transmitted_ppdu_bytes = %llu\n",
+ htt_stats_buf->ppdu_transmitted_bytes_low |
+ ((u64)htt_stats_buf->ppdu_transmitted_bytes_high << 32));
+ len += scnprintf(buf + len, buf_len - len, "ttl_removed_count = %u\n",
+ htt_stats_buf->peer_ttl_removed_count);
+ len += scnprintf(buf + len, buf_len - len, "inactive_time = %u\n\n",
+ htt_stats_buf->inactive_time);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
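+
+/* The enqueued/dequeued/dropped/transmitted counters arrive split
+ * into 32-bit halves; the prints above widen the high half before
+ * shifting so the OR produces a full 64-bit value:
+ *
+ *	u64 total = ((u64)high << 32) | low;
+ *
+ * Shifting 'high' while it is still 32 bits wide would be undefined.
+ */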
+
+static inline void htt_print_peer_details_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_peer_details_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_PEER_DETAILS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "peer_type = %u\n",
+ htt_stats_buf->peer_type);
+ len += scnprintf(buf + len, buf_len - len, "sw_peer_id = %u\n",
+ htt_stats_buf->sw_peer_id);
+ len += scnprintf(buf + len, buf_len - len, "vdev_id = %lu\n",
+ FIELD_GET(HTT_PEER_DETAILS_VDEV_ID,
+ htt_stats_buf->vdev_pdev_ast_idx));
+ len += scnprintf(buf + len, buf_len - len, "pdev_id = %lu\n",
+ FIELD_GET(HTT_PEER_DETAILS_PDEV_ID,
+ htt_stats_buf->vdev_pdev_ast_idx));
+ len += scnprintf(buf + len, buf_len - len, "ast_idx = %lu\n",
+ FIELD_GET(HTT_PEER_DETAILS_AST_IDX,
+ htt_stats_buf->vdev_pdev_ast_idx));
+ len += scnprintf(buf + len, buf_len - len,
+ "mac_addr = %02lx:%02lx:%02lx:%02lx:%02lx:%02lx\n",
+ FIELD_GET(HTT_MAC_ADDR_L32_0,
+ htt_stats_buf->mac_addr.mac_addr_l32),
+ FIELD_GET(HTT_MAC_ADDR_L32_1,
+ htt_stats_buf->mac_addr.mac_addr_l32),
+ FIELD_GET(HTT_MAC_ADDR_L32_2,
+ htt_stats_buf->mac_addr.mac_addr_l32),
+ FIELD_GET(HTT_MAC_ADDR_L32_3,
+ htt_stats_buf->mac_addr.mac_addr_l32),
+ FIELD_GET(HTT_MAC_ADDR_H16_0,
+ htt_stats_buf->mac_addr.mac_addr_h16),
+ FIELD_GET(HTT_MAC_ADDR_H16_1,
+ htt_stats_buf->mac_addr.mac_addr_h16));
+ len += scnprintf(buf + len, buf_len - len, "peer_flags = 0x%x\n",
+ htt_stats_buf->peer_flags);
+ len += scnprintf(buf + len, buf_len - len, "qpeer_flags = 0x%x\n\n",
+ htt_stats_buf->qpeer_flags);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
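+
+/* The peer MAC address arrives packed into two words (mac_addr_l32
+ * carrying bytes 0-3, mac_addr_h16 bytes 4-5) and the HTT_MAC_ADDR_*
+ * masks peel off one byte each. Assuming the usual low-byte-first
+ * packing, l32 = 0x44332211 and h16 = 0x6655 print as
+ * 11:22:33:44:55:66.
+ */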
+
+static inline void htt_print_tx_peer_rate_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_peer_rate_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ u8 j;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_PEER_RATE_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "tx_ldpc = %u\n",
+ htt_stats_buf->tx_ldpc);
+ len += scnprintf(buf + len, buf_len - len, "rts_cnt = %u\n",
+ htt_stats_buf->rts_cnt);
+ len += scnprintf(buf + len, buf_len - len, "ack_rssi = %u\n",
+ htt_stats_buf->ack_rssi);
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_mcs, "tx_mcs",
+ HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_su_mcs, "tx_su_mcs",
+ HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_mu_mcs, "tx_mu_mcs",
+ HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_nss, "tx_nss",
+ HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_bw, "tx_bw",
+ HTT_TX_PDEV_STATS_NUM_BW_COUNTERS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_stbc, "tx_stbc",
+ HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_pream, "tx_pream",
+ HTT_TX_PDEV_STATS_NUM_PREAMBLE_TYPES, "\n");
+
+ for (j = 0; j < HTT_TX_PEER_STATS_NUM_GI_COUNTERS; j++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "tx_gi[%u] = ", j);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_gi[j], NULL,
+ HTT_TX_PEER_STATS_NUM_MCS_COUNTERS, "\n");
+ }
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_dcm, "tx_dcm",
+ HTT_TX_PDEV_STATS_NUM_DCM_COUNTERS, "\n\n");
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_rx_peer_rate_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_rx_peer_rate_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ u8 j;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_RX_PEER_RATE_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "nsts = %u\n",
+ htt_stats_buf->nsts);
+ len += scnprintf(buf + len, buf_len - len, "rx_ldpc = %u\n",
+ htt_stats_buf->rx_ldpc);
+ len += scnprintf(buf + len, buf_len - len, "rts_cnt = %u\n",
+ htt_stats_buf->rts_cnt);
+ len += scnprintf(buf + len, buf_len - len, "rssi_mgmt = %u\n",
+ htt_stats_buf->rssi_mgmt);
+ len += scnprintf(buf + len, buf_len - len, "rssi_data = %u\n",
+ htt_stats_buf->rssi_data);
+ len += scnprintf(buf + len, buf_len - len, "rssi_comb = %u\n",
+ htt_stats_buf->rssi_comb);
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_mcs, "rx_mcs",
+ HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_nss, "rx_nss",
+ HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_dcm, "rx_dcm",
+ HTT_RX_PDEV_STATS_NUM_DCM_COUNTERS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_stbc, "rx_stbc",
+ HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_bw, "rx_bw",
+ HTT_RX_PDEV_STATS_NUM_BW_COUNTERS, "\n");
+
+ for (j = 0; j < HTT_RX_PEER_STATS_NUM_SPATIAL_STREAMS; j++) {
+ len += scnprintf(buf + len, (buf_len - len),
+ "rssi_chain[%u] = ", j);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rssi_chain[j], NULL,
+ HTT_RX_PEER_STATS_NUM_BW_COUNTERS, "\n");
+ }
+
+ for (j = 0; j < HTT_RX_PEER_STATS_NUM_GI_COUNTERS; j++) {
+ len += scnprintf(buf + len, (buf_len - len),
+ "rx_gi[%u] = ", j);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_gi[j], NULL,
+ HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+ }
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_pream, "rx_pream",
+ HTT_RX_PDEV_STATS_NUM_PREAMBLE_TYPES, "\n");
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
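+
+/* Passing NULL as the name argument makes PRINT_ARRAY_TO_BUF skip
+ * its "name = " prefix; the per-chain and per-GI loops above rely on
+ * that to print their own "rssi_chain[j] = " / "rx_gi[j] = " labels
+ * and then append only the indexed values on the same line.
+ */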
+
+static inline void
+htt_print_tx_hwq_mu_mimo_sch_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_hwq_mu_mimo_sch_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_HWQ_MU_MIMO_SCH_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mu_mimo_sch_posted = %u\n",
+ htt_stats_buf->mu_mimo_sch_posted);
+ len += scnprintf(buf + len, buf_len - len, "mu_mimo_sch_failed = %u\n",
+ htt_stats_buf->mu_mimo_sch_failed);
+ len += scnprintf(buf + len, buf_len - len, "mu_mimo_ppdu_posted = %u\n\n",
+ htt_stats_buf->mu_mimo_ppdu_posted);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_hwq_mu_mimo_mpdu_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_hwq_mu_mimo_mpdu_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_HWQ_MU_MIMO_MPDU_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mu_mimo_mpdus_queued_usr = %u\n",
+ htt_stats_buf->mu_mimo_mpdus_queued_usr);
+ len += scnprintf(buf + len, buf_len - len, "mu_mimo_mpdus_tried_usr = %u\n",
+ htt_stats_buf->mu_mimo_mpdus_tried_usr);
+ len += scnprintf(buf + len, buf_len - len, "mu_mimo_mpdus_failed_usr = %u\n",
+ htt_stats_buf->mu_mimo_mpdus_failed_usr);
+ len += scnprintf(buf + len, buf_len - len, "mu_mimo_mpdus_requeued_usr = %u\n",
+ htt_stats_buf->mu_mimo_mpdus_requeued_usr);
+ len += scnprintf(buf + len, buf_len - len, "mu_mimo_err_no_ba_usr = %u\n",
+ htt_stats_buf->mu_mimo_err_no_ba_usr);
+ len += scnprintf(buf + len, buf_len - len, "mu_mimo_mpdu_underrun_usr = %u\n",
+ htt_stats_buf->mu_mimo_mpdu_underrun_usr);
+ len += scnprintf(buf + len, buf_len - len, "mu_mimo_ampdu_underrun_usr = %u\n\n",
+ htt_stats_buf->mu_mimo_ampdu_underrun_usr);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_hwq_mu_mimo_cmn_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_hwq_mu_mimo_cmn_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_HWQ_MU_MIMO_CMN_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n",
+ FIELD_GET(HTT_TX_HWQ_STATS_MAC_ID,
+ htt_stats_buf->mac_id__hwq_id__word));
+ len += scnprintf(buf + len, buf_len - len, "hwq_id = %lu\n\n",
+ FIELD_GET(HTT_TX_HWQ_STATS_HWQ_ID,
+ htt_stats_buf->mac_id__hwq_id__word));
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_hwq_stats_cmn_tlv(const void *tag_buf, struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_hwq_stats_cmn_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ /* TODO: HKDBG */
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_HWQ_STATS_CMN_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n",
+ FIELD_GET(HTT_TX_HWQ_STATS_MAC_ID,
+ htt_stats_buf->mac_id__hwq_id__word));
+ len += scnprintf(buf + len, buf_len - len, "hwq_id = %lu\n",
+ FIELD_GET(HTT_TX_HWQ_STATS_HWQ_ID,
+ htt_stats_buf->mac_id__hwq_id__word));
+ len += scnprintf(buf + len, buf_len - len, "xretry = %u\n",
+ htt_stats_buf->xretry);
+ len += scnprintf(buf + len, buf_len - len, "underrun_cnt = %u\n",
+ htt_stats_buf->underrun_cnt);
+ len += scnprintf(buf + len, buf_len - len, "flush_cnt = %u\n",
+ htt_stats_buf->flush_cnt);
+ len += scnprintf(buf + len, buf_len - len, "filt_cnt = %u\n",
+ htt_stats_buf->filt_cnt);
+ len += scnprintf(buf + len, buf_len - len, "null_mpdu_bmap = %u\n",
+ htt_stats_buf->null_mpdu_bmap);
+ len += scnprintf(buf + len, buf_len - len, "user_ack_failure = %u\n",
+ htt_stats_buf->user_ack_failure);
+ len += scnprintf(buf + len, buf_len - len, "ack_tlv_proc = %u\n",
+ htt_stats_buf->ack_tlv_proc);
+ len += scnprintf(buf + len, buf_len - len, "sched_id_proc = %u\n",
+ htt_stats_buf->sched_id_proc);
+ len += scnprintf(buf + len, buf_len - len, "null_mpdu_tx_count = %u\n",
+ htt_stats_buf->null_mpdu_tx_count);
+ len += scnprintf(buf + len, buf_len - len, "mpdu_bmap_not_recvd = %u\n",
+ htt_stats_buf->mpdu_bmap_not_recvd);
+ len += scnprintf(buf + len, buf_len - len, "num_bar = %u\n",
+ htt_stats_buf->num_bar);
+ len += scnprintf(buf + len, buf_len - len, "rts = %u\n",
+ htt_stats_buf->rts);
+ len += scnprintf(buf + len, buf_len - len, "cts2self = %u\n",
+ htt_stats_buf->cts2self);
+ len += scnprintf(buf + len, buf_len - len, "qos_null = %u\n",
+ htt_stats_buf->qos_null);
+ len += scnprintf(buf + len, buf_len - len, "mpdu_tried_cnt = %u\n",
+ htt_stats_buf->mpdu_tried_cnt);
+ len += scnprintf(buf + len, buf_len - len, "mpdu_queued_cnt = %u\n",
+ htt_stats_buf->mpdu_queued_cnt);
+ len += scnprintf(buf + len, buf_len - len, "mpdu_ack_fail_cnt = %u\n",
+ htt_stats_buf->mpdu_ack_fail_cnt);
+ len += scnprintf(buf + len, buf_len - len, "mpdu_filt_cnt = %u\n",
+ htt_stats_buf->mpdu_filt_cnt);
+ len += scnprintf(buf + len, buf_len - len, "false_mpdu_ack_count = %u\n",
+ htt_stats_buf->false_mpdu_ack_count);
+ len += scnprintf(buf + len, buf_len - len, "txq_timeout = %u\n\n",
+ htt_stats_buf->txq_timeout);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_hwq_difs_latency_stats_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_hwq_difs_latency_stats_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ u16 data_len = min_t(u16, (tag_len >> 2), HTT_TX_HWQ_MAX_DIFS_LATENCY_BINS);
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_HWQ_DIFS_LATENCY_STATS_TLV_V:\n");
+ len += scnprintf(buf + len, buf_len - len, "hist_intvl = %u\n",
+ htt_stats_buf->hist_intvl);
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->difs_latency_hist,
+ "difs_latency_hist", data_len, "\n\n");
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_hwq_cmd_result_stats_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_hwq_cmd_result_stats_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ u16 data_len;
+
+ data_len = min_t(u16, (tag_len >> 2), HTT_TX_HWQ_MAX_CMD_RESULT_STATS);
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_HWQ_CMD_RESULT_STATS_TLV_V:\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->cmd_result, "cmd_result",
+ data_len, "\n\n");
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_hwq_cmd_stall_stats_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_hwq_cmd_stall_stats_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ u16 num_elems;
+
+ num_elems = min_t(u16, (tag_len >> 2), HTT_TX_HWQ_MAX_CMD_STALL_STATS);
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_HWQ_CMD_STALL_STATS_TLV_V:\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->cmd_stall_status,
+ "cmd_stall_status", num_elems, "\n\n");
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_hwq_fes_result_stats_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_hwq_fes_result_stats_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ u16 num_elems;
+
+ num_elems = min_t(u16, (tag_len >> 2), HTT_TX_HWQ_MAX_FES_RESULT_STATS);
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_HWQ_FES_RESULT_STATS_TLV_V:\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->fes_result, "fes_result",
+ num_elems, "\n\n");
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_hwq_tried_mpdu_cnt_hist_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_hwq_tried_mpdu_cnt_hist_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ u32 num_elements = ((tag_len -
+ sizeof(htt_stats_buf->hist_bin_size)) >> 2);
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_HWQ_TRIED_MPDU_CNT_HIST_TLV_V:\n");
+ len += scnprintf(buf + len, buf_len - len, "TRIED_MPDU_CNT_HIST_BIN_SIZE : %u\n",
+ htt_stats_buf->hist_bin_size);
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tried_mpdu_cnt_hist,
+ "tried_mpdu_cnt_hist", num_elements, "\n\n");
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_hwq_txop_used_cnt_hist_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_hwq_txop_used_cnt_hist_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ u32 num_elements = tag_len >> 2;
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_HWQ_TXOP_USED_CNT_HIST_TLV_V:\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->txop_used_cnt_hist,
+ "txop_used_cnt_hist", num_elements, "\n\n");
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_tx_sounding_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ s32 i;
+ const struct htt_tx_sounding_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ const u32 *cbf_20 = htt_stats_buf->cbf_20;
+ const u32 *cbf_40 = htt_stats_buf->cbf_40;
+ const u32 *cbf_80 = htt_stats_buf->cbf_80;
+ const u32 *cbf_160 = htt_stats_buf->cbf_160;
+
+ if (htt_stats_buf->tx_sounding_mode == HTT_TX_AC_SOUNDING_MODE) {
+ len += scnprintf(buf + len, buf_len - len,
+ "\nHTT_TX_AC_SOUNDING_STATS_TLV:\n\n");
+ len += scnprintf(buf + len, buf_len - len,
+ "ac_cbf_20 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u\n",
+ cbf_20[HTT_IMPLICIT_TXBF_STEER_STATS],
+ cbf_20[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS],
+ cbf_20[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS],
+ cbf_20[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS],
+ cbf_20[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ac_cbf_40 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u\n",
+ cbf_40[HTT_IMPLICIT_TXBF_STEER_STATS],
+ cbf_40[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS],
+ cbf_40[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS],
+ cbf_40[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS],
+ cbf_40[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ac_cbf_80 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u\n",
+ cbf_80[HTT_IMPLICIT_TXBF_STEER_STATS],
+ cbf_80[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS],
+ cbf_80[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS],
+ cbf_80[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS],
+ cbf_80[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ac_cbf_160 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u\n",
+ cbf_160[HTT_IMPLICIT_TXBF_STEER_STATS],
+ cbf_160[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS],
+ cbf_160[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS],
+ cbf_160[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS],
+ cbf_160[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]);
+
+ for (i = 0; i < HTT_TX_PDEV_STATS_NUM_AC_MUMIMO_USER_STATS; i++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "Sounding User %u = 20MHz: %u, 40MHz : %u, 80MHz: %u, 160MHz: %u\n",
+ i,
+ htt_stats_buf->sounding[0],
+ htt_stats_buf->sounding[1],
+ htt_stats_buf->sounding[2],
+ htt_stats_buf->sounding[3]);
+ }
+ } else if (htt_stats_buf->tx_sounding_mode == HTT_TX_AX_SOUNDING_MODE) {
+ len += scnprintf(buf + len, buf_len - len,
+ "\nHTT_TX_AX_SOUNDING_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_cbf_20 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u\n",
+ cbf_20[HTT_IMPLICIT_TXBF_STEER_STATS],
+ cbf_20[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS],
+ cbf_20[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS],
+ cbf_20[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS],
+ cbf_20[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_cbf_40 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u\n",
+ cbf_40[HTT_IMPLICIT_TXBF_STEER_STATS],
+ cbf_40[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS],
+ cbf_40[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS],
+ cbf_40[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS],
+ cbf_40[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_cbf_80 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u\n",
+ cbf_80[HTT_IMPLICIT_TXBF_STEER_STATS],
+ cbf_80[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS],
+ cbf_80[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS],
+ cbf_80[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS],
+ cbf_80[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_cbf_160 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u\n",
+ cbf_160[HTT_IMPLICIT_TXBF_STEER_STATS],
+ cbf_160[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS],
+ cbf_160[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS],
+ cbf_160[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS],
+ cbf_160[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]);
+
+ for (i = 0; i < HTT_TX_PDEV_STATS_NUM_AX_MUMIMO_USER_STATS; i++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "Sounding User %u = 20MHz: %u, 40MHz : %u, 80MHz: %u, 160MHz: %u\n",
+ i,
+ htt_stats_buf->sounding[0],
+ htt_stats_buf->sounding[1],
+ htt_stats_buf->sounding[2],
+ htt_stats_buf->sounding[3]);
+ }
+ }
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
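+
+/* The cbf_20/40/80/160 arrays are indexed by steering type: implicit
+ * TXBF plus the four explicit SU/MU x SIFS/RBO combinations, which is
+ * why every bandwidth line above prints the same five
+ * HTT_*_STEER_STATS slots.
+ */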
+
+static inline void
+htt_print_tx_selfgen_cmn_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_selfgen_cmn_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_SELFGEN_CMN_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n",
+ FIELD_GET(HTT_STATS_MAC_ID, htt_stats_buf->mac_id__word));
+ len += scnprintf(buf + len, buf_len - len, "su_bar = %u\n",
+ htt_stats_buf->su_bar);
+ len += scnprintf(buf + len, buf_len - len, "rts = %u\n",
+ htt_stats_buf->rts);
+ len += scnprintf(buf + len, buf_len - len, "cts2self = %u\n",
+ htt_stats_buf->cts2self);
+ len += scnprintf(buf + len, buf_len - len, "qos_null = %u\n",
+ htt_stats_buf->qos_null);
+ len += scnprintf(buf + len, buf_len - len, "delayed_bar_1 = %u\n",
+ htt_stats_buf->delayed_bar_1);
+ len += scnprintf(buf + len, buf_len - len, "delayed_bar_2 = %u\n",
+ htt_stats_buf->delayed_bar_2);
+ len += scnprintf(buf + len, buf_len - len, "delayed_bar_3 = %u\n",
+ htt_stats_buf->delayed_bar_3);
+ len += scnprintf(buf + len, buf_len - len, "delayed_bar_4 = %u\n",
+ htt_stats_buf->delayed_bar_4);
+ len += scnprintf(buf + len, buf_len - len, "delayed_bar_5 = %u\n",
+ htt_stats_buf->delayed_bar_5);
+ len += scnprintf(buf + len, buf_len - len, "delayed_bar_6 = %u\n",
+ htt_stats_buf->delayed_bar_6);
+ len += scnprintf(buf + len, buf_len - len, "delayed_bar_7 = %u\n\n",
+ htt_stats_buf->delayed_bar_7);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_selfgen_ac_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_selfgen_ac_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_SELFGEN_AC_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "ac_su_ndpa = %u\n",
+ htt_stats_buf->ac_su_ndpa);
+ len += scnprintf(buf + len, buf_len - len, "ac_su_ndp = %u\n",
+ htt_stats_buf->ac_su_ndp);
+ len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_ndpa = %u\n",
+ htt_stats_buf->ac_mu_mimo_ndpa);
+ len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_ndp = %u\n",
+ htt_stats_buf->ac_mu_mimo_ndp);
+ len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_brpoll_1 = %u\n",
+ htt_stats_buf->ac_mu_mimo_brpoll_1);
+ len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_brpoll_2 = %u\n",
+ htt_stats_buf->ac_mu_mimo_brpoll_2);
+ len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_brpoll_3 = %u\n\n",
+ htt_stats_buf->ac_mu_mimo_brpoll_3);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_selfgen_ax_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_selfgen_ax_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_SELFGEN_AX_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "ax_su_ndpa = %u\n",
+ htt_stats_buf->ax_su_ndpa);
+ len += scnprintf(buf + len, buf_len - len, "ax_su_ndp = %u\n",
+ htt_stats_buf->ax_su_ndp);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_ndpa = %u\n",
+ htt_stats_buf->ax_mu_mimo_ndpa);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_ndp = %u\n",
+ htt_stats_buf->ax_mu_mimo_ndp);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brpoll_1 = %u\n",
+ htt_stats_buf->ax_mu_mimo_brpoll_1);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brpoll_2 = %u\n",
+ htt_stats_buf->ax_mu_mimo_brpoll_2);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brpoll_3 = %u\n",
+ htt_stats_buf->ax_mu_mimo_brpoll_3);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brpoll_4 = %u\n",
+ htt_stats_buf->ax_mu_mimo_brpoll_4);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brpoll_5 = %u\n",
+ htt_stats_buf->ax_mu_mimo_brpoll_5);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brpoll_6 = %u\n",
+ htt_stats_buf->ax_mu_mimo_brpoll_6);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brpoll_7 = %u\n",
+ htt_stats_buf->ax_mu_mimo_brpoll_7);
+ len += scnprintf(buf + len, buf_len - len, "ax_basic_trigger = %u\n",
+ htt_stats_buf->ax_basic_trigger);
+ len += scnprintf(buf + len, buf_len - len, "ax_ulmumimo_trigger = %u\n",
+ htt_stats_buf->ax_ulmumimo_trigger);
+ len += scnprintf(buf + len, buf_len - len, "ax_bsr_trigger = %u\n",
+ htt_stats_buf->ax_bsr_trigger);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_bar_trigger = %u\n",
+ htt_stats_buf->ax_mu_bar_trigger);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_rts_trigger = %u\n\n",
+ htt_stats_buf->ax_mu_rts_trigger);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_selfgen_ac_err_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_selfgen_ac_err_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_SELFGEN_AC_ERR_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "ac_su_ndp_err = %u\n",
+ htt_stats_buf->ac_su_ndp_err);
+ len += scnprintf(buf + len, buf_len - len, "ac_su_ndpa_err = %u\n",
+ htt_stats_buf->ac_su_ndpa_err);
+ len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_ndpa_err = %u\n",
+ htt_stats_buf->ac_mu_mimo_ndpa_err);
+ len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_ndp_err = %u\n",
+ htt_stats_buf->ac_mu_mimo_ndp_err);
+ len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_brp1_err = %u\n",
+ htt_stats_buf->ac_mu_mimo_brp1_err);
+ len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_brp2_err = %u\n",
+ htt_stats_buf->ac_mu_mimo_brp2_err);
+ len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_brp3_err = %u\n\n",
+ htt_stats_buf->ac_mu_mimo_brp3_err);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_selfgen_ax_err_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_selfgen_ax_err_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_SELFGEN_AX_ERR_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "ax_su_ndp_err = %u\n",
+ htt_stats_buf->ax_su_ndp_err);
+ len += scnprintf(buf + len, buf_len - len, "ax_su_ndpa_err = %u\n",
+ htt_stats_buf->ax_su_ndpa_err);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_ndpa_err = %u\n",
+ htt_stats_buf->ax_mu_mimo_ndpa_err);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_ndp_err = %u\n",
+ htt_stats_buf->ax_mu_mimo_ndp_err);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brp1_err = %u\n",
+ htt_stats_buf->ax_mu_mimo_brp1_err);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brp2_err = %u\n",
+ htt_stats_buf->ax_mu_mimo_brp2_err);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brp3_err = %u\n",
+ htt_stats_buf->ax_mu_mimo_brp3_err);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brp4_err = %u\n",
+ htt_stats_buf->ax_mu_mimo_brp4_err);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brp5_err = %u\n",
+ htt_stats_buf->ax_mu_mimo_brp5_err);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brp6_err = %u\n",
+ htt_stats_buf->ax_mu_mimo_brp6_err);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brp7_err = %u\n",
+ htt_stats_buf->ax_mu_mimo_brp7_err);
+ len += scnprintf(buf + len, buf_len - len, "ax_basic_trigger_err = %u\n",
+ htt_stats_buf->ax_basic_trigger_err);
+ len += scnprintf(buf + len, buf_len - len, "ax_ulmumimo_trigger_err = %u\n",
+ htt_stats_buf->ax_ulmumimo_trigger_err);
+ len += scnprintf(buf + len, buf_len - len, "ax_bsr_trigger_err = %u\n",
+ htt_stats_buf->ax_bsr_trigger_err);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_bar_trigger_err = %u\n",
+ htt_stats_buf->ax_mu_bar_trigger_err);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_rts_trigger_err = %u\n\n",
+ htt_stats_buf->ax_mu_rts_trigger_err);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_pdev_mu_mimo_sch_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_pdev_mu_mimo_sch_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ u8 i;
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_PDEV_MU_MIMO_SCH_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mu_mimo_sch_posted = %u\n",
+ htt_stats_buf->mu_mimo_sch_posted);
+ len += scnprintf(buf + len, buf_len - len, "mu_mimo_sch_failed = %u\n",
+ htt_stats_buf->mu_mimo_sch_failed);
+ len += scnprintf(buf + len, buf_len - len, "mu_mimo_ppdu_posted = %u\n\n",
+ htt_stats_buf->mu_mimo_ppdu_posted);
+
+ for (i = 0; i < HTT_TX_PDEV_STATS_NUM_AC_MUMIMO_USER_STATS; i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "ac_mu_mimo_sch_posted_per_group_index %u = %u\n",
+ i, htt_stats_buf->ac_mu_mimo_sch_posted_per_grp_sz[i]);
+
+ for (i = 0; i < HTT_TX_PDEV_STATS_NUM_AX_MUMIMO_USER_STATS; i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_mimo_sch_posted_per_group_index %u = %u\n",
+ i, htt_stats_buf->ax_mu_mimo_sch_posted_per_grp_sz[i]);
+
+ len += scnprintf(buf + len, buf_len - len, "11ac MU_MIMO SCH STATS:\n");
+
+ for (i = 0; i < HTT_TX_PDEV_STATS_NUM_AC_MUMIMO_USER_STATS; i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "ac_mu_mimo_sch_nusers_%u = %u\n",
+ i, htt_stats_buf->ac_mu_mimo_sch_nusers[i]);
+
+ len += scnprintf(buf + len, buf_len - len, "\n11ax MU_MIMO SCH STATS:\n");
+
+ for (i = 0; i < HTT_TX_PDEV_STATS_NUM_AX_MUMIMO_USER_STATS; i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_mimo_sch_nusers_%u = %u\n",
+ i, htt_stats_buf->ax_mu_mimo_sch_nusers[i]);
+
+ len += scnprintf(buf + len, buf_len - len, "\n11ax OFDMA SCH STATS:\n");
+
+ for (i = 0; i < HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS; i++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ofdma_sch_nusers_%u = %u\n",
+ i, htt_stats_buf->ax_ofdma_sch_nusers[i]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ul_ofdma_basic_sch_nusers_%u = %u\n",
+ i, htt_stats_buf->ax_ul_ofdma_basic_sch_nusers[i]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ul_ofdma_bsr_sch_nusers_%u = %u\n",
+ i, htt_stats_buf->ax_ul_ofdma_bsr_sch_nusers[i]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ul_ofdma_sch_bar_nusers_%u = %u\n",
+ i, htt_stats_buf->ax_ul_ofdma_bar_sch_nusers[i]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ul_ofdma_brp_sch_nusers_%u = %u\n",
+ i, htt_stats_buf->ax_ul_ofdma_brp_sch_nusers[i]);
+ }
+
+ len += scnprintf(buf + len, buf_len - len, "\n11ax UL MUMIO SCH STATS:\n");
+
+ for (i = 0; i < HTT_TX_PDEV_STATS_NUM_UL_MUMIMO_USER_STATS; i++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ul_mumimo_basic_sch_nusers_%u = %u\n",
+ i, htt_stats_buf->ax_ul_mumimo_basic_sch_nusers[i]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ul_mumimo_brp_sch_nusers_%u = %u\n",
+ i, htt_stats_buf->ax_ul_mumimo_brp_sch_nusers[i]);
+ }
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
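+ /*
+  * One MPDU stats TLV arrives per user: print the section header only
+  * when user_index is 0, then per-user counters for in-range indices.
+  */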
+static inline void
+htt_print_tx_pdev_mu_mimo_mpdu_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_pdev_mpdu_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ if (htt_stats_buf->tx_sched_mode == HTT_STATS_TX_SCHED_MODE_MU_MIMO_AC) {
+ if (!htt_stats_buf->user_index)
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_PDEV_MU_MIMO_AC_MPDU_STATS:\n");
+
+ if (htt_stats_buf->user_index <
+ HTT_TX_PDEV_STATS_NUM_AC_MUMIMO_USER_STATS) {
+ len += scnprintf(buf + len, buf_len - len,
+ "ac_mu_mimo_mpdus_queued_usr_%u = %u\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdus_queued_usr);
+ len += scnprintf(buf + len, buf_len - len,
+ "ac_mu_mimo_mpdus_tried_usr_%u = %u\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdus_tried_usr);
+ len += scnprintf(buf + len, buf_len - len,
+ "ac_mu_mimo_mpdus_failed_usr_%u = %u\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdus_failed_usr);
+ len += scnprintf(buf + len, buf_len - len,
+ "ac_mu_mimo_mpdus_requeued_usr_%u = %u\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdus_requeued_usr);
+ len += scnprintf(buf + len, buf_len - len,
+ "ac_mu_mimo_err_no_ba_usr_%u = %u\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->err_no_ba_usr);
+ len += scnprintf(buf + len, buf_len - len,
+ "ac_mu_mimo_mpdu_underrun_usr_%u = %u\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdu_underrun_usr);
+ len += scnprintf(buf + len, buf_len - len,
+ "ac_mu_mimo_ampdu_underrun_usr_%u = %u\n\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->ampdu_underrun_usr);
+ }
+ }
+
+ if (htt_stats_buf->tx_sched_mode == HTT_STATS_TX_SCHED_MODE_MU_MIMO_AX) {
+ if (!htt_stats_buf->user_index)
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_PDEV_MU_MIMO_AX_MPDU_STATS:\n");
+
+ if (htt_stats_buf->user_index <
+ HTT_TX_PDEV_STATS_NUM_AX_MUMIMO_USER_STATS) {
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_mimo_mpdus_queued_usr_%u = %u\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdus_queued_usr);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_mimo_mpdus_tried_usr_%u = %u\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdus_tried_usr);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_mimo_mpdus_failed_usr_%u = %u\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdus_failed_usr);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_mimo_mpdus_requeued_usr_%u = %u\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdus_requeued_usr);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_mimo_err_no_ba_usr_%u = %u\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->err_no_ba_usr);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_mimo_mpdu_underrun_usr_%u = %u\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdu_underrun_usr);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_mimo_ampdu_underrun_usr_%u = %u\n\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->ampdu_underrun_usr);
+ }
+ }
+
+ if (htt_stats_buf->tx_sched_mode == HTT_STATS_TX_SCHED_MODE_MU_OFDMA_AX) {
+ if (!htt_stats_buf->user_index)
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_PDEV_AX_MU_OFDMA_MPDU_STATS:\n");
+
+ if (htt_stats_buf->user_index < HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS) {
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_ofdma_mpdus_queued_usr_%u = %u\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdus_queued_usr);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_ofdma_mpdus_tried_usr_%u = %u\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdus_tried_usr);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_ofdma_mpdus_failed_usr_%u = %u\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdus_failed_usr);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_ofdma_mpdus_requeued_usr_%u = %u\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdus_requeued_usr);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_ofdma_err_no_ba_usr_%u = %u\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->err_no_ba_usr);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_ofdma_mpdu_underrun_usr_%u = %u\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdu_underrun_usr);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_ofdma_ampdu_underrun_usr_%u = %u\n\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->ampdu_underrun_usr);
+ }
+ }
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_sched_txq_cmd_posted_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_sched_txq_cmd_posted_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
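+ /* each entry is u32, i.e. 4 bytes */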
+ u16 num_elements = min_t(u16, (tag_len >> 2), HTT_TX_PDEV_SCHED_TX_MODE_MAX);
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_SCHED_TXQ_CMD_POSTED_TLV_V:\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->sched_cmd_posted,
+ "sched_cmd_posted", num_elements, "\n\n");
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_sched_txq_cmd_reaped_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_sched_txq_cmd_reaped_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
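+ /* each entry is u32, i.e. 4 bytes */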
+ u16 num_elements = min_t(u16, (tag_len >> 2), HTT_TX_PDEV_SCHED_TX_MODE_MAX);
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_SCHED_TXQ_CMD_REAPED_TLV_V:\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->sched_cmd_reaped,
+ "sched_cmd_reaped", num_elements, "\n\n");
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_sched_txq_sched_order_su_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_sched_txq_sched_order_su_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ /* each entry is u32, i.e. 4 bytes */
+ u32 sched_order_su_num_entries =
+ min_t(u32, (tag_len >> 2), HTT_TX_PDEV_NUM_SCHED_ORDER_LOG);
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_SCHED_TXQ_SCHED_ORDER_SU_TLV_V:\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->sched_order_su, "sched_order_su",
+ sched_order_su_num_entries, "\n\n");
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_sched_txq_sched_ineligibility_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_sched_txq_sched_ineligibility_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ /* each entry is u32, i.e. 4 bytes */
+ u32 sched_ineligibility_num_entries = tag_len >> 2;
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_SCHED_TXQ_SCHED_INELIGIBILITY_V:\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->sched_ineligibility,
+ "sched_ineligibility", sched_ineligibility_num_entries,
+ "\n\n");
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_pdev_stats_sched_per_txq_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_pdev_stats_sched_per_txq_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_PDEV_STATS_SCHED_PER_TXQ_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n",
+ FIELD_GET(HTT_TX_PDEV_STATS_SCHED_PER_TXQ_MAC_ID,
+ htt_stats_buf->mac_id__txq_id__word));
+ len += scnprintf(buf + len, buf_len - len, "txq_id = %lu\n",
+ FIELD_GET(HTT_TX_PDEV_STATS_SCHED_PER_TXQ_ID,
+ htt_stats_buf->mac_id__txq_id__word));
+ len += scnprintf(buf + len, buf_len - len, "sched_policy = %u\n",
+ htt_stats_buf->sched_policy);
+ len += scnprintf(buf + len, buf_len - len,
+ "last_sched_cmd_posted_timestamp = %u\n",
+ htt_stats_buf->last_sched_cmd_posted_timestamp);
+ len += scnprintf(buf + len, buf_len - len,
+ "last_sched_cmd_compl_timestamp = %u\n",
+ htt_stats_buf->last_sched_cmd_compl_timestamp);
+ len += scnprintf(buf + len, buf_len - len, "sched_2_tac_lwm_count = %u\n",
+ htt_stats_buf->sched_2_tac_lwm_count);
+ len += scnprintf(buf + len, buf_len - len, "sched_2_tac_ring_full = %u\n",
+ htt_stats_buf->sched_2_tac_ring_full);
+ len += scnprintf(buf + len, buf_len - len, "sched_cmd_post_failure = %u\n",
+ htt_stats_buf->sched_cmd_post_failure);
+ len += scnprintf(buf + len, buf_len - len, "num_active_tids = %u\n",
+ htt_stats_buf->num_active_tids);
+ len += scnprintf(buf + len, buf_len - len, "num_ps_schedules = %u\n",
+ htt_stats_buf->num_ps_schedules);
+ len += scnprintf(buf + len, buf_len - len, "sched_cmds_pending = %u\n",
+ htt_stats_buf->sched_cmds_pending);
+ len += scnprintf(buf + len, buf_len - len, "num_tid_register = %u\n",
+ htt_stats_buf->num_tid_register);
+ len += scnprintf(buf + len, buf_len - len, "num_tid_unregister = %u\n",
+ htt_stats_buf->num_tid_unregister);
+ len += scnprintf(buf + len, buf_len - len, "num_qstats_queried = %u\n",
+ htt_stats_buf->num_qstats_queried);
+ len += scnprintf(buf + len, buf_len - len, "qstats_update_pending = %u\n",
+ htt_stats_buf->qstats_update_pending);
+ len += scnprintf(buf + len, buf_len - len, "last_qstats_query_timestamp = %u\n",
+ htt_stats_buf->last_qstats_query_timestamp);
+ len += scnprintf(buf + len, buf_len - len, "num_tqm_cmdq_full = %u\n",
+ htt_stats_buf->num_tqm_cmdq_full);
+ len += scnprintf(buf + len, buf_len - len, "num_de_sched_algo_trigger = %u\n",
+ htt_stats_buf->num_de_sched_algo_trigger);
+ len += scnprintf(buf + len, buf_len - len, "num_rt_sched_algo_trigger = %u\n",
+ htt_stats_buf->num_rt_sched_algo_trigger);
+ len += scnprintf(buf + len, buf_len - len, "num_tqm_sched_algo_trigger = %u\n",
+ htt_stats_buf->num_tqm_sched_algo_trigger);
+ len += scnprintf(buf + len, buf_len - len, "notify_sched = %u\n\n",
+ htt_stats_buf->notify_sched);
+ len += scnprintf(buf + len, buf_len - len, "dur_based_sendn_term = %u\n\n",
+ htt_stats_buf->dur_based_sendn_term);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_stats_tx_sched_cmn_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_stats_tx_sched_cmn_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_STATS_TX_SCHED_CMN_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n",
+ FIELD_GET(HTT_STATS_MAC_ID, htt_stats_buf->mac_id__word));
+ len += scnprintf(buf + len, buf_len - len, "current_timestamp = %u\n\n",
+ htt_stats_buf->current_timestamp);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_tqm_gen_mpdu_stats_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_tqm_gen_mpdu_stats_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
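+ /* each entry is u32, i.e. 4 bytes */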
+ u16 num_elements = min_t(u16, (tag_len >> 2),
+ HTT_TX_TQM_MAX_LIST_MPDU_END_REASON);
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_TQM_GEN_MPDU_STATS_TLV_V:\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->gen_mpdu_end_reason,
+ "gen_mpdu_end_reason", num_elements, "\n\n");
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_tqm_list_mpdu_stats_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_tqm_list_mpdu_stats_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
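+ /* each entry is u32, i.e. 4 bytes */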
+ u16 num_elems = min_t(u16, (tag_len >> 2), HTT_TX_TQM_MAX_LIST_MPDU_END_REASON);
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_TQM_LIST_MPDU_STATS_TLV_V:\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->list_mpdu_end_reason,
+ "list_mpdu_end_reason", num_elems, "\n\n");
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_tqm_list_mpdu_cnt_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_tqm_list_mpdu_cnt_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
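+ /* each entry is u32, i.e. 4 bytes */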
+ u16 num_elems = min_t(u16, (tag_len >> 2),
+ HTT_TX_TQM_MAX_LIST_MPDU_CNT_HISTOGRAM_BINS);
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_TQM_LIST_MPDU_CNT_TLV_V:\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->list_mpdu_cnt_hist,
+ "list_mpdu_cnt_hist", num_elems, "\n\n");
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_tqm_pdev_stats_tlv_v(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_tqm_pdev_stats_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_TQM_PDEV_STATS_TLV_V:\n");
+ len += scnprintf(buf + len, buf_len - len, "msdu_count = %u\n",
+ htt_stats_buf->msdu_count);
+ len += scnprintf(buf + len, buf_len - len, "mpdu_count = %u\n",
+ htt_stats_buf->mpdu_count);
+ len += scnprintf(buf + len, buf_len - len, "remove_msdu = %u\n",
+ htt_stats_buf->remove_msdu);
+ len += scnprintf(buf + len, buf_len - len, "remove_mpdu = %u\n",
+ htt_stats_buf->remove_mpdu);
+ len += scnprintf(buf + len, buf_len - len, "remove_msdu_ttl = %u\n",
+ htt_stats_buf->remove_msdu_ttl);
+ len += scnprintf(buf + len, buf_len - len, "send_bar = %u\n",
+ htt_stats_buf->send_bar);
+ len += scnprintf(buf + len, buf_len - len, "bar_sync = %u\n",
+ htt_stats_buf->bar_sync);
+ len += scnprintf(buf + len, buf_len - len, "notify_mpdu = %u\n",
+ htt_stats_buf->notify_mpdu);
+ len += scnprintf(buf + len, buf_len - len, "sync_cmd = %u\n",
+ htt_stats_buf->sync_cmd);
+ len += scnprintf(buf + len, buf_len - len, "write_cmd = %u\n",
+ htt_stats_buf->write_cmd);
+ len += scnprintf(buf + len, buf_len - len, "hwsch_trigger = %u\n",
+ htt_stats_buf->hwsch_trigger);
+ len += scnprintf(buf + len, buf_len - len, "ack_tlv_proc = %u\n",
+ htt_stats_buf->ack_tlv_proc);
+ len += scnprintf(buf + len, buf_len - len, "gen_mpdu_cmd = %u\n",
+ htt_stats_buf->gen_mpdu_cmd);
+ len += scnprintf(buf + len, buf_len - len, "gen_list_cmd = %u\n",
+ htt_stats_buf->gen_list_cmd);
+ len += scnprintf(buf + len, buf_len - len, "remove_mpdu_cmd = %u\n",
+ htt_stats_buf->remove_mpdu_cmd);
+ len += scnprintf(buf + len, buf_len - len, "remove_mpdu_tried_cmd = %u\n",
+ htt_stats_buf->remove_mpdu_tried_cmd);
+ len += scnprintf(buf + len, buf_len - len, "mpdu_queue_stats_cmd = %u\n",
+ htt_stats_buf->mpdu_queue_stats_cmd);
+ len += scnprintf(buf + len, buf_len - len, "mpdu_head_info_cmd = %u\n",
+ htt_stats_buf->mpdu_head_info_cmd);
+ len += scnprintf(buf + len, buf_len - len, "msdu_flow_stats_cmd = %u\n",
+ htt_stats_buf->msdu_flow_stats_cmd);
+ len += scnprintf(buf + len, buf_len - len, "remove_msdu_cmd = %u\n",
+ htt_stats_buf->remove_msdu_cmd);
+ len += scnprintf(buf + len, buf_len - len, "remove_msdu_ttl_cmd = %u\n",
+ htt_stats_buf->remove_msdu_ttl_cmd);
+ len += scnprintf(buf + len, buf_len - len, "flush_cache_cmd = %u\n",
+ htt_stats_buf->flush_cache_cmd);
+ len += scnprintf(buf + len, buf_len - len, "update_mpduq_cmd = %u\n",
+ htt_stats_buf->update_mpduq_cmd);
+ len += scnprintf(buf + len, buf_len - len, "enqueue = %u\n",
+ htt_stats_buf->enqueue);
+ len += scnprintf(buf + len, buf_len - len, "enqueue_notify = %u\n",
+ htt_stats_buf->enqueue_notify);
+ len += scnprintf(buf + len, buf_len - len, "notify_mpdu_at_head = %u\n",
+ htt_stats_buf->notify_mpdu_at_head);
+ len += scnprintf(buf + len, buf_len - len, "notify_mpdu_state_valid = %u\n",
+ htt_stats_buf->notify_mpdu_state_valid);
+ len += scnprintf(buf + len, buf_len - len, "sched_udp_notify1 = %u\n",
+ htt_stats_buf->sched_udp_notify1);
+ len += scnprintf(buf + len, buf_len - len, "sched_udp_notify2 = %u\n",
+ htt_stats_buf->sched_udp_notify2);
+ len += scnprintf(buf + len, buf_len - len, "sched_nonudp_notify1 = %u\n",
+ htt_stats_buf->sched_nonudp_notify1);
+ len += scnprintf(buf + len, buf_len - len, "sched_nonudp_notify2 = %u\n\n",
+ htt_stats_buf->sched_nonudp_notify2);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_tx_tqm_cmn_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_tqm_cmn_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_TQM_CMN_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n",
+ FIELD_GET(HTT_STATS_MAC_ID, htt_stats_buf->mac_id__word));
+ len += scnprintf(buf + len, buf_len - len, "max_cmdq_id = %u\n",
+ htt_stats_buf->max_cmdq_id);
+ len += scnprintf(buf + len, buf_len - len, "list_mpdu_cnt_hist_intvl = %u\n",
+ htt_stats_buf->list_mpdu_cnt_hist_intvl);
+ len += scnprintf(buf + len, buf_len - len, "add_msdu = %u\n",
+ htt_stats_buf->add_msdu);
+ len += scnprintf(buf + len, buf_len - len, "q_empty = %u\n",
+ htt_stats_buf->q_empty);
+ len += scnprintf(buf + len, buf_len - len, "q_not_empty = %u\n",
+ htt_stats_buf->q_not_empty);
+ len += scnprintf(buf + len, buf_len - len, "drop_notification = %u\n",
+ htt_stats_buf->drop_notification);
+ len += scnprintf(buf + len, buf_len - len, "desc_threshold = %u\n\n",
+ htt_stats_buf->desc_threshold);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_tx_tqm_error_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_tqm_error_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_TQM_ERROR_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "q_empty_failure = %u\n",
+ htt_stats_buf->q_empty_failure);
+ len += scnprintf(buf + len, buf_len - len, "q_not_empty_failure = %u\n",
+ htt_stats_buf->q_not_empty_failure);
+ len += scnprintf(buf + len, buf_len - len, "add_msdu_failure = %u\n\n",
+ htt_stats_buf->add_msdu_failure);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_tx_tqm_cmdq_status_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_tqm_cmdq_status_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_TQM_CMDQ_STATUS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n",
+ FIELD_GET(HTT_TX_TQM_CMDQ_STATUS_MAC_ID,
+ htt_stats_buf->mac_id__cmdq_id__word));
+ len += scnprintf(buf + len, buf_len - len, "cmdq_id = %lu\n\n",
+ FIELD_GET(HTT_TX_TQM_CMDQ_STATUS_CMDQ_ID,
+ htt_stats_buf->mac_id__cmdq_id__word));
+ len += scnprintf(buf + len, buf_len - len, "sync_cmd = %u\n",
+ htt_stats_buf->sync_cmd);
+ len += scnprintf(buf + len, buf_len - len, "write_cmd = %u\n",
+ htt_stats_buf->write_cmd);
+ len += scnprintf(buf + len, buf_len - len, "gen_mpdu_cmd = %u\n",
+ htt_stats_buf->gen_mpdu_cmd);
+ len += scnprintf(buf + len, buf_len - len, "mpdu_queue_stats_cmd = %u\n",
+ htt_stats_buf->mpdu_queue_stats_cmd);
+ len += scnprintf(buf + len, buf_len - len, "mpdu_head_info_cmd = %u\n",
+ htt_stats_buf->mpdu_head_info_cmd);
+ len += scnprintf(buf + len, buf_len - len, "msdu_flow_stats_cmd = %u\n",
+ htt_stats_buf->msdu_flow_stats_cmd);
+ len += scnprintf(buf + len, buf_len - len, "remove_mpdu_cmd = %u\n",
+ htt_stats_buf->remove_mpdu_cmd);
+ len += scnprintf(buf + len, buf_len - len, "remove_msdu_cmd = %u\n",
+ htt_stats_buf->remove_msdu_cmd);
+ len += scnprintf(buf + len, buf_len - len, "flush_cache_cmd = %u\n",
+ htt_stats_buf->flush_cache_cmd);
+ len += scnprintf(buf + len, buf_len - len, "update_mpduq_cmd = %u\n",
+ htt_stats_buf->update_mpduq_cmd);
+ len += scnprintf(buf + len, buf_len - len, "update_msduq_cmd = %u\n\n",
+ htt_stats_buf->update_msduq_cmd);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_de_eapol_packets_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_de_eapol_packets_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_DE_EAPOL_PACKETS_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "m1_packets = %u\n",
+ htt_stats_buf->m1_packets);
+ len += scnprintf(buf + len, buf_len - len, "m2_packets = %u\n",
+ htt_stats_buf->m2_packets);
+ len += scnprintf(buf + len, buf_len - len, "m3_packets = %u\n",
+ htt_stats_buf->m3_packets);
+ len += scnprintf(buf + len, buf_len - len, "m4_packets = %u\n",
+ htt_stats_buf->m4_packets);
+ len += scnprintf(buf + len, buf_len - len, "g1_packets = %u\n",
+ htt_stats_buf->g1_packets);
+ len += scnprintf(buf + len, buf_len - len, "g2_packets = %u\n\n",
+ htt_stats_buf->g2_packets);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_de_classify_failed_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_de_classify_failed_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_DE_CLASSIFY_FAILED_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "ap_bss_peer_not_found = %u\n",
+ htt_stats_buf->ap_bss_peer_not_found);
+ len += scnprintf(buf + len, buf_len - len, "ap_bcast_mcast_no_peer = %u\n",
+ htt_stats_buf->ap_bcast_mcast_no_peer);
+ len += scnprintf(buf + len, buf_len - len, "sta_delete_in_progress = %u\n",
+ htt_stats_buf->sta_delete_in_progress);
+ len += scnprintf(buf + len, buf_len - len, "ibss_no_bss_peer = %u\n",
+ htt_stats_buf->ibss_no_bss_peer);
+ len += scnprintf(buf + len, buf_len - len, "invalid_vdev_type = %u\n",
+ htt_stats_buf->invalid_vdev_type);
+ len += scnprintf(buf + len, buf_len - len, "invalid_ast_peer_entry = %u\n",
+ htt_stats_buf->invalid_ast_peer_entry);
+ len += scnprintf(buf + len, buf_len - len, "peer_entry_invalid = %u\n",
+ htt_stats_buf->peer_entry_invalid);
+ len += scnprintf(buf + len, buf_len - len, "ethertype_not_ip = %u\n",
+ htt_stats_buf->ethertype_not_ip);
+ len += scnprintf(buf + len, buf_len - len, "eapol_lookup_failed = %u\n",
+ htt_stats_buf->eapol_lookup_failed);
+ len += scnprintf(buf + len, buf_len - len, "qpeer_not_allow_data = %u\n",
+ htt_stats_buf->qpeer_not_allow_data);
+ len += scnprintf(buf + len, buf_len - len, "fse_tid_override = %u\n",
+ htt_stats_buf->fse_tid_override);
+ len += scnprintf(buf + len, buf_len - len, "ipv6_jumbogram_zero_length = %u\n",
+ htt_stats_buf->ipv6_jumbogram_zero_length);
+ len += scnprintf(buf + len, buf_len - len, "qos_to_non_qos_in_prog = %u\n\n",
+ htt_stats_buf->qos_to_non_qos_in_prog);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_de_classify_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_de_classify_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_DE_CLASSIFY_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "arp_packets = %u\n",
+ htt_stats_buf->arp_packets);
+ len += scnprintf(buf + len, buf_len - len, "igmp_packets = %u\n",
+ htt_stats_buf->igmp_packets);
+ len += scnprintf(buf + len, buf_len - len, "dhcp_packets = %u\n",
+ htt_stats_buf->dhcp_packets);
+ len += scnprintf(buf + len, buf_len - len, "host_inspected = %u\n",
+ htt_stats_buf->host_inspected);
+ len += scnprintf(buf + len, buf_len - len, "htt_included = %u\n",
+ htt_stats_buf->htt_included);
+ len += scnprintf(buf + len, buf_len - len, "htt_valid_mcs = %u\n",
+ htt_stats_buf->htt_valid_mcs);
+ len += scnprintf(buf + len, buf_len - len, "htt_valid_nss = %u\n",
+ htt_stats_buf->htt_valid_nss);
+ len += scnprintf(buf + len, buf_len - len, "htt_valid_preamble_type = %u\n",
+ htt_stats_buf->htt_valid_preamble_type);
+ len += scnprintf(buf + len, buf_len - len, "htt_valid_chainmask = %u\n",
+ htt_stats_buf->htt_valid_chainmask);
+ len += scnprintf(buf + len, buf_len - len, "htt_valid_guard_interval = %u\n",
+ htt_stats_buf->htt_valid_guard_interval);
+ len += scnprintf(buf + len, buf_len - len, "htt_valid_retries = %u\n",
+ htt_stats_buf->htt_valid_retries);
+ len += scnprintf(buf + len, buf_len - len, "htt_valid_bw_info = %u\n",
+ htt_stats_buf->htt_valid_bw_info);
+ len += scnprintf(buf + len, buf_len - len, "htt_valid_power = %u\n",
+ htt_stats_buf->htt_valid_power);
+ len += scnprintf(buf + len, buf_len - len, "htt_valid_key_flags = 0x%x\n",
+ htt_stats_buf->htt_valid_key_flags);
+ len += scnprintf(buf + len, buf_len - len, "htt_valid_no_encryption = %u\n",
+ htt_stats_buf->htt_valid_no_encryption);
+ len += scnprintf(buf + len, buf_len - len, "fse_entry_count = %u\n",
+ htt_stats_buf->fse_entry_count);
+ len += scnprintf(buf + len, buf_len - len, "fse_priority_be = %u\n",
+ htt_stats_buf->fse_priority_be);
+ len += scnprintf(buf + len, buf_len - len, "fse_priority_high = %u\n",
+ htt_stats_buf->fse_priority_high);
+ len += scnprintf(buf + len, buf_len - len, "fse_priority_low = %u\n",
+ htt_stats_buf->fse_priority_low);
+ len += scnprintf(buf + len, buf_len - len, "fse_traffic_ptrn_be = %u\n",
+ htt_stats_buf->fse_traffic_ptrn_be);
+ len += scnprintf(buf + len, buf_len - len, "fse_traffic_ptrn_over_sub = %u\n",
+ htt_stats_buf->fse_traffic_ptrn_over_sub);
+ len += scnprintf(buf + len, buf_len - len, "fse_traffic_ptrn_bursty = %u\n",
+ htt_stats_buf->fse_traffic_ptrn_bursty);
+ len += scnprintf(buf + len, buf_len - len, "fse_traffic_ptrn_interactive = %u\n",
+ htt_stats_buf->fse_traffic_ptrn_interactive);
+ len += scnprintf(buf + len, buf_len - len, "fse_traffic_ptrn_periodic = %u\n",
+ htt_stats_buf->fse_traffic_ptrn_periodic);
+ len += scnprintf(buf + len, buf_len - len, "fse_hwqueue_alloc = %u\n",
+ htt_stats_buf->fse_hwqueue_alloc);
+ len += scnprintf(buf + len, buf_len - len, "fse_hwqueue_created = %u\n",
+ htt_stats_buf->fse_hwqueue_created);
+ len += scnprintf(buf + len, buf_len - len, "fse_hwqueue_send_to_host = %u\n",
+ htt_stats_buf->fse_hwqueue_send_to_host);
+ len += scnprintf(buf + len, buf_len - len, "mcast_entry = %u\n",
+ htt_stats_buf->mcast_entry);
+ len += scnprintf(buf + len, buf_len - len, "bcast_entry = %u\n",
+ htt_stats_buf->bcast_entry);
+ len += scnprintf(buf + len, buf_len - len, "htt_update_peer_cache = %u\n",
+ htt_stats_buf->htt_update_peer_cache);
+ len += scnprintf(buf + len, buf_len - len, "htt_learning_frame = %u\n",
+ htt_stats_buf->htt_learning_frame);
+ len += scnprintf(buf + len, buf_len - len, "fse_invalid_peer = %u\n",
+ htt_stats_buf->fse_invalid_peer);
+ len += scnprintf(buf + len, buf_len - len, "mec_notify = %u\n\n",
+ htt_stats_buf->mec_notify);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_de_classify_status_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_de_classify_status_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_DE_CLASSIFY_STATUS_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "eok = %u\n",
+ htt_stats_buf->eok);
+ len += scnprintf(buf + len, buf_len - len, "classify_done = %u\n",
+ htt_stats_buf->classify_done);
+ len += scnprintf(buf + len, buf_len - len, "lookup_failed = %u\n",
+ htt_stats_buf->lookup_failed);
+ len += scnprintf(buf + len, buf_len - len, "send_host_dhcp = %u\n",
+ htt_stats_buf->send_host_dhcp);
+ len += scnprintf(buf + len, buf_len - len, "send_host_mcast = %u\n",
+ htt_stats_buf->send_host_mcast);
+ len += scnprintf(buf + len, buf_len - len, "send_host_unknown_dest = %u\n",
+ htt_stats_buf->send_host_unknown_dest);
+ len += scnprintf(buf + len, buf_len - len, "send_host = %u\n",
+ htt_stats_buf->send_host);
+ len += scnprintf(buf + len, buf_len - len, "status_invalid = %u\n\n",
+ htt_stats_buf->status_invalid);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_de_enqueue_packets_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_de_enqueue_packets_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_DE_ENQUEUE_PACKETS_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "enqueued_pkts = %u\n",
+ htt_stats_buf->enqueued_pkts);
+ len += scnprintf(buf + len, buf_len - len, "to_tqm = %u\n",
+ htt_stats_buf->to_tqm);
+ len += scnprintf(buf + len, buf_len - len, "to_tqm_bypass = %u\n\n",
+ htt_stats_buf->to_tqm_bypass);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_de_enqueue_discard_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_de_enqueue_discard_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_DE_ENQUEUE_DISCARD_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "discarded_pkts = %u\n",
+ htt_stats_buf->discarded_pkts);
+ len += scnprintf(buf + len, buf_len - len, "local_frames = %u\n",
+ htt_stats_buf->local_frames);
+ len += scnprintf(buf + len, buf_len - len, "is_ext_msdu = %u\n\n",
+ htt_stats_buf->is_ext_msdu);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_tx_de_compl_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_de_compl_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_DE_COMPL_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "tcl_dummy_frame = %u\n",
+ htt_stats_buf->tcl_dummy_frame);
+ len += scnprintf(buf + len, buf_len - len, "tqm_dummy_frame = %u\n",
+ htt_stats_buf->tqm_dummy_frame);
+ len += scnprintf(buf + len, buf_len - len, "tqm_notify_frame = %u\n",
+ htt_stats_buf->tqm_notify_frame);
+ len += scnprintf(buf + len, buf_len - len, "fw2wbm_enq = %u\n",
+ htt_stats_buf->fw2wbm_enq);
+ len += scnprintf(buf + len, buf_len - len, "tqm_bypass_frame = %u\n\n",
+ htt_stats_buf->tqm_bypass_frame);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_de_fw2wbm_ring_full_hist_tlv(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_de_fw2wbm_ring_full_hist_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
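+ /* each entry is u32, i.e. 4 bytes */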
+ u16 num_elements = tag_len >> 2;
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_DE_FW2WBM_RING_FULL_HIST_TLV");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->fw2wbm_ring_full_hist,
+ "fw2wbm_ring_full_hist", num_elements, "\n\n");
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_de_cmn_stats_tlv(const void *tag_buf, struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_de_cmn_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_DE_CMN_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n",
+ FIELD_GET(HTT_STATS_MAC_ID, htt_stats_buf->mac_id__word));
+ len += scnprintf(buf + len, buf_len - len, "tcl2fw_entry_count = %u\n",
+ htt_stats_buf->tcl2fw_entry_count);
+ len += scnprintf(buf + len, buf_len - len, "not_to_fw = %u\n",
+ htt_stats_buf->not_to_fw);
+ len += scnprintf(buf + len, buf_len - len, "invalid_pdev_vdev_peer = %u\n",
+ htt_stats_buf->invalid_pdev_vdev_peer);
+ len += scnprintf(buf + len, buf_len - len, "tcl_res_invalid_addrx = %u\n",
+ htt_stats_buf->tcl_res_invalid_addrx);
+ len += scnprintf(buf + len, buf_len - len, "wbm2fw_entry_count = %u\n",
+ htt_stats_buf->wbm2fw_entry_count);
+ len += scnprintf(buf + len, buf_len - len, "invalid_pdev = %u\n\n",
+ htt_stats_buf->invalid_pdev);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_ring_if_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_ring_if_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_RING_IF_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "base_addr = %u\n",
+ htt_stats_buf->base_addr);
+ len += scnprintf(buf + len, buf_len - len, "elem_size = %u\n",
+ htt_stats_buf->elem_size);
+ len += scnprintf(buf + len, buf_len - len, "num_elems = %lu\n",
+ FIELD_GET(HTT_RING_IF_STATS_NUM_ELEMS,
+ htt_stats_buf->num_elems__prefetch_tail_idx));
+ len += scnprintf(buf + len, buf_len - len, "prefetch_tail_idx = %lu\n",
+ FIELD_GET(HTT_RING_IF_STATS_PREFETCH_TAIL_INDEX,
+ htt_stats_buf->num_elems__prefetch_tail_idx));
+ len += scnprintf(buf + len, buf_len - len, "head_idx = %lu\n",
+ FIELD_GET(HTT_RING_IF_STATS_HEAD_IDX,
+ htt_stats_buf->head_idx__tail_idx));
+ len += scnprintf(buf + len, buf_len - len, "tail_idx = %lu\n",
+ FIELD_GET(HTT_RING_IF_STATS_TAIL_IDX,
+ htt_stats_buf->head_idx__tail_idx));
+ len += scnprintf(buf + len, buf_len - len, "shadow_head_idx = %lu\n",
+ FIELD_GET(HTT_RING_IF_STATS_SHADOW_HEAD_IDX,
+ htt_stats_buf->shadow_head_idx__shadow_tail_idx));
+ len += scnprintf(buf + len, buf_len - len, "shadow_tail_idx = %lu\n",
+ FIELD_GET(HTT_RING_IF_STATS_SHADOW_TAIL_IDX,
+ htt_stats_buf->shadow_head_idx__shadow_tail_idx));
+ len += scnprintf(buf + len, buf_len - len, "num_tail_incr = %u\n",
+ htt_stats_buf->num_tail_incr);
+ len += scnprintf(buf + len, buf_len - len, "lwm_thresh = %lu\n",
+ FIELD_GET(HTT_RING_IF_STATS_LWM_THRESH,
+ htt_stats_buf->lwm_thresh__hwm_thresh));
+ len += scnprintf(buf + len, buf_len - len, "hwm_thresh = %lu\n",
+ FIELD_GET(HTT_RING_IF_STATS_HWM_THRESH,
+ htt_stats_buf->lwm_thresh__hwm_thresh));
+ len += scnprintf(buf + len, buf_len - len, "overrun_hit_count = %u\n",
+ htt_stats_buf->overrun_hit_count);
+ len += scnprintf(buf + len, buf_len - len, "underrun_hit_count = %u\n",
+ htt_stats_buf->underrun_hit_count);
+ len += scnprintf(buf + len, buf_len - len, "prod_blockwait_count = %u\n",
+ htt_stats_buf->prod_blockwait_count);
+ len += scnprintf(buf + len, buf_len - len, "cons_blockwait_count = %u\n",
+ htt_stats_buf->cons_blockwait_count);
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->low_wm_hit_count,
+ "low_wm_hit_count", HTT_STATS_LOW_WM_BINS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->high_wm_hit_count,
+ "high_wm_hit_count", HTT_STATS_HIGH_WM_BINS, "\n\n");
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_ring_if_cmn_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_ring_if_cmn_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_RING_IF_CMN_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n",
+ FIELD_GET(HTT_STATS_MAC_ID, htt_stats_buf->mac_id__word));
+ len += scnprintf(buf + len, buf_len - len, "num_records = %u\n\n",
+ htt_stats_buf->num_records);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_sfm_client_user_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_sfm_client_user_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
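+ /* each entry is u32, i.e. 4 bytes */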
+ u16 num_elems = tag_len >> 2;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_SFM_CLIENT_USER_TLV_V:\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->dwords_used_by_user_n,
+ "dwords_used_by_user_n", num_elems, "\n\n");
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_sfm_client_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_sfm_client_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_SFM_CLIENT_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "client_id = %u\n",
+ htt_stats_buf->client_id);
+ len += scnprintf(buf + len, buf_len - len, "buf_min = %u\n",
+ htt_stats_buf->buf_min);
+ len += scnprintf(buf + len, buf_len - len, "buf_max = %u\n",
+ htt_stats_buf->buf_max);
+ len += scnprintf(buf + len, buf_len - len, "buf_busy = %u\n",
+ htt_stats_buf->buf_busy);
+ len += scnprintf(buf + len, buf_len - len, "buf_alloc = %u\n",
+ htt_stats_buf->buf_alloc);
+ len += scnprintf(buf + len, buf_len - len, "buf_avail = %u\n",
+ htt_stats_buf->buf_avail);
+ len += scnprintf(buf + len, buf_len - len, "num_users = %u\n\n",
+ htt_stats_buf->num_users);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_sfm_cmn_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_sfm_cmn_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_SFM_CMN_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n",
+ FIELD_GET(HTT_STATS_MAC_ID, htt_stats_buf->mac_id__word));
+ len += scnprintf(buf + len, buf_len - len, "buf_total = %u\n",
+ htt_stats_buf->buf_total);
+ len += scnprintf(buf + len, buf_len - len, "mem_empty = %u\n",
+ htt_stats_buf->mem_empty);
+ len += scnprintf(buf + len, buf_len - len, "deallocate_bufs = %u\n",
+ htt_stats_buf->deallocate_bufs);
+ len += scnprintf(buf + len, buf_len - len, "num_records = %u\n\n",
+ htt_stats_buf->num_records);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_sring_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_sring_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_SRING_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n",
+ FIELD_GET(HTT_SRING_STATS_MAC_ID,
+ htt_stats_buf->mac_id__ring_id__arena__ep));
+ len += scnprintf(buf + len, buf_len - len, "ring_id = %lu\n",
+ FIELD_GET(HTT_SRING_STATS_RING_ID,
+ htt_stats_buf->mac_id__ring_id__arena__ep));
+ len += scnprintf(buf + len, buf_len - len, "arena = %lu\n",
+ FIELD_GET(HTT_SRING_STATS_ARENA,
+ htt_stats_buf->mac_id__ring_id__arena__ep));
+ len += scnprintf(buf + len, buf_len - len, "ep = %lu\n",
+ FIELD_GET(HTT_SRING_STATS_EP,
+ htt_stats_buf->mac_id__ring_id__arena__ep));
+ len += scnprintf(buf + len, buf_len - len, "base_addr_lsb = 0x%x\n",
+ htt_stats_buf->base_addr_lsb);
+ len += scnprintf(buf + len, buf_len - len, "base_addr_msb = 0x%x\n",
+ htt_stats_buf->base_addr_msb);
+ len += scnprintf(buf + len, buf_len - len, "ring_size = %u\n",
+ htt_stats_buf->ring_size);
+ len += scnprintf(buf + len, buf_len - len, "elem_size = %u\n",
+ htt_stats_buf->elem_size);
+ len += scnprintf(buf + len, buf_len - len, "num_avail_words = %lu\n",
+ FIELD_GET(HTT_SRING_STATS_NUM_AVAIL_WORDS,
+ htt_stats_buf->num_avail_words__num_valid_words));
+ len += scnprintf(buf + len, buf_len - len, "num_valid_words = %lu\n",
+ FIELD_GET(HTT_SRING_STATS_NUM_VALID_WORDS,
+ htt_stats_buf->num_avail_words__num_valid_words));
+ len += scnprintf(buf + len, buf_len - len, "head_ptr = %lu\n",
+ FIELD_GET(HTT_SRING_STATS_HEAD_PTR,
+ htt_stats_buf->head_ptr__tail_ptr));
+ len += scnprintf(buf + len, buf_len - len, "tail_ptr = %lu\n",
+ FIELD_GET(HTT_SRING_STATS_TAIL_PTR,
+ htt_stats_buf->head_ptr__tail_ptr));
+ len += scnprintf(buf + len, buf_len - len, "consumer_empty = %lu\n",
+ FIELD_GET(HTT_SRING_STATS_CONSUMER_EMPTY,
+ htt_stats_buf->consumer_empty__producer_full));
+ len += scnprintf(buf + len, buf_len - len, "producer_full = %lu\n",
+ FIELD_GET(HTT_SRING_STATS_PRODUCER_FULL,
+ htt_stats_buf->consumer_empty__producer_full));
+ len += scnprintf(buf + len, buf_len - len, "prefetch_count = %lu\n",
+ FIELD_GET(HTT_SRING_STATS_PREFETCH_COUNT,
+ htt_stats_buf->prefetch_count__internal_tail_ptr));
+ len += scnprintf(buf + len, buf_len - len, "internal_tail_ptr = %lu\n\n",
+ FIELD_GET(HTT_SRING_STATS_INTERNAL_TAIL_PTR,
+ htt_stats_buf->prefetch_count__internal_tail_ptr));
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_sring_cmn_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_sring_cmn_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_SRING_CMN_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "num_records = %u\n\n",
+ htt_stats_buf->num_records);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_tx_pdev_rate_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_pdev_rate_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ u8 j;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_PDEV_RATE_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n",
+ FIELD_GET(HTT_STATS_MAC_ID, htt_stats_buf->mac_id__word));
+ len += scnprintf(buf + len, buf_len - len, "tx_ldpc = %u\n",
+ htt_stats_buf->tx_ldpc);
+ len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_tx_ldpc = %u\n",
+ htt_stats_buf->ac_mu_mimo_tx_ldpc);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_tx_ldpc = %u\n",
+ htt_stats_buf->ax_mu_mimo_tx_ldpc);
+ len += scnprintf(buf + len, buf_len - len, "ofdma_tx_ldpc = %u\n",
+ htt_stats_buf->ofdma_tx_ldpc);
+ len += scnprintf(buf + len, buf_len - len, "rts_cnt = %u\n",
+ htt_stats_buf->rts_cnt);
+ len += scnprintf(buf + len, buf_len - len, "rts_success = %u\n",
+ htt_stats_buf->rts_success);
+ len += scnprintf(buf + len, buf_len - len, "ack_rssi = %u\n",
+ htt_stats_buf->ack_rssi);
+
+ len += scnprintf(buf + len, buf_len - len,
+ "Legacy CCK Rates: 1 Mbps: %u, 2 Mbps: %u, 5.5 Mbps: %u, 11 Mbps: %u\n",
+ htt_stats_buf->tx_legacy_cck_rate[0],
+ htt_stats_buf->tx_legacy_cck_rate[1],
+ htt_stats_buf->tx_legacy_cck_rate[2],
+ htt_stats_buf->tx_legacy_cck_rate[3]);
+
+ len += scnprintf(buf + len, buf_len - len,
+ "Legacy OFDM Rates: 6 Mbps: %u, 9 Mbps: %u, 12 Mbps: %u, 18 Mbps: %u\n"
+ " 24 Mbps: %u, 36 Mbps: %u, 48 Mbps: %u, 54 Mbps: %u\n",
+ htt_stats_buf->tx_legacy_ofdm_rate[0],
+ htt_stats_buf->tx_legacy_ofdm_rate[1],
+ htt_stats_buf->tx_legacy_ofdm_rate[2],
+ htt_stats_buf->tx_legacy_ofdm_rate[3],
+ htt_stats_buf->tx_legacy_ofdm_rate[4],
+ htt_stats_buf->tx_legacy_ofdm_rate[5],
+ htt_stats_buf->tx_legacy_ofdm_rate[6],
+ htt_stats_buf->tx_legacy_ofdm_rate[7]);
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_mcs, "tx_mcs",
+ HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ac_mu_mimo_tx_mcs,
+ "ac_mu_mimo_tx_mcs", HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ax_mu_mimo_tx_mcs,
+ "ax_mu_mimo_tx_mcs", HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ofdma_tx_mcs, "ofdma_tx_mcs",
+ HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_nss, "tx_nss",
+ HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ac_mu_mimo_tx_nss,
+ "ac_mu_mimo_tx_nss",
+ HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ax_mu_mimo_tx_nss,
+ "ax_mu_mimo_tx_nss",
+ HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ofdma_tx_nss, "ofdma_tx_nss",
+ HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_bw, "tx_bw",
+ HTT_TX_PDEV_STATS_NUM_BW_COUNTERS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ac_mu_mimo_tx_bw,
+ "ac_mu_mimo_tx_bw", HTT_TX_PDEV_STATS_NUM_BW_COUNTERS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ax_mu_mimo_tx_bw,
+ "ax_mu_mimo_tx_bw",
+ HTT_TX_PDEV_STATS_NUM_BW_COUNTERS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ofdma_tx_bw, "ofdma_tx_bw",
+ HTT_TX_PDEV_STATS_NUM_BW_COUNTERS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_stbc, "tx_stbc",
+ HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_pream, "tx_pream",
+ HTT_TX_PDEV_STATS_NUM_PREAMBLE_TYPES, "\n");
+
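+ /* tx_he_ltf[0] is not printed; the 1x/2x/4x counters are at indices 1-3 */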
+ len += scnprintf(buf + len, buf_len - len, "HE LTF: 1x: %u, 2x: %u, 4x: %u\n",
+ htt_stats_buf->tx_he_ltf[1],
+ htt_stats_buf->tx_he_ltf[2],
+ htt_stats_buf->tx_he_ltf[3]);
+
+ /* SU GI Stats */
+ for (j = 0; j < HTT_TX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
+ len += scnprintf(buf + len, (buf_len - len),
+ "tx_gi[%u] = ", j);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_gi[j], NULL,
+ HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+ }
+
+ /* AC MU-MIMO GI Stats */
+ for (j = 0; j < HTT_TX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
+ len += scnprintf(buf + len, (buf_len - len),
+ "ac_mu_mimo_tx_gi[%u] = ", j);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ac_mu_mimo_tx_gi[j],
+ NULL, HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+ }
+
+ /* AX MU-MIMO GI Stats */
+ for (j = 0; j < HTT_TX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
+ len += scnprintf(buf + len, (buf_len - len),
+ "ax_mu_mimo_tx_gi[%u] = ", j);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ax_mu_mimo_tx_gi[j],
+ NULL, HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+ }
+
+ /* DL OFDMA GI Stats */
+ for (j = 0; j < HTT_TX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
+ len += scnprintf(buf + len, (buf_len - len),
+ "ofdma_tx_gi[%u] = ", j);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ofdma_tx_gi[j], NULL,
+ HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+ }
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_dcm, "tx_dcm",
+ HTT_TX_PDEV_STATS_NUM_DCM_COUNTERS, "\n\n");
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_rx_pdev_rate_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_rx_pdev_rate_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ u8 i, j;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_RX_PDEV_RATE_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n",
+ FIELD_GET(HTT_STATS_MAC_ID, htt_stats_buf->mac_id__word));
+ len += scnprintf(buf + len, buf_len - len, "nsts = %u\n",
+ htt_stats_buf->nsts);
+ len += scnprintf(buf + len, buf_len - len, "rx_ldpc = %u\n",
+ htt_stats_buf->rx_ldpc);
+ len += scnprintf(buf + len, buf_len - len, "rts_cnt = %u\n",
+ htt_stats_buf->rts_cnt);
+ len += scnprintf(buf + len, buf_len - len, "rssi_mgmt = %u\n",
+ htt_stats_buf->rssi_mgmt);
+ len += scnprintf(buf + len, buf_len - len, "rssi_data = %u\n",
+ htt_stats_buf->rssi_data);
+ len += scnprintf(buf + len, buf_len - len, "rssi_comb = %u\n",
+ htt_stats_buf->rssi_comb);
+ len += scnprintf(buf + len, buf_len - len, "rssi_in_dbm = %d\n",
+ htt_stats_buf->rssi_in_dbm);
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_mcs, "rx_mcs",
+ HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_nss, "rx_nss",
+ HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_dcm, "rx_dcm",
+ HTT_RX_PDEV_STATS_NUM_DCM_COUNTERS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_stbc, "rx_stbc",
+ HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_bw, "rx_bw",
+ HTT_RX_PDEV_STATS_NUM_BW_COUNTERS, "\n");
+
+ len += scnprintf(buf + len, buf_len - len, "rx_evm_nss_count = %u\n",
+ htt_stats_buf->nss_count);
+
+ len += scnprintf(buf + len, buf_len - len, "rx_evm_pilot_count = %u\n",
+ htt_stats_buf->pilot_count);
+
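+ /* per-pilot EVM in dB, one row per spatial stream */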
+ for (j = 0; j < HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "pilot_evm_db[%u] = ", j);
+ for (i = 0; i < HTT_RX_PDEV_STATS_RXEVM_MAX_PILOTS_PER_NSS; i++)
+ len += scnprintf(buf + len,
+ buf_len - len,
+ " %u:%d,",
+ i,
+ htt_stats_buf->rx_pilot_evm_db[j][i]);
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ }
+
+ len += scnprintf(buf + len, buf_len - len,
+ "pilot_evm_db_mean = ");
+ for (i = 0; i < HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; i++)
+ len += scnprintf(buf + len,
+ buf_len - len,
+ " %u:%d,", i,
+ htt_stats_buf->rx_pilot_evm_db_mean[i]);
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ for (j = 0; j < HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "rssi_chain[%u] = ", j);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rssi_chain[j], NULL,
+ HTT_RX_PDEV_STATS_NUM_BW_COUNTERS, "\n");
+ }
+
+ for (j = 0; j < HTT_RX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "rx_gi[%u] = ", j);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_gi[j], NULL,
+ HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+ }
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_pream, "rx_pream",
+ HTT_RX_PDEV_STATS_NUM_PREAMBLE_TYPES, "\n");
+
+ len += scnprintf(buf + len, buf_len - len, "rx_11ax_su_ext = %u\n",
+ htt_stats_buf->rx_11ax_su_ext);
+ len += scnprintf(buf + len, buf_len - len, "rx_11ac_mumimo = %u\n",
+ htt_stats_buf->rx_11ac_mumimo);
+ len += scnprintf(buf + len, buf_len - len, "rx_11ax_mumimo = %u\n",
+ htt_stats_buf->rx_11ax_mumimo);
+ len += scnprintf(buf + len, buf_len - len, "rx_11ax_ofdma = %u\n",
+ htt_stats_buf->rx_11ax_ofdma);
+ len += scnprintf(buf + len, buf_len - len, "txbf = %u\n",
+ htt_stats_buf->txbf);
+
+ len += scnprintf(buf + len, buf_len - len, "\nrx_su_ndpa = %u",
+ htt_stats_buf->rx_su_ndpa);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_11ax_su_txbf_mcs,
+ "rx_11ax_su_txbf_mcs", HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS,
+ "\n");
+
+ len += scnprintf(buf + len, buf_len - len, "\nrx_mu_ndpa = %u",
+ htt_stats_buf->rx_mu_ndpa);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_11ax_mu_txbf_mcs,
+ "rx_11ax_mu_txbf_mcs", HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS,
+ "\n");
+
+ len += scnprintf(buf + len, buf_len - len, "\nrx_br_poll = %u",
+ htt_stats_buf->rx_br_poll);
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_legacy_cck_rate,
+ "rx_legacy_cck_rate",
+ HTT_RX_PDEV_STATS_NUM_LEGACY_CCK_STATS, "\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_legacy_ofdm_rate,
+ "rx_legacy_ofdm_rate",
+ HTT_RX_PDEV_STATS_NUM_LEGACY_OFDM_STATS, "\n");
+
+ len += scnprintf(buf + len, buf_len - len, "rx_active_dur_us_low = %u\n",
+ htt_stats_buf->rx_active_dur_us_low);
+ len += scnprintf(buf + len, buf_len - len, "rx_active_dur_us_high = %u\n",
+ htt_stats_buf->rx_active_dur_us_high);
+ len += scnprintf(buf + len, buf_len - len, "rx_11ax_ul_ofdma = %u\n",
+ htt_stats_buf->rx_11ax_ul_ofdma);
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ul_ofdma_rx_mcs,
+ "ul_ofdma_rx_mcs",
+ HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+
+ for (j = 0; j < HTT_RX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "ul_ofdma_rx_gi[%u] = ", j);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ul_ofdma_rx_gi[j], NULL,
+ HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+ }
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ul_ofdma_rx_nss,
+ "ul_ofdma_rx_nss",
+ HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS, "\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ul_ofdma_rx_bw, "ul_ofdma_rx_bw",
+ HTT_RX_PDEV_STATS_NUM_BW_COUNTERS, "\n");
+
+ len += scnprintf(buf + len, buf_len - len, "ul_ofdma_rx_stbc = %u\n",
+ htt_stats_buf->ul_ofdma_rx_stbc);
+ len += scnprintf(buf + len, buf_len - len, "ul_ofdma_rx_ldpc = %u\n",
+ htt_stats_buf->ul_ofdma_rx_ldpc);
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_ulofdma_non_data_ppdu,
+ "rx_ulofdma_non_data_ppdu",
+ HTT_RX_PDEV_MAX_OFDMA_NUM_USER, "\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_ulofdma_data_ppdu,
+ "rx_ulofdma_data_ppdu", HTT_RX_PDEV_MAX_OFDMA_NUM_USER, "\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_ulofdma_mpdu_ok,
+ "rx_ulofdma_mpdu_ok", HTT_RX_PDEV_MAX_OFDMA_NUM_USER, "\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_ulofdma_mpdu_fail,
+ "rx_ulofdma_mpdu_fail", HTT_RX_PDEV_MAX_OFDMA_NUM_USER, "\n");
+
+ for (j = 0; j < HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "rx_ul_fd_rssi: nss[%u] = ", j);
+ for (i = 0; i < HTT_RX_PDEV_MAX_OFDMA_NUM_USER; i++)
+ len += scnprintf(buf + len,
+ buf_len - len,
+ " %u:%d,",
+ i, htt_stats_buf->rx_ul_fd_rssi[j][i]);
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ }
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_ulofdma_non_data_nusers,
+ "rx_ulofdma_non_data_nusers", HTT_RX_PDEV_MAX_OFDMA_NUM_USER,
+ "\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_ulofdma_data_nusers,
+ "rx_ulofdma_data_nusers", HTT_RX_PDEV_MAX_OFDMA_NUM_USER,
+ "\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_11ax_dl_ofdma_mcs,
+ "rx_11ax_dl_ofdma_mcs", HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS,
+ "\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_11ax_dl_ofdma_ru,
+ "rx_11ax_dl_ofdma_ru", HTT_RX_PDEV_STATS_NUM_RU_SIZE_COUNTERS,
+ "\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_ulmumimo_non_data_ppdu,
+ "rx_ulmumimo_non_data_ppdu", HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER,
+ "\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_ulmumimo_data_ppdu,
+ "rx_ulmumimo_data_ppdu", HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER,
+ "\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_ulmumimo_mpdu_ok,
+ "rx_ulmumimo_mpdu_ok", HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER,
+ "\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_ulmumimo_mpdu_fail,
+ "rx_ulmumimo_mpdu_fail", HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER,
+ "\n");
+
+ len += scnprintf(buf + len, buf_len - len, "per_chain_rssi_pkt_type = %#x\n",
+ htt_stats_buf->per_chain_rssi_pkt_type);
+
+ for (j = 0; j < HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "rx_per_chain_rssi_in_dbm[%u] = ", j);
+ for (i = 0; i < HTT_RX_PDEV_STATS_NUM_BW_COUNTERS; i++)
+ len += scnprintf(buf + len,
+ buf_len - len,
+ " %u:%d,",
+ i,
+ htt_stats_buf->rx_per_chain_rssi_in_dbm[j][i]);
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ }
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_rx_soc_fw_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_rx_soc_fw_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_RX_SOC_FW_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "fw_reo_ring_data_msdu = %u\n",
+ htt_stats_buf->fw_reo_ring_data_msdu);
+ len += scnprintf(buf + len, buf_len - len, "fw_to_host_data_msdu_bcmc = %u\n",
+ htt_stats_buf->fw_to_host_data_msdu_bcmc);
+ len += scnprintf(buf + len, buf_len - len, "fw_to_host_data_msdu_uc = %u\n",
+ htt_stats_buf->fw_to_host_data_msdu_uc);
+ len += scnprintf(buf + len, buf_len - len,
+ "ofld_remote_data_buf_recycle_cnt = %u\n",
+ htt_stats_buf->ofld_remote_data_buf_recycle_cnt);
+ len += scnprintf(buf + len, buf_len - len,
+ "ofld_remote_free_buf_indication_cnt = %u\n",
+ htt_stats_buf->ofld_remote_free_buf_indication_cnt);
+ len += scnprintf(buf + len, buf_len - len,
+ "ofld_buf_to_host_data_msdu_uc = %u\n",
+ htt_stats_buf->ofld_buf_to_host_data_msdu_uc);
+ len += scnprintf(buf + len, buf_len - len,
+ "reo_fw_ring_to_host_data_msdu_uc = %u\n",
+ htt_stats_buf->reo_fw_ring_to_host_data_msdu_uc);
+ len += scnprintf(buf + len, buf_len - len, "wbm_sw_ring_reap = %u\n",
+ htt_stats_buf->wbm_sw_ring_reap);
+ len += scnprintf(buf + len, buf_len - len, "wbm_forward_to_host_cnt = %u\n",
+ htt_stats_buf->wbm_forward_to_host_cnt);
+ len += scnprintf(buf + len, buf_len - len, "wbm_target_recycle_cnt = %u\n",
+ htt_stats_buf->wbm_target_recycle_cnt);
+ len += scnprintf(buf + len, buf_len - len,
+ "target_refill_ring_recycle_cnt = %u\n",
+ htt_stats_buf->target_refill_ring_recycle_cnt);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_rx_soc_fw_refill_ring_empty_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_rx_soc_fw_refill_ring_empty_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
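+ /* tag_len is in bytes and each counter is a u32, so tag_len >> 2 entries; clamp to the ring array size */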
+ u16 num_elems = min_t(u16, (tag_len >> 2), HTT_RX_STATS_REFILL_MAX_RING);
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_RX_SOC_FW_REFILL_RING_EMPTY_TLV_V:\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->refill_ring_empty_cnt,
+ "refill_ring_empty_cnt", num_elems, "\n\n");
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_rx_soc_fw_refill_ring_num_rxdma_err_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_rx_soc_fw_refill_ring_num_rxdma_err_tlv_v *htt_stats_buf =
+ tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ u16 num_elems = min_t(u16, (tag_len >> 2), HTT_RX_RXDMA_MAX_ERR_CODE);
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_RX_SOC_FW_REFILL_RING_NUM_RXDMA_ERR_TLV_V:\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rxdma_err, "rxdma_err",
+ num_elems, "\n\n");
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_rx_soc_fw_refill_ring_num_reo_err_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_rx_soc_fw_refill_ring_num_reo_err_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ u16 num_elems = min_t(u16, (tag_len >> 2), HTT_RX_REO_MAX_ERR_CODE);
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_RX_SOC_FW_REFILL_RING_NUM_REO_ERR_TLV_V:\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->reo_err, "reo_err",
+ num_elems, "\n\n");
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_rx_reo_debug_stats_tlv_v(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_rx_reo_resource_stats_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_RX_REO_RESOURCE_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "sample_id = %u\n",
+ htt_stats_buf->sample_id);
+ len += scnprintf(buf + len, buf_len - len, "total_max = %u\n",
+ htt_stats_buf->total_max);
+ len += scnprintf(buf + len, buf_len - len, "total_avg = %u\n",
+ htt_stats_buf->total_avg);
+ len += scnprintf(buf + len, buf_len - len, "total_sample = %u\n",
+ htt_stats_buf->total_sample);
+ len += scnprintf(buf + len, buf_len - len, "non_zeros_avg = %u\n",
+ htt_stats_buf->non_zeros_avg);
+ len += scnprintf(buf + len, buf_len - len, "non_zeros_sample = %u\n",
+ htt_stats_buf->non_zeros_sample);
+ len += scnprintf(buf + len, buf_len - len, "last_non_zeros_max = %u\n",
+ htt_stats_buf->last_non_zeros_max);
+ len += scnprintf(buf + len, buf_len - len, "last_non_zeros_min %u\n",
+ htt_stats_buf->last_non_zeros_min);
+ len += scnprintf(buf + len, buf_len - len, "last_non_zeros_avg %u\n",
+ htt_stats_buf->last_non_zeros_avg);
+ len += scnprintf(buf + len, buf_len - len, "last_non_zeros_sample %u\n\n",
+ htt_stats_buf->last_non_zeros_sample);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_rx_soc_fw_refill_ring_num_refill_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_rx_soc_fw_refill_ring_num_refill_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ u16 num_elems = min_t(u16, (tag_len >> 2), HTT_RX_STATS_REFILL_MAX_RING);
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_RX_SOC_FW_REFILL_RING_NUM_REFILL_TLV_V:\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->refill_ring_num_refill,
+ "refill_ring_num_refill", num_elems, "\n\n");
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_rx_pdev_fw_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_rx_pdev_fw_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_RX_PDEV_FW_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n",
+ FIELD_GET(HTT_STATS_MAC_ID, htt_stats_buf->mac_id__word));
+ len += scnprintf(buf + len, buf_len - len, "ppdu_recvd = %u\n",
+ htt_stats_buf->ppdu_recvd);
+ len += scnprintf(buf + len, buf_len - len, "mpdu_cnt_fcs_ok = %u\n",
+ htt_stats_buf->mpdu_cnt_fcs_ok);
+ len += scnprintf(buf + len, buf_len - len, "mpdu_cnt_fcs_err = %u\n",
+ htt_stats_buf->mpdu_cnt_fcs_err);
+ len += scnprintf(buf + len, buf_len - len, "tcp_msdu_cnt = %u\n",
+ htt_stats_buf->tcp_msdu_cnt);
+ len += scnprintf(buf + len, buf_len - len, "tcp_ack_msdu_cnt = %u\n",
+ htt_stats_buf->tcp_ack_msdu_cnt);
+ len += scnprintf(buf + len, buf_len - len, "udp_msdu_cnt = %u\n",
+ htt_stats_buf->udp_msdu_cnt);
+ len += scnprintf(buf + len, buf_len - len, "other_msdu_cnt = %u\n",
+ htt_stats_buf->other_msdu_cnt);
+ len += scnprintf(buf + len, buf_len - len, "fw_ring_mpdu_ind = %u\n",
+ htt_stats_buf->fw_ring_mpdu_ind);
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->fw_ring_mgmt_subtype,
+ "fw_ring_mgmt_subtype", HTT_STATS_SUBTYPE_MAX, "\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->fw_ring_ctrl_subtype,
+ "fw_ring_ctrl_subtype", HTT_STATS_SUBTYPE_MAX, "\n");
+
+ len += scnprintf(buf + len, buf_len - len, "fw_ring_mcast_data_msdu = %u\n",
+ htt_stats_buf->fw_ring_mcast_data_msdu);
+ len += scnprintf(buf + len, buf_len - len, "fw_ring_bcast_data_msdu = %u\n",
+ htt_stats_buf->fw_ring_bcast_data_msdu);
+ len += scnprintf(buf + len, buf_len - len, "fw_ring_ucast_data_msdu = %u\n",
+ htt_stats_buf->fw_ring_ucast_data_msdu);
+ len += scnprintf(buf + len, buf_len - len, "fw_ring_null_data_msdu = %u\n",
+ htt_stats_buf->fw_ring_null_data_msdu);
+ len += scnprintf(buf + len, buf_len - len, "fw_ring_mpdu_drop = %u\n",
+ htt_stats_buf->fw_ring_mpdu_drop);
+ len += scnprintf(buf + len, buf_len - len, "ofld_local_data_ind_cnt = %u\n",
+ htt_stats_buf->ofld_local_data_ind_cnt);
+ len += scnprintf(buf + len, buf_len - len,
+ "ofld_local_data_buf_recycle_cnt = %u\n",
+ htt_stats_buf->ofld_local_data_buf_recycle_cnt);
+ len += scnprintf(buf + len, buf_len - len, "drx_local_data_ind_cnt = %u\n",
+ htt_stats_buf->drx_local_data_ind_cnt);
+ len += scnprintf(buf + len, buf_len - len,
+ "drx_local_data_buf_recycle_cnt = %u\n",
+ htt_stats_buf->drx_local_data_buf_recycle_cnt);
+ len += scnprintf(buf + len, buf_len - len, "local_nondata_ind_cnt = %u\n",
+ htt_stats_buf->local_nondata_ind_cnt);
+ len += scnprintf(buf + len, buf_len - len, "local_nondata_buf_recycle_cnt = %u\n",
+ htt_stats_buf->local_nondata_buf_recycle_cnt);
+ len += scnprintf(buf + len, buf_len - len, "fw_status_buf_ring_refill_cnt = %u\n",
+ htt_stats_buf->fw_status_buf_ring_refill_cnt);
+ len += scnprintf(buf + len, buf_len - len, "fw_status_buf_ring_empty_cnt = %u\n",
+ htt_stats_buf->fw_status_buf_ring_empty_cnt);
+ len += scnprintf(buf + len, buf_len - len, "fw_pkt_buf_ring_refill_cnt = %u\n",
+ htt_stats_buf->fw_pkt_buf_ring_refill_cnt);
+ len += scnprintf(buf + len, buf_len - len, "fw_pkt_buf_ring_empty_cnt = %u\n",
+ htt_stats_buf->fw_pkt_buf_ring_empty_cnt);
+ len += scnprintf(buf + len, buf_len - len, "fw_link_buf_ring_refill_cnt = %u\n",
+ htt_stats_buf->fw_link_buf_ring_refill_cnt);
+ len += scnprintf(buf + len, buf_len - len, "fw_link_buf_ring_empty_cnt = %u\n",
+ htt_stats_buf->fw_link_buf_ring_empty_cnt);
+ len += scnprintf(buf + len, buf_len - len, "host_pkt_buf_ring_refill_cnt = %u\n",
+ htt_stats_buf->host_pkt_buf_ring_refill_cnt);
+ len += scnprintf(buf + len, buf_len - len, "host_pkt_buf_ring_empty_cnt = %u\n",
+ htt_stats_buf->host_pkt_buf_ring_empty_cnt);
+ len += scnprintf(buf + len, buf_len - len, "mon_pkt_buf_ring_refill_cnt = %u\n",
+ htt_stats_buf->mon_pkt_buf_ring_refill_cnt);
+ len += scnprintf(buf + len, buf_len - len, "mon_pkt_buf_ring_empty_cnt = %u\n",
+ htt_stats_buf->mon_pkt_buf_ring_empty_cnt);
+ len += scnprintf(buf + len, buf_len - len,
+ "mon_status_buf_ring_refill_cnt = %u\n",
+ htt_stats_buf->mon_status_buf_ring_refill_cnt);
+ len += scnprintf(buf + len, buf_len - len, "mon_status_buf_ring_empty_cnt = %u\n",
+ htt_stats_buf->mon_status_buf_ring_empty_cnt);
+ len += scnprintf(buf + len, buf_len - len, "mon_desc_buf_ring_refill_cnt = %u\n",
+ htt_stats_buf->mon_desc_buf_ring_refill_cnt);
+ len += scnprintf(buf + len, buf_len - len, "mon_desc_buf_ring_empty_cnt = %u\n",
+ htt_stats_buf->mon_desc_buf_ring_empty_cnt);
+ len += scnprintf(buf + len, buf_len - len, "mon_dest_ring_update_cnt = %u\n",
+ htt_stats_buf->mon_dest_ring_update_cnt);
+ len += scnprintf(buf + len, buf_len - len, "mon_dest_ring_full_cnt = %u\n",
+ htt_stats_buf->mon_dest_ring_full_cnt);
+ len += scnprintf(buf + len, buf_len - len, "rx_suspend_cnt = %u\n",
+ htt_stats_buf->rx_suspend_cnt);
+ len += scnprintf(buf + len, buf_len - len, "rx_suspend_fail_cnt = %u\n",
+ htt_stats_buf->rx_suspend_fail_cnt);
+ len += scnprintf(buf + len, buf_len - len, "rx_resume_cnt = %u\n",
+ htt_stats_buf->rx_resume_cnt);
+ len += scnprintf(buf + len, buf_len - len, "rx_resume_fail_cnt = %u\n",
+ htt_stats_buf->rx_resume_fail_cnt);
+ len += scnprintf(buf + len, buf_len - len, "rx_ring_switch_cnt = %u\n",
+ htt_stats_buf->rx_ring_switch_cnt);
+ len += scnprintf(buf + len, buf_len - len, "rx_ring_restore_cnt = %u\n",
+ htt_stats_buf->rx_ring_restore_cnt);
+ len += scnprintf(buf + len, buf_len - len, "rx_flush_cnt = %u\n",
+ htt_stats_buf->rx_flush_cnt);
+ len += scnprintf(buf + len, buf_len - len, "rx_recovery_reset_cnt = %u\n\n",
+ htt_stats_buf->rx_recovery_reset_cnt);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_rx_pdev_fw_ring_mpdu_err_tlv_v(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_rx_pdev_fw_ring_mpdu_err_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_RX_PDEV_FW_RING_MPDU_ERR_TLV_V:\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->fw_ring_mpdu_err,
+ "fw_ring_mpdu_err", HTT_RX_STATS_RXDMA_MAX_ERR, "\n");
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_rx_pdev_fw_mpdu_drop_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_rx_pdev_fw_mpdu_drop_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ u16 num_elems = min_t(u16, (tag_len >> 2), HTT_RX_STATS_FW_DROP_REASON_MAX);
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_RX_PDEV_FW_MPDU_DROP_TLV_V:\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->fw_mpdu_drop, "fw_mpdu_drop",
+ num_elems, "\n\n");
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_rx_pdev_fw_stats_phy_err_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_rx_pdev_fw_stats_phy_err_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_RX_PDEV_FW_STATS_PHY_ERR_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id__word = %u\n",
+ htt_stats_buf->mac_id__word);
+ len += scnprintf(buf + len, buf_len - len, "total_phy_err_nct = %u\n",
+ htt_stats_buf->total_phy_err_cnt);
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->phy_err, "phy_errs",
+ HTT_STATS_PHY_ERR_MAX, "\n\n");
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_pdev_cca_stats_hist_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_pdev_cca_stats_hist_v1_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "\nHTT_PDEV_CCA_STATS_HIST_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "chan_num = %u\n",
+ htt_stats_buf->chan_num);
+ len += scnprintf(buf + len, buf_len - len, "num_records = %u\n",
+ htt_stats_buf->num_records);
+ len += scnprintf(buf + len, buf_len - len, "valid_cca_counters_bitmap = 0x%x\n",
+ htt_stats_buf->valid_cca_counters_bitmap);
+ len += scnprintf(buf + len, buf_len - len, "collection_interval = %u\n\n",
+ htt_stats_buf->collection_interval);
+
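+ /* print the column header only; each following CCA counters TLV emits one row */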
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_PDEV_STATS_CCA_COUNTERS_TLV:(in usec)\n");
+ len += scnprintf(buf + len, buf_len - len,
+ "| tx_frame| rx_frame| rx_clear| my_rx_frame| cnt| med_rx_idle| med_tx_idle_global| cca_obss|\n");
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_pdev_stats_cca_counters_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_pdev_stats_cca_counters_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len,
+ "|%10u| %10u| %10u| %11u| %10u| %11u| %18u| %10u|\n",
+ htt_stats_buf->tx_frame_usec,
+ htt_stats_buf->rx_frame_usec,
+ htt_stats_buf->rx_clear_usec,
+ htt_stats_buf->my_rx_frame_usec,
+ htt_stats_buf->usec_cnt,
+ htt_stats_buf->med_rx_idle_usec,
+ htt_stats_buf->med_tx_idle_global_usec,
+ htt_stats_buf->cca_obss_usec);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_hw_stats_whal_tx_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_hw_stats_whal_tx_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_HW_STATS_WHAL_TX_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n",
+ FIELD_GET(HTT_STATS_MAC_ID, htt_stats_buf->mac_id__word));
+ len += scnprintf(buf + len, buf_len - len, "last_unpause_ppdu_id = %u\n",
+ htt_stats_buf->last_unpause_ppdu_id);
+ len += scnprintf(buf + len, buf_len - len, "hwsch_unpause_wait_tqm_write = %u\n",
+ htt_stats_buf->hwsch_unpause_wait_tqm_write);
+ len += scnprintf(buf + len, buf_len - len, "hwsch_dummy_tlv_skipped = %u\n",
+ htt_stats_buf->hwsch_dummy_tlv_skipped);
+ len += scnprintf(buf + len, buf_len - len,
+ "hwsch_misaligned_offset_received = %u\n",
+ htt_stats_buf->hwsch_misaligned_offset_received);
+ len += scnprintf(buf + len, buf_len - len, "hwsch_reset_count = %u\n",
+ htt_stats_buf->hwsch_reset_count);
+ len += scnprintf(buf + len, buf_len - len, "hwsch_dev_reset_war = %u\n",
+ htt_stats_buf->hwsch_dev_reset_war);
+ len += scnprintf(buf + len, buf_len - len, "hwsch_delayed_pause = %u\n",
+ htt_stats_buf->hwsch_delayed_pause);
+ len += scnprintf(buf + len, buf_len - len, "hwsch_long_delayed_pause = %u\n",
+ htt_stats_buf->hwsch_long_delayed_pause);
+ len += scnprintf(buf + len, buf_len - len, "sch_rx_ppdu_no_response = %u\n",
+ htt_stats_buf->sch_rx_ppdu_no_response);
+ len += scnprintf(buf + len, buf_len - len, "sch_selfgen_response = %u\n",
+ htt_stats_buf->sch_selfgen_response);
+ len += scnprintf(buf + len, buf_len - len, "sch_rx_sifs_resp_trigger= %u\n\n",
+ htt_stats_buf->sch_rx_sifs_resp_trigger);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_pdev_stats_twt_sessions_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_pdev_stats_twt_sessions_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_PDEV_STATS_TWT_SESSIONS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "pdev_id = %u\n",
+ htt_stats_buf->pdev_id);
+ len += scnprintf(buf + len, buf_len - len, "num_sessions = %u\n\n",
+ htt_stats_buf->num_sessions);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_pdev_stats_twt_session_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_pdev_stats_twt_session_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_PDEV_STATS_TWT_SESSION_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "vdev_id = %u\n",
+ htt_stats_buf->vdev_id);
+ len += scnprintf(buf + len, buf_len - len,
+ "peer_mac = %02lx:%02lx:%02lx:%02lx:%02lx:%02lx\n",
+ FIELD_GET(HTT_MAC_ADDR_L32_0,
+ htt_stats_buf->peer_mac.mac_addr_l32),
+ FIELD_GET(HTT_MAC_ADDR_L32_1,
+ htt_stats_buf->peer_mac.mac_addr_l32),
+ FIELD_GET(HTT_MAC_ADDR_L32_2,
+ htt_stats_buf->peer_mac.mac_addr_l32),
+ FIELD_GET(HTT_MAC_ADDR_L32_3,
+ htt_stats_buf->peer_mac.mac_addr_l32),
+ FIELD_GET(HTT_MAC_ADDR_H16_0,
+ htt_stats_buf->peer_mac.mac_addr_h16),
+ FIELD_GET(HTT_MAC_ADDR_H16_1,
+ htt_stats_buf->peer_mac.mac_addr_h16));
+ len += scnprintf(buf + len, buf_len - len, "flow_id_flags = %u\n",
+ htt_stats_buf->flow_id_flags);
+ len += scnprintf(buf + len, buf_len - len, "dialog_id = %u\n",
+ htt_stats_buf->dialog_id);
+ len += scnprintf(buf + len, buf_len - len, "wake_dura_us = %u\n",
+ htt_stats_buf->wake_dura_us);
+ len += scnprintf(buf + len, buf_len - len, "wake_intvl_us = %u\n",
+ htt_stats_buf->wake_intvl_us);
+ len += scnprintf(buf + len, buf_len - len, "sp_offset_us = %u\n\n",
+ htt_stats_buf->sp_offset_us);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_pdev_obss_pd_stats_tlv_v(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_pdev_obss_pd_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "OBSS Tx success PPDU = %u\n",
+ htt_stats_buf->num_obss_tx_ppdu_success);
+ len += scnprintf(buf + len, buf_len - len, "OBSS Tx failures PPDU = %u\n",
+ htt_stats_buf->num_obss_tx_ppdu_failure);
+ len += scnprintf(buf + len, buf_len - len, "Non-SRG Opportunities = %u\n",
+ htt_stats_buf->num_non_srg_opportunities);
+ len += scnprintf(buf + len, buf_len - len, "Non-SRG tried PPDU = %u\n",
+ htt_stats_buf->num_non_srg_ppdu_tried);
+ len += scnprintf(buf + len, buf_len - len, "Non-SRG success PPDU = %u\n",
+ htt_stats_buf->num_non_srg_ppdu_success);
+ len += scnprintf(buf + len, buf_len - len, "SRG Opportunities = %u\n",
+ htt_stats_buf->num_srg_opportunities);
+ len += scnprintf(buf + len, buf_len - len, "SRG tried PPDU = %u\n",
+ htt_stats_buf->num_srg_ppdu_tried);
+ len += scnprintf(buf + len, buf_len - len, "SRG success PPDU = %u\n\n",
+ htt_stats_buf->num_srg_ppdu_success);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_backpressure_stats_tlv_v(const u32 *tag_buf,
+ u8 *data)
+{
+ struct debug_htt_stats_req *stats_req =
+ (struct debug_htt_stats_req *)data;
+ const struct htt_ring_backpressure_stats_tlv *htt_stats_buf =
+ (const struct htt_ring_backpressure_stats_tlv *)tag_buf;
+ int i;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "pdev_id = %u\n",
+ htt_stats_buf->pdev_id);
+ len += scnprintf(buf + len, buf_len - len, "current_head_idx = %u\n",
+ htt_stats_buf->current_head_idx);
+ len += scnprintf(buf + len, buf_len - len, "current_tail_idx = %u\n",
+ htt_stats_buf->current_tail_idx);
+ len += scnprintf(buf + len, buf_len - len, "num_htt_msgs_sent = %u\n",
+ htt_stats_buf->num_htt_msgs_sent);
+ len += scnprintf(buf + len, buf_len - len,
+ "backpressure_time_ms = %u\n",
+ htt_stats_buf->backpressure_time_ms);
+
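+ /* five histogram buckets, labelled 1-based in the output */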
+ for (i = 0; i < 5; i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "backpressure_hist_%u = %u\n",
+ i + 1, htt_stats_buf->backpressure_hist[i]);
+
+ len += scnprintf(buf + len, buf_len - len,
+ "============================\n");
+
+ if (len >= buf_len) {
+ buf[buf_len - 1] = 0;
+ stats_req->buf_len = buf_len - 1;
+ } else {
+ buf[len] = 0;
+ stats_req->buf_len = len;
+ }
+}
+
+static inline
+void htt_print_pdev_tx_rate_txbf_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_pdev_txrate_txbf_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ int i;
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_STATS_PDEV_TX_RATE_TXBF_STATS:\n");
+
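+ /* each loop below emits "idx:val," pairs; the len-- after it drops the trailing comma */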
+ len += scnprintf(buf + len, buf_len - len, "tx_ol_mcs = ");
+ for (i = 0; i < HTT_TX_TXBF_RATE_STATS_NUM_MCS_COUNTERS; i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%d:%u,", i, htt_stats_buf->tx_su_ol_mcs[i]);
+ len--;
+
+ len += scnprintf(buf + len, buf_len - len, "\ntx_ibf_mcs = ");
+ for (i = 0; i < HTT_TX_TXBF_RATE_STATS_NUM_MCS_COUNTERS; i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%d:%u,", i, htt_stats_buf->tx_su_ibf_mcs[i]);
+ len--;
+
+ len += scnprintf(buf + len, buf_len - len, "\ntx_txbf_mcs =");
+ for (i = 0; i < HTT_TX_TXBF_RATE_STATS_NUM_MCS_COUNTERS; i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%d:%u,", i, htt_stats_buf->tx_su_txbf_mcs[i]);
+ len--;
+
+ len += scnprintf(buf + len, buf_len - len, "\ntx_ol_nss = ");
+ for (i = 0; i < HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS; i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%d:%u,", i, htt_stats_buf->tx_su_ol_nss[i]);
+ len--;
+
+ len += scnprintf(buf + len, buf_len - len, "\ntx_ibf_nss = ");
+ for (i = 0; i < HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS; i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%d:%u,", i, htt_stats_buf->tx_su_ibf_nss[i]);
+ len--;
+
+ len += scnprintf(buf + len, buf_len - len, "\ntx_txbf_nss = ");
+ for (i = 0; i < HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS; i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%d:%u,", i, htt_stats_buf->tx_su_txbf_nss[i]);
+ len--;
+
+ len += scnprintf(buf + len, buf_len - len, "\ntx_ol_bw = ");
+ for (i = 0; i < HTT_TX_TXBF_RATE_STATS_NUM_BW_COUNTERS; i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%d:%u,", i, htt_stats_buf->tx_su_ol_bw[i]);
+ len--;
+
+ len += scnprintf(buf + len, buf_len - len, "\ntx_ibf_bw = ");
+ for (i = 0; i < HTT_TX_TXBF_RATE_STATS_NUM_BW_COUNTERS; i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%d:%u,", i, htt_stats_buf->tx_su_ibf_bw[i]);
+ len--;
+
+ len += scnprintf(buf + len, buf_len - len, "\ntx_txbf_bw = ");
+ for (i = 0; i < HTT_TX_TXBF_RATE_STATS_NUM_BW_COUNTERS; i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%d:%u,", i, htt_stats_buf->tx_su_txbf_bw[i]);
+ len--;
+
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ stats_req->buf_len = len;
+}
+
+static inline
+void htt_print_txbf_ofdma_ndpa_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_txbf_ofdma_ndpa_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ int i;
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TXBF_OFDMA_NDPA_STATS_TLV:\n");
+
+ for (i = 0; i < HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS; i++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ofdma_ndpa_queued_user%d = %u\n",
+ i, htt_stats_buf->ax_ofdma_ndpa_queued[i]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ofdma_ndpa_tried_user%d = %u\n",
+ i, htt_stats_buf->ax_ofdma_ndpa_tried[i]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ofdma_ndpa_flushed_user%d = %u\n",
+ i, htt_stats_buf->ax_ofdma_ndpa_flushed[i]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ofdma_ndpa_err_user%d = %u\n",
+ i, htt_stats_buf->ax_ofdma_ndpa_err[i]);
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ }
+
+ stats_req->buf_len = len;
+}
+
+static inline
+void htt_print_txbf_ofdma_ndp_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_txbf_ofdma_ndp_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ int i;
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TXBF_OFDMA_NDP_STATS_TLV:\n");
+
+ for (i = 0; i < HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS; i++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ofdma_ndp_queued_user%d = %u\n",
+ i, htt_stats_buf->ax_ofdma_ndp_queued[i]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ofdma_ndp_tried_user%d = %u\n",
+ i, htt_stats_buf->ax_ofdma_ndp_tried[i]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ofdma_ndp_flushed_user%d = %u\n",
+ i, htt_stats_buf->ax_ofdma_ndp_flushed[i]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ofdma_ndp_err_user%d = %u\n",
+ i, htt_stats_buf->ax_ofdma_ndp_err[i]);
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ }
+
+ stats_req->buf_len = len;
+}
+
+static inline
+void htt_print_txbf_ofdma_brp_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_txbf_ofdma_brp_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ int i;
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TXBF_OFDMA_BRP_STATS_TLV:\n");
+
+ for (i = 0; i < HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS; i++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ofdma_brpoll_queued_user%d = %u\n",
+ i, htt_stats_buf->ax_ofdma_brpoll_queued[i]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ofdma_brpoll_tried_user%d = %u\n",
+ i, htt_stats_buf->ax_ofdma_brpoll_tried[i]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ofdma_brpoll_flushed_user%d = %u\n",
+ i, htt_stats_buf->ax_ofdma_brpoll_flushed[i]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ofdma_brp_err_user%d = %u\n",
+ i, htt_stats_buf->ax_ofdma_brp_err[i]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ofdma_brp_err_num_cbf_rcvd_user%d = %u\n",
+ i, htt_stats_buf->ax_ofdma_brp_err_num_cbf_rcvd[i]);
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ }
+
+ stats_req->buf_len = len;
+}
+
+static inline
+void htt_print_txbf_ofdma_steer_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_txbf_ofdma_steer_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ int i;
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TXBF_OFDMA_STEER_STATS_TLV:\n");
+
+ for (i = 0; i < HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS; i++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ofdma_num_ppdu_steer_user%d = %u\n",
+ i, htt_stats_buf->ax_ofdma_num_ppdu_steer[i]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ofdma_num_ppdu_ol_user%d = %u\n",
+ i, htt_stats_buf->ax_ofdma_num_ppdu_ol[i]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ofdma_num_usrs_prefetch_user%d = %u\n",
+ i, htt_stats_buf->ax_ofdma_num_usrs_prefetch[i]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ofdma_num_usrs_sound_user%d = %u\n",
+ i, htt_stats_buf->ax_ofdma_num_usrs_sound[i]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ofdma_num_usrs_force_sound_user%d = %u\n",
+ i, htt_stats_buf->ax_ofdma_num_usrs_force_sound[i]);
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ }
+
+ stats_req->buf_len = len;
+}
+
+static inline
+void htt_print_phy_counters_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_phy_counters_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ int i;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_PHY_COUNTERS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "rx_ofdma_timing_err_cnt = %u\n",
+ htt_stats_buf->rx_ofdma_timing_err_cnt);
+ len += scnprintf(buf + len, buf_len - len, "rx_cck_fail_cnt = %u\n",
+ htt_stats_buf->rx_cck_fail_cnt);
+ len += scnprintf(buf + len, buf_len - len, "mactx_abort_cnt = %u\n",
+ htt_stats_buf->mactx_abort_cnt);
+ len += scnprintf(buf + len, buf_len - len, "macrx_abort_cnt = %u\n",
+ htt_stats_buf->macrx_abort_cnt);
+ len += scnprintf(buf + len, buf_len - len, "phytx_abort_cnt = %u\n",
+ htt_stats_buf->phytx_abort_cnt);
+ len += scnprintf(buf + len, buf_len - len, "phyrx_abort_cnt = %u\n",
+ htt_stats_buf->phyrx_abort_cnt);
+ len += scnprintf(buf + len, buf_len - len, "phyrx_defer_abort_cnt = %u\n",
+ htt_stats_buf->phyrx_defer_abort_cnt);
+ len += scnprintf(buf + len, buf_len - len, "rx_gain_adj_lstf_event_cnt = %u\n",
+ htt_stats_buf->rx_gain_adj_lstf_event_cnt);
+ len += scnprintf(buf + len, buf_len - len, "rx_gain_adj_non_legacy_cnt = %u\n",
+ htt_stats_buf->rx_gain_adj_non_legacy_cnt);
+
+ for (i = 0; i < HTT_MAX_RX_PKT_CNT; i++)
+ len += scnprintf(buf + len, buf_len - len, "rx_pkt_cnt[%d] = %u\n",
+ i, htt_stats_buf->rx_pkt_cnt[i]);
+
+ for (i = 0; i < HTT_MAX_RX_PKT_CRC_PASS_CNT; i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "rx_pkt_crc_pass_cnt[%d] = %u\n",
+ i, htt_stats_buf->rx_pkt_crc_pass_cnt[i]);
+
+ for (i = 0; i < HTT_MAX_PER_BLK_ERR_CNT; i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "per_blk_err_cnt[%d] = %u\n",
+ i, htt_stats_buf->per_blk_err_cnt[i]);
+
+ for (i = 0; i < HTT_MAX_RX_OTA_ERR_CNT; i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "rx_ota_err_cnt[%d] = %u\n",
+ i, htt_stats_buf->rx_ota_err_cnt[i]);
+
+ stats_req->buf_len = len;
+}
+
+static inline
+void htt_print_phy_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_phy_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ int i;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_PHY_STATS_TLV:\n");
+
+ for (i = 0; i < HTT_STATS_MAX_CHAINS; i++)
+ len += scnprintf(buf + len, buf_len - len, "nf_chain[%d] = %d\n",
+ i, htt_stats_buf->nf_chain[i]);
+
+ len += scnprintf(buf + len, buf_len - len, "false_radar_cnt = %u\n",
+ htt_stats_buf->false_radar_cnt);
+ len += scnprintf(buf + len, buf_len - len, "radar_cs_cnt = %u\n",
+ htt_stats_buf->radar_cs_cnt);
+ len += scnprintf(buf + len, buf_len - len, "ani_level = %d\n",
+ htt_stats_buf->ani_level);
+ len += scnprintf(buf + len, buf_len - len, "fw_run_time = %u\n",
+ htt_stats_buf->fw_run_time);
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_phy_reset_counters_tlv(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_phy_reset_counters_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
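+ /* older firmware may send a shorter TLV; skip it rather than read past the payload */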
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_PHY_RESET_COUNTERS_TLV:\n");
+
+ len += scnprintf(buf + len, buf_len - len, "pdev_id = %u\n",
+ htt_stats_buf->pdev_id);
+ len += scnprintf(buf + len, buf_len - len, "cf_active_low_fail_cnt = %u\n",
+ htt_stats_buf->cf_active_low_fail_cnt);
+ len += scnprintf(buf + len, buf_len - len, "cf_active_low_pass_cnt = %u\n",
+ htt_stats_buf->cf_active_low_pass_cnt);
+ len += scnprintf(buf + len, buf_len - len, "phy_off_through_vreg_cnt = %u\n",
+ htt_stats_buf->phy_off_through_vreg_cnt);
+ len += scnprintf(buf + len, buf_len - len, "force_calibration_cnt = %u\n",
+ htt_stats_buf->force_calibration_cnt);
+ len += scnprintf(buf + len, buf_len - len, "rf_mode_switch_phy_off_cnt = %u\n",
+ htt_stats_buf->rf_mode_switch_phy_off_cnt);
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_phy_reset_stats_tlv(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_phy_reset_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_PHY_RESET_STATS_TLV:\n");
+
+ len += scnprintf(buf + len, buf_len - len, "pdev_id = %u\n",
+ htt_stats_buf->pdev_id);
+ len += scnprintf(buf + len, buf_len - len, "chan_mhz = %u\n",
+ htt_stats_buf->chan_mhz);
+ len += scnprintf(buf + len, buf_len - len, "chan_band_center_freq1 = %u\n",
+ htt_stats_buf->chan_band_center_freq1);
+ len += scnprintf(buf + len, buf_len - len, "chan_band_center_freq2 = %u\n",
+ htt_stats_buf->chan_band_center_freq2);
+ len += scnprintf(buf + len, buf_len - len, "chan_phy_mode = %u\n",
+ htt_stats_buf->chan_phy_mode);
+ len += scnprintf(buf + len, buf_len - len, "chan_flags = 0x%0x\n",
+ htt_stats_buf->chan_flags);
+ len += scnprintf(buf + len, buf_len - len, "chan_num = %u\n",
+ htt_stats_buf->chan_num);
+ len += scnprintf(buf + len, buf_len - len, "reset_cause = 0x%0x\n",
+ htt_stats_buf->reset_cause);
+ len += scnprintf(buf + len, buf_len - len, "prev_reset_cause = 0x%0x\n",
+ htt_stats_buf->prev_reset_cause);
+ len += scnprintf(buf + len, buf_len - len, "phy_warm_reset_src = 0x%0x\n",
+ htt_stats_buf->phy_warm_reset_src);
+ len += scnprintf(buf + len, buf_len - len, "rx_gain_tbl_mode = %d\n",
+ htt_stats_buf->rx_gain_tbl_mode);
+ len += scnprintf(buf + len, buf_len - len, "xbar_val = 0x%0x\n",
+ htt_stats_buf->xbar_val);
+ len += scnprintf(buf + len, buf_len - len, "force_calibration = %u\n",
+ htt_stats_buf->force_calibration);
+ len += scnprintf(buf + len, buf_len - len, "phyrf_mode = %u\n",
+ htt_stats_buf->phyrf_mode);
+ len += scnprintf(buf + len, buf_len - len, "phy_homechan = %u\n",
+ htt_stats_buf->phy_homechan);
+ len += scnprintf(buf + len, buf_len - len, "phy_tx_ch_mask = 0x%0x\n",
+ htt_stats_buf->phy_tx_ch_mask);
+ len += scnprintf(buf + len, buf_len - len, "phy_rx_ch_mask = 0x%0x\n",
+ htt_stats_buf->phy_rx_ch_mask);
+ len += scnprintf(buf + len, buf_len - len, "phybb_ini_mask = 0x%0x\n",
+ htt_stats_buf->phybb_ini_mask);
+ len += scnprintf(buf + len, buf_len - len, "phyrf_ini_mask = 0x%0x\n",
+ htt_stats_buf->phyrf_ini_mask);
+ len += scnprintf(buf + len, buf_len - len, "phy_dfs_en_mask = 0x%0x\n",
+ htt_stats_buf->phy_dfs_en_mask);
+ len += scnprintf(buf + len, buf_len - len, "phy_sscan_en_mask = 0x%0x\n",
+ htt_stats_buf->phy_sscan_en_mask);
+ len += scnprintf(buf + len, buf_len - len, "phy_synth_sel_mask = 0x%0x\n",
+ htt_stats_buf->phy_synth_sel_mask);
+ len += scnprintf(buf + len, buf_len - len, "phy_adfs_freq = %u\n",
+ htt_stats_buf->phy_adfs_freq);
+ len += scnprintf(buf + len, buf_len - len, "cck_fir_settings = 0x%0x\n",
+ htt_stats_buf->cck_fir_settings);
+ len += scnprintf(buf + len, buf_len - len, "phy_dyn_pri_chan = %u\n",
+ htt_stats_buf->phy_dyn_pri_chan);
+ len += scnprintf(buf + len, buf_len - len, "cca_thresh = 0x%0x\n",
+ htt_stats_buf->cca_thresh);
+ len += scnprintf(buf + len, buf_len - len, "dyn_cca_status = %u\n",
+ htt_stats_buf->dyn_cca_status);
+ len += scnprintf(buf + len, buf_len - len, "rxdesense_thresh_hw = 0x%x\n",
+ htt_stats_buf->rxdesense_thresh_hw);
+ len += scnprintf(buf + len, buf_len - len, "rxdesense_thresh_sw = 0x%x\n",
+ htt_stats_buf->rxdesense_thresh_sw);
+
+ stats_req->buf_len = len;
+}
+
+static inline
+void htt_print_peer_ctrl_path_txrx_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_peer_ctrl_path_txrx_stats_tlv *htt_stat_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ int i;
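+ /* names follow the IEEE 802.11 management frame subtype numbering (0-14) */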
+ const char *mgmt_frm_type[ATH11K_STATS_MGMT_FRM_TYPE_MAX - 1] = {
+ "assoc_req", "assoc_resp",
+ "reassoc_req", "reassoc_resp",
+ "probe_req", "probe_resp",
+ "timing_advertisement", "reserved",
+ "beacon", "atim", "disassoc",
+ "auth", "deauth", "action", "action_no_ack"};
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_STATS_PEER_CTRL_PATH_TXRX_STATS_TAG:\n");
+ len += scnprintf(buf + len, buf_len - len,
+ "peer_mac_addr = %02x:%02x:%02x:%02x:%02x:%02x\n",
+ htt_stat_buf->peer_mac_addr[0], htt_stat_buf->peer_mac_addr[1],
+ htt_stat_buf->peer_mac_addr[2], htt_stat_buf->peer_mac_addr[3],
+ htt_stat_buf->peer_mac_addr[4], htt_stat_buf->peer_mac_addr[5]);
+
+ len += scnprintf(buf + len, buf_len - len, "peer_tx_mgmt_subtype:\n");
+ for (i = 0; i < ATH11K_STATS_MGMT_FRM_TYPE_MAX - 1; i++)
+ len += scnprintf(buf + len, buf_len - len, "%s:%u\n",
+ mgmt_frm_type[i],
+ htt_stat_buf->peer_tx_mgmt_subtype[i]);
+
+ len += scnprintf(buf + len, buf_len - len, "peer_rx_mgmt_subtype:\n");
+ for (i = 0; i < ATH11K_STATS_MGMT_FRM_TYPE_MAX - 1; i++)
+ len += scnprintf(buf + len, buf_len - len, "%s:%u\n",
+ mgmt_frm_type[i],
+ htt_stat_buf->peer_rx_mgmt_subtype[i]);
+
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ stats_req->buf_len = len;
+}
+
+static int ath11k_dbg_htt_ext_stats_parse(struct ath11k_base *ab,
+ u16 tag, u16 len, const void *tag_buf,
+ void *user_data)
+{
+ struct debug_htt_stats_req *stats_req = user_data;
+
+ switch (tag) {
+ case HTT_STATS_TX_PDEV_CMN_TAG:
+ htt_print_tx_pdev_stats_cmn_tlv(tag_buf, stats_req);
+ break;
+ case HTT_STATS_TX_PDEV_UNDERRUN_TAG:
+ htt_print_tx_pdev_stats_urrn_tlv_v(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_PDEV_SIFS_TAG:
+ htt_print_tx_pdev_stats_sifs_tlv_v(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_PDEV_FLUSH_TAG:
+ htt_print_tx_pdev_stats_flush_tlv_v(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_PDEV_PHY_ERR_TAG:
+ htt_print_tx_pdev_stats_phy_err_tlv_v(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_PDEV_SIFS_HIST_TAG:
+ htt_print_tx_pdev_stats_sifs_hist_tlv_v(tag_buf, len, stats_req);
+ break;
+
+ case HTT_STATS_TX_PDEV_TX_PPDU_STATS_TAG:
+ htt_print_tx_pdev_stats_tx_ppdu_stats_tlv_v(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_PDEV_TRIED_MPDU_CNT_HIST_TAG:
+ htt_print_tx_pdev_stats_tried_mpdu_cnt_hist_tlv_v(tag_buf, len,
+ stats_req);
+ break;
+
+ case HTT_STATS_STRING_TAG:
+ htt_print_stats_string_tlv(tag_buf, len, stats_req);
+ break;
+
+ case HTT_STATS_TX_HWQ_CMN_TAG:
+ htt_print_tx_hwq_stats_cmn_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_HWQ_DIFS_LATENCY_TAG:
+ htt_print_tx_hwq_difs_latency_stats_tlv_v(tag_buf, len, stats_req);
+ break;
+
+ case HTT_STATS_TX_HWQ_CMD_RESULT_TAG:
+ htt_print_tx_hwq_cmd_result_stats_tlv_v(tag_buf, len, stats_req);
+ break;
+
+ case HTT_STATS_TX_HWQ_CMD_STALL_TAG:
+ htt_print_tx_hwq_cmd_stall_stats_tlv_v(tag_buf, len, stats_req);
+ break;
+
+ case HTT_STATS_TX_HWQ_FES_STATUS_TAG:
+ htt_print_tx_hwq_fes_result_stats_tlv_v(tag_buf, len, stats_req);
+ break;
+
+ case HTT_STATS_TX_HWQ_TRIED_MPDU_CNT_HIST_TAG:
+ htt_print_tx_hwq_tried_mpdu_cnt_hist_tlv_v(tag_buf, len, stats_req);
+ break;
+
+ case HTT_STATS_TX_HWQ_TXOP_USED_CNT_HIST_TAG:
+ htt_print_tx_hwq_txop_used_cnt_hist_tlv_v(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_TQM_GEN_MPDU_TAG:
+ htt_print_tx_tqm_gen_mpdu_stats_tlv_v(tag_buf, len, stats_req);
+ break;
+
+ case HTT_STATS_TX_TQM_LIST_MPDU_TAG:
+ htt_print_tx_tqm_list_mpdu_stats_tlv_v(tag_buf, len, stats_req);
+ break;
+
+ case HTT_STATS_TX_TQM_LIST_MPDU_CNT_TAG:
+ htt_print_tx_tqm_list_mpdu_cnt_tlv_v(tag_buf, len, stats_req);
+ break;
+
+ case HTT_STATS_TX_TQM_CMN_TAG:
+ htt_print_tx_tqm_cmn_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_TQM_PDEV_TAG:
+ htt_print_tx_tqm_pdev_stats_tlv_v(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_TQM_CMDQ_STATUS_TAG:
+ htt_print_tx_tqm_cmdq_status_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_DE_EAPOL_PACKETS_TAG:
+ htt_print_tx_de_eapol_packets_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_DE_CLASSIFY_FAILED_TAG:
+ htt_print_tx_de_classify_failed_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_DE_CLASSIFY_STATS_TAG:
+ htt_print_tx_de_classify_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_DE_CLASSIFY_STATUS_TAG:
+ htt_print_tx_de_classify_status_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_DE_ENQUEUE_PACKETS_TAG:
+ htt_print_tx_de_enqueue_packets_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_DE_ENQUEUE_DISCARD_TAG:
+ htt_print_tx_de_enqueue_discard_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_DE_FW2WBM_RING_FULL_HIST_TAG:
+ htt_print_tx_de_fw2wbm_ring_full_hist_tlv(tag_buf, len, stats_req);
+ break;
+
+ case HTT_STATS_TX_DE_CMN_TAG:
+ htt_print_tx_de_cmn_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_RING_IF_TAG:
+ htt_print_ring_if_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_PDEV_MU_MIMO_STATS_TAG:
+ htt_print_tx_pdev_mu_mimo_sch_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_SFM_CMN_TAG:
+ htt_print_sfm_cmn_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_SRING_STATS_TAG:
+ htt_print_sring_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_RX_PDEV_FW_STATS_TAG:
+ htt_print_rx_pdev_fw_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_RX_PDEV_FW_RING_MPDU_ERR_TAG:
+ htt_print_rx_pdev_fw_ring_mpdu_err_tlv_v(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_RX_PDEV_FW_MPDU_DROP_TAG:
+ htt_print_rx_pdev_fw_mpdu_drop_tlv_v(tag_buf, len, stats_req);
+ break;
+
+ case HTT_STATS_RX_SOC_FW_STATS_TAG:
+ htt_print_rx_soc_fw_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_RX_SOC_FW_REFILL_RING_EMPTY_TAG:
+ htt_print_rx_soc_fw_refill_ring_empty_tlv_v(tag_buf, len, stats_req);
+ break;
+
+ case HTT_STATS_RX_SOC_FW_REFILL_RING_NUM_REFILL_TAG:
+ htt_print_rx_soc_fw_refill_ring_num_refill_tlv_v(tag_buf, len,
+ stats_req);
+ break;
+ case HTT_STATS_RX_REFILL_RXDMA_ERR_TAG:
+ htt_print_rx_soc_fw_refill_ring_num_rxdma_err_tlv_v(tag_buf, len,
+ stats_req);
+ break;
+
+ case HTT_STATS_RX_REFILL_REO_ERR_TAG:
+ htt_print_rx_soc_fw_refill_ring_num_reo_err_tlv_v(tag_buf, len,
+ stats_req);
+ break;
+
+ case HTT_STATS_RX_REO_RESOURCE_STATS_TAG:
+ htt_print_rx_reo_debug_stats_tlv_v(tag_buf, stats_req);
+ break;
+ case HTT_STATS_RX_PDEV_FW_STATS_PHY_ERR_TAG:
+ htt_print_rx_pdev_fw_stats_phy_err_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_PDEV_RATE_STATS_TAG:
+ htt_print_tx_pdev_rate_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_RX_PDEV_RATE_STATS_TAG:
+ htt_print_rx_pdev_rate_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_PDEV_SCHEDULER_TXQ_STATS_TAG:
+ htt_print_tx_pdev_stats_sched_per_txq_tlv(tag_buf, stats_req);
+ break;
+ case HTT_STATS_TX_SCHED_CMN_TAG:
+ htt_print_stats_tx_sched_cmn_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_PDEV_MPDU_STATS_TAG:
+ htt_print_tx_pdev_mu_mimo_mpdu_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_SCHED_TXQ_CMD_POSTED_TAG:
+ htt_print_sched_txq_cmd_posted_tlv_v(tag_buf, len, stats_req);
+ break;
+
+ case HTT_STATS_RING_IF_CMN_TAG:
+ htt_print_ring_if_cmn_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_SFM_CLIENT_USER_TAG:
+ htt_print_sfm_client_user_tlv_v(tag_buf, len, stats_req);
+ break;
+
+ case HTT_STATS_SFM_CLIENT_TAG:
+ htt_print_sfm_client_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_TQM_ERROR_STATS_TAG:
+ htt_print_tx_tqm_error_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_SCHED_TXQ_CMD_REAPED_TAG:
+ htt_print_sched_txq_cmd_reaped_tlv_v(tag_buf, len, stats_req);
+ break;
+
+ case HTT_STATS_SRING_CMN_TAG:
+ htt_print_sring_cmn_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_SOUNDING_STATS_TAG:
+ htt_print_tx_sounding_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_SELFGEN_AC_ERR_STATS_TAG:
+ htt_print_tx_selfgen_ac_err_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_SELFGEN_CMN_STATS_TAG:
+ htt_print_tx_selfgen_cmn_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_SELFGEN_AC_STATS_TAG:
+ htt_print_tx_selfgen_ac_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_SELFGEN_AX_STATS_TAG:
+ htt_print_tx_selfgen_ax_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_SELFGEN_AX_ERR_STATS_TAG:
+ htt_print_tx_selfgen_ax_err_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_HWQ_MUMIMO_SCH_STATS_TAG:
+ htt_print_tx_hwq_mu_mimo_sch_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_HWQ_MUMIMO_MPDU_STATS_TAG:
+ htt_print_tx_hwq_mu_mimo_mpdu_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_HWQ_MUMIMO_CMN_STATS_TAG:
+ htt_print_tx_hwq_mu_mimo_cmn_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_HW_INTR_MISC_TAG:
+ htt_print_hw_stats_intr_misc_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_HW_WD_TIMEOUT_TAG:
+ htt_print_hw_stats_wd_timeout_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_HW_PDEV_ERRS_TAG:
+ htt_print_hw_stats_pdev_errs_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_COUNTER_NAME_TAG:
+ htt_print_counter_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_TID_DETAILS_TAG:
+ htt_print_tx_tid_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_TID_DETAILS_V1_TAG:
+ htt_print_tx_tid_stats_v1_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_RX_TID_DETAILS_TAG:
+ htt_print_rx_tid_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_PEER_STATS_CMN_TAG:
+ htt_print_peer_stats_cmn_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_PEER_DETAILS_TAG:
+ htt_print_peer_details_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_PEER_MSDU_FLOWQ_TAG:
+ htt_print_msdu_flow_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_PEER_TX_RATE_STATS_TAG:
+ htt_print_tx_peer_rate_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_PEER_RX_RATE_STATS_TAG:
+ htt_print_rx_peer_rate_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_DE_COMPL_STATS_TAG:
+ htt_print_tx_de_compl_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_PDEV_CCA_1SEC_HIST_TAG:
+ case HTT_STATS_PDEV_CCA_100MSEC_HIST_TAG:
+ case HTT_STATS_PDEV_CCA_STAT_CUMULATIVE_TAG:
+ htt_print_pdev_cca_stats_hist_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_PDEV_CCA_COUNTERS_TAG:
+ htt_print_pdev_stats_cca_counters_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_WHAL_TX_TAG:
+ htt_print_hw_stats_whal_tx_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_PDEV_TWT_SESSIONS_TAG:
+ htt_print_pdev_stats_twt_sessions_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_PDEV_TWT_SESSION_TAG:
+ htt_print_pdev_stats_twt_session_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_SCHED_TXQ_SCHED_ORDER_SU_TAG:
+ htt_print_sched_txq_sched_order_su_tlv_v(tag_buf, len, stats_req);
+ break;
+
+ case HTT_STATS_SCHED_TXQ_SCHED_INELIGIBILITY_TAG:
+ htt_print_sched_txq_sched_ineligibility_tlv_v(tag_buf, len, stats_req);
+ break;
+
+ case HTT_STATS_PDEV_OBSS_PD_TAG:
+ htt_print_pdev_obss_pd_stats_tlv_v(tag_buf, stats_req);
+ break;
+ case HTT_STATS_RING_BACKPRESSURE_STATS_TAG:
+ htt_print_backpressure_stats_tlv_v(tag_buf, user_data);
+ break;
+ case HTT_STATS_PDEV_TX_RATE_TXBF_STATS_TAG:
+ htt_print_pdev_tx_rate_txbf_stats_tlv(tag_buf, stats_req);
+ break;
+ case HTT_STATS_TXBF_OFDMA_NDPA_STATS_TAG:
+ htt_print_txbf_ofdma_ndpa_stats_tlv(tag_buf, stats_req);
+ break;
+ case HTT_STATS_TXBF_OFDMA_NDP_STATS_TAG:
+ htt_print_txbf_ofdma_ndp_stats_tlv(tag_buf, stats_req);
+ break;
+ case HTT_STATS_TXBF_OFDMA_BRP_STATS_TAG:
+ htt_print_txbf_ofdma_brp_stats_tlv(tag_buf, stats_req);
+ break;
+ case HTT_STATS_TXBF_OFDMA_STEER_STATS_TAG:
+ htt_print_txbf_ofdma_steer_stats_tlv(tag_buf, stats_req);
+ break;
+ case HTT_STATS_PHY_COUNTERS_TAG:
+ htt_print_phy_counters_tlv(tag_buf, stats_req);
+ break;
+ case HTT_STATS_PHY_STATS_TAG:
+ htt_print_phy_stats_tlv(tag_buf, stats_req);
+ break;
+ case HTT_STATS_PHY_RESET_COUNTERS_TAG:
+ htt_print_phy_reset_counters_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_PHY_RESET_STATS_TAG:
+ htt_print_phy_reset_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_PEER_CTRL_PATH_TXRX_STATS_TAG:
+ htt_print_peer_ctrl_path_txrx_stats_tlv(tag_buf, stats_req);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+void ath11k_debugfs_htt_ext_stats_handler(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ struct ath11k_htt_extd_stats_msg *msg;
+ struct debug_htt_stats_req *stats_req;
+ struct ath11k *ar;
+ u32 len;
+ u64 cookie;
+ int ret;
+ bool send_completion = false;
+ u8 pdev_id;
+
+ msg = (struct ath11k_htt_extd_stats_msg *)skb->data;
+ cookie = msg->cookie;
+
+ if (FIELD_GET(HTT_STATS_COOKIE_MSB, cookie) != HTT_STATS_MAGIC_VALUE) {
+ ath11k_warn(ab, "received invalid htt ext stats event\n");
+ return;
+ }
+
+ pdev_id = FIELD_GET(HTT_STATS_COOKIE_LSB, cookie);
+ rcu_read_lock();
+ ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id);
+ rcu_read_unlock();
+ if (!ar) {
+ ath11k_warn(ab, "failed to get ar for pdev_id %d\n", pdev_id);
+ return;
+ }
+
+ stats_req = ar->debug.htt_stats.stats_req;
+ if (!stats_req)
+ return;
+
+ spin_lock_bh(&ar->debug.htt_stats.lock);
+
+ stats_req->done = FIELD_GET(HTT_T2H_EXT_STATS_INFO1_DONE, msg->info1);
+ if (stats_req->done)
+ send_completion = true;
+
+ spin_unlock_bh(&ar->debug.htt_stats.lock);
+
+ len = FIELD_GET(HTT_T2H_EXT_STATS_INFO1_LENGTH, msg->info1);
+ ret = ath11k_dp_htt_tlv_iter(ab, msg->data, len,
+ ath11k_dbg_htt_ext_stats_parse,
+ stats_req);
+ if (ret)
+ ath11k_warn(ab, "Failed to parse tlv %d\n", ret);
+
+ if (send_completion)
+ complete(&stats_req->cmpln);
+}
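
For reference, a minimal sketch (not part of this patch) of how the 64-bit stats cookie checked above round-trips through FIELD_PREP()/FIELD_GET(), using the HTT_STATS_COOKIE_* masks from debugfs_htt_stats.h; the pdev_id value is a made-up example:

/* Illustrative only: compose the cookie the way the request path does,
 * then recover the two halves the way this handler does.
 */
u64 cookie = FIELD_PREP(HTT_STATS_COOKIE_MSB, HTT_STATS_MAGIC_VALUE) |
	     FIELD_PREP(HTT_STATS_COOKIE_LSB, 1);	/* hypothetical pdev_id 1 */

WARN_ON(FIELD_GET(HTT_STATS_COOKIE_MSB, cookie) != HTT_STATS_MAGIC_VALUE);
WARN_ON(FIELD_GET(HTT_STATS_COOKIE_LSB, cookie) != 1);	/* pdev_id back out */
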
+
+static ssize_t ath11k_read_htt_stats_type(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ char buf[32];
+ size_t len;
+
+ len = scnprintf(buf, sizeof(buf), "%u\n", ar->debug.htt_stats.type);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t ath11k_write_htt_stats_type(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ u8 type;
+ int ret;
+
+ ret = kstrtou8_from_user(user_buf, count, 0, &type);
+ if (ret)
+ return ret;
+
+ if (type >= ATH11K_DBG_HTT_NUM_EXT_STATS)
+ return -E2BIG;
+
+ if (type == ATH11K_DBG_HTT_EXT_STATS_RESET)
+ return -EPERM;
+
+ ar->debug.htt_stats.type = type;
+
+ ret = count;
+
+ return ret;
+}
+
+static const struct file_operations fops_htt_stats_type = {
+ .read = ath11k_read_htt_stats_type,
+ .write = ath11k_write_htt_stats_type,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static int ath11k_prep_htt_stats_cfg_params(struct ath11k *ar, u8 type,
+ const u8 *mac_addr,
+ struct htt_ext_stats_cfg_params *cfg_params)
+{
+ if (!cfg_params)
+ return -EINVAL;
+
+ switch (type) {
+ case ATH11K_DBG_HTT_EXT_STATS_PDEV_TX_HWQ:
+ case ATH11K_DBG_HTT_EXT_STATS_TX_MU_HWQ:
+ cfg_params->cfg0 = HTT_STAT_DEFAULT_CFG0_ALL_HWQS;
+ break;
+ case ATH11K_DBG_HTT_EXT_STATS_PDEV_TX_SCHED:
+ cfg_params->cfg0 = HTT_STAT_DEFAULT_CFG0_ALL_TXQS;
+ break;
+ case ATH11K_DBG_HTT_EXT_STATS_TQM_CMDQ:
+ cfg_params->cfg0 = HTT_STAT_DEFAULT_CFG0_ALL_CMDQS;
+ break;
+ case ATH11K_DBG_HTT_EXT_STATS_PEER_INFO:
+ cfg_params->cfg0 = HTT_STAT_PEER_INFO_MAC_ADDR;
+ cfg_params->cfg0 |= FIELD_PREP(GENMASK(15, 1),
+ HTT_PEER_STATS_REQ_MODE_FLUSH_TQM);
+ cfg_params->cfg1 = HTT_STAT_DEFAULT_PEER_REQ_TYPE;
+ cfg_params->cfg2 |= FIELD_PREP(GENMASK(7, 0), mac_addr[0]);
+ cfg_params->cfg2 |= FIELD_PREP(GENMASK(15, 8), mac_addr[1]);
+ cfg_params->cfg2 |= FIELD_PREP(GENMASK(23, 16), mac_addr[2]);
+ cfg_params->cfg2 |= FIELD_PREP(GENMASK(31, 24), mac_addr[3]);
+ cfg_params->cfg3 |= FIELD_PREP(GENMASK(7, 0), mac_addr[4]);
+ cfg_params->cfg3 |= FIELD_PREP(GENMASK(15, 8), mac_addr[5]);
+ break;
+ case ATH11K_DBG_HTT_EXT_STATS_RING_IF_INFO:
+ case ATH11K_DBG_HTT_EXT_STATS_SRNG_INFO:
+ cfg_params->cfg0 = HTT_STAT_DEFAULT_CFG0_ALL_RINGS;
+ break;
+ case ATH11K_DBG_HTT_EXT_STATS_ACTIVE_PEERS_LIST:
+ cfg_params->cfg0 = HTT_STAT_DEFAULT_CFG0_ACTIVE_PEERS;
+ break;
+ case ATH11K_DBG_HTT_EXT_STATS_PDEV_CCA_STATS:
+ cfg_params->cfg0 = HTT_STAT_DEFAULT_CFG0_CCA_CUMULATIVE;
+ break;
+ case ATH11K_DBG_HTT_EXT_STATS_TX_SOUNDING_INFO:
+ cfg_params->cfg0 = HTT_STAT_DEFAULT_CFG0_ACTIVE_VDEVS;
+ break;
+ case ATH11K_DBG_HTT_EXT_STATS_PEER_CTRL_PATH_TXRX_STATS:
+ cfg_params->cfg0 = HTT_STAT_PEER_INFO_MAC_ADDR;
+ cfg_params->cfg1 |= FIELD_PREP(GENMASK(7, 0), mac_addr[0]);
+ cfg_params->cfg1 |= FIELD_PREP(GENMASK(15, 8), mac_addr[1]);
+ cfg_params->cfg1 |= FIELD_PREP(GENMASK(23, 16), mac_addr[2]);
+ cfg_params->cfg1 |= FIELD_PREP(GENMASK(31, 24), mac_addr[3]);
+ cfg_params->cfg2 |= FIELD_PREP(GENMASK(7, 0), mac_addr[4]);
+ cfg_params->cfg2 |= FIELD_PREP(GENMASK(15, 8), mac_addr[5]);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
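
As a sanity check on the PEER_INFO packing above, a minimal sketch (illustrative, not driver code) of where each byte of a hypothetical peer MAC address 00:11:22:33:44:55 ends up:

/* Illustrative only: mirrors the FIELD_PREP() sequence used for
 * ATH11K_DBG_HTT_EXT_STATS_PEER_INFO. Byte 0 lands in the least
 * significant bits of cfg2, bytes 4-5 in the low half of cfg3.
 */
static const u8 mac[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
u32 cfg2 = 0, cfg3 = 0;

cfg2 |= FIELD_PREP(GENMASK(7, 0), mac[0]);
cfg2 |= FIELD_PREP(GENMASK(15, 8), mac[1]);
cfg2 |= FIELD_PREP(GENMASK(23, 16), mac[2]);
cfg2 |= FIELD_PREP(GENMASK(31, 24), mac[3]);
cfg3 |= FIELD_PREP(GENMASK(7, 0), mac[4]);
cfg3 |= FIELD_PREP(GENMASK(15, 8), mac[5]);
/* result: cfg2 == 0x33221100, cfg3 == 0x00005544 */
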
+
+int ath11k_debugfs_htt_stats_req(struct ath11k *ar)
+{
+ struct debug_htt_stats_req *stats_req = ar->debug.htt_stats.stats_req;
+ u8 type = stats_req->type;
+ u64 cookie = 0;
+ int ret, pdev_id = ar->pdev->pdev_id;
+ struct htt_ext_stats_cfg_params cfg_params = {};
+
+ init_completion(&stats_req->cmpln);
+
+ stats_req->done = false;
+ stats_req->pdev_id = pdev_id;
+
+ cookie = FIELD_PREP(HTT_STATS_COOKIE_MSB, HTT_STATS_MAGIC_VALUE) |
+ FIELD_PREP(HTT_STATS_COOKIE_LSB, pdev_id);
+
+ ret = ath11k_prep_htt_stats_cfg_params(ar, type, stats_req->peer_addr,
+ &cfg_params);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set htt stats cfg params: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath11k_dp_tx_htt_h2t_ext_stats_req(ar, type, &cfg_params, cookie);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send htt stats request: %d\n", ret);
+ return ret;
+ }
+
+ while (!wait_for_completion_timeout(&stats_req->cmpln, 3 * HZ)) {
+ spin_lock_bh(&ar->debug.htt_stats.lock);
+ if (!stats_req->done) {
+ stats_req->done = true;
+ spin_unlock_bh(&ar->debug.htt_stats.lock);
+ ath11k_warn(ar->ab, "stats request timed out\n");
+ return -ETIMEDOUT;
+ }
+ spin_unlock_bh(&ar->debug.htt_stats.lock);
+ }
+
+ return 0;
+}
+
+static int ath11k_open_htt_stats(struct inode *inode, struct file *file)
+{
+ struct ath11k *ar = inode->i_private;
+ struct debug_htt_stats_req *stats_req;
+ u8 type = ar->debug.htt_stats.type;
+ int ret;
+
+ if (type == ATH11K_DBG_HTT_EXT_STATS_RESET ||
+ type == ATH11K_DBG_HTT_EXT_STATS_PEER_INFO ||
+ type == ATH11K_DBG_HTT_EXT_STATS_PEER_CTRL_PATH_TXRX_STATS)
+ return -EPERM;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH11K_STATE_ON) {
+ ret = -ENETDOWN;
+ goto err_unlock;
+ }
+
+ if (ar->debug.htt_stats.stats_req) {
+ ret = -EAGAIN;
+ goto err_unlock;
+ }
+
+ stats_req = vzalloc(sizeof(*stats_req) + ATH11K_HTT_STATS_BUF_SIZE);
+ if (!stats_req) {
+ ret = -ENOMEM;
+ goto err_unlock;
+ }
+
+ ar->debug.htt_stats.stats_req = stats_req;
+ stats_req->type = type;
+
+ ret = ath11k_debugfs_htt_stats_req(ar);
+ if (ret < 0)
+ goto out;
+
+ file->private_data = stats_req;
+
+ mutex_unlock(&ar->conf_mutex);
+
+ return 0;
+out:
+ vfree(stats_req);
+ ar->debug.htt_stats.stats_req = NULL;
+err_unlock:
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static int ath11k_release_htt_stats(struct inode *inode, struct file *file)
+{
+ struct ath11k *ar = inode->i_private;
+
+ mutex_lock(&ar->conf_mutex);
+ vfree(file->private_data);
+ ar->debug.htt_stats.stats_req = NULL;
+ mutex_unlock(&ar->conf_mutex);
+
+ return 0;
+}
+
+static ssize_t ath11k_read_htt_stats(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct debug_htt_stats_req *stats_req = file->private_data;
+ char *buf;
+ u32 length = 0;
+
+ buf = stats_req->buf;
+ length = min_t(u32, stats_req->buf_len, ATH11K_HTT_STATS_BUF_SIZE);
+ return simple_read_from_buffer(user_buf, count, ppos, buf, length);
+}
+
+static const struct file_operations fops_dump_htt_stats = {
+ .open = ath11k_open_htt_stats,
+ .release = ath11k_release_htt_stats,
+ .read = ath11k_read_htt_stats,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath11k_read_htt_stats_reset(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ char buf[32];
+ size_t len;
+
+ len = scnprintf(buf, sizeof(buf), "%u\n", ar->debug.htt_stats.reset);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t ath11k_write_htt_stats_reset(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ u8 type;
+ struct htt_ext_stats_cfg_params cfg_params = {};
+ int ret;
+
+ ret = kstrtou8_from_user(user_buf, count, 0, &type);
+ if (ret)
+ return ret;
+
+ if (type >= ATH11K_DBG_HTT_NUM_EXT_STATS ||
+ type == ATH11K_DBG_HTT_EXT_STATS_RESET)
+ return -E2BIG;
+
+ mutex_lock(&ar->conf_mutex);
+ cfg_params.cfg0 = HTT_STAT_DEFAULT_RESET_START_OFFSET;
+ cfg_params.cfg1 = 1 << (cfg_params.cfg0 + type);
+ ret = ath11k_dp_tx_htt_h2t_ext_stats_req(ar,
+ ATH11K_DBG_HTT_EXT_STATS_RESET,
+ &cfg_params,
+ 0ULL);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send htt stats request: %d\n", ret);
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+ }
+
+ ar->debug.htt_stats.reset = type;
+ mutex_unlock(&ar->conf_mutex);
+
+ ret = count;
+
+ return ret;
+}
+
+static const struct file_operations fops_htt_stats_reset = {
+ .read = ath11k_read_htt_stats_reset,
+ .write = ath11k_write_htt_stats_reset,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+void ath11k_debugfs_htt_stats_init(struct ath11k *ar)
+{
+ spin_lock_init(&ar->debug.htt_stats.lock);
+ debugfs_create_file("htt_stats_type", 0600, ar->debug.debugfs_pdev,
+ ar, &fops_htt_stats_type);
+ debugfs_create_file("htt_stats", 0400, ar->debug.debugfs_pdev,
+ ar, &fops_dump_htt_stats);
+ debugfs_create_file("htt_stats_reset", 0600, ar->debug.debugfs_pdev,
+ ar, &fops_htt_stats_reset);
+}
diff --git a/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.h b/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.h
new file mode 100644
index 000000000000..476689bbd4da
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.h
@@ -0,0 +1,2045 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef DEBUG_HTT_STATS_H
+#define DEBUG_HTT_STATS_H
+
+#define HTT_STATS_COOKIE_LSB GENMASK_ULL(31, 0)
+#define HTT_STATS_COOKIE_MSB GENMASK_ULL(63, 32)
+#define HTT_STATS_MAGIC_VALUE 0xF0F0F0F0
+
+enum htt_tlv_tag_t {
+ HTT_STATS_TX_PDEV_CMN_TAG = 0,
+ HTT_STATS_TX_PDEV_UNDERRUN_TAG = 1,
+ HTT_STATS_TX_PDEV_SIFS_TAG = 2,
+ HTT_STATS_TX_PDEV_FLUSH_TAG = 3,
+ HTT_STATS_TX_PDEV_PHY_ERR_TAG = 4,
+ HTT_STATS_STRING_TAG = 5,
+ HTT_STATS_TX_HWQ_CMN_TAG = 6,
+ HTT_STATS_TX_HWQ_DIFS_LATENCY_TAG = 7,
+ HTT_STATS_TX_HWQ_CMD_RESULT_TAG = 8,
+ HTT_STATS_TX_HWQ_CMD_STALL_TAG = 9,
+ HTT_STATS_TX_HWQ_FES_STATUS_TAG = 10,
+ HTT_STATS_TX_TQM_GEN_MPDU_TAG = 11,
+ HTT_STATS_TX_TQM_LIST_MPDU_TAG = 12,
+ HTT_STATS_TX_TQM_LIST_MPDU_CNT_TAG = 13,
+ HTT_STATS_TX_TQM_CMN_TAG = 14,
+ HTT_STATS_TX_TQM_PDEV_TAG = 15,
+ HTT_STATS_TX_TQM_CMDQ_STATUS_TAG = 16,
+ HTT_STATS_TX_DE_EAPOL_PACKETS_TAG = 17,
+ HTT_STATS_TX_DE_CLASSIFY_FAILED_TAG = 18,
+ HTT_STATS_TX_DE_CLASSIFY_STATS_TAG = 19,
+ HTT_STATS_TX_DE_CLASSIFY_STATUS_TAG = 20,
+ HTT_STATS_TX_DE_ENQUEUE_PACKETS_TAG = 21,
+ HTT_STATS_TX_DE_ENQUEUE_DISCARD_TAG = 22,
+ HTT_STATS_TX_DE_CMN_TAG = 23,
+ HTT_STATS_RING_IF_TAG = 24,
+ HTT_STATS_TX_PDEV_MU_MIMO_STATS_TAG = 25,
+ HTT_STATS_SFM_CMN_TAG = 26,
+ HTT_STATS_SRING_STATS_TAG = 27,
+ HTT_STATS_RX_PDEV_FW_STATS_TAG = 28,
+ HTT_STATS_RX_PDEV_FW_RING_MPDU_ERR_TAG = 29,
+ HTT_STATS_RX_PDEV_FW_MPDU_DROP_TAG = 30,
+ HTT_STATS_RX_SOC_FW_STATS_TAG = 31,
+ HTT_STATS_RX_SOC_FW_REFILL_RING_EMPTY_TAG = 32,
+ HTT_STATS_RX_SOC_FW_REFILL_RING_NUM_REFILL_TAG = 33,
+ HTT_STATS_TX_PDEV_RATE_STATS_TAG = 34,
+ HTT_STATS_RX_PDEV_RATE_STATS_TAG = 35,
+ HTT_STATS_TX_PDEV_SCHEDULER_TXQ_STATS_TAG = 36,
+ HTT_STATS_TX_SCHED_CMN_TAG = 37,
+ HTT_STATS_TX_PDEV_MUMIMO_MPDU_STATS_TAG = 38,
+ HTT_STATS_SCHED_TXQ_CMD_POSTED_TAG = 39,
+ HTT_STATS_RING_IF_CMN_TAG = 40,
+ HTT_STATS_SFM_CLIENT_USER_TAG = 41,
+ HTT_STATS_SFM_CLIENT_TAG = 42,
+ HTT_STATS_TX_TQM_ERROR_STATS_TAG = 43,
+ HTT_STATS_SCHED_TXQ_CMD_REAPED_TAG = 44,
+ HTT_STATS_SRING_CMN_TAG = 45,
+ HTT_STATS_TX_SELFGEN_AC_ERR_STATS_TAG = 46,
+ HTT_STATS_TX_SELFGEN_CMN_STATS_TAG = 47,
+ HTT_STATS_TX_SELFGEN_AC_STATS_TAG = 48,
+ HTT_STATS_TX_SELFGEN_AX_STATS_TAG = 49,
+ HTT_STATS_TX_SELFGEN_AX_ERR_STATS_TAG = 50,
+ HTT_STATS_TX_HWQ_MUMIMO_SCH_STATS_TAG = 51,
+ HTT_STATS_TX_HWQ_MUMIMO_MPDU_STATS_TAG = 52,
+ HTT_STATS_TX_HWQ_MUMIMO_CMN_STATS_TAG = 53,
+ HTT_STATS_HW_INTR_MISC_TAG = 54,
+ HTT_STATS_HW_WD_TIMEOUT_TAG = 55,
+ HTT_STATS_HW_PDEV_ERRS_TAG = 56,
+ HTT_STATS_COUNTER_NAME_TAG = 57,
+ HTT_STATS_TX_TID_DETAILS_TAG = 58,
+ HTT_STATS_RX_TID_DETAILS_TAG = 59,
+ HTT_STATS_PEER_STATS_CMN_TAG = 60,
+ HTT_STATS_PEER_DETAILS_TAG = 61,
+ HTT_STATS_PEER_TX_RATE_STATS_TAG = 62,
+ HTT_STATS_PEER_RX_RATE_STATS_TAG = 63,
+ HTT_STATS_PEER_MSDU_FLOWQ_TAG = 64,
+ HTT_STATS_TX_DE_COMPL_STATS_TAG = 65,
+ HTT_STATS_WHAL_TX_TAG = 66,
+ HTT_STATS_TX_PDEV_SIFS_HIST_TAG = 67,
+ HTT_STATS_RX_PDEV_FW_STATS_PHY_ERR_TAG = 68,
+ HTT_STATS_TX_TID_DETAILS_V1_TAG = 69,
+ HTT_STATS_PDEV_CCA_1SEC_HIST_TAG = 70,
+ HTT_STATS_PDEV_CCA_100MSEC_HIST_TAG = 71,
+ HTT_STATS_PDEV_CCA_STAT_CUMULATIVE_TAG = 72,
+ HTT_STATS_PDEV_CCA_COUNTERS_TAG = 73,
+ HTT_STATS_TX_PDEV_MPDU_STATS_TAG = 74,
+ HTT_STATS_PDEV_TWT_SESSIONS_TAG = 75,
+ HTT_STATS_PDEV_TWT_SESSION_TAG = 76,
+ HTT_STATS_RX_REFILL_RXDMA_ERR_TAG = 77,
+ HTT_STATS_RX_REFILL_REO_ERR_TAG = 78,
+ HTT_STATS_RX_REO_RESOURCE_STATS_TAG = 79,
+ HTT_STATS_TX_SOUNDING_STATS_TAG = 80,
+ HTT_STATS_TX_PDEV_TX_PPDU_STATS_TAG = 81,
+ HTT_STATS_TX_PDEV_TRIED_MPDU_CNT_HIST_TAG = 82,
+ HTT_STATS_TX_HWQ_TRIED_MPDU_CNT_HIST_TAG = 83,
+ HTT_STATS_TX_HWQ_TXOP_USED_CNT_HIST_TAG = 84,
+ HTT_STATS_TX_DE_FW2WBM_RING_FULL_HIST_TAG = 85,
+ HTT_STATS_SCHED_TXQ_SCHED_ORDER_SU_TAG = 86,
+ HTT_STATS_SCHED_TXQ_SCHED_INELIGIBILITY_TAG = 87,
+ HTT_STATS_PDEV_OBSS_PD_TAG = 88,
+ HTT_STATS_HW_WAR_TAG = 89,
+ HTT_STATS_RING_BACKPRESSURE_STATS_TAG = 90,
+ HTT_STATS_PEER_CTRL_PATH_TXRX_STATS_TAG = 101,
+ HTT_STATS_PDEV_TX_RATE_TXBF_STATS_TAG = 108,
+ HTT_STATS_TXBF_OFDMA_NDPA_STATS_TAG = 113,
+ HTT_STATS_TXBF_OFDMA_NDP_STATS_TAG = 114,
+ HTT_STATS_TXBF_OFDMA_BRP_STATS_TAG = 115,
+ HTT_STATS_TXBF_OFDMA_STEER_STATS_TAG = 116,
+ HTT_STATS_PHY_COUNTERS_TAG = 121,
+ HTT_STATS_PHY_STATS_TAG = 122,
+ HTT_STATS_PHY_RESET_COUNTERS_TAG = 123,
+ HTT_STATS_PHY_RESET_STATS_TAG = 124,
+
+ HTT_STATS_MAX_TAG,
+};
+
+#define HTT_STATS_MAX_STRING_SZ32 4
+#define HTT_STATS_MACID_INVALID 0xff
+#define HTT_TX_HWQ_MAX_DIFS_LATENCY_BINS 10
+#define HTT_TX_HWQ_MAX_CMD_RESULT_STATS 13
+#define HTT_TX_HWQ_MAX_CMD_STALL_STATS 5
+#define HTT_TX_HWQ_MAX_FES_RESULT_STATS 10
+
+enum htt_tx_pdev_underrun_enum {
+ HTT_STATS_TX_PDEV_NO_DATA_UNDERRUN = 0,
+ HTT_STATS_TX_PDEV_DATA_UNDERRUN_BETWEEN_MPDU = 1,
+ HTT_STATS_TX_PDEV_DATA_UNDERRUN_WITHIN_MPDU = 2,
+ HTT_TX_PDEV_MAX_URRN_STATS = 3,
+};
+
+#define HTT_TX_PDEV_MAX_FLUSH_REASON_STATS 71
+#define HTT_TX_PDEV_MAX_SIFS_BURST_STATS 9
+#define HTT_TX_PDEV_MAX_SIFS_BURST_HIST_STATS 10
+#define HTT_TX_PDEV_MAX_PHY_ERR_STATS 18
+#define HTT_TX_PDEV_SCHED_TX_MODE_MAX 4
+#define HTT_TX_PDEV_NUM_SCHED_ORDER_LOG 20
+
+#define HTT_RX_STATS_REFILL_MAX_RING 4
+#define HTT_RX_STATS_RXDMA_MAX_ERR 16
+#define HTT_RX_STATS_FW_DROP_REASON_MAX 16
+
+/* Bytes stored in little endian order */
+/* Length should be multiple of DWORD */
+struct htt_stats_string_tlv {
+ /* Can be variable length */
+ DECLARE_FLEX_ARRAY(u32, data);
+} __packed;
+
+#define HTT_STATS_MAC_ID GENMASK(7, 0)
+
+/* == TX PDEV STATS == */
+struct htt_tx_pdev_stats_cmn_tlv {
+ u32 mac_id__word;
+ u32 hw_queued;
+ u32 hw_reaped;
+ u32 underrun;
+ u32 hw_paused;
+ u32 hw_flush;
+ u32 hw_filt;
+ u32 tx_abort;
+ u32 mpdu_requeued;
+ u32 tx_xretry;
+ u32 data_rc;
+ u32 mpdu_dropped_xretry;
+ u32 illgl_rate_phy_err;
+ u32 cont_xretry;
+ u32 tx_timeout;
+ u32 pdev_resets;
+ u32 phy_underrun;
+ u32 txop_ovf;
+ u32 seq_posted;
+ u32 seq_failed_queueing;
+ u32 seq_completed;
+ u32 seq_restarted;
+ u32 mu_seq_posted;
+ u32 seq_switch_hw_paused;
+ u32 next_seq_posted_dsr;
+ u32 seq_posted_isr;
+ u32 seq_ctrl_cached;
+ u32 mpdu_count_tqm;
+ u32 msdu_count_tqm;
+ u32 mpdu_removed_tqm;
+ u32 msdu_removed_tqm;
+ u32 mpdus_sw_flush;
+ u32 mpdus_hw_filter;
+ u32 mpdus_truncated;
+ u32 mpdus_ack_failed;
+ u32 mpdus_expired;
+ u32 mpdus_seq_hw_retry;
+ u32 ack_tlv_proc;
+ u32 coex_abort_mpdu_cnt_valid;
+ u32 coex_abort_mpdu_cnt;
+ u32 num_total_ppdus_tried_ota;
+ u32 num_data_ppdus_tried_ota;
+ u32 local_ctrl_mgmt_enqued;
+ u32 local_ctrl_mgmt_freed;
+ u32 local_data_enqued;
+ u32 local_data_freed;
+ u32 mpdu_tried;
+ u32 isr_wait_seq_posted;
+
+ u32 tx_active_dur_us_low;
+ u32 tx_active_dur_us_high;
+};
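
The mac_id__word naming used throughout these TLVs means several sub-fields share a single u32; a minimal sketch (illustrative only, the helper name is hypothetical) of pulling mac_id back out with the HTT_STATS_MAC_ID mask defined above:

/* Illustrative only: mac_id occupies bits [7:0] of mac_id__word,
 * per HTT_STATS_MAC_ID; the remaining bits are reserved.
 */
static u8 htt_stats_cmn_mac_id(const struct htt_tx_pdev_stats_cmn_tlv *tlv)
{
	return FIELD_GET(HTT_STATS_MAC_ID, tlv->mac_id__word);
}
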
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_tx_pdev_stats_urrn_tlv_v {
+ /* HTT_TX_PDEV_MAX_URRN_STATS */
+ DECLARE_FLEX_ARRAY(u32, urrn_stats);
+};
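
The "use length spec to infer array size" note recurs on every *_tlv_v structure below. A minimal sketch of what that means in practice (illustrative; tag_len stands for the byte length carried in the TLV header that the iterator hands to each printer):

/* Illustrative only: the flexible array holds tag_len / sizeof(u32)
 * entries, capped at the compile-time maximum documented for the tag.
 */
static u16 htt_urrn_stats_count(u16 tag_len)
{
	return min_t(u16, tag_len / sizeof(u32), HTT_TX_PDEV_MAX_URRN_STATS);
}
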
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_tx_pdev_stats_flush_tlv_v {
+ /* HTT_TX_PDEV_MAX_FLUSH_REASON_STATS */
+ DECLARE_FLEX_ARRAY(u32, flush_errs);
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_tx_pdev_stats_sifs_tlv_v {
+ /* HTT_TX_PDEV_MAX_SIFS_BURST_STATS */
+ DECLARE_FLEX_ARRAY(u32, sifs_status);
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_tx_pdev_stats_phy_err_tlv_v {
+ /* HTT_TX_PDEV_MAX_PHY_ERR_STATS */
+ DECLARE_FLEX_ARRAY(u32, phy_errs);
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_tx_pdev_stats_sifs_hist_tlv_v {
+ /* HTT_TX_PDEV_SIFS_BURST_HIST_STATS */
+ DECLARE_FLEX_ARRAY(u32, sifs_hist_status);
+};
+
+struct htt_tx_pdev_stats_tx_ppdu_stats_tlv_v {
+ u32 num_data_ppdus_legacy_su;
+ u32 num_data_ppdus_ac_su;
+ u32 num_data_ppdus_ax_su;
+ u32 num_data_ppdus_ac_su_txbf;
+ u32 num_data_ppdus_ax_su_txbf;
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size.
+ *
+ * tried_mpdu_cnt_hist is the histogram of MPDU tries per HWQ.
+ * The tries here is the count of the MPDUs within a PPDU that the
+ * HW had attempted to transmit on air, for the HWSCH schedule
+ * command submitted by FW. It is not the retry attempts.
+ * The histogram bins are 0-29, 30-59, 60-89 and so on. There are
+ * 10 bins in this histogram. They are defined in FW using the
+ * following macros:
+ * #define WAL_MAX_TRIED_MPDU_CNT_HISTOGRAM 9
+ * #define WAL_TRIED_MPDU_CNT_HISTOGRAM_INTERVAL 30
+ */
+struct htt_tx_pdev_stats_tried_mpdu_cnt_hist_tlv_v {
+ u32 hist_bin_size;
+ u32 tried_mpdu_cnt_hist[]; /* HTT_TX_PDEV_TRIED_MPDU_CNT_HIST */
+};
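
Given the 30-MPDU interval described above, the firmware-side binning implied by the comment reduces to the following sketch (not code from this patch):

/* Illustrative only: map a per-PPDU tried-MPDU count to its bin.
 * Bins are 0-29, 30-59, ..., with everything from 270 up falling into
 * the last of the 10 bins (index WAL_MAX_TRIED_MPDU_CNT_HISTOGRAM).
 */
static u32 tried_mpdu_hist_bin(u32 tried_cnt)
{
	return min(tried_cnt / 30, 9U);
}
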
+
+/* == SOC ERROR STATS == */
+
+/* =============== PDEV ERROR STATS ============== */
+#define HTT_STATS_MAX_HW_INTR_NAME_LEN 8
+struct htt_hw_stats_intr_misc_tlv {
+ /* Stored as little endian */
+ u8 hw_intr_name[HTT_STATS_MAX_HW_INTR_NAME_LEN];
+ u32 mask;
+ u32 count;
+};
+
+#define HTT_STATS_MAX_HW_MODULE_NAME_LEN 8
+struct htt_hw_stats_wd_timeout_tlv {
+ /* Stored as little endian */
+ u8 hw_module_name[HTT_STATS_MAX_HW_MODULE_NAME_LEN];
+ u32 count;
+};
+
+struct htt_hw_stats_pdev_errs_tlv {
+ u32 mac_id__word; /* BIT [ 7 : 0] : mac_id */
+ u32 tx_abort;
+ u32 tx_abort_fail_count;
+ u32 rx_abort;
+ u32 rx_abort_fail_count;
+ u32 warm_reset;
+ u32 cold_reset;
+ u32 tx_flush;
+ u32 tx_glb_reset;
+ u32 tx_txq_reset;
+ u32 rx_timeout_reset;
+};
+
+struct htt_hw_stats_whal_tx_tlv {
+ u32 mac_id__word;
+ u32 last_unpause_ppdu_id;
+ u32 hwsch_unpause_wait_tqm_write;
+ u32 hwsch_dummy_tlv_skipped;
+ u32 hwsch_misaligned_offset_received;
+ u32 hwsch_reset_count;
+ u32 hwsch_dev_reset_war;
+ u32 hwsch_delayed_pause;
+ u32 hwsch_long_delayed_pause;
+ u32 sch_rx_ppdu_no_response;
+ u32 sch_selfgen_response;
+ u32 sch_rx_sifs_resp_trigger;
+};
+
+/* ============ PEER STATS ============ */
+#define HTT_MSDU_FLOW_STATS_TX_FLOW_NO GENMASK(15, 0)
+#define HTT_MSDU_FLOW_STATS_TID_NUM GENMASK(19, 16)
+#define HTT_MSDU_FLOW_STATS_DROP_RULE BIT(20)
+
+struct htt_msdu_flow_stats_tlv {
+ u32 last_update_timestamp;
+ u32 last_add_timestamp;
+ u32 last_remove_timestamp;
+ u32 total_processed_msdu_count;
+ u32 cur_msdu_count_in_flowq;
+ u32 sw_peer_id;
+ u32 tx_flow_no__tid_num__drop_rule;
+ u32 last_cycle_enqueue_count;
+ u32 last_cycle_dequeue_count;
+ u32 last_cycle_drop_count;
+ u32 current_drop_th;
+};
+
+#define MAX_HTT_TID_NAME 8
+
+#define HTT_TX_TID_STATS_SW_PEER_ID GENMASK(15, 0)
+#define HTT_TX_TID_STATS_TID_NUM GENMASK(31, 16)
+#define HTT_TX_TID_STATS_NUM_SCHED_PENDING GENMASK(7, 0)
+#define HTT_TX_TID_STATS_NUM_PPDU_IN_HWQ GENMASK(15, 8)
+
+/* Tidq stats */
+struct htt_tx_tid_stats_tlv {
+ /* Stored as little endian */
+ u8 tid_name[MAX_HTT_TID_NAME];
+ u32 sw_peer_id__tid_num;
+ u32 num_sched_pending__num_ppdu_in_hwq;
+ u32 tid_flags;
+ u32 hw_queued;
+ u32 hw_reaped;
+ u32 mpdus_hw_filter;
+
+ u32 qdepth_bytes;
+ u32 qdepth_num_msdu;
+ u32 qdepth_num_mpdu;
+ u32 last_scheduled_tsmp;
+ u32 pause_module_id;
+ u32 block_module_id;
+ u32 tid_tx_airtime;
+};
+
+#define HTT_TX_TID_STATS_V1_SW_PEER_ID GENMASK(15, 0)
+#define HTT_TX_TID_STATS_V1_TID_NUM GENMASK(31, 16)
+#define HTT_TX_TID_STATS_V1_NUM_SCHED_PENDING GENMASK(7, 0)
+#define HTT_TX_TID_STATS_V1_NUM_PPDU_IN_HWQ GENMASK(15, 8)
+
+/* Tidq stats */
+struct htt_tx_tid_stats_v1_tlv {
+ /* Stored as little endian */
+ u8 tid_name[MAX_HTT_TID_NAME];
+ u32 sw_peer_id__tid_num;
+ u32 num_sched_pending__num_ppdu_in_hwq;
+ u32 tid_flags;
+ u32 max_qdepth_bytes;
+ u32 max_qdepth_n_msdus;
+ u32 rsvd;
+
+ u32 qdepth_bytes;
+ u32 qdepth_num_msdu;
+ u32 qdepth_num_mpdu;
+ u32 last_scheduled_tsmp;
+ u32 pause_module_id;
+ u32 block_module_id;
+ u32 tid_tx_airtime;
+ u32 allow_n_flags;
+ u32 sendn_frms_allowed;
+};
+
+#define HTT_RX_TID_STATS_SW_PEER_ID GENMASK(15, 0)
+#define HTT_RX_TID_STATS_TID_NUM GENMASK(31, 16)
+
+struct htt_rx_tid_stats_tlv {
+ u32 sw_peer_id__tid_num;
+ u8 tid_name[MAX_HTT_TID_NAME];
+ u32 dup_in_reorder;
+ u32 dup_past_outside_window;
+ u32 dup_past_within_window;
+ u32 rxdesc_err_decrypt;
+ u32 tid_rx_airtime;
+};
+
+#define HTT_MAX_COUNTER_NAME 8
+struct htt_counter_tlv {
+ u8 counter_name[HTT_MAX_COUNTER_NAME];
+ u32 count;
+};
+
+struct htt_peer_stats_cmn_tlv {
+ u32 ppdu_cnt;
+ u32 mpdu_cnt;
+ u32 msdu_cnt;
+ u32 pause_bitmap;
+ u32 block_bitmap;
+ u32 current_timestamp;
+ u32 peer_tx_airtime;
+ u32 peer_rx_airtime;
+ s32 rssi;
+ u32 peer_enqueued_count_low;
+ u32 peer_enqueued_count_high;
+ u32 peer_dequeued_count_low;
+ u32 peer_dequeued_count_high;
+ u32 peer_dropped_count_low;
+ u32 peer_dropped_count_high;
+ u32 ppdu_transmitted_bytes_low;
+ u32 ppdu_transmitted_bytes_high;
+ u32 peer_ttl_removed_count;
+ u32 inactive_time;
+};
+
+#define HTT_PEER_DETAILS_VDEV_ID GENMASK(7, 0)
+#define HTT_PEER_DETAILS_PDEV_ID GENMASK(15, 8)
+#define HTT_PEER_DETAILS_AST_IDX GENMASK(31, 16)
+
+struct htt_peer_details_tlv {
+ u32 peer_type;
+ u32 sw_peer_id;
+ u32 vdev_pdev_ast_idx;
+ struct htt_mac_addr mac_addr;
+ u32 peer_flags;
+ u32 qpeer_flags;
+};
+
+enum htt_stats_param_type {
+ HTT_STATS_PREAM_OFDM,
+ HTT_STATS_PREAM_CCK,
+ HTT_STATS_PREAM_HT,
+ HTT_STATS_PREAM_VHT,
+ HTT_STATS_PREAM_HE,
+ HTT_STATS_PREAM_RSVD,
+ HTT_STATS_PREAM_RSVD1,
+
+ HTT_STATS_PREAM_COUNT,
+};
+
+#define HTT_TX_PEER_STATS_NUM_MCS_COUNTERS 12
+#define HTT_TX_PEER_STATS_NUM_GI_COUNTERS 4
+#define HTT_TX_PEER_STATS_NUM_DCM_COUNTERS 5
+#define HTT_TX_PEER_STATS_NUM_BW_COUNTERS 4
+#define HTT_TX_PEER_STATS_NUM_SPATIAL_STREAMS 8
+#define HTT_TX_PEER_STATS_NUM_PREAMBLE_TYPES HTT_STATS_PREAM_COUNT
+
+struct htt_tx_peer_rate_stats_tlv {
+ u32 tx_ldpc;
+ u32 rts_cnt;
+ u32 ack_rssi;
+
+ u32 tx_mcs[HTT_TX_PEER_STATS_NUM_MCS_COUNTERS];
+ u32 tx_su_mcs[HTT_TX_PEER_STATS_NUM_MCS_COUNTERS];
+ u32 tx_mu_mcs[HTT_TX_PEER_STATS_NUM_MCS_COUNTERS];
+ /* element 0,1, ...7 -> NSS 1,2, ...8 */
+ u32 tx_nss[HTT_TX_PEER_STATS_NUM_SPATIAL_STREAMS];
+ /* element 0: 20 MHz, 1: 40 MHz, 2: 80 MHz, 3: 160 and 80+80 MHz */
+ u32 tx_bw[HTT_TX_PEER_STATS_NUM_BW_COUNTERS];
+ u32 tx_stbc[HTT_TX_PEER_STATS_NUM_MCS_COUNTERS];
+ u32 tx_pream[HTT_TX_PEER_STATS_NUM_PREAMBLE_TYPES];
+
+ /* Counters to track number of tx packets in each GI
+ * (400us, 800us, 1600us & 3200us) in each mcs (0-11)
+ */
+ u32 tx_gi[HTT_TX_PEER_STATS_NUM_GI_COUNTERS][HTT_TX_PEER_STATS_NUM_MCS_COUNTERS];
+
+ /* Counters to track packets in dcm mcs (MCS 0, 1, 3, 4) */
+ u32 tx_dcm[HTT_TX_PEER_STATS_NUM_DCM_COUNTERS];
+};
+
+#define HTT_RX_PEER_STATS_NUM_MCS_COUNTERS 12
+#define HTT_RX_PEER_STATS_NUM_GI_COUNTERS 4
+#define HTT_RX_PEER_STATS_NUM_DCM_COUNTERS 5
+#define HTT_RX_PEER_STATS_NUM_BW_COUNTERS 4
+#define HTT_RX_PEER_STATS_NUM_SPATIAL_STREAMS 8
+#define HTT_RX_PEER_STATS_NUM_PREAMBLE_TYPES HTT_STATS_PREAM_COUNT
+
+struct htt_rx_peer_rate_stats_tlv {
+ u32 nsts;
+
+ /* Number of rx ldpc packets */
+ u32 rx_ldpc;
+ /* Number of rx rts packets */
+ u32 rts_cnt;
+
+ u32 rssi_mgmt; /* units = dB above noise floor */
+ u32 rssi_data; /* units = dB above noise floor */
+ u32 rssi_comb; /* units = dB above noise floor */
+ u32 rx_mcs[HTT_RX_PEER_STATS_NUM_MCS_COUNTERS];
+ /* element 0,1, ...7 -> NSS 1,2, ...8 */
+ u32 rx_nss[HTT_RX_PEER_STATS_NUM_SPATIAL_STREAMS];
+ u32 rx_dcm[HTT_RX_PEER_STATS_NUM_DCM_COUNTERS];
+ u32 rx_stbc[HTT_RX_PEER_STATS_NUM_MCS_COUNTERS];
+ /* element 0: 20 MHz, 1: 40 MHz, 2: 80 MHz, 3: 160 and 80+80 MHz */
+ u32 rx_bw[HTT_RX_PEER_STATS_NUM_BW_COUNTERS];
+ u32 rx_pream[HTT_RX_PEER_STATS_NUM_PREAMBLE_TYPES];
+ /* units = dB above noise floor */
+ u8 rssi_chain[HTT_RX_PEER_STATS_NUM_SPATIAL_STREAMS]
+ [HTT_RX_PEER_STATS_NUM_BW_COUNTERS];
+
+ /* Counters to track number of rx packets in each GI in each mcs (0-11) */
+ u32 rx_gi[HTT_RX_PEER_STATS_NUM_GI_COUNTERS]
+ [HTT_RX_PEER_STATS_NUM_MCS_COUNTERS];
+};
+
+enum htt_peer_stats_req_mode {
+ HTT_PEER_STATS_REQ_MODE_NO_QUERY,
+ HTT_PEER_STATS_REQ_MODE_QUERY_TQM,
+ HTT_PEER_STATS_REQ_MODE_FLUSH_TQM,
+};
+
+enum htt_peer_stats_tlv_enum {
+ HTT_PEER_STATS_CMN_TLV = 0,
+ HTT_PEER_DETAILS_TLV = 1,
+ HTT_TX_PEER_RATE_STATS_TLV = 2,
+ HTT_RX_PEER_RATE_STATS_TLV = 3,
+ HTT_TX_TID_STATS_TLV = 4,
+ HTT_RX_TID_STATS_TLV = 5,
+ HTT_MSDU_FLOW_STATS_TLV = 6,
+
+ HTT_PEER_STATS_MAX_TLV = 31,
+};
+
+/* =========== MUMIMO HWQ stats =========== */
+/* MU MIMO stats per hwQ */
+struct htt_tx_hwq_mu_mimo_sch_stats_tlv {
+ u32 mu_mimo_sch_posted;
+ u32 mu_mimo_sch_failed;
+ u32 mu_mimo_ppdu_posted;
+};
+
+struct htt_tx_hwq_mu_mimo_mpdu_stats_tlv {
+ u32 mu_mimo_mpdus_queued_usr;
+ u32 mu_mimo_mpdus_tried_usr;
+ u32 mu_mimo_mpdus_failed_usr;
+ u32 mu_mimo_mpdus_requeued_usr;
+ u32 mu_mimo_err_no_ba_usr;
+ u32 mu_mimo_mpdu_underrun_usr;
+ u32 mu_mimo_ampdu_underrun_usr;
+};
+
+#define HTT_TX_HWQ_STATS_MAC_ID GENMASK(7, 0)
+#define HTT_TX_HWQ_STATS_HWQ_ID GENMASK(15, 8)
+
+struct htt_tx_hwq_mu_mimo_cmn_stats_tlv {
+ u32 mac_id__hwq_id__word;
+};
+
+/* == TX HWQ STATS == */
+struct htt_tx_hwq_stats_cmn_tlv {
+ u32 mac_id__hwq_id__word;
+
+ /* PPDU level stats */
+ u32 xretry;
+ u32 underrun_cnt;
+ u32 flush_cnt;
+ u32 filt_cnt;
+ u32 null_mpdu_bmap;
+ u32 user_ack_failure;
+ u32 ack_tlv_proc;
+ u32 sched_id_proc;
+ u32 null_mpdu_tx_count;
+ u32 mpdu_bmap_not_recvd;
+
+ /* Selfgen stats per hwQ */
+ u32 num_bar;
+ u32 rts;
+ u32 cts2self;
+ u32 qos_null;
+
+ /* MPDU level stats */
+ u32 mpdu_tried_cnt;
+ u32 mpdu_queued_cnt;
+ u32 mpdu_ack_fail_cnt;
+ u32 mpdu_filt_cnt;
+ u32 false_mpdu_ack_count;
+
+ u32 txq_timeout;
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_tx_hwq_difs_latency_stats_tlv_v {
+ u32 hist_intvl;
+ /* histogram of ppdu post to hwsch -> cmd status received */
+ u32 difs_latency_hist[]; /* HTT_TX_HWQ_MAX_DIFS_LATENCY_BINS */
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_tx_hwq_cmd_result_stats_tlv_v {
+ /* Histogram of sched cmd result, HTT_TX_HWQ_MAX_CMD_RESULT_STATS */
+ DECLARE_FLEX_ARRAY(u32, cmd_result);
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_tx_hwq_cmd_stall_stats_tlv_v {
+ /* Histogram of various pause conditions, HTT_TX_HWQ_MAX_CMD_STALL_STATS */
+ DECLARE_FLEX_ARRAY(u32, cmd_stall_status);
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_tx_hwq_fes_result_stats_tlv_v {
+ /* Histogram of number of user fes result, HTT_TX_HWQ_MAX_FES_RESULT_STATS */
+ DECLARE_FLEX_ARRAY(u32, fes_result);
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size
+ *
+ * The hwq_tried_mpdu_cnt_hist is a histogram of MPDU tries per HWQ.
+ * The tries here is the count of the MPDUs within a PPDU that the HW
+ * had attempted to transmit on air, for the HWSCH schedule command
+ * submitted by FW in this HWQ. It is not the retry attempts. The
+ * histogram bins are 0-29, 30-59, 60-89 and so on. There are 10 bins
+ * in this histogram.
+ * They are defined in FW using the following macros:
+ * #define WAL_MAX_TRIED_MPDU_CNT_HISTOGRAM 9
+ * #define WAL_TRIED_MPDU_CNT_HISTOGRAM_INTERVAL 30
+ */
+struct htt_tx_hwq_tried_mpdu_cnt_hist_tlv_v {
+ u32 hist_bin_size;
+ /* Histogram of number of mpdus on tried mpdu */
+ u32 tried_mpdu_cnt_hist[]; /* HTT_TX_HWQ_TRIED_MPDU_CNT_HIST */
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size
+ *
+ * The txop_used_cnt_hist is the histogram of txop used per burst. After
+ * completing the burst, we identify the txop used in the burst and
+ * increment the corresponding bin.
+ * Each bin represents 1 ms and there are 10 bins in this histogram.
+ * They are defined in FW using the following macros:
+ * #define WAL_MAX_TXOP_USED_CNT_HISTOGRAM 10
+ * #define WAL_TXOP_USED_HISTOGRAM_INTERVAL 1000 ( 1 ms )
+ */
+struct htt_tx_hwq_txop_used_cnt_hist_tlv_v {
+ /* Histogram of txop used cnt, HTT_TX_HWQ_TXOP_USED_CNT_HIST */
+ DECLARE_FLEX_ARRAY(u32, txop_used_cnt_hist);
+};
+
+/* == TX SELFGEN STATS == */
+struct htt_tx_selfgen_cmn_stats_tlv {
+ u32 mac_id__word;
+ u32 su_bar;
+ u32 rts;
+ u32 cts2self;
+ u32 qos_null;
+ u32 delayed_bar_1; /* MU user 1 */
+ u32 delayed_bar_2; /* MU user 2 */
+ u32 delayed_bar_3; /* MU user 3 */
+ u32 delayed_bar_4; /* MU user 4 */
+ u32 delayed_bar_5; /* MU user 5 */
+ u32 delayed_bar_6; /* MU user 6 */
+ u32 delayed_bar_7; /* MU user 7 */
+};
+
+struct htt_tx_selfgen_ac_stats_tlv {
+ /* 11AC */
+ u32 ac_su_ndpa;
+ u32 ac_su_ndp;
+ u32 ac_mu_mimo_ndpa;
+ u32 ac_mu_mimo_ndp;
+ u32 ac_mu_mimo_brpoll_1; /* MU user 1 */
+ u32 ac_mu_mimo_brpoll_2; /* MU user 2 */
+ u32 ac_mu_mimo_brpoll_3; /* MU user 3 */
+};
+
+struct htt_tx_selfgen_ax_stats_tlv {
+ /* 11AX */
+ u32 ax_su_ndpa;
+ u32 ax_su_ndp;
+ u32 ax_mu_mimo_ndpa;
+ u32 ax_mu_mimo_ndp;
+ u32 ax_mu_mimo_brpoll_1; /* MU user 1 */
+ u32 ax_mu_mimo_brpoll_2; /* MU user 2 */
+ u32 ax_mu_mimo_brpoll_3; /* MU user 3 */
+ u32 ax_mu_mimo_brpoll_4; /* MU user 4 */
+ u32 ax_mu_mimo_brpoll_5; /* MU user 5 */
+ u32 ax_mu_mimo_brpoll_6; /* MU user 6 */
+ u32 ax_mu_mimo_brpoll_7; /* MU user 7 */
+ u32 ax_basic_trigger;
+ u32 ax_bsr_trigger;
+ u32 ax_mu_bar_trigger;
+ u32 ax_mu_rts_trigger;
+ u32 ax_ulmumimo_trigger;
+};
+
+struct htt_tx_selfgen_ac_err_stats_tlv {
+ /* 11AC error stats */
+ u32 ac_su_ndp_err;
+ u32 ac_su_ndpa_err;
+ u32 ac_mu_mimo_ndpa_err;
+ u32 ac_mu_mimo_ndp_err;
+ u32 ac_mu_mimo_brp1_err;
+ u32 ac_mu_mimo_brp2_err;
+ u32 ac_mu_mimo_brp3_err;
+};
+
+struct htt_tx_selfgen_ax_err_stats_tlv {
+ /* 11AX error stats */
+ u32 ax_su_ndp_err;
+ u32 ax_su_ndpa_err;
+ u32 ax_mu_mimo_ndpa_err;
+ u32 ax_mu_mimo_ndp_err;
+ u32 ax_mu_mimo_brp1_err;
+ u32 ax_mu_mimo_brp2_err;
+ u32 ax_mu_mimo_brp3_err;
+ u32 ax_mu_mimo_brp4_err;
+ u32 ax_mu_mimo_brp5_err;
+ u32 ax_mu_mimo_brp6_err;
+ u32 ax_mu_mimo_brp7_err;
+ u32 ax_basic_trigger_err;
+ u32 ax_bsr_trigger_err;
+ u32 ax_mu_bar_trigger_err;
+ u32 ax_mu_rts_trigger_err;
+ u32 ax_ulmumimo_trigger_err;
+};
+
+/* == TX MU STATS == */
+#define HTT_TX_PDEV_STATS_NUM_AC_MUMIMO_USER_STATS 4
+#define HTT_TX_PDEV_STATS_NUM_AX_MUMIMO_USER_STATS 8
+#define HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS 74
+#define HTT_TX_PDEV_STATS_NUM_UL_MUMIMO_USER_STATS 8
+
+struct htt_tx_pdev_mu_mimo_sch_stats_tlv {
+ /* mu-mimo sw sched cmd stats */
+ u32 mu_mimo_sch_posted;
+ u32 mu_mimo_sch_failed;
+ /* MU PPDU stats per hwQ */
+ u32 mu_mimo_ppdu_posted;
+ /*
+ * Counts the number of users in each transmission of
+ * the given TX mode.
+ *
+ * Index is the number of users - 1.
+ */
+ u32 ac_mu_mimo_sch_nusers[HTT_TX_PDEV_STATS_NUM_AC_MUMIMO_USER_STATS];
+ u32 ax_mu_mimo_sch_nusers[HTT_TX_PDEV_STATS_NUM_AX_MUMIMO_USER_STATS];
+ u32 ax_ofdma_sch_nusers[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+ u32 ax_ul_ofdma_basic_sch_nusers[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+ u32 ax_ul_ofdma_bsr_sch_nusers[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+ u32 ax_ul_ofdma_bar_sch_nusers[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+ u32 ax_ul_ofdma_brp_sch_nusers[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+
+ /* UL MU-MIMO */
+ /* ax_ul_mumimo_basic_sch_nusers[i] is the number of basic triggers sent
+ * for (i+1) users
+ */
+ u32 ax_ul_mumimo_basic_sch_nusers[HTT_TX_PDEV_STATS_NUM_UL_MUMIMO_USER_STATS];
+
+ /* ax_ul_mumimo_brp_sch_nusers[i] is the number of brp triggers sent
+ * for (i+1) users
+ */
+ u32 ax_ul_mumimo_brp_sch_nusers[HTT_TX_PDEV_STATS_NUM_UL_MUMIMO_USER_STATS];
+
+ u32 ac_mu_mimo_sch_posted_per_grp_sz[HTT_TX_PDEV_STATS_NUM_AC_MUMIMO_USER_STATS];
+ u32 ax_mu_mimo_sch_posted_per_grp_sz[HTT_TX_PDEV_STATS_NUM_AX_MUMIMO_USER_STATS];
+};
+
+struct htt_tx_pdev_mu_mimo_mpdu_stats_tlv {
+ u32 mu_mimo_mpdus_queued_usr;
+ u32 mu_mimo_mpdus_tried_usr;
+ u32 mu_mimo_mpdus_failed_usr;
+ u32 mu_mimo_mpdus_requeued_usr;
+ u32 mu_mimo_err_no_ba_usr;
+ u32 mu_mimo_mpdu_underrun_usr;
+ u32 mu_mimo_ampdu_underrun_usr;
+
+ u32 ax_mu_mimo_mpdus_queued_usr;
+ u32 ax_mu_mimo_mpdus_tried_usr;
+ u32 ax_mu_mimo_mpdus_failed_usr;
+ u32 ax_mu_mimo_mpdus_requeued_usr;
+ u32 ax_mu_mimo_err_no_ba_usr;
+ u32 ax_mu_mimo_mpdu_underrun_usr;
+ u32 ax_mu_mimo_ampdu_underrun_usr;
+
+ u32 ax_ofdma_mpdus_queued_usr;
+ u32 ax_ofdma_mpdus_tried_usr;
+ u32 ax_ofdma_mpdus_failed_usr;
+ u32 ax_ofdma_mpdus_requeued_usr;
+ u32 ax_ofdma_err_no_ba_usr;
+ u32 ax_ofdma_mpdu_underrun_usr;
+ u32 ax_ofdma_ampdu_underrun_usr;
+};
+
+#define HTT_STATS_TX_SCHED_MODE_MU_MIMO_AC 1
+#define HTT_STATS_TX_SCHED_MODE_MU_MIMO_AX 2
+#define HTT_STATS_TX_SCHED_MODE_MU_OFDMA_AX 3
+
+struct htt_tx_pdev_mpdu_stats_tlv {
+ /* mpdu level stats */
+ u32 mpdus_queued_usr;
+ u32 mpdus_tried_usr;
+ u32 mpdus_failed_usr;
+ u32 mpdus_requeued_usr;
+ u32 err_no_ba_usr;
+ u32 mpdu_underrun_usr;
+ u32 ampdu_underrun_usr;
+ u32 user_index;
+ u32 tx_sched_mode; /* HTT_STATS_TX_SCHED_MODE_xxx */
+};
+
+/* == TX SCHED STATS == */
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_sched_txq_cmd_posted_tlv_v {
+ /* HTT_TX_PDEV_SCHED_TX_MODE_MAX */
+ DECLARE_FLEX_ARRAY(u32, sched_cmd_posted);
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_sched_txq_cmd_reaped_tlv_v {
+ /* HTT_TX_PDEV_SCHED_TX_MODE_MAX */
+ DECLARE_FLEX_ARRAY(u32, sched_cmd_reaped);
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_sched_txq_sched_order_su_tlv_v {
+ /* HTT_TX_PDEV_NUM_SCHED_ORDER_LOG */
+ DECLARE_FLEX_ARRAY(u32, sched_order_su);
+};
+
+enum htt_sched_txq_sched_ineligibility_tlv_enum {
+ HTT_SCHED_TID_SKIP_SCHED_MASK_DISABLED = 0,
+ HTT_SCHED_TID_SKIP_NOTIFY_MPDU,
+ HTT_SCHED_TID_SKIP_MPDU_STATE_INVALID,
+ HTT_SCHED_TID_SKIP_SCHED_DISABLED,
+ HTT_SCHED_TID_SKIP_TQM_BYPASS_CMD_PENDING,
+ HTT_SCHED_TID_SKIP_SECOND_SU_SCHEDULE,
+
+ HTT_SCHED_TID_SKIP_CMD_SLOT_NOT_AVAIL,
+ HTT_SCHED_TID_SKIP_NO_ENQ,
+ HTT_SCHED_TID_SKIP_LOW_ENQ,
+ HTT_SCHED_TID_SKIP_PAUSED,
+ HTT_SCHED_TID_SKIP_UL,
+ HTT_SCHED_TID_REMOVE_PAUSED,
+ HTT_SCHED_TID_REMOVE_NO_ENQ,
+ HTT_SCHED_TID_REMOVE_UL,
+ HTT_SCHED_TID_QUERY,
+ HTT_SCHED_TID_SU_ONLY,
+ HTT_SCHED_TID_ELIGIBLE,
+ HTT_SCHED_INELIGIBILITY_MAX,
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_sched_txq_sched_ineligibility_tlv_v {
+ /* indexed by htt_sched_txq_sched_ineligibility_tlv_enum */
+ DECLARE_FLEX_ARRAY(u32, sched_ineligibility);
+};
+
+#define HTT_TX_PDEV_STATS_SCHED_PER_TXQ_MAC_ID GENMASK(7, 0)
+#define HTT_TX_PDEV_STATS_SCHED_PER_TXQ_ID GENMASK(15, 8)
+
+struct htt_tx_pdev_stats_sched_per_txq_tlv {
+ u32 mac_id__txq_id__word;
+ u32 sched_policy;
+ u32 last_sched_cmd_posted_timestamp;
+ u32 last_sched_cmd_compl_timestamp;
+ u32 sched_2_tac_lwm_count;
+ u32 sched_2_tac_ring_full;
+ u32 sched_cmd_post_failure;
+ u32 num_active_tids;
+ u32 num_ps_schedules;
+ u32 sched_cmds_pending;
+ u32 num_tid_register;
+ u32 num_tid_unregister;
+ u32 num_qstats_queried;
+ u32 qstats_update_pending;
+ u32 last_qstats_query_timestamp;
+ u32 num_tqm_cmdq_full;
+ u32 num_de_sched_algo_trigger;
+ u32 num_rt_sched_algo_trigger;
+ u32 num_tqm_sched_algo_trigger;
+ u32 notify_sched;
+ u32 dur_based_sendn_term;
+};
+
+struct htt_stats_tx_sched_cmn_tlv {
+ /* BIT [ 7 : 0] :- mac_id
+ * BIT [31 : 8] :- reserved
+ */
+ u32 mac_id__word;
+ /* Current timestamp */
+ u32 current_timestamp;
+};
+
+/* == TQM STATS == */
+#define HTT_TX_TQM_MAX_GEN_MPDU_END_REASON 16
+#define HTT_TX_TQM_MAX_LIST_MPDU_END_REASON 16
+#define HTT_TX_TQM_MAX_LIST_MPDU_CNT_HISTOGRAM_BINS 16
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_tx_tqm_gen_mpdu_stats_tlv_v {
+ /* HTT_TX_TQM_MAX_GEN_MPDU_END_REASON */
+ DECLARE_FLEX_ARRAY(u32, gen_mpdu_end_reason);
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_tx_tqm_list_mpdu_stats_tlv_v {
+ /* HTT_TX_TQM_MAX_LIST_MPDU_END_REASON */
+ DECLARE_FLEX_ARRAY(u32, list_mpdu_end_reason);
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_tx_tqm_list_mpdu_cnt_tlv_v {
+ /* HTT_TX_TQM_MAX_LIST_MPDU_CNT_HISTOGRAM_BINS */
+ DECLARE_FLEX_ARRAY(u32, list_mpdu_cnt_hist);
+};
+
+struct htt_tx_tqm_pdev_stats_tlv_v {
+ u32 msdu_count;
+ u32 mpdu_count;
+ u32 remove_msdu;
+ u32 remove_mpdu;
+ u32 remove_msdu_ttl;
+ u32 send_bar;
+ u32 bar_sync;
+ u32 notify_mpdu;
+ u32 sync_cmd;
+ u32 write_cmd;
+ u32 hwsch_trigger;
+ u32 ack_tlv_proc;
+ u32 gen_mpdu_cmd;
+ u32 gen_list_cmd;
+ u32 remove_mpdu_cmd;
+ u32 remove_mpdu_tried_cmd;
+ u32 mpdu_queue_stats_cmd;
+ u32 mpdu_head_info_cmd;
+ u32 msdu_flow_stats_cmd;
+ u32 remove_msdu_cmd;
+ u32 remove_msdu_ttl_cmd;
+ u32 flush_cache_cmd;
+ u32 update_mpduq_cmd;
+ u32 enqueue;
+ u32 enqueue_notify;
+ u32 notify_mpdu_at_head;
+ u32 notify_mpdu_state_valid;
+ /*
+ * On receiving TQM_FLOW_NOT_EMPTY_STATUS from TQM (sent when MSDUs are
+ * enqueued and the flow is non-empty), if the number of MSDUs is greater
+ * than the threshold, notify is incremented. UDP_THRESH counters are for
+ * UDP MSDUs, and NONUDP are for non-UDP MSDUs.
+ * MSDUQ_SWNOTIFY_UDP_THRESH1 threshold - sched_udp_notify1 is incremented
+ * MSDUQ_SWNOTIFY_UDP_THRESH2 threshold - sched_udp_notify2 is incremented
+ * MSDUQ_SWNOTIFY_NONUDP_THRESH1 threshold - sched_nonudp_notify1 is incremented
+ * MSDUQ_SWNOTIFY_NONUDP_THRESH2 threshold - sched_nonudp_notify2 is incremented
+ *
+ * Notify signifies that we trigger the scheduler.
+ */
+ u32 sched_udp_notify1;
+ u32 sched_udp_notify2;
+ u32 sched_nonudp_notify1;
+ u32 sched_nonudp_notify2;
+};
+
+struct htt_tx_tqm_cmn_stats_tlv {
+ u32 mac_id__word;
+ u32 max_cmdq_id;
+ u32 list_mpdu_cnt_hist_intvl;
+
+ /* Global stats */
+ u32 add_msdu;
+ u32 q_empty;
+ u32 q_not_empty;
+ u32 drop_notification;
+ u32 desc_threshold;
+};
+
+struct htt_tx_tqm_error_stats_tlv {
+ /* Error stats */
+ u32 q_empty_failure;
+ u32 q_not_empty_failure;
+ u32 add_msdu_failure;
+};
+
+/* == TQM CMDQ stats == */
+#define HTT_TX_TQM_CMDQ_STATUS_MAC_ID GENMASK(7, 0)
+#define HTT_TX_TQM_CMDQ_STATUS_CMDQ_ID GENMASK(15, 8)
+
+struct htt_tx_tqm_cmdq_status_tlv {
+ u32 mac_id__cmdq_id__word;
+ u32 sync_cmd;
+ u32 write_cmd;
+ u32 gen_mpdu_cmd;
+ u32 mpdu_queue_stats_cmd;
+ u32 mpdu_head_info_cmd;
+ u32 msdu_flow_stats_cmd;
+ u32 remove_mpdu_cmd;
+ u32 remove_msdu_cmd;
+ u32 flush_cache_cmd;
+ u32 update_mpduq_cmd;
+ u32 update_msduq_cmd;
+};
+
+/* == TX-DE STATS == */
+/* Structures for tx de stats */
+struct htt_tx_de_eapol_packets_stats_tlv {
+ u32 m1_packets;
+ u32 m2_packets;
+ u32 m3_packets;
+ u32 m4_packets;
+ u32 g1_packets;
+ u32 g2_packets;
+};
+
+struct htt_tx_de_classify_failed_stats_tlv {
+ u32 ap_bss_peer_not_found;
+ u32 ap_bcast_mcast_no_peer;
+ u32 sta_delete_in_progress;
+ u32 ibss_no_bss_peer;
+ u32 invalid_vdev_type;
+ u32 invalid_ast_peer_entry;
+ u32 peer_entry_invalid;
+ u32 ethertype_not_ip;
+ u32 eapol_lookup_failed;
+ u32 qpeer_not_allow_data;
+ u32 fse_tid_override;
+ u32 ipv6_jumbogram_zero_length;
+ u32 qos_to_non_qos_in_prog;
+};
+
+struct htt_tx_de_classify_stats_tlv {
+ u32 arp_packets;
+ u32 igmp_packets;
+ u32 dhcp_packets;
+ u32 host_inspected;
+ u32 htt_included;
+ u32 htt_valid_mcs;
+ u32 htt_valid_nss;
+ u32 htt_valid_preamble_type;
+ u32 htt_valid_chainmask;
+ u32 htt_valid_guard_interval;
+ u32 htt_valid_retries;
+ u32 htt_valid_bw_info;
+ u32 htt_valid_power;
+ u32 htt_valid_key_flags;
+ u32 htt_valid_no_encryption;
+ u32 fse_entry_count;
+ u32 fse_priority_be;
+ u32 fse_priority_high;
+ u32 fse_priority_low;
+ u32 fse_traffic_ptrn_be;
+ u32 fse_traffic_ptrn_over_sub;
+ u32 fse_traffic_ptrn_bursty;
+ u32 fse_traffic_ptrn_interactive;
+ u32 fse_traffic_ptrn_periodic;
+ u32 fse_hwqueue_alloc;
+ u32 fse_hwqueue_created;
+ u32 fse_hwqueue_send_to_host;
+ u32 mcast_entry;
+ u32 bcast_entry;
+ u32 htt_update_peer_cache;
+ u32 htt_learning_frame;
+ u32 fse_invalid_peer;
+ /*
+ * mec_notify is HTT TX WBM multicast echo check notification
+ * from firmware to host. FW sends SA addresses to host for all
+ * multicast/broadcast packets received on STA side.
+ */
+ u32 mec_notify;
+};
+
+struct htt_tx_de_classify_status_stats_tlv {
+ u32 eok;
+ u32 classify_done;
+ u32 lookup_failed;
+ u32 send_host_dhcp;
+ u32 send_host_mcast;
+ u32 send_host_unknown_dest;
+ u32 send_host;
+ u32 status_invalid;
+};
+
+struct htt_tx_de_enqueue_packets_stats_tlv {
+ u32 enqueued_pkts;
+ u32 to_tqm;
+ u32 to_tqm_bypass;
+};
+
+struct htt_tx_de_enqueue_discard_stats_tlv {
+ u32 discarded_pkts;
+ u32 local_frames;
+ u32 is_ext_msdu;
+};
+
+struct htt_tx_de_compl_stats_tlv {
+ u32 tcl_dummy_frame;
+ u32 tqm_dummy_frame;
+ u32 tqm_notify_frame;
+ u32 fw2wbm_enq;
+ u32 tqm_bypass_frame;
+};
+
+/*
+ * htt_tx_de_fw2wbm_ring_full_hist_tlv is a histogram of the time spent
+ * waiting for a buffer in the FW2WBM release ring. Requesting a buffer
+ * there may fail when none is available, in which case FW sleeps for
+ * 200us and requests again. This is a histogram of that wait time, with
+ * bins of 200ms, and there are 10 bins (2 seconds max).
+ * They are defined by the following macros in FW:
+ * #define ENTRIES_PER_BIN_COUNT 1000 // per bin 1000 * 200us = 200ms
+ * #define RING_FULL_BIN_ENTRIES (WAL_TX_DE_FW2WBM_ALLOC_TIMEOUT_COUNT /
+ *			ENTRIES_PER_BIN_COUNT)
+ */
+struct htt_tx_de_fw2wbm_ring_full_hist_tlv {
+ DECLARE_FLEX_ARRAY(u32, fw2wbm_ring_full_hist);
+};
+
+struct htt_tx_de_cmn_stats_tlv {
+ u32 mac_id__word;
+
+ /* Global Stats */
+ u32 tcl2fw_entry_count;
+ u32 not_to_fw;
+ u32 invalid_pdev_vdev_peer;
+ u32 tcl_res_invalid_addrx;
+ u32 wbm2fw_entry_count;
+ u32 invalid_pdev;
+};
+
+/* == RING-IF STATS == */
+#define HTT_STATS_LOW_WM_BINS 5
+#define HTT_STATS_HIGH_WM_BINS 5
+
+#define HTT_RING_IF_STATS_NUM_ELEMS GENMASK(15, 0)
+#define HTT_RING_IF_STATS_PREFETCH_TAIL_INDEX GENMASK(31, 16)
+#define HTT_RING_IF_STATS_HEAD_IDX GENMASK(15, 0)
+#define HTT_RING_IF_STATS_TAIL_IDX GENMASK(31, 16)
+#define HTT_RING_IF_STATS_SHADOW_HEAD_IDX GENMASK(15, 0)
+#define HTT_RING_IF_STATS_SHADOW_TAIL_IDX GENMASK(31, 16)
+#define HTT_RING_IF_STATS_LWM_THRESH GENMASK(15, 0)
+#define HTT_RING_IF_STATS_HWM_THRESH GENMASK(31, 16)
+
+struct htt_ring_if_stats_tlv {
+ u32 base_addr; /* DWORD aligned base memory address of the ring */
+ u32 elem_size;
+ u32 num_elems__prefetch_tail_idx;
+ u32 head_idx__tail_idx;
+ u32 shadow_head_idx__shadow_tail_idx;
+ u32 num_tail_incr;
+ u32 lwm_thresh__hwm_thresh;
+ u32 overrun_hit_count;
+ u32 underrun_hit_count;
+ u32 prod_blockwait_count;
+ u32 cons_blockwait_count;
+ u32 low_wm_hit_count[HTT_STATS_LOW_WM_BINS];
+ u32 high_wm_hit_count[HTT_STATS_HIGH_WM_BINS];
+};
+
+struct htt_ring_if_cmn_tlv {
+ u32 mac_id__word;
+ u32 num_records;
+};
+
+/* == SFM STATS == */
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_sfm_client_user_tlv_v {
+ /* Number of DWORDS used per user and per client */
+ DECLARE_FLEX_ARRAY(u32, dwords_used_by_user_n);
+};
+
+struct htt_sfm_client_tlv {
+ /* Client ID */
+ u32 client_id;
+ /* Minimum number of buffers */
+ u32 buf_min;
+ /* Maximum number of buffers */
+ u32 buf_max;
+ /* Number of Busy buffers */
+ u32 buf_busy;
+ /* Number of Allocated buffers */
+ u32 buf_alloc;
+ /* Number of Available/Usable buffers */
+ u32 buf_avail;
+ /* Number of users */
+ u32 num_users;
+};
+
+struct htt_sfm_cmn_tlv {
+ u32 mac_id__word;
+ /* Indicates the total number of 128 byte buffers
+ * in the CMEM that are available for buffer sharing
+ */
+ u32 buf_total;
+ /* Indicates that, for a certain client or for all clients,
+ * there is no dword saved in SFM, refer to SFM_R1_MEM_EMPTY
+ */
+ u32 mem_empty;
+ /* DEALLOCATE_BUFFERS, refer to register SFM_R0_DEALLOCATE_BUFFERS */
+ u32 deallocate_bufs;
+ /* Number of Records */
+ u32 num_records;
+};
+
+/* == SRNG STATS == */
+#define HTT_SRING_STATS_MAC_ID GENMASK(7, 0)
+#define HTT_SRING_STATS_RING_ID GENMASK(15, 8)
+#define HTT_SRING_STATS_ARENA GENMASK(23, 16)
+#define HTT_SRING_STATS_EP BIT(24)
+#define HTT_SRING_STATS_NUM_AVAIL_WORDS GENMASK(15, 0)
+#define HTT_SRING_STATS_NUM_VALID_WORDS GENMASK(31, 16)
+#define HTT_SRING_STATS_HEAD_PTR GENMASK(15, 0)
+#define HTT_SRING_STATS_TAIL_PTR GENMASK(31, 16)
+#define HTT_SRING_STATS_CONSUMER_EMPTY GENMASK(15, 0)
+#define HTT_SRING_STATS_PRODUCER_FULL GENMASK(31, 16)
+#define HTT_SRING_STATS_PREFETCH_COUNT GENMASK(15, 0)
+#define HTT_SRING_STATS_INTERNAL_TAIL_PTR GENMASK(31, 16)
+
+struct htt_sring_stats_tlv {
+ u32 mac_id__ring_id__arena__ep;
+ u32 base_addr_lsb; /* DWORD aligned base memory address of the ring */
+ u32 base_addr_msb;
+ u32 ring_size;
+ u32 elem_size;
+
+ u32 num_avail_words__num_valid_words;
+ u32 head_ptr__tail_ptr;
+ u32 consumer_empty__producer_full;
+ u32 prefetch_count__internal_tail_ptr;
+};
+
+struct htt_sring_cmn_tlv {
+ u32 num_records;
+};
+
+/* == PDEV TX RATE CTRL STATS == */
+#define HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS 12
+#define HTT_TX_PDEV_STATS_NUM_GI_COUNTERS 4
+#define HTT_TX_PDEV_STATS_NUM_DCM_COUNTERS 5
+#define HTT_TX_PDEV_STATS_NUM_BW_COUNTERS 4
+#define HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS 8
+#define HTT_TX_PDEV_STATS_NUM_PREAMBLE_TYPES HTT_STATS_PREAM_COUNT
+#define HTT_TX_PDEV_STATS_NUM_LEGACY_CCK_STATS 4
+#define HTT_TX_PDEV_STATS_NUM_LEGACY_OFDM_STATS 8
+#define HTT_TX_PDEV_STATS_NUM_LTF 4
+
+#define HTT_TX_NUM_OF_SOUNDING_STATS_WORDS \
+ (HTT_TX_PDEV_STATS_NUM_BW_COUNTERS * \
+ HTT_TX_PDEV_STATS_NUM_AX_MUMIMO_USER_STATS)
+
+struct htt_tx_pdev_rate_stats_tlv {
+ u32 mac_id__word;
+ u32 tx_ldpc;
+ u32 rts_cnt;
+ /* RSSI value of last ack packet (units = dB above noise floor) */
+ u32 ack_rssi;
+
+ u32 tx_mcs[HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+
+ u32 tx_su_mcs[HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+ u32 tx_mu_mcs[HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+
+ /* element 0,1, ...7 -> NSS 1,2, ...8 */
+ u32 tx_nss[HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+ /* element 0: 20 MHz, 1: 40 MHz, 2: 80 MHz, 3: 160 and 80+80 MHz */
+ u32 tx_bw[HTT_TX_PDEV_STATS_NUM_BW_COUNTERS];
+ u32 tx_stbc[HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+ u32 tx_pream[HTT_TX_PDEV_STATS_NUM_PREAMBLE_TYPES];
+
+ /* Counters to track number of tx packets
+ * in each GI (400us, 800us, 1600us & 3200us) in each mcs (0-11)
+ */
+ u32 tx_gi[HTT_TX_PDEV_STATS_NUM_GI_COUNTERS][HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+
+ /* Counters to track packets in dcm mcs (MCS 0, 1, 3, 4) */
+ u32 tx_dcm[HTT_TX_PDEV_STATS_NUM_DCM_COUNTERS];
+ /* Number of CTS-acknowledged RTS packets */
+ u32 rts_success;
+
+ /*
+ * Counters for legacy 11a and 11b transmissions.
+ *
+ * The index corresponds to:
+ *
+ * CCK: 0: 1 Mbps, 1: 2 Mbps, 2: 5.5 Mbps, 3: 11 Mbps
+ *
+ * OFDM: 0: 6 Mbps, 1: 9 Mbps, 2: 12 Mbps, 3: 18 Mbps,
+ * 4: 24 Mbps, 5: 36 Mbps, 6: 48 Mbps, 7: 54 Mbps
+ */
+ u32 tx_legacy_cck_rate[HTT_TX_PDEV_STATS_NUM_LEGACY_CCK_STATS];
+ u32 tx_legacy_ofdm_rate[HTT_TX_PDEV_STATS_NUM_LEGACY_OFDM_STATS];
+
+ u32 ac_mu_mimo_tx_ldpc;
+ u32 ax_mu_mimo_tx_ldpc;
+ u32 ofdma_tx_ldpc;
+
+ /*
+ * Counters for 11ax HE LTF selection during TX.
+ *
+ * The index corresponds to:
+ *
+ * 0: unused, 1: 1x LTF, 2: 2x LTF, 3: 4x LTF
+ */
+ u32 tx_he_ltf[HTT_TX_PDEV_STATS_NUM_LTF];
+
+ u32 ac_mu_mimo_tx_mcs[HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+ u32 ax_mu_mimo_tx_mcs[HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+ u32 ofdma_tx_mcs[HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+
+ u32 ac_mu_mimo_tx_nss[HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+ u32 ax_mu_mimo_tx_nss[HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+ u32 ofdma_tx_nss[HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+
+ u32 ac_mu_mimo_tx_bw[HTT_TX_PDEV_STATS_NUM_BW_COUNTERS];
+ u32 ax_mu_mimo_tx_bw[HTT_TX_PDEV_STATS_NUM_BW_COUNTERS];
+ u32 ofdma_tx_bw[HTT_TX_PDEV_STATS_NUM_BW_COUNTERS];
+
+ u32 ac_mu_mimo_tx_gi[HTT_TX_PDEV_STATS_NUM_GI_COUNTERS]
+ [HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+ u32 ax_mu_mimo_tx_gi[HTT_TX_PDEV_STATS_NUM_GI_COUNTERS]
+ [HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+ u32 ofdma_tx_gi[HTT_TX_PDEV_STATS_NUM_GI_COUNTERS]
+ [HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+};
+
+/* == PDEV RX RATE CTRL STATS == */
+#define HTT_RX_PDEV_STATS_NUM_LEGACY_CCK_STATS 4
+#define HTT_RX_PDEV_STATS_NUM_LEGACY_OFDM_STATS 8
+#define HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS 12
+#define HTT_RX_PDEV_STATS_NUM_GI_COUNTERS 4
+#define HTT_RX_PDEV_STATS_NUM_DCM_COUNTERS 5
+#define HTT_RX_PDEV_STATS_NUM_BW_COUNTERS 4
+#define HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS 8
+#define HTT_RX_PDEV_STATS_NUM_PREAMBLE_TYPES HTT_STATS_PREAM_COUNT
+#define HTT_RX_PDEV_MAX_OFDMA_NUM_USER 8
+#define HTT_RX_PDEV_STATS_RXEVM_MAX_PILOTS_PER_NSS 16
+#define HTT_RX_PDEV_STATS_NUM_RU_SIZE_COUNTERS 6
+#define HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER 8
+
+struct htt_rx_pdev_rate_stats_tlv {
+ u32 mac_id__word;
+ u32 nsts;
+
+ u32 rx_ldpc;
+ u32 rts_cnt;
+
+ u32 rssi_mgmt; /* units = dB above noise floor */
+ u32 rssi_data; /* units = dB above noise floor */
+ u32 rssi_comb; /* units = dB above noise floor */
+ u32 rx_mcs[HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS];
+ /* element 0,1, ...7 -> NSS 1,2, ...8 */
+ u32 rx_nss[HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+ u32 rx_dcm[HTT_RX_PDEV_STATS_NUM_DCM_COUNTERS];
+ u32 rx_stbc[HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS];
+ /* element 0: 20 MHz, 1: 40 MHz, 2: 80 MHz, 3: 160 and 80+80 MHz */
+ u32 rx_bw[HTT_RX_PDEV_STATS_NUM_BW_COUNTERS];
+ u32 rx_pream[HTT_RX_PDEV_STATS_NUM_PREAMBLE_TYPES];
+ u8 rssi_chain[HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS]
+ [HTT_RX_PDEV_STATS_NUM_BW_COUNTERS];
+ /* units = dB above noise floor */
+
+ /* Counters to track number of rx packets
+ * in each GI in each mcs (0-11)
+ */
+ u32 rx_gi[HTT_RX_PDEV_STATS_NUM_GI_COUNTERS][HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS];
+ s32 rssi_in_dbm; /* rx signal strength value in dBm units */
+
+ u32 rx_11ax_su_ext;
+ u32 rx_11ac_mumimo;
+ u32 rx_11ax_mumimo;
+ u32 rx_11ax_ofdma;
+ u32 txbf;
+ u32 rx_legacy_cck_rate[HTT_RX_PDEV_STATS_NUM_LEGACY_CCK_STATS];
+ u32 rx_legacy_ofdm_rate[HTT_RX_PDEV_STATS_NUM_LEGACY_OFDM_STATS];
+ u32 rx_active_dur_us_low;
+ u32 rx_active_dur_us_high;
+
+ u32 rx_11ax_ul_ofdma;
+
+ u32 ul_ofdma_rx_mcs[HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS];
+ u32 ul_ofdma_rx_gi[HTT_TX_PDEV_STATS_NUM_GI_COUNTERS]
+ [HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS];
+ u32 ul_ofdma_rx_nss[HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+ u32 ul_ofdma_rx_bw[HTT_TX_PDEV_STATS_NUM_BW_COUNTERS];
+ u32 ul_ofdma_rx_stbc;
+ u32 ul_ofdma_rx_ldpc;
+
+ /* record the stats for each user index */
+ u32 rx_ulofdma_non_data_ppdu[HTT_RX_PDEV_MAX_OFDMA_NUM_USER]; /* ppdu level */
+ u32 rx_ulofdma_data_ppdu[HTT_RX_PDEV_MAX_OFDMA_NUM_USER]; /* ppdu level */
+ u32 rx_ulofdma_mpdu_ok[HTT_RX_PDEV_MAX_OFDMA_NUM_USER]; /* mpdu level */
+ u32 rx_ulofdma_mpdu_fail[HTT_RX_PDEV_MAX_OFDMA_NUM_USER]; /* mpdu level */
+
+ u32 nss_count;
+ u32 pilot_count;
+ /* RxEVM stats in dB */
+ s32 rx_pilot_evm_db[HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS]
+ [HTT_RX_PDEV_STATS_RXEVM_MAX_PILOTS_PER_NSS];
+ /* rx_pilot_evm_db_mean:
+ * EVM mean across pilots, computed as
+ * mean(10*log10(rx_pilot_evm_linear)) = mean(rx_pilot_evm_db)
+ */
+ s32 rx_pilot_evm_db_mean[HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+ s8 rx_ul_fd_rssi[HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS]
+ [HTT_RX_PDEV_MAX_OFDMA_NUM_USER]; /* dBm units */
+ /* per_chain_rssi_pkt_type:
+ * This field shows what type of rx frame the per-chain RSSI was computed
+ * on, by recording the frame type and sub-type as bit-fields within this
+ * field:
+ * BIT [3 : 0] :- IEEE80211_FC0_TYPE
+ * BIT [7 : 4] :- IEEE80211_FC0_SUBTYPE
+ * BIT [31 : 8] :- Reserved
+ */
+ u32 per_chain_rssi_pkt_type;
+ s8 rx_per_chain_rssi_in_dbm[HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS]
+ [HTT_RX_PDEV_STATS_NUM_BW_COUNTERS];
+
+ u32 rx_su_ndpa;
+ u32 rx_11ax_su_txbf_mcs[HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS];
+ u32 rx_mu_ndpa;
+ u32 rx_11ax_mu_txbf_mcs[HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS];
+ u32 rx_br_poll;
+ u32 rx_11ax_dl_ofdma_mcs[HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS];
+ u32 rx_11ax_dl_ofdma_ru[HTT_RX_PDEV_STATS_NUM_RU_SIZE_COUNTERS];
+
+ u32 rx_ulmumimo_non_data_ppdu[HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER];
+ u32 rx_ulmumimo_data_ppdu[HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER];
+ u32 rx_ulmumimo_mpdu_ok[HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER];
+ u32 rx_ulmumimo_mpdu_fail[HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER];
+ u32 rx_ulofdma_non_data_nusers[HTT_RX_PDEV_MAX_OFDMA_NUM_USER];
+ u32 rx_ulofdma_data_nusers[HTT_RX_PDEV_MAX_OFDMA_NUM_USER];
+};
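
The rx_pilot_evm_db_mean field above is documented as the mean of the per-pilot dB values; a sketch of that averaging for one spatial stream (illustrative only; assumes pilot_count bounds the number of valid entries per NSS, and the helper name is hypothetical):

/* Illustrative only: average the per-pilot EVM (already in dB) for one
 * spatial stream. div_s64() is from linux/math64.h.
 */
static s64 rx_pilot_evm_db_mean_for_nss(const struct htt_rx_pdev_rate_stats_tlv *tlv,
					u32 nss_idx)
{
	s64 sum = 0;
	u32 i, n = min_t(u32, tlv->pilot_count,
			 HTT_RX_PDEV_STATS_RXEVM_MAX_PILOTS_PER_NSS);

	if (!n)
		return 0;

	for (i = 0; i < n; i++)
		sum += tlv->rx_pilot_evm_db[nss_idx][i];

	return div_s64(sum, n);
}
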
+
+/* == RX PDEV/SOC STATS == */
+struct htt_rx_soc_fw_stats_tlv {
+ u32 fw_reo_ring_data_msdu;
+ u32 fw_to_host_data_msdu_bcmc;
+ u32 fw_to_host_data_msdu_uc;
+ u32 ofld_remote_data_buf_recycle_cnt;
+ u32 ofld_remote_free_buf_indication_cnt;
+
+ u32 ofld_buf_to_host_data_msdu_uc;
+ u32 reo_fw_ring_to_host_data_msdu_uc;
+
+ u32 wbm_sw_ring_reap;
+ u32 wbm_forward_to_host_cnt;
+ u32 wbm_target_recycle_cnt;
+
+ u32 target_refill_ring_recycle_cnt;
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_rx_soc_fw_refill_ring_empty_tlv_v {
+ /* HTT_RX_STATS_REFILL_MAX_RING */
+ DECLARE_FLEX_ARRAY(u32, refill_ring_empty_cnt);
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_rx_soc_fw_refill_ring_num_refill_tlv_v {
+ /* HTT_RX_STATS_REFILL_MAX_RING */
+ DECLARE_FLEX_ARRAY(u32, refill_ring_num_refill);
+};
+
+/* RXDMA error code from WBM released packets */
+enum htt_rx_rxdma_error_code_enum {
+ HTT_RX_RXDMA_OVERFLOW_ERR = 0,
+ HTT_RX_RXDMA_MPDU_LENGTH_ERR = 1,
+ HTT_RX_RXDMA_FCS_ERR = 2,
+ HTT_RX_RXDMA_DECRYPT_ERR = 3,
+ HTT_RX_RXDMA_TKIP_MIC_ERR = 4,
+ HTT_RX_RXDMA_UNECRYPTED_ERR = 5,
+ HTT_RX_RXDMA_MSDU_LEN_ERR = 6,
+ HTT_RX_RXDMA_MSDU_LIMIT_ERR = 7,
+ HTT_RX_RXDMA_WIFI_PARSE_ERR = 8,
+ HTT_RX_RXDMA_AMSDU_PARSE_ERR = 9,
+ HTT_RX_RXDMA_SA_TIMEOUT_ERR = 10,
+ HTT_RX_RXDMA_DA_TIMEOUT_ERR = 11,
+ HTT_RX_RXDMA_FLOW_TIMEOUT_ERR = 12,
+ HTT_RX_RXDMA_FLUSH_REQUEST = 13,
+ HTT_RX_RXDMA_ERR_CODE_RVSD0 = 14,
+ HTT_RX_RXDMA_ERR_CODE_RVSD1 = 15,
+
+ /* This MAX_ERR_CODE should not be used in any host/target messages,
+ * so that even though it is defined within a host/target interface
+ * definition header file, it isn't actually part of the host/target
+ * interface, and thus can be modified.
+ */
+ HTT_RX_RXDMA_MAX_ERR_CODE
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_rx_soc_fw_refill_ring_num_rxdma_err_tlv_v {
+ DECLARE_FLEX_ARRAY(u32, rxdma_err); /* HTT_RX_RXDMA_MAX_ERR_CODE */
+};
+
+/* REO error code from WBM released packets */
+enum htt_rx_reo_error_code_enum {
+ HTT_RX_REO_QUEUE_DESC_ADDR_ZERO = 0,
+ HTT_RX_REO_QUEUE_DESC_NOT_VALID = 1,
+ HTT_RX_AMPDU_IN_NON_BA = 2,
+ HTT_RX_NON_BA_DUPLICATE = 3,
+ HTT_RX_BA_DUPLICATE = 4,
+ HTT_RX_REGULAR_FRAME_2K_JUMP = 5,
+ HTT_RX_BAR_FRAME_2K_JUMP = 6,
+ HTT_RX_REGULAR_FRAME_OOR = 7,
+ HTT_RX_BAR_FRAME_OOR = 8,
+ HTT_RX_BAR_FRAME_NO_BA_SESSION = 9,
+ HTT_RX_BAR_FRAME_SN_EQUALS_SSN = 10,
+ HTT_RX_PN_CHECK_FAILED = 11,
+ HTT_RX_2K_ERROR_HANDLING_FLAG_SET = 12,
+ HTT_RX_PN_ERROR_HANDLING_FLAG_SET = 13,
+ HTT_RX_QUEUE_DESCRIPTOR_BLOCKED_SET = 14,
+ HTT_RX_REO_ERR_CODE_RVSD = 15,
+
+ /* This MAX_ERR_CODE should not be used in any host/target messages,
+ * so that even though it is defined within a host/target interface
+ * definition header file, it isn't actually part of the host/target
+ * interface, and thus can be modified.
+ */
+ HTT_RX_REO_MAX_ERR_CODE
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_rx_soc_fw_refill_ring_num_reo_err_tlv_v {
+ DECLARE_FLEX_ARRAY(u32, reo_err); /* HTT_RX_REO_MAX_ERR_CODE */
+};
+
+/* == RX PDEV STATS == */
+#define HTT_STATS_SUBTYPE_MAX 16
+
+struct htt_rx_pdev_fw_stats_tlv {
+ u32 mac_id__word;
+ u32 ppdu_recvd;
+ u32 mpdu_cnt_fcs_ok;
+ u32 mpdu_cnt_fcs_err;
+ u32 tcp_msdu_cnt;
+ u32 tcp_ack_msdu_cnt;
+ u32 udp_msdu_cnt;
+ u32 other_msdu_cnt;
+ u32 fw_ring_mpdu_ind;
+ u32 fw_ring_mgmt_subtype[HTT_STATS_SUBTYPE_MAX];
+ u32 fw_ring_ctrl_subtype[HTT_STATS_SUBTYPE_MAX];
+ u32 fw_ring_mcast_data_msdu;
+ u32 fw_ring_bcast_data_msdu;
+ u32 fw_ring_ucast_data_msdu;
+ u32 fw_ring_null_data_msdu;
+ u32 fw_ring_mpdu_drop;
+ u32 ofld_local_data_ind_cnt;
+ u32 ofld_local_data_buf_recycle_cnt;
+ u32 drx_local_data_ind_cnt;
+ u32 drx_local_data_buf_recycle_cnt;
+ u32 local_nondata_ind_cnt;
+ u32 local_nondata_buf_recycle_cnt;
+
+ u32 fw_status_buf_ring_refill_cnt;
+ u32 fw_status_buf_ring_empty_cnt;
+ u32 fw_pkt_buf_ring_refill_cnt;
+ u32 fw_pkt_buf_ring_empty_cnt;
+ u32 fw_link_buf_ring_refill_cnt;
+ u32 fw_link_buf_ring_empty_cnt;
+
+ u32 host_pkt_buf_ring_refill_cnt;
+ u32 host_pkt_buf_ring_empty_cnt;
+ u32 mon_pkt_buf_ring_refill_cnt;
+ u32 mon_pkt_buf_ring_empty_cnt;
+ u32 mon_status_buf_ring_refill_cnt;
+ u32 mon_status_buf_ring_empty_cnt;
+ u32 mon_desc_buf_ring_refill_cnt;
+ u32 mon_desc_buf_ring_empty_cnt;
+ u32 mon_dest_ring_update_cnt;
+ u32 mon_dest_ring_full_cnt;
+
+ u32 rx_suspend_cnt;
+ u32 rx_suspend_fail_cnt;
+ u32 rx_resume_cnt;
+ u32 rx_resume_fail_cnt;
+ u32 rx_ring_switch_cnt;
+ u32 rx_ring_restore_cnt;
+ u32 rx_flush_cnt;
+ u32 rx_recovery_reset_cnt;
+};
+
+#define HTT_STATS_PHY_ERR_MAX 43
+
+struct htt_rx_pdev_fw_stats_phy_err_tlv {
+ u32 mac_id__word;
+ u32 total_phy_err_cnt;
+ /* Counts of different types of phy errs
+ * The mapping of PHY error types to phy_err array elements is HW dependent.
+ * The only currently-supported mapping is shown below:
+ *
+ * 0 phyrx_err_phy_off Reception aborted due to receiving a PHY_OFF TLV
+ * 1 phyrx_err_synth_off
+ * 2 phyrx_err_ofdma_timing
+ * 3 phyrx_err_ofdma_signal_parity
+ * 4 phyrx_err_ofdma_rate_illegal
+ * 5 phyrx_err_ofdma_length_illegal
+ * 6 phyrx_err_ofdma_restart
+ * 7 phyrx_err_ofdma_service
+ * 8 phyrx_err_ppdu_ofdma_power_drop
+ * 9 phyrx_err_cck_blokker
+ * 10 phyrx_err_cck_timing
+ * 11 phyrx_err_cck_header_crc
+ * 12 phyrx_err_cck_rate_illegal
+ * 13 phyrx_err_cck_length_illegal
+ * 14 phyrx_err_cck_restart
+ * 15 phyrx_err_cck_service
+ * 16 phyrx_err_cck_power_drop
+ * 17 phyrx_err_ht_crc_err
+ * 18 phyrx_err_ht_length_illegal
+ * 19 phyrx_err_ht_rate_illegal
+ * 20 phyrx_err_ht_zlf
+ * 21 phyrx_err_false_radar_ext
+ * 22 phyrx_err_green_field
+ * 23 phyrx_err_bw_gt_dyn_bw
+ * 24 phyrx_err_leg_ht_mismatch
+ * 25 phyrx_err_vht_crc_error
+ * 26 phyrx_err_vht_siga_unsupported
+ * 27 phyrx_err_vht_lsig_len_invalid
+ * 28 phyrx_err_vht_ndp_or_zlf
+ * 29 phyrx_err_vht_nsym_lt_zero
+ * 30 phyrx_err_vht_rx_extra_symbol_mismatch
+ * 31 phyrx_err_vht_rx_skip_group_id0
+ * 32 phyrx_err_vht_rx_skip_group_id1to62
+ * 33 phyrx_err_vht_rx_skip_group_id63
+ * 34 phyrx_err_ofdm_ldpc_decoder_disabled
+ * 35 phyrx_err_defer_nap
+ * 36 phyrx_err_fdomain_timeout
+ * 37 phyrx_err_lsig_rel_check
+ * 38 phyrx_err_bt_collision
+ * 39 phyrx_err_unsupported_mu_feedback
+ * 40 phyrx_err_ppdu_tx_interrupt_rx
+ * 41 phyrx_err_unsupported_cbf
+ * 42 phyrx_err_other
+ */
+ u32 phy_err[HTT_STATS_PHY_ERR_MAX];
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_rx_pdev_fw_ring_mpdu_err_tlv_v {
+ /* Num error MPDU for each RxDMA error type */
+ DECLARE_FLEX_ARRAY(u32, fw_ring_mpdu_err); /* HTT_RX_STATS_RXDMA_MAX_ERR */
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_rx_pdev_fw_mpdu_drop_tlv_v {
+ /* Num MPDU dropped */
+ DECLARE_FLEX_ARRAY(u32, fw_mpdu_drop); /* HTT_RX_STATS_FW_DROP_REASON_MAX */
+};
+
+#define HTT_PDEV_CCA_STATS_TX_FRAME_INFO_PRESENT (0x1)
+#define HTT_PDEV_CCA_STATS_RX_FRAME_INFO_PRESENT (0x2)
+#define HTT_PDEV_CCA_STATS_RX_CLEAR_INFO_PRESENT (0x4)
+#define HTT_PDEV_CCA_STATS_MY_RX_FRAME_INFO_PRESENT (0x8)
+#define HTT_PDEV_CCA_STATS_USEC_CNT_INFO_PRESENT (0x10)
+#define HTT_PDEV_CCA_STATS_MED_RX_IDLE_INFO_PRESENT (0x20)
+#define HTT_PDEV_CCA_STATS_MED_TX_IDLE_GLOBAL_INFO_PRESENT (0x40)
+#define HTT_PDEV_CCA_STATS_CCA_OBBS_USEC_INFO_PRESENT (0x80)
+
+struct htt_pdev_stats_cca_counters_tlv {
+ /* Below values are obtained from the HW Cycles counter registers */
+ u32 tx_frame_usec;
+ u32 rx_frame_usec;
+ u32 rx_clear_usec;
+ u32 my_rx_frame_usec;
+ u32 usec_cnt;
+ u32 med_rx_idle_usec;
+ u32 med_tx_idle_global_usec;
+ u32 cca_obss_usec;
+};
+
+struct htt_pdev_cca_stats_hist_v1_tlv {
+ u32 chan_num;
+	/* Number of CCA records (number of htt_pdev_stats_cca_counters_tlv) */
+ u32 num_records;
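+	/* Bitmap built from the HTT_PDEV_CCA_STATS_*_INFO_PRESENT flags
+	 * above, indicating which counters are valid in each record
+	 * (inferred from the flag names, not stated in the original).
+	 */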
+ u32 valid_cca_counters_bitmap;
+ u32 collection_interval;
+
+	/* This is followed by an array of htt_pdev_stats_cca_counters_tlv
+	 * holding the CCA stats collected over the last N intervals, when
+	 * the indication covers the last N intervals. pdev_cca_stats[0]
+	 * holds the oldest CCA stats and pdev_cca_stats[N-1] the most
+	 * recent.
+	 * htt_pdev_stats_cca_counters_tlv cca_hist_tlv[1];
+	 */
+};
+
+struct htt_pdev_stats_twt_session_tlv {
+ u32 vdev_id;
+ struct htt_mac_addr peer_mac;
+ u32 flow_id_flags;
+
+ /* TWT_DIALOG_ID_UNAVAILABLE is used
+ * when TWT session is not initiated by host
+ */
+ u32 dialog_id;
+ u32 wake_dura_us;
+ u32 wake_intvl_us;
+ u32 sp_offset_us;
+};
+
+struct htt_pdev_stats_twt_sessions_tlv {
+ u32 pdev_id;
+ u32 num_sessions;
+ struct htt_pdev_stats_twt_session_tlv twt_session[];
+};
+
+enum htt_rx_reo_resource_sample_id_enum {
+ /* Global link descriptor queued in REO */
+ HTT_RX_REO_RESOURCE_GLOBAL_LINK_DESC_COUNT_0 = 0,
+ HTT_RX_REO_RESOURCE_GLOBAL_LINK_DESC_COUNT_1 = 1,
+ HTT_RX_REO_RESOURCE_GLOBAL_LINK_DESC_COUNT_2 = 2,
+	/* Number of queue descriptors of this aging group */
+ HTT_RX_REO_RESOURCE_BUFFERS_USED_AC0 = 3,
+ HTT_RX_REO_RESOURCE_BUFFERS_USED_AC1 = 4,
+ HTT_RX_REO_RESOURCE_BUFFERS_USED_AC2 = 5,
+ HTT_RX_REO_RESOURCE_BUFFERS_USED_AC3 = 6,
+ /* Total number of MSDUs buffered in AC */
+ HTT_RX_REO_RESOURCE_AGING_NUM_QUEUES_AC0 = 7,
+ HTT_RX_REO_RESOURCE_AGING_NUM_QUEUES_AC1 = 8,
+ HTT_RX_REO_RESOURCE_AGING_NUM_QUEUES_AC2 = 9,
+ HTT_RX_REO_RESOURCE_AGING_NUM_QUEUES_AC3 = 10,
+
+ HTT_RX_REO_RESOURCE_STATS_MAX = 16
+};
+
+struct htt_rx_reo_resource_stats_tlv_v {
+	/* Variable length based on the number of records; see HTT_RX_REO_RESOURCE_STATS_MAX */
+ u32 sample_id;
+ u32 total_max;
+ u32 total_avg;
+ u32 total_sample;
+ u32 non_zeros_avg;
+ u32 non_zeros_sample;
+ u32 last_non_zeros_max;
+ u32 last_non_zeros_min;
+ u32 last_non_zeros_avg;
+ u32 last_non_zeros_sample;
+};
+
+/* == TX SOUNDING STATS == */
+
+enum htt_txbf_sound_steer_modes {
+ HTT_IMPLICIT_TXBF_STEER_STATS = 0,
+ HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS = 1,
+ HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS = 2,
+ HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS = 3,
+ HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS = 4,
+ HTT_TXBF_MAX_NUM_OF_MODES = 5
+};
+
+enum htt_stats_sounding_tx_mode {
+ HTT_TX_AC_SOUNDING_MODE = 0,
+ HTT_TX_AX_SOUNDING_MODE = 1,
+};
+
+struct htt_tx_sounding_stats_tlv {
+ u32 tx_sounding_mode; /* HTT_TX_XX_SOUNDING_MODE */
+ /* Counts number of soundings for all steering modes in each bw */
+ u32 cbf_20[HTT_TXBF_MAX_NUM_OF_MODES];
+ u32 cbf_40[HTT_TXBF_MAX_NUM_OF_MODES];
+ u32 cbf_80[HTT_TXBF_MAX_NUM_OF_MODES];
+ u32 cbf_160[HTT_TXBF_MAX_NUM_OF_MODES];
+	/*
+	 * The sounding array is a 2-D array stored as a 1-D array of
+	 * u32. The stats for a particular user/bw combination are
+	 * referenced as follows:
+	 *
+	 * sounding[(user * max_bw) + bw]
+	 *
+	 * ... where max_bw == 4 for 160 MHz
+	 */
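+	/* Worked example (illustrative, not from the original comment):
+	 * with max_bw == 4, the counter for user 2 at bw index 2 (80 MHz)
+	 * is sounding[(2 * 4) + 2], i.e. sounding[10].
+	 */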
+ u32 sounding[HTT_TX_NUM_OF_SOUNDING_STATS_WORDS];
+};
+
+struct htt_pdev_obss_pd_stats_tlv {
+ u32 num_obss_tx_ppdu_success;
+ u32 num_obss_tx_ppdu_failure;
+ u32 num_sr_tx_transmissions;
+ u32 num_spatial_reuse_opportunities;
+ u32 num_non_srg_opportunities;
+ u32 num_non_srg_ppdu_tried;
+ u32 num_non_srg_ppdu_success;
+ u32 num_srg_opportunities;
+ u32 num_srg_ppdu_tried;
+ u32 num_srg_ppdu_success;
+ u32 num_psr_opportunities;
+ u32 num_psr_ppdu_tried;
+ u32 num_psr_ppdu_success;
+};
+
+struct htt_ring_backpressure_stats_tlv {
+ u32 pdev_id;
+ u32 current_head_idx;
+ u32 current_tail_idx;
+ u32 num_htt_msgs_sent;
+ /* Time in milliseconds for which the ring has been in
+ * its current backpressure condition
+ */
+ u32 backpressure_time_ms;
+	/* backpressure_hist - histogram of how long the ring stayed
+	 * continuously in the backpressure state:
+	 * Index 0: 100 - 200 ms, Index 1: 200 - 300 ms,
+	 * Index 2: 300 - 400 ms, Index 3: 400 - 500 ms,
+	 * Index 4: beyond 500 ms.
+	 */
+ u32 backpressure_hist[5];
+};
+
+#define HTT_TX_TXBF_RATE_STATS_NUM_MCS_COUNTERS 14
+#define HTT_TX_TXBF_RATE_STATS_NUM_BW_COUNTERS 5
+#define HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS 8
+
+struct htt_pdev_txrate_txbf_stats_tlv {
+ /* SU TxBF TX MCS stats */
+ u32 tx_su_txbf_mcs[HTT_TX_TXBF_RATE_STATS_NUM_MCS_COUNTERS];
+ /* Implicit BF TX MCS stats */
+ u32 tx_su_ibf_mcs[HTT_TX_TXBF_RATE_STATS_NUM_MCS_COUNTERS];
+ /* Open loop TX MCS stats */
+ u32 tx_su_ol_mcs[HTT_TX_TXBF_RATE_STATS_NUM_MCS_COUNTERS];
+ /* SU TxBF TX NSS stats */
+ u32 tx_su_txbf_nss[HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+ /* Implicit BF TX NSS stats */
+ u32 tx_su_ibf_nss[HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+ /* Open loop TX NSS stats */
+ u32 tx_su_ol_nss[HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+ /* SU TxBF TX BW stats */
+ u32 tx_su_txbf_bw[HTT_TX_TXBF_RATE_STATS_NUM_BW_COUNTERS];
+ /* Implicit BF TX BW stats */
+ u32 tx_su_ibf_bw[HTT_TX_TXBF_RATE_STATS_NUM_BW_COUNTERS];
+ /* Open loop TX BW stats */
+ u32 tx_su_ol_bw[HTT_TX_TXBF_RATE_STATS_NUM_BW_COUNTERS];
+};
+
+struct htt_txbf_ofdma_ndpa_stats_tlv {
+ /* 11AX HE OFDMA NDPA frame queued to the HW */
+ u32 ax_ofdma_ndpa_queued[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+ /* 11AX HE OFDMA NDPA frame sent over the air */
+ u32 ax_ofdma_ndpa_tried[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+ /* 11AX HE OFDMA NDPA frame flushed by HW */
+ u32 ax_ofdma_ndpa_flushed[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+ /* 11AX HE OFDMA NDPA frame completed with error(s) */
+ u32 ax_ofdma_ndpa_err[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+};
+
+struct htt_txbf_ofdma_ndp_stats_tlv {
+ /* 11AX HE OFDMA NDP frame queued to the HW */
+ u32 ax_ofdma_ndp_queued[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+	/* 11AX HE OFDMA NDP frame sent over the air */
+	u32 ax_ofdma_ndp_tried[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+	/* 11AX HE OFDMA NDP frame flushed by HW */
+	u32 ax_ofdma_ndp_flushed[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+	/* 11AX HE OFDMA NDP frame completed with error(s) */
+ u32 ax_ofdma_ndp_err[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+};
+
+struct htt_txbf_ofdma_brp_stats_tlv {
+ /* 11AX HE OFDMA MU BRPOLL frame queued to the HW */
+ u32 ax_ofdma_brpoll_queued[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+ /* 11AX HE OFDMA MU BRPOLL frame sent over the air */
+ u32 ax_ofdma_brpoll_tried[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+ /* 11AX HE OFDMA MU BRPOLL frame flushed by HW */
+ u32 ax_ofdma_brpoll_flushed[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+ /* 11AX HE OFDMA MU BRPOLL frame completed with error(s) */
+ u32 ax_ofdma_brp_err[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+ /* Number of CBF(s) received when 11AX HE OFDMA MU BRPOLL frame
+ * completed with error(s).
+ */
+ u32 ax_ofdma_brp_err_num_cbf_rcvd[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS + 1];
+};
+
+struct htt_txbf_ofdma_steer_stats_tlv {
+ /* 11AX HE OFDMA PPDUs that were sent over the air with steering (TXBF + OFDMA) */
+ u32 ax_ofdma_num_ppdu_steer[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+ /* 11AX HE OFDMA PPDUs that were sent over the air in open loop */
+ u32 ax_ofdma_num_ppdu_ol[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+ /* 11AX HE OFDMA number of users for which CBF prefetch was
+ * initiated to PHY HW during TX.
+ */
+ u32 ax_ofdma_num_usrs_prefetch[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+ /* 11AX HE OFDMA number of users for which sounding was initiated during TX */
+ u32 ax_ofdma_num_usrs_sound[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+ /* 11AX HE OFDMA number of users for which sounding was forced during TX */
+ u32 ax_ofdma_num_usrs_force_sound[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+};
+
+#define HTT_MAX_RX_PKT_CNT 8
+#define HTT_MAX_RX_PKT_CRC_PASS_CNT 8
+#define HTT_MAX_PER_BLK_ERR_CNT 20
+#define HTT_MAX_RX_OTA_ERR_CNT 14
+#define HTT_STATS_MAX_CHAINS 8
+#define ATH11K_STATS_MGMT_FRM_TYPE_MAX 16
+
+struct htt_phy_counters_tlv {
+ /* number of RXTD OFDMA OTA error counts except power surge and drop */
+ u32 rx_ofdma_timing_err_cnt;
+ /* rx_cck_fail_cnt:
+ * number of cck error counts due to rx reception failure because of
+ * timing error in cck
+ */
+ u32 rx_cck_fail_cnt;
+ /* number of times tx abort initiated by mac */
+ u32 mactx_abort_cnt;
+ /* number of times rx abort initiated by mac */
+ u32 macrx_abort_cnt;
+ /* number of times tx abort initiated by phy */
+ u32 phytx_abort_cnt;
+ /* number of times rx abort initiated by phy */
+ u32 phyrx_abort_cnt;
+ /* number of rx deferred count initiated by phy */
+ u32 phyrx_defer_abort_cnt;
+ /* number of sizing events generated at LSTF */
+ u32 rx_gain_adj_lstf_event_cnt;
+ /* number of sizing events generated at non-legacy LTF */
+ u32 rx_gain_adj_non_legacy_cnt;
+ /* rx_pkt_cnt -
+ * Received EOP (end-of-packet) count per packet type;
+ * [0] = 11a; [1] = 11b; [2] = 11n; [3] = 11ac; [4] = 11ax; [5] = GF
+ * [6-7]=RSVD
+ */
+ u32 rx_pkt_cnt[HTT_MAX_RX_PKT_CNT];
+ /* rx_pkt_crc_pass_cnt -
+ * Received EOP (end-of-packet) count per packet type;
+ * [0] = 11a; [1] = 11b; [2] = 11n; [3] = 11ac; [4] = 11ax; [5] = GF
+ * [6-7]=RSVD
+ */
+ u32 rx_pkt_crc_pass_cnt[HTT_MAX_RX_PKT_CRC_PASS_CNT];
+ /* per_blk_err_cnt -
+ * Error count per error source;
+ * [0] = unknown; [1] = LSIG; [2] = HTSIG; [3] = VHTSIG; [4] = HESIG;
+ * [5] = RXTD_OTA; [6] = RXTD_FATAL; [7] = DEMF; [8] = ROBE;
+ * [9] = PMI; [10] = TXFD; [11] = TXTD; [12] = PHYRF
+ * [13-19]=RSVD
+ */
+ u32 per_blk_err_cnt[HTT_MAX_PER_BLK_ERR_CNT];
+ /* rx_ota_err_cnt -
+ * RXTD OTA (over-the-air) error count per error reason;
+ * [0] = voting fail; [1] = weak det fail; [2] = strong sig fail;
+ * [3] = cck fail; [4] = power surge; [5] = power drop;
+ * [6] = btcf timing timeout error; [7] = btcf packet detect error;
+ * [8] = coarse timing timeout error
+ * [9-13]=RSVD
+ */
+ u32 rx_ota_err_cnt[HTT_MAX_RX_OTA_ERR_CNT];
+};
+
+struct htt_phy_stats_tlv {
+ /* per chain hw noise floor values in dBm */
+ s32 nf_chain[HTT_STATS_MAX_CHAINS];
+ /* number of false radars detected */
+ u32 false_radar_cnt;
+ /* number of channel switches happened due to radar detection */
+ u32 radar_cs_cnt;
+	/* ani_level -
+	 * ANI level (noise interference) for the current channel;
+	 * the desense levels range from -5 to 15 in dB units, with
+	 * higher values indicating more noise interference.
+	 */
+ s32 ani_level;
+ /* running time in minutes since FW boot */
+ u32 fw_run_time;
+};
+
+struct htt_phy_reset_counters_tlv {
+ u32 pdev_id;
+ u32 cf_active_low_fail_cnt;
+ u32 cf_active_low_pass_cnt;
+ u32 phy_off_through_vreg_cnt;
+ u32 force_calibration_cnt;
+ u32 rf_mode_switch_phy_off_cnt;
+};
+
+struct htt_phy_reset_stats_tlv {
+ u32 pdev_id;
+ u32 chan_mhz;
+ u32 chan_band_center_freq1;
+ u32 chan_band_center_freq2;
+ u32 chan_phy_mode;
+ u32 chan_flags;
+ u32 chan_num;
+ u32 reset_cause;
+ u32 prev_reset_cause;
+ u32 phy_warm_reset_src;
+ u32 rx_gain_tbl_mode;
+ u32 xbar_val;
+ u32 force_calibration;
+ u32 phyrf_mode;
+ u32 phy_homechan;
+ u32 phy_tx_ch_mask;
+ u32 phy_rx_ch_mask;
+ u32 phybb_ini_mask;
+ u32 phyrf_ini_mask;
+ u32 phy_dfs_en_mask;
+ u32 phy_sscan_en_mask;
+ u32 phy_synth_sel_mask;
+ u32 phy_adfs_freq;
+ u32 cck_fir_settings;
+ u32 phy_dyn_pri_chan;
+ u32 cca_thresh;
+ u32 dyn_cca_status;
+ u32 rxdesense_thresh_hw;
+ u32 rxdesense_thresh_sw;
+};
+
+struct htt_peer_ctrl_path_txrx_stats_tlv {
+ /* peer mac address */
+ u8 peer_mac_addr[ETH_ALEN];
+ u8 rsvd[2];
+ /* Num of tx mgmt frames with subtype on peer level */
+ u32 peer_tx_mgmt_subtype[ATH11K_STATS_MGMT_FRM_TYPE_MAX];
+ /* Num of rx mgmt frames with subtype on peer level */
+ u32 peer_rx_mgmt_subtype[ATH11K_STATS_MGMT_FRM_TYPE_MAX];
+};
+
+#ifdef CONFIG_ATH11K_DEBUGFS
+
+void ath11k_debugfs_htt_stats_init(struct ath11k *ar);
+void ath11k_debugfs_htt_ext_stats_handler(struct ath11k_base *ab,
+ struct sk_buff *skb);
+int ath11k_debugfs_htt_stats_req(struct ath11k *ar);
+
+#else /* CONFIG_ATH11K_DEBUGFS */
+
+static inline void ath11k_debugfs_htt_stats_init(struct ath11k *ar)
+{
+}
+
+static inline void ath11k_debugfs_htt_ext_stats_handler(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+}
+
+static inline int ath11k_debugfs_htt_stats_req(struct ath11k *ar)
+{
+ return 0;
+}
+
+#endif /* CONFIG_ATH11K_DEBUGFS */
+
+#endif
diff --git a/drivers/net/wireless/ath/ath11k/debugfs_sta.c b/drivers/net/wireless/ath/ath11k/debugfs_sta.c
new file mode 100644
index 000000000000..d89d0f28d890
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/debugfs_sta.c
@@ -0,0 +1,890 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#include <linux/vmalloc.h>
+
+#include "debugfs_sta.h"
+#include "core.h"
+#include "peer.h"
+#include "debug.h"
+#include "dp_tx.h"
+#include "debugfs_htt_stats.h"
+
+void ath11k_debugfs_sta_add_tx_stats(struct ath11k_sta *arsta,
+ struct ath11k_per_peer_tx_stats *peer_stats,
+ u8 legacy_rate_idx)
+{
+ struct rate_info *txrate = &arsta->txrate;
+ struct ath11k_htt_tx_stats *tx_stats;
+ int gi, mcs, bw, nss;
+
+ if (!arsta->tx_stats)
+ return;
+
+ tx_stats = arsta->tx_stats;
+ gi = FIELD_GET(RATE_INFO_FLAGS_SHORT_GI, arsta->txrate.flags);
+ mcs = txrate->mcs;
+ bw = ath11k_mac_mac80211_bw_to_ath11k_bw(txrate->bw);
+ nss = txrate->nss - 1;
+
+#define STATS_OP_FMT(name) tx_stats->stats[ATH11K_STATS_TYPE_##name]
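+/* Index convention in the per-rate arrays below (inferred from the
+ * "bytes"/"packets" counter types used by the dump code further down,
+ * not stated in the original): index [0] accumulates bytes and
+ * index [1] accumulates packets.
+ */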
+
+ if (txrate->flags & RATE_INFO_FLAGS_HE_MCS) {
+ STATS_OP_FMT(SUCC).he[0][mcs] += peer_stats->succ_bytes;
+ STATS_OP_FMT(SUCC).he[1][mcs] += peer_stats->succ_pkts;
+ STATS_OP_FMT(FAIL).he[0][mcs] += peer_stats->failed_bytes;
+ STATS_OP_FMT(FAIL).he[1][mcs] += peer_stats->failed_pkts;
+ STATS_OP_FMT(RETRY).he[0][mcs] += peer_stats->retry_bytes;
+ STATS_OP_FMT(RETRY).he[1][mcs] += peer_stats->retry_pkts;
+ } else if (txrate->flags & RATE_INFO_FLAGS_VHT_MCS) {
+ STATS_OP_FMT(SUCC).vht[0][mcs] += peer_stats->succ_bytes;
+ STATS_OP_FMT(SUCC).vht[1][mcs] += peer_stats->succ_pkts;
+ STATS_OP_FMT(FAIL).vht[0][mcs] += peer_stats->failed_bytes;
+ STATS_OP_FMT(FAIL).vht[1][mcs] += peer_stats->failed_pkts;
+ STATS_OP_FMT(RETRY).vht[0][mcs] += peer_stats->retry_bytes;
+ STATS_OP_FMT(RETRY).vht[1][mcs] += peer_stats->retry_pkts;
+ } else if (txrate->flags & RATE_INFO_FLAGS_MCS) {
+ STATS_OP_FMT(SUCC).ht[0][mcs] += peer_stats->succ_bytes;
+ STATS_OP_FMT(SUCC).ht[1][mcs] += peer_stats->succ_pkts;
+ STATS_OP_FMT(FAIL).ht[0][mcs] += peer_stats->failed_bytes;
+ STATS_OP_FMT(FAIL).ht[1][mcs] += peer_stats->failed_pkts;
+ STATS_OP_FMT(RETRY).ht[0][mcs] += peer_stats->retry_bytes;
+ STATS_OP_FMT(RETRY).ht[1][mcs] += peer_stats->retry_pkts;
+ } else {
+ mcs = legacy_rate_idx;
+
+ STATS_OP_FMT(SUCC).legacy[0][mcs] += peer_stats->succ_bytes;
+ STATS_OP_FMT(SUCC).legacy[1][mcs] += peer_stats->succ_pkts;
+ STATS_OP_FMT(FAIL).legacy[0][mcs] += peer_stats->failed_bytes;
+ STATS_OP_FMT(FAIL).legacy[1][mcs] += peer_stats->failed_pkts;
+ STATS_OP_FMT(RETRY).legacy[0][mcs] += peer_stats->retry_bytes;
+ STATS_OP_FMT(RETRY).legacy[1][mcs] += peer_stats->retry_pkts;
+ }
+
+ if (peer_stats->is_ampdu) {
+ tx_stats->ba_fails += peer_stats->ba_fails;
+
+ if (txrate->flags & RATE_INFO_FLAGS_HE_MCS) {
+ STATS_OP_FMT(AMPDU).he[0][mcs] +=
+ peer_stats->succ_bytes + peer_stats->retry_bytes;
+ STATS_OP_FMT(AMPDU).he[1][mcs] +=
+ peer_stats->succ_pkts + peer_stats->retry_pkts;
+ } else if (txrate->flags & RATE_INFO_FLAGS_MCS) {
+ STATS_OP_FMT(AMPDU).ht[0][mcs] +=
+ peer_stats->succ_bytes + peer_stats->retry_bytes;
+ STATS_OP_FMT(AMPDU).ht[1][mcs] +=
+ peer_stats->succ_pkts + peer_stats->retry_pkts;
+ } else {
+ STATS_OP_FMT(AMPDU).vht[0][mcs] +=
+ peer_stats->succ_bytes + peer_stats->retry_bytes;
+ STATS_OP_FMT(AMPDU).vht[1][mcs] +=
+ peer_stats->succ_pkts + peer_stats->retry_pkts;
+ }
+ STATS_OP_FMT(AMPDU).bw[0][bw] +=
+ peer_stats->succ_bytes + peer_stats->retry_bytes;
+ STATS_OP_FMT(AMPDU).nss[0][nss] +=
+ peer_stats->succ_bytes + peer_stats->retry_bytes;
+ STATS_OP_FMT(AMPDU).gi[0][gi] +=
+ peer_stats->succ_bytes + peer_stats->retry_bytes;
+ STATS_OP_FMT(AMPDU).bw[1][bw] +=
+ peer_stats->succ_pkts + peer_stats->retry_pkts;
+ STATS_OP_FMT(AMPDU).nss[1][nss] +=
+ peer_stats->succ_pkts + peer_stats->retry_pkts;
+ STATS_OP_FMT(AMPDU).gi[1][gi] +=
+ peer_stats->succ_pkts + peer_stats->retry_pkts;
+ } else {
+ tx_stats->ack_fails += peer_stats->ba_fails;
+ }
+
+ STATS_OP_FMT(SUCC).bw[0][bw] += peer_stats->succ_bytes;
+ STATS_OP_FMT(SUCC).nss[0][nss] += peer_stats->succ_bytes;
+ STATS_OP_FMT(SUCC).gi[0][gi] += peer_stats->succ_bytes;
+
+ STATS_OP_FMT(SUCC).bw[1][bw] += peer_stats->succ_pkts;
+ STATS_OP_FMT(SUCC).nss[1][nss] += peer_stats->succ_pkts;
+ STATS_OP_FMT(SUCC).gi[1][gi] += peer_stats->succ_pkts;
+
+ STATS_OP_FMT(FAIL).bw[0][bw] += peer_stats->failed_bytes;
+ STATS_OP_FMT(FAIL).nss[0][nss] += peer_stats->failed_bytes;
+ STATS_OP_FMT(FAIL).gi[0][gi] += peer_stats->failed_bytes;
+
+ STATS_OP_FMT(FAIL).bw[1][bw] += peer_stats->failed_pkts;
+ STATS_OP_FMT(FAIL).nss[1][nss] += peer_stats->failed_pkts;
+ STATS_OP_FMT(FAIL).gi[1][gi] += peer_stats->failed_pkts;
+
+ STATS_OP_FMT(RETRY).bw[0][bw] += peer_stats->retry_bytes;
+ STATS_OP_FMT(RETRY).nss[0][nss] += peer_stats->retry_bytes;
+ STATS_OP_FMT(RETRY).gi[0][gi] += peer_stats->retry_bytes;
+
+ STATS_OP_FMT(RETRY).bw[1][bw] += peer_stats->retry_pkts;
+ STATS_OP_FMT(RETRY).nss[1][nss] += peer_stats->retry_pkts;
+ STATS_OP_FMT(RETRY).gi[1][gi] += peer_stats->retry_pkts;
+
+ tx_stats->tx_duration += peer_stats->duration;
+}
+
+void ath11k_debugfs_sta_update_txcompl(struct ath11k *ar,
+ struct hal_tx_status *ts)
+{
+ ath11k_dp_tx_update_txcompl(ar, ts);
+}
+
+static ssize_t ath11k_dbg_sta_dump_tx_stats(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_sta *sta = file->private_data;
+ struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta);
+ struct ath11k *ar = arsta->arvif->ar;
+ struct ath11k_htt_data_stats *stats;
+ static const char *str_name[ATH11K_STATS_TYPE_MAX] = {"succ", "fail",
+ "retry", "ampdu"};
+ static const char *str[ATH11K_COUNTER_TYPE_MAX] = {"bytes", "packets"};
+ int len = 0, i, j, k, retval = 0;
+ const int size = 2 * 4096;
+ char *buf;
+
+ if (!arsta->tx_stats)
+ return -ENOENT;
+
+ buf = kzalloc(size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ mutex_lock(&ar->conf_mutex);
+
+ spin_lock_bh(&ar->data_lock);
+ for (k = 0; k < ATH11K_STATS_TYPE_MAX; k++) {
+ for (j = 0; j < ATH11K_COUNTER_TYPE_MAX; j++) {
+ stats = &arsta->tx_stats->stats[k];
+ len += scnprintf(buf + len, size - len, "%s_%s\n",
+ str_name[k],
+ str[j]);
+ len += scnprintf(buf + len, size - len,
+ " HE MCS %s\n",
+ str[j]);
+ for (i = 0; i < ATH11K_HE_MCS_NUM; i++)
+ len += scnprintf(buf + len, size - len,
+ " %llu ",
+ stats->he[j][i]);
+ len += scnprintf(buf + len, size - len, "\n");
+ len += scnprintf(buf + len, size - len,
+ " VHT MCS %s\n",
+ str[j]);
+ for (i = 0; i < ATH11K_VHT_MCS_NUM; i++)
+ len += scnprintf(buf + len, size - len,
+ " %llu ",
+ stats->vht[j][i]);
+ len += scnprintf(buf + len, size - len, "\n");
+ len += scnprintf(buf + len, size - len, " HT MCS %s\n",
+ str[j]);
+ for (i = 0; i < ATH11K_HT_MCS_NUM; i++)
+ len += scnprintf(buf + len, size - len,
+ " %llu ", stats->ht[j][i]);
+ len += scnprintf(buf + len, size - len, "\n");
+ len += scnprintf(buf + len, size - len,
+ " BW %s (20,40,80,160 MHz)\n", str[j]);
+ len += scnprintf(buf + len, size - len,
+ " %llu %llu %llu %llu\n",
+ stats->bw[j][0], stats->bw[j][1],
+ stats->bw[j][2], stats->bw[j][3]);
+ len += scnprintf(buf + len, size - len,
+ " NSS %s (1x1,2x2,3x3,4x4)\n", str[j]);
+ len += scnprintf(buf + len, size - len,
+ " %llu %llu %llu %llu\n",
+ stats->nss[j][0], stats->nss[j][1],
+ stats->nss[j][2], stats->nss[j][3]);
+ len += scnprintf(buf + len, size - len,
+ " GI %s (0.4us,0.8us,1.6us,3.2us)\n",
+ str[j]);
+ len += scnprintf(buf + len, size - len,
+ " %llu %llu %llu %llu\n",
+ stats->gi[j][0], stats->gi[j][1],
+ stats->gi[j][2], stats->gi[j][3]);
+ len += scnprintf(buf + len, size - len,
+ " legacy rate %s (1,2 ... Mbps)\n ",
+ str[j]);
+ for (i = 0; i < ATH11K_LEGACY_NUM; i++)
+ len += scnprintf(buf + len, size - len, "%llu ",
+ stats->legacy[j][i]);
+ len += scnprintf(buf + len, size - len, "\n");
+ }
+ }
+
+ len += scnprintf(buf + len, size - len,
+ "\nTX duration\n %llu usecs\n",
+ arsta->tx_stats->tx_duration);
+ len += scnprintf(buf + len, size - len,
+ "BA fails\n %llu\n", arsta->tx_stats->ba_fails);
+ len += scnprintf(buf + len, size - len,
+ "ack fails\n %llu\n", arsta->tx_stats->ack_fails);
+ spin_unlock_bh(&ar->data_lock);
+
+ if (len > size)
+ len = size;
+ retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+
+ mutex_unlock(&ar->conf_mutex);
+ return retval;
+}
+
+static const struct file_operations fops_tx_stats = {
+ .read = ath11k_dbg_sta_dump_tx_stats,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath11k_dbg_sta_dump_rx_stats(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_sta *sta = file->private_data;
+ struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta);
+ struct ath11k *ar = arsta->arvif->ar;
+ struct ath11k_rx_peer_stats *rx_stats = arsta->rx_stats;
+ int len = 0, i, retval = 0;
+ const int size = 4096;
+ char *buf;
+
+ if (!rx_stats)
+ return -ENOENT;
+
+ buf = kzalloc(size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ mutex_lock(&ar->conf_mutex);
+ spin_lock_bh(&ar->ab->base_lock);
+
+ len += scnprintf(buf + len, size - len, "RX peer stats:\n");
+ len += scnprintf(buf + len, size - len, "Num of MSDUs: %llu\n",
+ rx_stats->num_msdu);
+ len += scnprintf(buf + len, size - len, "Num of MSDUs with TCP L4: %llu\n",
+ rx_stats->tcp_msdu_count);
+ len += scnprintf(buf + len, size - len, "Num of MSDUs with UDP L4: %llu\n",
+ rx_stats->udp_msdu_count);
+ len += scnprintf(buf + len, size - len, "Num of MSDUs part of AMPDU: %llu\n",
+ rx_stats->ampdu_msdu_count);
+ len += scnprintf(buf + len, size - len, "Num of MSDUs not part of AMPDU: %llu\n",
+ rx_stats->non_ampdu_msdu_count);
+ len += scnprintf(buf + len, size - len, "Num of MSDUs using STBC: %llu\n",
+ rx_stats->stbc_count);
+ len += scnprintf(buf + len, size - len, "Num of MSDUs beamformed: %llu\n",
+ rx_stats->beamformed_count);
+ len += scnprintf(buf + len, size - len, "Num of MPDUs with FCS ok: %llu\n",
+ rx_stats->num_mpdu_fcs_ok);
+ len += scnprintf(buf + len, size - len, "Num of MPDUs with FCS error: %llu\n",
+ rx_stats->num_mpdu_fcs_err);
+ len += scnprintf(buf + len, size - len,
+ "GI: 0.8us %llu 0.4us %llu 1.6us %llu 3.2us %llu\n",
+ rx_stats->gi_count[0], rx_stats->gi_count[1],
+ rx_stats->gi_count[2], rx_stats->gi_count[3]);
+ len += scnprintf(buf + len, size - len,
+ "BW: 20Mhz %llu 40Mhz %llu 80Mhz %llu 160Mhz %llu\n",
+ rx_stats->bw_count[0], rx_stats->bw_count[1],
+ rx_stats->bw_count[2], rx_stats->bw_count[3]);
+ len += scnprintf(buf + len, size - len, "BCC %llu LDPC %llu\n",
+ rx_stats->coding_count[0], rx_stats->coding_count[1]);
+ len += scnprintf(buf + len, size - len,
+ "preamble: 11A %llu 11B %llu 11N %llu 11AC %llu 11AX %llu\n",
+ rx_stats->pream_cnt[0], rx_stats->pream_cnt[1],
+ rx_stats->pream_cnt[2], rx_stats->pream_cnt[3],
+ rx_stats->pream_cnt[4]);
+ len += scnprintf(buf + len, size - len,
+ "reception type: SU %llu MU_MIMO %llu MU_OFDMA %llu MU_OFDMA_MIMO %llu\n",
+ rx_stats->reception_type[0], rx_stats->reception_type[1],
+ rx_stats->reception_type[2], rx_stats->reception_type[3]);
+ len += scnprintf(buf + len, size - len, "TID(0-15) Legacy TID(16):");
+ for (i = 0; i <= IEEE80211_NUM_TIDS; i++)
+ len += scnprintf(buf + len, size - len, "%llu ", rx_stats->tid_count[i]);
+ len += scnprintf(buf + len, size - len, "\nMCS(0-11) Legacy MCS(12):");
+ for (i = 0; i < HAL_RX_MAX_MCS + 1; i++)
+ len += scnprintf(buf + len, size - len, "%llu ", rx_stats->mcs_count[i]);
+ len += scnprintf(buf + len, size - len, "\nNSS(1-8):");
+ for (i = 0; i < HAL_RX_MAX_NSS; i++)
+ len += scnprintf(buf + len, size - len, "%llu ", rx_stats->nss_count[i]);
+ len += scnprintf(buf + len, size - len, "\nRX Duration:%llu ",
+ rx_stats->rx_duration);
+ len += scnprintf(buf + len, size - len,
+ "\nDCM: %llu\nRU: 26 %llu 52: %llu 106: %llu 242: %llu 484: %llu 996: %llu\n",
+ rx_stats->dcm_count, rx_stats->ru_alloc_cnt[0],
+ rx_stats->ru_alloc_cnt[1], rx_stats->ru_alloc_cnt[2],
+ rx_stats->ru_alloc_cnt[3], rx_stats->ru_alloc_cnt[4],
+ rx_stats->ru_alloc_cnt[5]);
+
+ len += scnprintf(buf + len, size - len, "\n");
+
+ spin_unlock_bh(&ar->ab->base_lock);
+
+ if (len > size)
+ len = size;
+ retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+
+ mutex_unlock(&ar->conf_mutex);
+ return retval;
+}
+
+static const struct file_operations fops_rx_stats = {
+ .read = ath11k_dbg_sta_dump_rx_stats,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static int
+ath11k_dbg_sta_open_htt_peer_stats(struct inode *inode, struct file *file)
+{
+ struct ieee80211_sta *sta = inode->i_private;
+ struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta);
+ struct ath11k *ar = arsta->arvif->ar;
+ struct debug_htt_stats_req *stats_req;
+ int type = ar->debug.htt_stats.type;
+ int ret;
+
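+	/* Only the two per-peer stats types may be read through this
+	 * file; anything else, including the reset type, is rejected.
+	 */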
+ if ((type != ATH11K_DBG_HTT_EXT_STATS_PEER_INFO &&
+ type != ATH11K_DBG_HTT_EXT_STATS_PEER_CTRL_PATH_TXRX_STATS) ||
+ type == ATH11K_DBG_HTT_EXT_STATS_RESET)
+ return -EPERM;
+
+ stats_req = vzalloc(sizeof(*stats_req) + ATH11K_HTT_STATS_BUF_SIZE);
+ if (!stats_req)
+ return -ENOMEM;
+
+ mutex_lock(&ar->conf_mutex);
+ ar->debug.htt_stats.stats_req = stats_req;
+ stats_req->type = type;
+ memcpy(stats_req->peer_addr, sta->addr, ETH_ALEN);
+ ret = ath11k_debugfs_htt_stats_req(ar);
+ mutex_unlock(&ar->conf_mutex);
+ if (ret < 0)
+ goto out;
+
+ file->private_data = stats_req;
+ return 0;
+out:
+ vfree(stats_req);
+ ar->debug.htt_stats.stats_req = NULL;
+ return ret;
+}
+
+static int
+ath11k_dbg_sta_release_htt_peer_stats(struct inode *inode, struct file *file)
+{
+ struct ieee80211_sta *sta = inode->i_private;
+ struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta);
+ struct ath11k *ar = arsta->arvif->ar;
+
+ mutex_lock(&ar->conf_mutex);
+ vfree(file->private_data);
+ ar->debug.htt_stats.stats_req = NULL;
+ mutex_unlock(&ar->conf_mutex);
+
+ return 0;
+}
+
+static ssize_t ath11k_dbg_sta_read_htt_peer_stats(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct debug_htt_stats_req *stats_req = file->private_data;
+ char *buf;
+ u32 length = 0;
+
+ buf = stats_req->buf;
+ length = min_t(u32, stats_req->buf_len, ATH11K_HTT_STATS_BUF_SIZE);
+ return simple_read_from_buffer(user_buf, count, ppos, buf, length);
+}
+
+static const struct file_operations fops_htt_peer_stats = {
+ .open = ath11k_dbg_sta_open_htt_peer_stats,
+ .release = ath11k_dbg_sta_release_htt_peer_stats,
+ .read = ath11k_dbg_sta_read_htt_peer_stats,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath11k_dbg_sta_write_peer_pktlog(struct file *file,
+ const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_sta *sta = file->private_data;
+ struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta);
+ struct ath11k *ar = arsta->arvif->ar;
+ int ret, enable;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH11K_STATE_ON) {
+ ret = -ENETDOWN;
+ goto out;
+ }
+
+ ret = kstrtoint_from_user(buf, count, 0, &enable);
+ if (ret)
+ goto out;
+
+ ar->debug.pktlog_peer_valid = enable;
+ memcpy(ar->debug.pktlog_peer_addr, sta->addr, ETH_ALEN);
+
+ /* Send peer based pktlog enable/disable */
+ ret = ath11k_wmi_pdev_peer_pktlog_filter(ar, sta->addr, enable);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set peer pktlog filter %pM: %d\n",
+ sta->addr, ret);
+ goto out;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "peer pktlog filter set to %d\n",
+ enable);
+ ret = count;
+
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static ssize_t ath11k_dbg_sta_read_peer_pktlog(struct file *file,
+ char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_sta *sta = file->private_data;
+ struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta);
+ struct ath11k *ar = arsta->arvif->ar;
+ char buf[32] = {};
+ int len;
+
+ mutex_lock(&ar->conf_mutex);
+ len = scnprintf(buf, sizeof(buf), "%08x %pM\n",
+ ar->debug.pktlog_peer_valid,
+ ar->debug.pktlog_peer_addr);
+ mutex_unlock(&ar->conf_mutex);
+
+ return simple_read_from_buffer(ubuf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_peer_pktlog = {
+ .write = ath11k_dbg_sta_write_peer_pktlog,
+ .read = ath11k_dbg_sta_read_peer_pktlog,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath11k_dbg_sta_write_delba(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_sta *sta = file->private_data;
+ struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta);
+ struct ath11k *ar = arsta->arvif->ar;
+ u32 tid, initiator, reason;
+ int ret;
+ char buf[64] = {};
+
+ ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos,
+ user_buf, count);
+ if (ret <= 0)
+ return ret;
+
+ ret = sscanf(buf, "%u %u %u", &tid, &initiator, &reason);
+ if (ret != 3)
+ return -EINVAL;
+
+ /* Valid TID values are 0 through 15 */
+ if (tid > HAL_DESC_REO_NON_QOS_TID - 1)
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+ if (ar->state != ATH11K_STATE_ON ||
+ arsta->aggr_mode != ATH11K_DBG_AGGR_MODE_MANUAL) {
+ ret = count;
+ goto out;
+ }
+
+ ret = ath11k_wmi_delba_send(ar, arsta->arvif->vdev_id, sta->addr,
+ tid, initiator, reason);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send delba: vdev_id %u peer %pM tid %u initiator %u reason %u\n",
+ arsta->arvif->vdev_id, sta->addr, tid, initiator,
+ reason);
+ }
+ ret = count;
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static const struct file_operations fops_delba = {
+ .write = ath11k_dbg_sta_write_delba,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath11k_dbg_sta_write_addba_resp(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_sta *sta = file->private_data;
+ struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta);
+ struct ath11k *ar = arsta->arvif->ar;
+ u32 tid, status;
+ int ret;
+ char buf[64] = {};
+
+ ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos,
+ user_buf, count);
+ if (ret <= 0)
+ return ret;
+
+ ret = sscanf(buf, "%u %u", &tid, &status);
+ if (ret != 2)
+ return -EINVAL;
+
+ /* Valid TID values are 0 through 15 */
+ if (tid > HAL_DESC_REO_NON_QOS_TID - 1)
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+ if (ar->state != ATH11K_STATE_ON ||
+ arsta->aggr_mode != ATH11K_DBG_AGGR_MODE_MANUAL) {
+ ret = count;
+ goto out;
+ }
+
+ ret = ath11k_wmi_addba_set_resp(ar, arsta->arvif->vdev_id, sta->addr,
+ tid, status);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send addba response: vdev_id %u peer %pM tid %u status%u\n",
+ arsta->arvif->vdev_id, sta->addr, tid, status);
+ }
+ ret = count;
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static const struct file_operations fops_addba_resp = {
+ .write = ath11k_dbg_sta_write_addba_resp,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath11k_dbg_sta_write_addba(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_sta *sta = file->private_data;
+ struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta);
+ struct ath11k *ar = arsta->arvif->ar;
+ u32 tid, buf_size;
+ int ret;
+ char buf[64] = {};
+
+ ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos,
+ user_buf, count);
+ if (ret <= 0)
+ return ret;
+
+ ret = sscanf(buf, "%u %u", &tid, &buf_size);
+ if (ret != 2)
+ return -EINVAL;
+
+ /* Valid TID values are 0 through 15 */
+ if (tid > HAL_DESC_REO_NON_QOS_TID - 1)
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+ if (ar->state != ATH11K_STATE_ON ||
+ arsta->aggr_mode != ATH11K_DBG_AGGR_MODE_MANUAL) {
+ ret = count;
+ goto out;
+ }
+
+ ret = ath11k_wmi_addba_send(ar, arsta->arvif->vdev_id, sta->addr,
+ tid, buf_size);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send addba request: vdev_id %u peer %pM tid %u buf_size %u\n",
+ arsta->arvif->vdev_id, sta->addr, tid, buf_size);
+ }
+
+ ret = count;
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static const struct file_operations fops_addba = {
+ .write = ath11k_dbg_sta_write_addba,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath11k_dbg_sta_read_aggr_mode(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_sta *sta = file->private_data;
+ struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta);
+ struct ath11k *ar = arsta->arvif->ar;
+ char buf[64];
+ int len = 0;
+
+ mutex_lock(&ar->conf_mutex);
+ len = scnprintf(buf, sizeof(buf) - len,
+ "aggregation mode: %s\n\n%s\n%s\n",
+ (arsta->aggr_mode == ATH11K_DBG_AGGR_MODE_AUTO) ?
+ "auto" : "manual", "auto = 0", "manual = 1");
+ mutex_unlock(&ar->conf_mutex);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t ath11k_dbg_sta_write_aggr_mode(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_sta *sta = file->private_data;
+ struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta);
+ struct ath11k *ar = arsta->arvif->ar;
+ u32 aggr_mode;
+ int ret;
+
+ if (kstrtouint_from_user(user_buf, count, 0, &aggr_mode))
+ return -EINVAL;
+
+ if (aggr_mode >= ATH11K_DBG_AGGR_MODE_MAX)
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+ if (ar->state != ATH11K_STATE_ON ||
+ aggr_mode == arsta->aggr_mode) {
+ ret = count;
+ goto out;
+ }
+
+ ret = ath11k_wmi_addba_clear_resp(ar, arsta->arvif->vdev_id, sta->addr);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to clear addba session ret: %d\n",
+ ret);
+ goto out;
+ }
+
+ arsta->aggr_mode = aggr_mode;
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static const struct file_operations fops_aggr_mode = {
+ .read = ath11k_dbg_sta_read_aggr_mode,
+ .write = ath11k_dbg_sta_write_aggr_mode,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t
+ath11k_write_htt_peer_stats_reset(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_sta *sta = file->private_data;
+ struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta);
+ struct ath11k *ar = arsta->arvif->ar;
+ struct htt_ext_stats_cfg_params cfg_params = {};
+ int ret;
+ u8 type;
+
+ ret = kstrtou8_from_user(user_buf, count, 0, &type);
+ if (ret)
+ return ret;
+
+ if (!type)
+ return ret;
+
+ mutex_lock(&ar->conf_mutex);
+ cfg_params.cfg0 = HTT_STAT_PEER_INFO_MAC_ADDR;
+ cfg_params.cfg0 |= FIELD_PREP(GENMASK(15, 1),
+ HTT_PEER_STATS_REQ_MODE_FLUSH_TQM);
+
+ cfg_params.cfg1 = HTT_STAT_DEFAULT_PEER_REQ_TYPE;
+
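+	/* Pack the peer MAC address byte-by-byte into cfg2 (bytes 0-3)
+	 * and cfg3 (bytes 4-5) of the request.
+	 */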
+ cfg_params.cfg2 |= FIELD_PREP(GENMASK(7, 0), sta->addr[0]);
+ cfg_params.cfg2 |= FIELD_PREP(GENMASK(15, 8), sta->addr[1]);
+ cfg_params.cfg2 |= FIELD_PREP(GENMASK(23, 16), sta->addr[2]);
+ cfg_params.cfg2 |= FIELD_PREP(GENMASK(31, 24), sta->addr[3]);
+
+ cfg_params.cfg3 |= FIELD_PREP(GENMASK(7, 0), sta->addr[4]);
+ cfg_params.cfg3 |= FIELD_PREP(GENMASK(15, 8), sta->addr[5]);
+
+ cfg_params.cfg3 |= ATH11K_HTT_PEER_STATS_RESET;
+
+ ret = ath11k_dp_tx_htt_h2t_ext_stats_req(ar,
+ ATH11K_DBG_HTT_EXT_STATS_PEER_INFO,
+ &cfg_params,
+ 0ULL);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send htt peer stats request: %d\n", ret);
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+
+ ret = count;
+
+ return ret;
+}
+
+static const struct file_operations fops_htt_peer_stats_reset = {
+ .write = ath11k_write_htt_peer_stats_reset,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath11k_dbg_sta_read_peer_ps_state(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_sta *sta = file->private_data;
+ struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta);
+ struct ath11k *ar = arsta->arvif->ar;
+ char buf[20];
+ int len;
+
+ spin_lock_bh(&ar->data_lock);
+
+ len = scnprintf(buf, sizeof(buf), "%d\n", arsta->peer_ps_state);
+
+ spin_unlock_bh(&ar->data_lock);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_peer_ps_state = {
+ .open = simple_open,
+ .read = ath11k_dbg_sta_read_peer_ps_state,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath11k_dbg_sta_read_current_ps_duration(struct file *file,
+ char __user *user_buf,
+ size_t count,
+ loff_t *ppos)
+{
+ struct ieee80211_sta *sta = file->private_data;
+ struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta);
+ struct ath11k *ar = arsta->arvif->ar;
+ u64 time_since_station_in_power_save;
+ char buf[20];
+ int len;
+
+ spin_lock_bh(&ar->data_lock);
+
+ if (arsta->peer_ps_state == WMI_PEER_PS_STATE_ON &&
+ arsta->peer_current_ps_valid)
+ time_since_station_in_power_save = jiffies_to_msecs(jiffies
+ - arsta->ps_start_jiffies);
+ else
+ time_since_station_in_power_save = 0;
+
+ len = scnprintf(buf, sizeof(buf), "%llu\n",
+ time_since_station_in_power_save);
+ spin_unlock_bh(&ar->data_lock);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_current_ps_duration = {
+ .open = simple_open,
+ .read = ath11k_dbg_sta_read_current_ps_duration,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath11k_dbg_sta_read_total_ps_duration(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_sta *sta = file->private_data;
+ struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta);
+ struct ath11k *ar = arsta->arvif->ar;
+ char buf[20];
+ u64 power_save_duration;
+ int len;
+
+ spin_lock_bh(&ar->data_lock);
+
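+	/* Report the accumulated PS time plus the in-progress PS
+	 * interval, if the peer is currently in power save.
+	 */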
+ if (arsta->peer_ps_state == WMI_PEER_PS_STATE_ON &&
+ arsta->peer_current_ps_valid)
+ power_save_duration = jiffies_to_msecs(jiffies
+ - arsta->ps_start_jiffies)
+ + arsta->ps_total_duration;
+ else
+ power_save_duration = arsta->ps_total_duration;
+
+ len = scnprintf(buf, sizeof(buf), "%llu\n", power_save_duration);
+
+ spin_unlock_bh(&ar->data_lock);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_total_ps_duration = {
+ .open = simple_open,
+ .read = ath11k_dbg_sta_read_total_ps_duration,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+void ath11k_debugfs_sta_op_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, struct dentry *dir)
+{
+ struct ath11k *ar = hw->priv;
+
+ if (ath11k_debugfs_is_extd_tx_stats_enabled(ar))
+ debugfs_create_file("tx_stats", 0400, dir, sta,
+ &fops_tx_stats);
+ if (ath11k_debugfs_is_extd_rx_stats_enabled(ar))
+ debugfs_create_file("rx_stats", 0400, dir, sta,
+ &fops_rx_stats);
+
+ debugfs_create_file("htt_peer_stats", 0400, dir, sta,
+ &fops_htt_peer_stats);
+
+ debugfs_create_file("peer_pktlog", 0644, dir, sta,
+ &fops_peer_pktlog);
+
+ debugfs_create_file("aggr_mode", 0644, dir, sta, &fops_aggr_mode);
+ debugfs_create_file("addba", 0200, dir, sta, &fops_addba);
+ debugfs_create_file("addba_resp", 0200, dir, sta, &fops_addba_resp);
+ debugfs_create_file("delba", 0200, dir, sta, &fops_delba);
+
+ if (test_bit(WMI_TLV_SERVICE_PER_PEER_HTT_STATS_RESET,
+ ar->ab->wmi_ab.svc_map))
+ debugfs_create_file("htt_peer_stats_reset", 0600, dir, sta,
+ &fops_htt_peer_stats_reset);
+
+ debugfs_create_file("peer_ps_state", 0400, dir, sta,
+ &fops_peer_ps_state);
+
+ if (test_bit(WMI_TLV_SERVICE_PEER_POWER_SAVE_DURATION_SUPPORT,
+ ar->ab->wmi_ab.svc_map)) {
+ debugfs_create_file("current_ps_duration", 0440, dir, sta,
+ &fops_current_ps_duration);
+ debugfs_create_file("total_ps_duration", 0440, dir, sta,
+ &fops_total_ps_duration);
+ }
+}
diff --git a/drivers/net/wireless/ath/ath11k/debugfs_sta.h b/drivers/net/wireless/ath/ath11k/debugfs_sta.h
new file mode 100644
index 000000000000..ace877e19275
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/debugfs_sta.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2020 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _ATH11K_DEBUGFS_STA_H_
+#define _ATH11K_DEBUGFS_STA_H_
+
+#include <net/mac80211.h>
+
+#include "core.h"
+#include "hal_tx.h"
+
+#ifdef CONFIG_ATH11K_DEBUGFS
+
+void ath11k_debugfs_sta_op_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, struct dentry *dir);
+void ath11k_debugfs_sta_add_tx_stats(struct ath11k_sta *arsta,
+ struct ath11k_per_peer_tx_stats *peer_stats,
+ u8 legacy_rate_idx);
+void ath11k_debugfs_sta_update_txcompl(struct ath11k *ar,
+ struct hal_tx_status *ts);
+
+#else /* CONFIG_ATH11K_DEBUGFS */
+
+#define ath11k_debugfs_sta_op_add NULL
+
+static inline void
+ath11k_debugfs_sta_add_tx_stats(struct ath11k_sta *arsta,
+ struct ath11k_per_peer_tx_stats *peer_stats,
+ u8 legacy_rate_idx)
+{
+}
+
+static inline void ath11k_debugfs_sta_update_txcompl(struct ath11k *ar,
+ struct hal_tx_status *ts)
+{
+}
+
+#endif /* CONFIG_ATH11K_DEBUGFS */
+
+#endif /* _ATH11K_DEBUGFS_STA_H_ */
diff --git a/drivers/net/wireless/ath/ath11k/dp.c b/drivers/net/wireless/ath/ath11k/dp.c
new file mode 100644
index 000000000000..56b1a657e0b0
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/dp.c
@@ -0,0 +1,1194 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#include <crypto/hash.h>
+#include <linux/export.h>
+#include "core.h"
+#include "dp_tx.h"
+#include "hal_tx.h"
+#include "hif.h"
+#include "debug.h"
+#include "dp_rx.h"
+#include "peer.h"
+
+static void ath11k_dp_htt_htc_tx_complete(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ dev_kfree_skb_any(skb);
+}
+
+void ath11k_dp_peer_cleanup(struct ath11k *ar, int vdev_id, const u8 *addr)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_peer *peer;
+
+ /* TODO: Any other peer specific DP cleanup */
+
+ spin_lock_bh(&ab->base_lock);
+ peer = ath11k_peer_find(ab, vdev_id, addr);
+ if (!peer) {
+ ath11k_warn(ab, "failed to lookup peer %pM on vdev %d\n",
+ addr, vdev_id);
+ spin_unlock_bh(&ab->base_lock);
+ return;
+ }
+
+ ath11k_peer_rx_tid_cleanup(ar, peer);
+ peer->dp_setup_done = false;
+ crypto_free_shash(peer->tfm_mmic);
+ spin_unlock_bh(&ab->base_lock);
+}
+
+int ath11k_dp_peer_setup(struct ath11k *ar, int vdev_id, const u8 *addr)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_peer *peer;
+ u32 reo_dest;
+ int ret = 0, tid;
+
+ /* NOTE: reo_dest ring id starts from 1 unlike mac_id which starts from 0 */
+ reo_dest = ar->dp.mac_id + 1;
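+	/* Bit 0 carries DP_RX_HASH_ENABLE; the reo_dest ring id is packed
+	 * into the bits above it, hence the << 1 below (inferred from the
+	 * encoding used here).
+	 */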
+ ret = ath11k_wmi_set_peer_param(ar, addr, vdev_id,
+ WMI_PEER_SET_DEFAULT_ROUTING,
+ DP_RX_HASH_ENABLE | (reo_dest << 1));
+
+ if (ret) {
+ ath11k_warn(ab, "failed to set default routing %d peer :%pM vdev_id :%d\n",
+ ret, addr, vdev_id);
+ return ret;
+ }
+
+ for (tid = 0; tid <= IEEE80211_NUM_TIDS; tid++) {
+ ret = ath11k_peer_rx_tid_setup(ar, addr, vdev_id, tid, 1, 0,
+ HAL_PN_TYPE_NONE);
+ if (ret) {
+ ath11k_warn(ab, "failed to setup rxd tid queue for tid %d: %d\n",
+ tid, ret);
+ goto peer_clean;
+ }
+ }
+
+ ret = ath11k_peer_rx_frag_setup(ar, addr, vdev_id);
+ if (ret) {
+ ath11k_warn(ab, "failed to setup rx defrag context\n");
+ tid--;
+ goto peer_clean;
+ }
+
+ /* TODO: Setup other peer specific resource used in data path */
+
+ return 0;
+
+peer_clean:
+ spin_lock_bh(&ab->base_lock);
+
+ peer = ath11k_peer_find(ab, vdev_id, addr);
+ if (!peer) {
+ ath11k_warn(ab, "failed to find the peer to del rx tid\n");
+ spin_unlock_bh(&ab->base_lock);
+ return -ENOENT;
+ }
+
+ for (; tid >= 0; tid--)
+ ath11k_peer_rx_tid_delete(ar, peer, tid);
+
+ spin_unlock_bh(&ab->base_lock);
+
+ return ret;
+}
+
+void ath11k_dp_srng_cleanup(struct ath11k_base *ab, struct dp_srng *ring)
+{
+ if (!ring->vaddr_unaligned)
+ return;
+
+ if (ring->cached)
+ dma_free_noncoherent(ab->dev, ring->size, ring->vaddr_unaligned,
+ ring->paddr_unaligned, DMA_FROM_DEVICE);
+ else
+ dma_free_coherent(ab->dev, ring->size, ring->vaddr_unaligned,
+ ring->paddr_unaligned);
+
+ ring->vaddr_unaligned = NULL;
+}
+
+static int ath11k_dp_srng_find_ring_in_mask(int ring_num, const u8 *grp_mask)
+{
+ int ext_group_num;
+ u8 mask = 1 << ring_num;
+
+ for (ext_group_num = 0; ext_group_num < ATH11K_EXT_IRQ_GRP_NUM_MAX;
+ ext_group_num++) {
+ if (mask & grp_mask[ext_group_num])
+ return ext_group_num;
+ }
+
+ return -ENOENT;
+}
+
+static int ath11k_dp_srng_calculate_msi_group(struct ath11k_base *ab,
+ enum hal_ring_type type, int ring_num)
+{
+ const u8 *grp_mask;
+
+ switch (type) {
+ case HAL_WBM2SW_RELEASE:
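+		/* The RX release ring has a dedicated group mask; every
+		 * other WBM2SW ring is a TX completion ring.
+		 */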
+ if (ring_num == DP_RX_RELEASE_RING_NUM) {
+ grp_mask = &ab->hw_params.ring_mask->rx_wbm_rel[0];
+ ring_num = 0;
+ } else {
+ grp_mask = &ab->hw_params.ring_mask->tx[0];
+ }
+ break;
+ case HAL_REO_EXCEPTION:
+ grp_mask = &ab->hw_params.ring_mask->rx_err[0];
+ break;
+ case HAL_REO_DST:
+ grp_mask = &ab->hw_params.ring_mask->rx[0];
+ break;
+ case HAL_REO_STATUS:
+ grp_mask = &ab->hw_params.ring_mask->reo_status[0];
+ break;
+ case HAL_RXDMA_MONITOR_STATUS:
+ case HAL_RXDMA_MONITOR_DST:
+ grp_mask = &ab->hw_params.ring_mask->rx_mon_status[0];
+ break;
+ case HAL_RXDMA_DST:
+ grp_mask = &ab->hw_params.ring_mask->rxdma2host[0];
+ break;
+ case HAL_RXDMA_BUF:
+ grp_mask = &ab->hw_params.ring_mask->host2rxdma[0];
+ break;
+ case HAL_RXDMA_MONITOR_BUF:
+ case HAL_TCL_DATA:
+ case HAL_TCL_CMD:
+ case HAL_REO_CMD:
+ case HAL_SW2WBM_RELEASE:
+ case HAL_WBM_IDLE_LINK:
+ case HAL_TCL_STATUS:
+ case HAL_REO_REINJECT:
+ case HAL_CE_SRC:
+ case HAL_CE_DST:
+ case HAL_CE_DST_STATUS:
+ default:
+ return -ENOENT;
+ }
+
+ return ath11k_dp_srng_find_ring_in_mask(ring_num, grp_mask);
+}
+
+static void ath11k_dp_srng_msi_setup(struct ath11k_base *ab,
+ struct hal_srng_params *ring_params,
+ enum hal_ring_type type, int ring_num)
+{
+ int msi_group_number, msi_data_count;
+ u32 msi_data_start, msi_irq_start, addr_lo, addr_hi;
+ int ret;
+
+ ret = ath11k_get_user_msi_vector(ab, "DP",
+ &msi_data_count, &msi_data_start,
+ &msi_irq_start);
+ if (ret)
+ return;
+
+ msi_group_number = ath11k_dp_srng_calculate_msi_group(ab, type,
+ ring_num);
+ if (msi_group_number < 0) {
+ ath11k_dbg(ab, ATH11K_DBG_PCI,
+ "ring not part of an ext_group; ring_type: %d,ring_num %d",
+ type, ring_num);
+ ring_params->msi_addr = 0;
+ ring_params->msi_data = 0;
+ return;
+ }
+
+ if (msi_group_number > msi_data_count) {
+ ath11k_dbg(ab, ATH11K_DBG_PCI,
+ "multiple msi_groups share one msi, msi_group_num %d",
+ msi_group_number);
+ }
+
+ ath11k_get_msi_address(ab, &addr_lo, &addr_hi);
+
+ ring_params->msi_addr = addr_lo;
+ ring_params->msi_addr |= (dma_addr_t)(((uint64_t)addr_hi) << 32);
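+	/* With more ext groups than MSI vectors, the modulo wraps so
+	 * several groups share one vector (the case warned about above).
+	 */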
+ ring_params->msi_data = (msi_group_number % msi_data_count)
+ + msi_data_start;
+ ring_params->flags |= HAL_SRNG_FLAGS_MSI_INTR;
+}
+
+int ath11k_dp_srng_setup(struct ath11k_base *ab, struct dp_srng *ring,
+ enum hal_ring_type type, int ring_num,
+ int mac_id, int num_entries)
+{
+ struct hal_srng_params params = {};
+ int entry_sz = ath11k_hal_srng_get_entrysize(ab, type);
+ int max_entries = ath11k_hal_srng_get_max_entries(ab, type);
+ int ret;
+ bool cached = false;
+
+ if (max_entries < 0 || entry_sz < 0)
+ return -EINVAL;
+
+ if (num_entries > max_entries)
+ num_entries = max_entries;
+
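+	/* Over-allocate by HAL_RING_BASE_ALIGN - 1 bytes so that the ring
+	 * base can be rounded up with PTR_ALIGN() below without running
+	 * past the end of the allocation.
+	 */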
+ ring->size = (num_entries * entry_sz) + HAL_RING_BASE_ALIGN - 1;
+
+ if (ab->hw_params.alloc_cacheable_memory) {
+ /* Allocate the reo dst and tx completion rings from cacheable memory */
+ switch (type) {
+ case HAL_REO_DST:
+ case HAL_WBM2SW_RELEASE:
+ cached = true;
+ break;
+ default:
+ cached = false;
+ }
+ }
+
+ if (cached)
+ ring->vaddr_unaligned = dma_alloc_noncoherent(ab->dev, ring->size,
+ &ring->paddr_unaligned,
+ DMA_FROM_DEVICE,
+ GFP_KERNEL);
+ else
+ ring->vaddr_unaligned = dma_alloc_coherent(ab->dev, ring->size,
+ &ring->paddr_unaligned,
+ GFP_KERNEL);
+
+ if (!ring->vaddr_unaligned)
+ return -ENOMEM;
+
+ ring->vaddr = PTR_ALIGN(ring->vaddr_unaligned, HAL_RING_BASE_ALIGN);
+ ring->paddr = ring->paddr_unaligned + ((unsigned long)ring->vaddr -
+ (unsigned long)ring->vaddr_unaligned);
+
+ params.ring_base_vaddr = ring->vaddr;
+ params.ring_base_paddr = ring->paddr;
+ params.num_entries = num_entries;
+ ath11k_dp_srng_msi_setup(ab, &params, type, ring_num + mac_id);
+
+ switch (type) {
+ case HAL_REO_DST:
+ params.intr_batch_cntr_thres_entries =
+ HAL_SRNG_INT_BATCH_THRESHOLD_RX;
+ params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
+ break;
+ case HAL_RXDMA_BUF:
+ case HAL_RXDMA_MONITOR_BUF:
+ case HAL_RXDMA_MONITOR_STATUS:
+ params.low_threshold = num_entries >> 3;
+ params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
+ params.intr_batch_cntr_thres_entries = 0;
+ params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
+ break;
+ case HAL_WBM2SW_RELEASE:
+ if (ring_num < 3) {
+ params.intr_batch_cntr_thres_entries =
+ HAL_SRNG_INT_BATCH_THRESHOLD_TX;
+ params.intr_timer_thres_us =
+ HAL_SRNG_INT_TIMER_THRESHOLD_TX;
+ break;
+ }
+		/* fall through when ring_num >= 3 */
+ fallthrough;
+ case HAL_REO_EXCEPTION:
+ case HAL_REO_REINJECT:
+ case HAL_REO_CMD:
+ case HAL_REO_STATUS:
+ case HAL_TCL_DATA:
+ case HAL_TCL_CMD:
+ case HAL_TCL_STATUS:
+ case HAL_WBM_IDLE_LINK:
+ case HAL_SW2WBM_RELEASE:
+ case HAL_RXDMA_DST:
+ case HAL_RXDMA_MONITOR_DST:
+ case HAL_RXDMA_MONITOR_DESC:
+ params.intr_batch_cntr_thres_entries =
+ HAL_SRNG_INT_BATCH_THRESHOLD_OTHER;
+ params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_OTHER;
+ break;
+ case HAL_RXDMA_DIR_BUF:
+ break;
+ default:
+ ath11k_warn(ab, "Not a valid ring type in dp :%d\n", type);
+ return -EINVAL;
+ }
+
+ if (cached) {
+ params.flags |= HAL_SRNG_FLAGS_CACHED;
+ ring->cached = 1;
+ }
+
+ ret = ath11k_hal_srng_setup(ab, type, ring_num, mac_id, &params);
+ if (ret < 0) {
+ ath11k_warn(ab, "failed to setup srng: %d ring_id %d\n",
+ ret, ring_num);
+ return ret;
+ }
+
+ ring->ring_id = ret;
+
+ return 0;
+}
+
+void ath11k_dp_stop_shadow_timers(struct ath11k_base *ab)
+{
+ int i;
+
+ if (!ab->hw_params.supports_shadow_regs)
+ return;
+
+ for (i = 0; i < ab->hw_params.max_tx_ring; i++)
+ ath11k_dp_shadow_stop_timer(ab, &ab->dp.tx_ring_timer[i]);
+
+ ath11k_dp_shadow_stop_timer(ab, &ab->dp.reo_cmd_timer);
+}
+
+static void ath11k_dp_srng_common_cleanup(struct ath11k_base *ab)
+{
+ struct ath11k_dp *dp = &ab->dp;
+ int i;
+
+ ath11k_dp_stop_shadow_timers(ab);
+ ath11k_dp_srng_cleanup(ab, &dp->wbm_desc_rel_ring);
+ ath11k_dp_srng_cleanup(ab, &dp->tcl_cmd_ring);
+ ath11k_dp_srng_cleanup(ab, &dp->tcl_status_ring);
+ for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
+ ath11k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_data_ring);
+ ath11k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_comp_ring);
+ }
+ ath11k_dp_srng_cleanup(ab, &dp->reo_reinject_ring);
+ ath11k_dp_srng_cleanup(ab, &dp->rx_rel_ring);
+ ath11k_dp_srng_cleanup(ab, &dp->reo_except_ring);
+ ath11k_dp_srng_cleanup(ab, &dp->reo_cmd_ring);
+ ath11k_dp_srng_cleanup(ab, &dp->reo_status_ring);
+}
+
+static int ath11k_dp_srng_common_setup(struct ath11k_base *ab)
+{
+ struct ath11k_dp *dp = &ab->dp;
+ struct hal_srng *srng;
+ int i, ret;
+ u8 tcl_num, wbm_num;
+
+ ret = ath11k_dp_srng_setup(ab, &dp->wbm_desc_rel_ring,
+ HAL_SW2WBM_RELEASE, 0, 0,
+ DP_WBM_RELEASE_RING_SIZE);
+ if (ret) {
+ ath11k_warn(ab, "failed to set up wbm2sw_release ring :%d\n",
+ ret);
+ goto err;
+ }
+
+ ret = ath11k_dp_srng_setup(ab, &dp->tcl_cmd_ring, HAL_TCL_CMD, 0, 0,
+ DP_TCL_CMD_RING_SIZE);
+ if (ret) {
+ ath11k_warn(ab, "failed to set up tcl_cmd ring :%d\n", ret);
+ goto err;
+ }
+
+ ret = ath11k_dp_srng_setup(ab, &dp->tcl_status_ring, HAL_TCL_STATUS,
+ 0, 0, DP_TCL_STATUS_RING_SIZE);
+ if (ret) {
+ ath11k_warn(ab, "failed to set up tcl_status ring :%d\n", ret);
+ goto err;
+ }
+
+ for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
+ tcl_num = ab->hw_params.hal_params->tcl2wbm_rbm_map[i].tcl_ring_num;
+ wbm_num = ab->hw_params.hal_params->tcl2wbm_rbm_map[i].wbm_ring_num;
+
+ ret = ath11k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_data_ring,
+ HAL_TCL_DATA, tcl_num, 0,
+ ab->hw_params.tx_ring_size);
+ if (ret) {
+ ath11k_warn(ab, "failed to set up tcl_data ring (%d) :%d\n",
+ i, ret);
+ goto err;
+ }
+
+ ret = ath11k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_comp_ring,
+ HAL_WBM2SW_RELEASE, wbm_num, 0,
+ DP_TX_COMP_RING_SIZE);
+ if (ret) {
+ ath11k_warn(ab, "failed to set up tcl_comp ring (%d) :%d\n",
+ i, ret);
+ goto err;
+ }
+
+ srng = &ab->hal.srng_list[dp->tx_ring[i].tcl_data_ring.ring_id];
+ ath11k_hal_tx_init_data_ring(ab, srng);
+
+ ath11k_dp_shadow_init_timer(ab, &dp->tx_ring_timer[i],
+ ATH11K_SHADOW_DP_TIMER_INTERVAL,
+ dp->tx_ring[i].tcl_data_ring.ring_id);
+ }
+
+ ret = ath11k_dp_srng_setup(ab, &dp->reo_reinject_ring, HAL_REO_REINJECT,
+ 0, 0, DP_REO_REINJECT_RING_SIZE);
+ if (ret) {
+ ath11k_warn(ab, "failed to set up reo_reinject ring :%d\n",
+ ret);
+ goto err;
+ }
+
+ ret = ath11k_dp_srng_setup(ab, &dp->rx_rel_ring, HAL_WBM2SW_RELEASE,
+ DP_RX_RELEASE_RING_NUM, 0, DP_RX_RELEASE_RING_SIZE);
+ if (ret) {
+ ath11k_warn(ab, "failed to set up rx_rel ring :%d\n", ret);
+ goto err;
+ }
+
+ ret = ath11k_dp_srng_setup(ab, &dp->reo_except_ring, HAL_REO_EXCEPTION,
+ 0, 0, DP_REO_EXCEPTION_RING_SIZE);
+ if (ret) {
+ ath11k_warn(ab, "failed to set up reo_exception ring :%d\n",
+ ret);
+ goto err;
+ }
+
+ ret = ath11k_dp_srng_setup(ab, &dp->reo_cmd_ring, HAL_REO_CMD,
+ 0, 0, DP_REO_CMD_RING_SIZE);
+ if (ret) {
+ ath11k_warn(ab, "failed to set up reo_cmd ring :%d\n", ret);
+ goto err;
+ }
+
+ srng = &ab->hal.srng_list[dp->reo_cmd_ring.ring_id];
+ ath11k_hal_reo_init_cmd_ring(ab, srng);
+
+ ath11k_dp_shadow_init_timer(ab, &dp->reo_cmd_timer,
+ ATH11K_SHADOW_CTRL_TIMER_INTERVAL,
+ dp->reo_cmd_ring.ring_id);
+
+ ret = ath11k_dp_srng_setup(ab, &dp->reo_status_ring, HAL_REO_STATUS,
+ 0, 0, DP_REO_STATUS_RING_SIZE);
+ if (ret) {
+ ath11k_warn(ab, "failed to set up reo_status ring :%d\n", ret);
+ goto err;
+ }
+
+ /* When hash based routing of rx packets is enabled, 32 entries are
+ * configured to map the hash values to the rings.
+ */
+ ab->hw_params.hw_ops->reo_setup(ab);
+
+ return 0;
+
+err:
+ ath11k_dp_srng_common_cleanup(ab);
+
+ return ret;
+}
+
+static void ath11k_dp_scatter_idle_link_desc_cleanup(struct ath11k_base *ab)
+{
+ struct ath11k_dp *dp = &ab->dp;
+ struct hal_wbm_idle_scatter_list *slist = dp->scatter_list;
+ int i;
+
+ for (i = 0; i < DP_IDLE_SCATTER_BUFS_MAX; i++) {
+ if (!slist[i].vaddr)
+ continue;
+
+ dma_free_coherent(ab->dev, HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX,
+ slist[i].vaddr, slist[i].paddr);
+ slist[i].vaddr = NULL;
+ }
+}
+
+static int ath11k_dp_scatter_idle_link_desc_setup(struct ath11k_base *ab,
+ int size,
+ u32 n_link_desc_bank,
+ u32 n_link_desc,
+ u32 last_bank_sz)
+{
+ struct ath11k_dp *dp = &ab->dp;
+ struct dp_link_desc_bank *link_desc_banks = dp->link_desc_banks;
+ struct hal_wbm_idle_scatter_list *slist = dp->scatter_list;
+ u32 n_entries_per_buf;
+ int num_scatter_buf, scatter_idx;
+ struct hal_wbm_link_desc *scatter_buf;
+ int align_bytes, n_entries;
+ dma_addr_t paddr;
+ int rem_entries;
+ int i;
+ int ret = 0;
+ u32 end_offset;
+
+ n_entries_per_buf = HAL_WBM_IDLE_SCATTER_BUF_SIZE /
+ ath11k_hal_srng_get_entrysize(ab, HAL_WBM_IDLE_LINK);
+ num_scatter_buf = DIV_ROUND_UP(size, HAL_WBM_IDLE_SCATTER_BUF_SIZE);
+
+ if (num_scatter_buf > DP_IDLE_SCATTER_BUFS_MAX)
+ return -EINVAL;
+
+ for (i = 0; i < num_scatter_buf; i++) {
+ slist[i].vaddr = dma_alloc_coherent(ab->dev,
+ HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX,
+ &slist[i].paddr, GFP_KERNEL);
+ if (!slist[i].vaddr) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ }
+
+ scatter_idx = 0;
+ scatter_buf = slist[scatter_idx].vaddr;
+ rem_entries = n_entries_per_buf;
+
+ for (i = 0; i < n_link_desc_bank; i++) {
+ align_bytes = link_desc_banks[i].vaddr -
+ link_desc_banks[i].vaddr_unaligned;
+ n_entries = (DP_LINK_DESC_ALLOC_SIZE_THRESH - align_bytes) /
+ HAL_LINK_DESC_SIZE;
+ paddr = link_desc_banks[i].paddr;
+ while (n_entries) {
+ ath11k_hal_set_link_desc_addr(scatter_buf, i, paddr);
+ n_entries--;
+ paddr += HAL_LINK_DESC_SIZE;
+ if (rem_entries) {
+ rem_entries--;
+ scatter_buf++;
+ continue;
+ }
+
+ rem_entries = n_entries_per_buf;
+ scatter_idx++;
+ scatter_buf = slist[scatter_idx].vaddr;
+ }
+ }
+
+ end_offset = (scatter_buf - slist[scatter_idx].vaddr) *
+ sizeof(struct hal_wbm_link_desc);
+ ath11k_hal_setup_link_idle_list(ab, slist, num_scatter_buf,
+ n_link_desc, end_offset);
+
+ return 0;
+
+err:
+ ath11k_dp_scatter_idle_link_desc_cleanup(ab);
+
+ return ret;
+}
+
+static void
+ath11k_dp_link_desc_bank_free(struct ath11k_base *ab,
+ struct dp_link_desc_bank *link_desc_banks)
+{
+ int i;
+
+ for (i = 0; i < DP_LINK_DESC_BANKS_MAX; i++) {
+ if (link_desc_banks[i].vaddr_unaligned) {
+ dma_free_coherent(ab->dev,
+ link_desc_banks[i].size,
+ link_desc_banks[i].vaddr_unaligned,
+ link_desc_banks[i].paddr_unaligned);
+ link_desc_banks[i].vaddr_unaligned = NULL;
+ }
+ }
+}
+
+static int ath11k_dp_link_desc_bank_alloc(struct ath11k_base *ab,
+ struct dp_link_desc_bank *desc_bank,
+ int n_link_desc_bank,
+ int last_bank_sz)
+{
+ struct ath11k_dp *dp = &ab->dp;
+ int i;
+ int ret = 0;
+ int desc_sz = DP_LINK_DESC_ALLOC_SIZE_THRESH;
+
+ for (i = 0; i < n_link_desc_bank; i++) {
+ if (i == (n_link_desc_bank - 1) && last_bank_sz)
+ desc_sz = last_bank_sz;
+
+ desc_bank[i].vaddr_unaligned =
+ dma_alloc_coherent(ab->dev, desc_sz,
+ &desc_bank[i].paddr_unaligned,
+ GFP_KERNEL);
+ if (!desc_bank[i].vaddr_unaligned) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ desc_bank[i].vaddr = PTR_ALIGN(desc_bank[i].vaddr_unaligned,
+ HAL_LINK_DESC_ALIGN);
+ desc_bank[i].paddr = desc_bank[i].paddr_unaligned +
+ ((unsigned long)desc_bank[i].vaddr -
+ (unsigned long)desc_bank[i].vaddr_unaligned);
+ desc_bank[i].size = desc_sz;
+ }
+
+ return 0;
+
+err:
+ ath11k_dp_link_desc_bank_free(ab, dp->link_desc_banks);
+
+ return ret;
+}
+
+void ath11k_dp_link_desc_cleanup(struct ath11k_base *ab,
+ struct dp_link_desc_bank *desc_bank,
+ u32 ring_type, struct dp_srng *ring)
+{
+ ath11k_dp_link_desc_bank_free(ab, desc_bank);
+
+ if (ring_type != HAL_RXDMA_MONITOR_DESC) {
+ ath11k_dp_srng_cleanup(ab, ring);
+ ath11k_dp_scatter_idle_link_desc_cleanup(ab);
+ }
+}
+
+static int ath11k_wbm_idle_ring_setup(struct ath11k_base *ab, u32 *n_link_desc)
+{
+ struct ath11k_dp *dp = &ab->dp;
+ u32 n_mpdu_link_desc, n_mpdu_queue_desc;
+ u32 n_tx_msdu_link_desc, n_rx_msdu_link_desc;
+ int ret = 0;
+
+ n_mpdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_MPDUS_PER_TID_MAX) /
+ HAL_NUM_MPDUS_PER_LINK_DESC;
+
+ n_mpdu_queue_desc = n_mpdu_link_desc /
+ HAL_NUM_MPDU_LINKS_PER_QUEUE_DESC;
+
+ n_tx_msdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_FLOWS_PER_TID *
+ DP_AVG_MSDUS_PER_FLOW) /
+ HAL_NUM_TX_MSDUS_PER_LINK_DESC;
+
+ n_rx_msdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_MPDUS_PER_TID_MAX *
+ DP_AVG_MSDUS_PER_MPDU) /
+ HAL_NUM_RX_MSDUS_PER_LINK_DESC;
+
+ *n_link_desc = n_mpdu_link_desc + n_mpdu_queue_desc +
+ n_tx_msdu_link_desc + n_rx_msdu_link_desc;
+
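+ /* If the total is not already a power of two, round it up to the
+ * next power of two, e.g. a total of 0x2fd0 link descriptors
+ * becomes 0x4000.
+ */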
+ if (*n_link_desc & (*n_link_desc - 1))
+ *n_link_desc = 1 << fls(*n_link_desc);
+
+ ret = ath11k_dp_srng_setup(ab, &dp->wbm_idle_ring,
+ HAL_WBM_IDLE_LINK, 0, 0, *n_link_desc);
+ if (ret) {
+ ath11k_warn(ab, "failed to setup wbm_idle_ring: %d\n", ret);
+ return ret;
+ }
+ return ret;
+}
+
+int ath11k_dp_link_desc_setup(struct ath11k_base *ab,
+ struct dp_link_desc_bank *link_desc_banks,
+ u32 ring_type, struct hal_srng *srng,
+ u32 n_link_desc)
+{
+ u32 tot_mem_sz;
+ u32 n_link_desc_bank, last_bank_sz;
+ u32 entry_sz, align_bytes, n_entries;
+ u32 paddr;
+ u32 *desc;
+ int i, ret;
+
+ tot_mem_sz = n_link_desc * HAL_LINK_DESC_SIZE;
+ tot_mem_sz += HAL_LINK_DESC_ALIGN;
+
+ if (tot_mem_sz <= DP_LINK_DESC_ALLOC_SIZE_THRESH) {
+ n_link_desc_bank = 1;
+ last_bank_sz = tot_mem_sz;
+ } else {
+ n_link_desc_bank = tot_mem_sz /
+ (DP_LINK_DESC_ALLOC_SIZE_THRESH -
+ HAL_LINK_DESC_ALIGN);
+ last_bank_sz = tot_mem_sz %
+ (DP_LINK_DESC_ALLOC_SIZE_THRESH -
+ HAL_LINK_DESC_ALIGN);
+
+ if (last_bank_sz)
+ n_link_desc_bank += 1;
+ }
+
+ if (n_link_desc_bank > DP_LINK_DESC_BANKS_MAX)
+ return -EINVAL;
+
+ ret = ath11k_dp_link_desc_bank_alloc(ab, link_desc_banks,
+ n_link_desc_bank, last_bank_sz);
+ if (ret)
+ return ret;
+
+ /* Setup link desc idle list for HW internal usage */
+ entry_sz = ath11k_hal_srng_get_entrysize(ab, ring_type);
+ tot_mem_sz = entry_sz * n_link_desc;
+
+ /* Setup scatter desc list when the total memory requirement is more */
+ if (tot_mem_sz > DP_LINK_DESC_ALLOC_SIZE_THRESH &&
+ ring_type != HAL_RXDMA_MONITOR_DESC) {
+ ret = ath11k_dp_scatter_idle_link_desc_setup(ab, tot_mem_sz,
+ n_link_desc_bank,
+ n_link_desc,
+ last_bank_sz);
+ if (ret) {
+ ath11k_warn(ab, "failed to setup scatting idle list descriptor :%d\n",
+ ret);
+ goto fail_desc_bank_free;
+ }
+
+ return 0;
+ }
+
+ spin_lock_bh(&srng->lock);
+
+ ath11k_hal_srng_access_begin(ab, srng);
+
+ for (i = 0; i < n_link_desc_bank; i++) {
+ align_bytes = link_desc_banks[i].vaddr -
+ link_desc_banks[i].vaddr_unaligned;
+ n_entries = (link_desc_banks[i].size - align_bytes) /
+ HAL_LINK_DESC_SIZE;
+ paddr = link_desc_banks[i].paddr;
+ while (n_entries &&
+ (desc = ath11k_hal_srng_src_get_next_entry(ab, srng))) {
+ ath11k_hal_set_link_desc_addr((struct hal_wbm_link_desc *)desc,
+ i, paddr);
+ n_entries--;
+ paddr += HAL_LINK_DESC_SIZE;
+ }
+ }
+
+ ath11k_hal_srng_access_end(ab, srng);
+
+ spin_unlock_bh(&srng->lock);
+
+ return 0;
+
+fail_desc_bank_free:
+ ath11k_dp_link_desc_bank_free(ab, link_desc_banks);
+
+ return ret;
+}
+
+int ath11k_dp_service_srng(struct ath11k_base *ab,
+ struct ath11k_ext_irq_grp *irq_grp,
+ int budget)
+{
+ struct napi_struct *napi = &irq_grp->napi;
+ const struct ath11k_hw_hal_params *hal_params;
+ int grp_id = irq_grp->grp_id;
+ int work_done = 0;
+ int i, j;
+ int tot_work_done = 0;
+
+ for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
+ if (BIT(ab->hw_params.hal_params->tcl2wbm_rbm_map[i].wbm_ring_num) &
+ ab->hw_params.ring_mask->tx[grp_id])
+ ath11k_dp_tx_completion_handler(ab, i);
+ }
+
+ if (ab->hw_params.ring_mask->rx_err[grp_id]) {
+ work_done = ath11k_dp_process_rx_err(ab, napi, budget);
+ budget -= work_done;
+ tot_work_done += work_done;
+ if (budget <= 0)
+ goto done;
+ }
+
+ if (ab->hw_params.ring_mask->rx_wbm_rel[grp_id]) {
+ work_done = ath11k_dp_rx_process_wbm_err(ab,
+ napi,
+ budget);
+ budget -= work_done;
+ tot_work_done += work_done;
+
+ if (budget <= 0)
+ goto done;
+ }
+
+ if (ab->hw_params.ring_mask->rx[grp_id]) {
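+ /* Service the highest-numbered reo dst ring set in the mask */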
+ i = fls(ab->hw_params.ring_mask->rx[grp_id]) - 1;
+ work_done = ath11k_dp_process_rx(ab, i, napi,
+ budget);
+ budget -= work_done;
+ tot_work_done += work_done;
+ if (budget <= 0)
+ goto done;
+ }
+
+ if (ab->hw_params.ring_mask->rx_mon_status[grp_id]) {
+ for (i = 0; i < ab->num_radios; i++) {
+ for (j = 0; j < ab->hw_params.num_rxdma_per_pdev; j++) {
+ int id = i * ab->hw_params.num_rxdma_per_pdev + j;
+
+ if (ab->hw_params.ring_mask->rx_mon_status[grp_id] &
+ BIT(id)) {
+ work_done =
+ ath11k_dp_rx_process_mon_rings(ab,
+ id,
+ napi, budget);
+ budget -= work_done;
+ tot_work_done += work_done;
+
+ if (budget <= 0)
+ goto done;
+ }
+ }
+ }
+ }
+
+ if (ab->hw_params.ring_mask->reo_status[grp_id])
+ ath11k_dp_process_reo_status(ab);
+
+ for (i = 0; i < ab->num_radios; i++) {
+ for (j = 0; j < ab->hw_params.num_rxdma_per_pdev; j++) {
+ int id = i * ab->hw_params.num_rxdma_per_pdev + j;
+
+ if (ab->hw_params.ring_mask->rxdma2host[grp_id] & BIT(id)) {
+ work_done = ath11k_dp_process_rxdma_err(ab, id, budget);
+ budget -= work_done;
+ tot_work_done += work_done;
+ }
+
+ if (budget <= 0)
+ goto done;
+
+ if (ab->hw_params.ring_mask->host2rxdma[grp_id] & BIT(id)) {
+ struct ath11k *ar = ath11k_ab_to_ar(ab, id);
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
+
+ hal_params = ab->hw_params.hal_params;
+ ath11k_dp_rxbufs_replenish(ab, id, rx_ring, 0,
+ hal_params->rx_buf_rbm);
+ }
+ }
+ }
+ /* TODO: Implement handler for other interrupts */
+
+done:
+ return tot_work_done;
+}
+EXPORT_SYMBOL(ath11k_dp_service_srng);
+
+void ath11k_dp_pdev_free(struct ath11k_base *ab)
+{
+ struct ath11k *ar;
+ int i;
+
+ timer_delete_sync(&ab->mon_reap_timer);
+
+ for (i = 0; i < ab->num_radios; i++) {
+ ar = ab->pdevs[i].ar;
+ ath11k_dp_rx_pdev_free(ab, i);
+ ath11k_debugfs_unregister(ar);
+ ath11k_dp_rx_pdev_mon_detach(ar);
+ }
+}
+
+void ath11k_dp_pdev_pre_alloc(struct ath11k_base *ab)
+{
+ struct ath11k *ar;
+ struct ath11k_pdev_dp *dp;
+ int i;
+ int j;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ ar = ab->pdevs[i].ar;
+ dp = &ar->dp;
+ dp->mac_id = i;
+ idr_init(&dp->rx_refill_buf_ring.bufs_idr);
+ spin_lock_init(&dp->rx_refill_buf_ring.idr_lock);
+ atomic_set(&dp->num_tx_pending, 0);
+ init_waitqueue_head(&dp->tx_empty_waitq);
+ for (j = 0; j < ab->hw_params.num_rxdma_per_pdev; j++) {
+ idr_init(&dp->rx_mon_status_refill_ring[j].bufs_idr);
+ spin_lock_init(&dp->rx_mon_status_refill_ring[j].idr_lock);
+ }
+ idr_init(&dp->rxdma_mon_buf_ring.bufs_idr);
+ spin_lock_init(&dp->rxdma_mon_buf_ring.idr_lock);
+ }
+}
+
+int ath11k_dp_pdev_alloc(struct ath11k_base *ab)
+{
+ struct ath11k *ar;
+ int ret;
+ int i;
+
+ /* TODO: Per-pdev rx ring, unlike the tx ring which is mapped to different ACs */
+ for (i = 0; i < ab->num_radios; i++) {
+ ar = ab->pdevs[i].ar;
+ ret = ath11k_dp_rx_pdev_alloc(ab, i);
+ if (ret) {
+ ath11k_warn(ab, "failed to allocate pdev rx for pdev_id :%d\n",
+ i);
+ goto err;
+ }
+ ret = ath11k_dp_rx_pdev_mon_attach(ar);
+ if (ret) {
+ ath11k_warn(ab, "failed to initialize mon pdev %d\n",
+ i);
+ goto err;
+ }
+ }
+
+ return 0;
+
+err:
+ ath11k_dp_pdev_free(ab);
+
+ return ret;
+}
+
+int ath11k_dp_htt_connect(struct ath11k_dp *dp)
+{
+ struct ath11k_htc_svc_conn_req conn_req;
+ struct ath11k_htc_svc_conn_resp conn_resp;
+ int status;
+
+ memset(&conn_req, 0, sizeof(conn_req));
+ memset(&conn_resp, 0, sizeof(conn_resp));
+
+ conn_req.ep_ops.ep_tx_complete = ath11k_dp_htt_htc_tx_complete;
+ conn_req.ep_ops.ep_rx_complete = ath11k_dp_htt_htc_t2h_msg_handler;
+
+ /* connect to control service */
+ conn_req.service_id = ATH11K_HTC_SVC_ID_HTT_DATA_MSG;
+
+ status = ath11k_htc_connect_service(&dp->ab->htc, &conn_req,
+ &conn_resp);
+
+ if (status)
+ return status;
+
+ dp->eid = conn_resp.eid;
+
+ return 0;
+}
+
+static void ath11k_dp_update_vdev_search(struct ath11k_vif *arvif)
+{
+ /* When v2_map_support is true: for STA mode, enable address
+ * search index; tcl uses the ast_hash value in the descriptor.
+ * When v2_map_support is false: for STA mode, don't enable
+ * address search index.
+ */
+ switch (arvif->vdev_type) {
+ case WMI_VDEV_TYPE_STA:
+ if (arvif->ar->ab->hw_params.htt_peer_map_v2) {
+ arvif->hal_addr_search_flags = HAL_TX_ADDRX_EN;
+ arvif->search_type = HAL_TX_ADDR_SEARCH_INDEX;
+ } else {
+ arvif->hal_addr_search_flags = HAL_TX_ADDRY_EN;
+ arvif->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
+ }
+ break;
+ case WMI_VDEV_TYPE_AP:
+ case WMI_VDEV_TYPE_IBSS:
+ arvif->hal_addr_search_flags = HAL_TX_ADDRX_EN;
+ arvif->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
+ break;
+ case WMI_VDEV_TYPE_MONITOR:
+ default:
+ return;
+ }
+}
+
+void ath11k_dp_vdev_tx_attach(struct ath11k *ar, struct ath11k_vif *arvif)
+{
+ arvif->tcl_metadata |= FIELD_PREP(HTT_TCL_META_DATA_TYPE, 1) |
+ FIELD_PREP(HTT_TCL_META_DATA_VDEV_ID,
+ arvif->vdev_id) |
+ FIELD_PREP(HTT_TCL_META_DATA_PDEV_ID,
+ ar->pdev->pdev_id);
+
+ /* set HTT extension valid bit to 0 by default */
+ arvif->tcl_metadata &= ~HTT_TCL_META_DATA_VALID_HTT;
+
+ ath11k_dp_update_vdev_search(arvif);
+}
+
+static int ath11k_dp_tx_pending_cleanup(int buf_id, void *skb, void *ctx)
+{
+ struct ath11k_base *ab = ctx;
+ struct sk_buff *msdu = skb;
+
+ dma_unmap_single(ab->dev, ATH11K_SKB_CB(msdu)->paddr, msdu->len,
+ DMA_TO_DEVICE);
+
+ dev_kfree_skb_any(msdu);
+
+ return 0;
+}
+
+void ath11k_dp_free(struct ath11k_base *ab)
+{
+ struct ath11k_dp *dp = &ab->dp;
+ int i;
+
+ ath11k_dp_link_desc_cleanup(ab, dp->link_desc_banks,
+ HAL_WBM_IDLE_LINK, &dp->wbm_idle_ring);
+
+ ath11k_dp_srng_common_cleanup(ab);
+
+ ath11k_dp_reo_cmd_list_cleanup(ab);
+
+ for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
+ spin_lock_bh(&dp->tx_ring[i].tx_idr_lock);
+ idr_for_each(&dp->tx_ring[i].txbuf_idr,
+ ath11k_dp_tx_pending_cleanup, ab);
+ idr_destroy(&dp->tx_ring[i].txbuf_idr);
+ spin_unlock_bh(&dp->tx_ring[i].tx_idr_lock);
+ kfree(dp->tx_ring[i].tx_status);
+ }
+
+ /* Deinit any SOC level resource */
+}
+
+int ath11k_dp_alloc(struct ath11k_base *ab)
+{
+ struct ath11k_dp *dp = &ab->dp;
+ struct hal_srng *srng = NULL;
+ size_t size = 0;
+ u32 n_link_desc = 0;
+ int ret;
+ int i;
+
+ dp->ab = ab;
+
+ INIT_LIST_HEAD(&dp->reo_cmd_list);
+ INIT_LIST_HEAD(&dp->reo_cmd_cache_flush_list);
+ INIT_LIST_HEAD(&dp->dp_full_mon_mpdu_list);
+ spin_lock_init(&dp->reo_cmd_lock);
+
+ dp->reo_cmd_cache_flush_count = 0;
+
+ ret = ath11k_wbm_idle_ring_setup(ab, &n_link_desc);
+ if (ret) {
+ ath11k_warn(ab, "failed to setup wbm_idle_ring: %d\n", ret);
+ return ret;
+ }
+
+ srng = &ab->hal.srng_list[dp->wbm_idle_ring.ring_id];
+
+ ret = ath11k_dp_link_desc_setup(ab, dp->link_desc_banks,
+ HAL_WBM_IDLE_LINK, srng, n_link_desc);
+ if (ret) {
+ ath11k_warn(ab, "failed to setup link desc: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath11k_dp_srng_common_setup(ab);
+ if (ret)
+ goto fail_link_desc_cleanup;
+
+ size = sizeof(struct hal_wbm_release_ring) * DP_TX_COMP_RING_SIZE;
+
+ for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
+ idr_init(&dp->tx_ring[i].txbuf_idr);
+ spin_lock_init(&dp->tx_ring[i].tx_idr_lock);
+ dp->tx_ring[i].tcl_data_ring_id = i;
+
+ dp->tx_ring[i].tx_status_head = 0;
+ dp->tx_ring[i].tx_status_tail = DP_TX_COMP_RING_SIZE - 1;
+ dp->tx_ring[i].tx_status = kmalloc(size, GFP_KERNEL);
+ if (!dp->tx_ring[i].tx_status) {
+ ret = -ENOMEM;
+ goto fail_cmn_srng_cleanup;
+ }
+ }
+
+ for (i = 0; i < HAL_DSCP_TID_MAP_TBL_NUM_ENTRIES_MAX; i++)
+ ath11k_hal_tx_set_dscp_tid_map(ab, i);
+
+ /* Init any SOC level resource for DP */
+
+ return 0;
+
+fail_cmn_srng_cleanup:
+ ath11k_dp_srng_common_cleanup(ab);
+
+fail_link_desc_cleanup:
+ ath11k_dp_link_desc_cleanup(ab, dp->link_desc_banks,
+ HAL_WBM_IDLE_LINK, &dp->wbm_idle_ring);
+
+ return ret;
+}
+
+static void ath11k_dp_shadow_timer_handler(struct timer_list *t)
+{
+ struct ath11k_hp_update_timer *update_timer = timer_container_of(update_timer,
+ t,
+ timer);
+ struct ath11k_base *ab = update_timer->ab;
+ struct hal_srng *srng = &ab->hal.srng_list[update_timer->ring_id];
+
+ spin_lock_bh(&srng->lock);
+
+ /* When the timer fires, the handler checks whether any new TX
+ * happened in the meantime. It updates the HP only when there were
+ * no TX operations during the timeout interval, and then stops the
+ * timer. The timer is started again when TX happens.
+ */
+ if (update_timer->timer_tx_num != update_timer->tx_num) {
+ update_timer->timer_tx_num = update_timer->tx_num;
+ mod_timer(&update_timer->timer, jiffies +
+ msecs_to_jiffies(update_timer->interval));
+ } else {
+ update_timer->started = false;
+ ath11k_hal_srng_shadow_update_hp_tp(ab, srng);
+ }
+
+ spin_unlock_bh(&srng->lock);
+}
+
+void ath11k_dp_shadow_start_timer(struct ath11k_base *ab,
+ struct hal_srng *srng,
+ struct ath11k_hp_update_timer *update_timer)
+{
+ lockdep_assert_held(&srng->lock);
+
+ if (!ab->hw_params.supports_shadow_regs)
+ return;
+
+ update_timer->tx_num++;
+
+ if (update_timer->started)
+ return;
+
+ update_timer->started = true;
+ update_timer->timer_tx_num = update_timer->tx_num;
+ mod_timer(&update_timer->timer, jiffies +
+ msecs_to_jiffies(update_timer->interval));
+}
+
+void ath11k_dp_shadow_stop_timer(struct ath11k_base *ab,
+ struct ath11k_hp_update_timer *update_timer)
+{
+ if (!ab->hw_params.supports_shadow_regs)
+ return;
+
+ if (!update_timer->init)
+ return;
+
+ timer_delete_sync(&update_timer->timer);
+}
+
+void ath11k_dp_shadow_init_timer(struct ath11k_base *ab,
+ struct ath11k_hp_update_timer *update_timer,
+ u32 interval, u32 ring_id)
+{
+ if (!ab->hw_params.supports_shadow_regs)
+ return;
+
+ update_timer->tx_num = 0;
+ update_timer->timer_tx_num = 0;
+ update_timer->ab = ab;
+ update_timer->ring_id = ring_id;
+ update_timer->interval = interval;
+ update_timer->init = true;
+ timer_setup(&update_timer->timer,
+ ath11k_dp_shadow_timer_handler, 0);
+}
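+
+/* Typical shadow timer lifecycle (illustrative): ath11k_dp_shadow_init_timer()
+ * is called once when a ring is set up, ath11k_dp_shadow_start_timer() is
+ * called under srng->lock whenever new TX is queued, and
+ * ath11k_dp_shadow_stop_timer() is called at teardown.
+ */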
diff --git a/drivers/net/wireless/ath/ath11k/dp.h b/drivers/net/wireless/ath/ath11k/dp.h
new file mode 100644
index 000000000000..7a55afd33be8
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/dp.h
@@ -0,0 +1,1694 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2023, 2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef ATH11K_DP_H
+#define ATH11K_DP_H
+
+#include "hal_rx.h"
+
+#define MAX_RXDMA_PER_PDEV 2
+
+struct ath11k_base;
+struct ath11k_peer;
+struct ath11k_dp;
+struct ath11k_vif;
+struct hal_tcl_status_ring;
+struct ath11k_ext_irq_grp;
+
+struct dp_rx_tid {
+ u8 tid;
+ dma_addr_t paddr;
+ u32 size;
+ u32 ba_win_sz;
+ bool active;
+
+ /* Info related to rx fragments */
+ u32 cur_sn;
+ u16 last_frag_no;
+ u16 rx_frag_bitmap;
+
+ struct sk_buff_head rx_frags;
+ struct hal_reo_dest_ring *dst_ring_desc;
+
+ /* Timer info related to fragments */
+ struct timer_list frag_timer;
+ struct ath11k_base *ab;
+ u32 *vaddr_unaligned;
+ dma_addr_t paddr_unaligned;
+ u32 unaligned_size;
+};
+
+#define DP_REO_DESC_FREE_THRESHOLD 64
+#define DP_REO_DESC_FREE_TIMEOUT_MS 1000
+#define DP_MON_PURGE_TIMEOUT_MS 100
+#define DP_MON_SERVICE_BUDGET 128
+
+struct dp_reo_cache_flush_elem {
+ struct list_head list;
+ struct dp_rx_tid data;
+ unsigned long ts;
+};
+
+struct dp_reo_cmd {
+ struct list_head list;
+ struct dp_rx_tid data;
+ int cmd_num;
+ void (*handler)(struct ath11k_dp *, void *,
+ enum hal_reo_cmd_status status);
+};
+
+struct dp_srng {
+ u32 *vaddr_unaligned;
+ u32 *vaddr;
+ dma_addr_t paddr_unaligned;
+ dma_addr_t paddr;
+ int size;
+ u32 ring_id;
+ u8 cached;
+};
+
+struct dp_rxdma_ring {
+ struct dp_srng refill_buf_ring;
+ struct idr bufs_idr;
+ /* Protects bufs_idr */
+ spinlock_t idr_lock;
+ int bufs_max;
+};
+
+#define ATH11K_TX_COMPL_NEXT(x) (((x) + 1) % DP_TX_COMP_RING_SIZE)
+
+struct dp_tx_ring {
+ u8 tcl_data_ring_id;
+ struct dp_srng tcl_data_ring;
+ struct dp_srng tcl_comp_ring;
+ struct idr txbuf_idr;
+ /* Protects txbuf_idr and num_pending */
+ spinlock_t tx_idr_lock;
+ struct hal_wbm_release_ring *tx_status;
+ int tx_status_head;
+ int tx_status_tail;
+};
+
+enum dp_mon_status_buf_state {
+ /* PPDU id matches in dst ring and status ring */
+ DP_MON_STATUS_MATCH,
+ /* status ring dma is not done */
+ DP_MON_STATUS_NO_DMA,
+ /* status ring is lagging, reap status ring */
+ DP_MON_STATUS_LAG,
+ /* status ring is leading, reap dst ring and drop */
+ DP_MON_STATUS_LEAD,
+ /* replenish monitor status ring */
+ DP_MON_STATUS_REPLINISH,
+};
+
+struct ath11k_pdev_mon_stats {
+ u32 status_ppdu_state;
+ u32 status_ppdu_start;
+ u32 status_ppdu_end;
+ u32 status_ppdu_compl;
+ u32 status_ppdu_start_mis;
+ u32 status_ppdu_end_mis;
+ u32 status_ppdu_done;
+ u32 dest_ppdu_done;
+ u32 dest_mpdu_done;
+ u32 dest_mpdu_drop;
+ u32 dup_mon_linkdesc_cnt;
+ u32 dup_mon_buf_cnt;
+ u32 dest_mon_stuck;
+ u32 dest_mon_not_reaped;
+};
+
+struct dp_full_mon_mpdu {
+ struct list_head list;
+ struct sk_buff *head;
+ struct sk_buff *tail;
+};
+
+struct dp_link_desc_bank {
+ void *vaddr_unaligned;
+ void *vaddr;
+ dma_addr_t paddr_unaligned;
+ dma_addr_t paddr;
+ u32 size;
+};
+
+/* Size to enforce scatter idle list mode */
+#define DP_LINK_DESC_ALLOC_SIZE_THRESH 0x200000
+#define DP_LINK_DESC_BANKS_MAX 8
+
+#define DP_RX_DESC_COOKIE_INDEX_MAX 0x3ffff
+#define DP_RX_DESC_COOKIE_POOL_ID_MAX 0x1c0000
+#define DP_RX_DESC_COOKIE_MAX \
+ (DP_RX_DESC_COOKIE_INDEX_MAX | DP_RX_DESC_COOKIE_POOL_ID_MAX)
+#define DP_NOT_PPDU_ID_WRAP_AROUND 20000
+
+enum ath11k_dp_ppdu_state {
+ DP_PPDU_STATUS_START,
+ DP_PPDU_STATUS_DONE,
+};
+
+struct ath11k_mon_data {
+ struct dp_link_desc_bank link_desc_banks[DP_LINK_DESC_BANKS_MAX];
+ struct hal_rx_mon_ppdu_info mon_ppdu_info;
+
+ u32 mon_ppdu_status;
+ u32 mon_last_buf_cookie;
+ u64 mon_last_linkdesc_paddr;
+ u16 chan_noise_floor;
+ bool hold_mon_dst_ring;
+ enum dp_mon_status_buf_state buf_state;
+ dma_addr_t mon_status_paddr;
+ struct dp_full_mon_mpdu *mon_mpdu;
+ struct hal_sw_mon_ring_entries sw_mon_entries;
+ struct ath11k_pdev_mon_stats rx_mon_stats;
+ /* lock for monitor data */
+ spinlock_t mon_lock;
+};
+
+struct ath11k_pdev_dp {
+ u32 mac_id;
+ u32 mon_dest_ring_stuck_cnt;
+ atomic_t num_tx_pending;
+ wait_queue_head_t tx_empty_waitq;
+ struct dp_rxdma_ring rx_refill_buf_ring;
+ struct dp_srng rx_mac_buf_ring[MAX_RXDMA_PER_PDEV];
+ struct dp_srng rxdma_err_dst_ring[MAX_RXDMA_PER_PDEV];
+ struct dp_srng rxdma_mon_dst_ring;
+ struct dp_srng rxdma_mon_desc_ring;
+
+ struct dp_rxdma_ring rxdma_mon_buf_ring;
+ struct dp_rxdma_ring rx_mon_status_refill_ring[MAX_RXDMA_PER_PDEV];
+ struct ieee80211_rx_status rx_status;
+ struct ath11k_mon_data mon_data;
+};
+
+#define DP_NUM_CLIENTS_MAX 64
+#define DP_AVG_TIDS_PER_CLIENT 2
+#define DP_NUM_TIDS_MAX (DP_NUM_CLIENTS_MAX * DP_AVG_TIDS_PER_CLIENT)
+#define DP_AVG_MSDUS_PER_FLOW 128
+#define DP_AVG_FLOWS_PER_TID 2
+#define DP_AVG_MPDUS_PER_TID_MAX 128
+#define DP_AVG_MSDUS_PER_MPDU 4
+
+#define DP_RX_HASH_ENABLE 1 /* Enable hash based Rx steering */
+
+#define DP_BA_WIN_SZ_MAX 256
+
+#define DP_TCL_NUM_RING_MAX 3
+#define DP_TCL_NUM_RING_MAX_QCA6390 1
+
+#define DP_IDLE_SCATTER_BUFS_MAX 16
+
+#define DP_WBM_RELEASE_RING_SIZE 64
+#define DP_TCL_DATA_RING_SIZE 512
+#define DP_TCL_DATA_RING_SIZE_WCN6750 2048
+#define DP_TX_COMP_RING_SIZE 32768
+#define DP_TX_IDR_SIZE DP_TX_COMP_RING_SIZE
+#define DP_TCL_CMD_RING_SIZE 32
+#define DP_TCL_STATUS_RING_SIZE 32
+#define DP_REO_DST_RING_MAX 4
+#define DP_REO_DST_RING_SIZE 2048
+#define DP_REO_REINJECT_RING_SIZE 32
+#define DP_RX_RELEASE_RING_SIZE 1024
+#define DP_REO_EXCEPTION_RING_SIZE 128
+#define DP_REO_CMD_RING_SIZE 256
+#define DP_REO_STATUS_RING_SIZE 2048
+#define DP_RXDMA_BUF_RING_SIZE 4096
+#define DP_RXDMA_REFILL_RING_SIZE 2048
+#define DP_RXDMA_ERR_DST_RING_SIZE 1024
+#define DP_RXDMA_MON_STATUS_RING_SIZE 1024
+#define DP_RXDMA_MONITOR_BUF_RING_SIZE 4096
+#define DP_RXDMA_MONITOR_DST_RING_SIZE 2048
+#define DP_RXDMA_MONITOR_DESC_RING_SIZE 4096
+
+#define DP_RX_RELEASE_RING_NUM 3
+
+#define DP_RX_BUFFER_SIZE 2048
+#define DP_RX_BUFFER_SIZE_LITE 1024
+#define DP_RX_BUFFER_ALIGN_SIZE 128
+
+#define DP_RXDMA_BUF_COOKIE_BUF_ID GENMASK(17, 0)
+#define DP_RXDMA_BUF_COOKIE_PDEV_ID GENMASK(20, 18)
+
+#define DP_HW2SW_MACID(mac_id) ((mac_id) ? ((mac_id) - 1) : 0)
+#define DP_SW2HW_MACID(mac_id) ((mac_id) + 1)
+
+#define DP_TX_DESC_ID_MAC_ID GENMASK(1, 0)
+#define DP_TX_DESC_ID_MSDU_ID GENMASK(18, 2)
+#define DP_TX_DESC_ID_POOL_ID GENMASK(20, 19)
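+
+/* Example (illustrative, hypothetical mac_id/msdu_id/pool_id values):
+ * composing and decomposing a tx descriptor id with the masks above:
+ *
+ * desc_id = FIELD_PREP(DP_TX_DESC_ID_MAC_ID, mac_id) |
+ *           FIELD_PREP(DP_TX_DESC_ID_MSDU_ID, msdu_id) |
+ *           FIELD_PREP(DP_TX_DESC_ID_POOL_ID, pool_id);
+ * msdu_id = FIELD_GET(DP_TX_DESC_ID_MSDU_ID, desc_id);
+ */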
+
+#define ATH11K_SHADOW_DP_TIMER_INTERVAL 20
+#define ATH11K_SHADOW_CTRL_TIMER_INTERVAL 10
+
+struct ath11k_hp_update_timer {
+ struct timer_list timer;
+ bool started;
+ bool init;
+ u32 tx_num;
+ u32 timer_tx_num;
+ u32 ring_id;
+ u32 interval;
+ struct ath11k_base *ab;
+};
+
+struct ath11k_dp {
+ struct ath11k_base *ab;
+ enum ath11k_htc_ep_id eid;
+ struct completion htt_tgt_version_received;
+ u8 htt_tgt_ver_major;
+ u8 htt_tgt_ver_minor;
+ struct dp_link_desc_bank link_desc_banks[DP_LINK_DESC_BANKS_MAX];
+ struct dp_srng wbm_idle_ring;
+ struct dp_srng wbm_desc_rel_ring;
+ struct dp_srng tcl_cmd_ring;
+ struct dp_srng tcl_status_ring;
+ struct dp_srng reo_reinject_ring;
+ struct dp_srng rx_rel_ring;
+ struct dp_srng reo_except_ring;
+ struct dp_srng reo_cmd_ring;
+ struct dp_srng reo_status_ring;
+ struct dp_srng reo_dst_ring[DP_REO_DST_RING_MAX];
+ struct dp_tx_ring tx_ring[DP_TCL_NUM_RING_MAX];
+ struct hal_wbm_idle_scatter_list scatter_list[DP_IDLE_SCATTER_BUFS_MAX];
+ struct list_head reo_cmd_list;
+ struct list_head reo_cmd_cache_flush_list;
+ struct list_head dp_full_mon_mpdu_list;
+ u32 reo_cmd_cache_flush_count;
+ /* Protects access to the fields below:
+ * - reo_cmd_list
+ * - reo_cmd_cache_flush_list
+ * - reo_cmd_cache_flush_count
+ */
+ spinlock_t reo_cmd_lock;
+ struct ath11k_hp_update_timer reo_cmd_timer;
+ struct ath11k_hp_update_timer tx_ring_timer[DP_TCL_NUM_RING_MAX];
+};
+
+/* HTT definitions */
+
+#define HTT_TCL_META_DATA_TYPE BIT(0)
+#define HTT_TCL_META_DATA_VALID_HTT BIT(1)
+
+/* vdev meta data */
+#define HTT_TCL_META_DATA_VDEV_ID GENMASK(9, 2)
+#define HTT_TCL_META_DATA_PDEV_ID GENMASK(11, 10)
+#define HTT_TCL_META_DATA_HOST_INSPECTED BIT(12)
+
+/* peer meta data */
+#define HTT_TCL_META_DATA_PEER_ID GENMASK(15, 2)
+
+#define HTT_TX_WBM_COMP_STATUS_OFFSET 8
+
+#define HTT_INVALID_PEER_ID 0xffff
+
+/* HTT tx completion is overlaid in wbm_release_ring */
+#define HTT_TX_WBM_COMP_INFO0_STATUS GENMASK(12, 9)
+#define HTT_TX_WBM_COMP_INFO0_REINJECT_REASON GENMASK(16, 13)
+
+#define HTT_TX_WBM_COMP_INFO1_ACK_RSSI GENMASK(31, 24)
+#define HTT_TX_WBM_COMP_INFO2_SW_PEER_ID GENMASK(15, 0)
+#define HTT_TX_WBM_COMP_INFO2_VALID BIT(21)
+
+struct htt_tx_wbm_completion {
+ u32 info0;
+ u32 info1;
+ u32 info2;
+ u32 info3;
+} __packed;
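+
+/* Example (illustrative): reading fields from an HTT tx completion overlaid
+ * on a wbm_release_ring descriptor; tx_compl is a hypothetical pointer to
+ * such a descriptor:
+ *
+ * status = FIELD_GET(HTT_TX_WBM_COMP_INFO0_STATUS, tx_compl->info0);
+ * ack_rssi = FIELD_GET(HTT_TX_WBM_COMP_INFO1_ACK_RSSI, tx_compl->info1);
+ */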
+
+enum htt_h2t_msg_type {
+ HTT_H2T_MSG_TYPE_VERSION_REQ = 0,
+ HTT_H2T_MSG_TYPE_SRING_SETUP = 0xb,
+ HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG = 0xc,
+ HTT_H2T_MSG_TYPE_EXT_STATS_CFG = 0x10,
+ HTT_H2T_MSG_TYPE_PPDU_STATS_CFG = 0x11,
+ HTT_H2T_MSG_TYPE_RX_FULL_MONITOR_MODE = 0x17,
+};
+
+#define HTT_VER_REQ_INFO_MSG_ID GENMASK(7, 0)
+
+struct htt_ver_req_cmd {
+ u32 ver_reg_info;
+} __packed;
+
+enum htt_srng_ring_type {
+ HTT_HW_TO_SW_RING,
+ HTT_SW_TO_HW_RING,
+ HTT_SW_TO_SW_RING,
+};
+
+enum htt_srng_ring_id {
+ HTT_RXDMA_HOST_BUF_RING,
+ HTT_RXDMA_MONITOR_STATUS_RING,
+ HTT_RXDMA_MONITOR_BUF_RING,
+ HTT_RXDMA_MONITOR_DESC_RING,
+ HTT_RXDMA_MONITOR_DEST_RING,
+ HTT_HOST1_TO_FW_RXBUF_RING,
+ HTT_HOST2_TO_FW_RXBUF_RING,
+ HTT_RXDMA_NON_MONITOR_DEST_RING,
+};
+
+/* host -> target HTT_SRING_SETUP message
+ *
+ * After the target is booted up, the host can send a SRING setup message for
+ * each host facing LMAC SRING. The target sets up HW registers based
+ * on the setup message and confirms back to the host if response_required is
+ * set. The host should wait for the confirmation message before sending a new
+ * SRING setup message.
+ *
+ * The message would appear as follows:
+ *
+ * |31 24|23 20|19|18 16|15|14 8|7 0|
+ * |----------------+-----------------+----------------+------------------|
+ * | ring_type | ring_id | pdev_id | msg_type |
+ * |----------------------------------------------------------------------|
+ * | ring_base_addr_lo |
+ * |----------------------------------------------------------------------|
+ * | ring_base_addr_hi |
+ * |----------------------------------------------------------------------|
+ * |ring_misc_cfg_flag|ring_entry_size| ring_size |
+ * |----------------------------------------------------------------------|
+ * | ring_head_offset32_remote_addr_lo |
+ * |----------------------------------------------------------------------|
+ * | ring_head_offset32_remote_addr_hi |
+ * |----------------------------------------------------------------------|
+ * | ring_tail_offset32_remote_addr_lo |
+ * |----------------------------------------------------------------------|
+ * | ring_tail_offset32_remote_addr_hi |
+ * |----------------------------------------------------------------------|
+ * | ring_msi_addr_lo |
+ * |----------------------------------------------------------------------|
+ * | ring_msi_addr_hi |
+ * |----------------------------------------------------------------------|
+ * | ring_msi_data |
+ * |----------------------------------------------------------------------|
+ * | intr_timer_th |IM| intr_batch_counter_th |
+ * |----------------------------------------------------------------------|
+ * | reserved |RR|PTCF| intr_low_threshold |
+ * |----------------------------------------------------------------------|
+ * Where
+ * IM = sw_intr_mode
+ * RR = response_required
+ * PTCF = prefetch_timer_cfg
+ *
+ * The message is interpreted as follows:
+ * dword0 - b'0:7 - msg_type: This will be set to
+ * HTT_H2T_MSG_TYPE_SRING_SETUP
+ * b'8:15 - pdev_id:
+ * 0 (for rings at SOC/UMAC level),
+ * 1/2/3 mac id (for rings at LMAC level)
+ * b'16:23 - ring_id: identifies which ring is to be set up;
+ * more details can be found in enum htt_srng_ring_id
+ * b'24:31 - ring_type: identifies the type of host ring;
+ * more details can be found in enum htt_srng_ring_type
+ * dword1 - b'0:31 - ring_base_addr_lo: Lower 32bits of ring base address
+ * dword2 - b'0:31 - ring_base_addr_hi: Upper 32bits of ring base address
+ * dword3 - b'0:15 - ring_size: size of the ring in units of 4-byte words
+ * b'16:23 - ring_entry_size: Size of each entry in 4-byte word units
+ * b'24:31 - ring_misc_cfg_flag: Valid only for HW_TO_SW_RING and
+ * SW_TO_HW_RING.
+ * Refer to HTT_SRING_SETUP_RING_MISC_CFG_RING defs.
+ * dword4 - b'0:31 - ring_head_off32_remote_addr_lo:
+ * Lower 32 bits of memory address of the remote variable
+ * storing the 4-byte word offset that identifies the head
+ * element within the ring.
+ * (The head offset variable has type u32.)
+ * Valid for HW_TO_SW and SW_TO_SW rings.
+ * dword5 - b'0:31 - ring_head_off32_remote_addr_hi:
+ * Upper 32 bits of memory address of the remote variable
+ * storing the 4-byte word offset that identifies the head
+ * element within the ring.
+ * (The head offset variable has type u32.)
+ * Valid for HW_TO_SW and SW_TO_SW rings.
+ * dword6 - b'0:31 - ring_tail_off32_remote_addr_lo:
+ * Lower 32 bits of memory address of the remote variable
+ * storing the 4-byte word offset that identifies the tail
+ * element within the ring.
+ * (The tail offset variable has type u32.)
+ * Valid for HW_TO_SW and SW_TO_SW rings.
+ * dword7 - b'0:31 - ring_tail_off32_remote_addr_hi:
+ * Upper 32 bits of memory address of the remote variable
+ * storing the 4-byte word offset that identifies the tail
+ * element within the ring.
+ * (The tail offset variable has type u32.)
+ * Valid for HW_TO_SW and SW_TO_SW rings.
+ * dword8 - b'0:31 - ring_msi_addr_lo: Lower 32bits of MSI cfg address
+ * valid only for HW_TO_SW_RING and SW_TO_HW_RING
+ * dword9 - b'0:31 - ring_msi_addr_hi: Upper 32bits of MSI cfg address
+ * valid only for HW_TO_SW_RING and SW_TO_HW_RING
+ * dword10 - b'0:31 - ring_msi_data: MSI data
+ * Refer to HTT_SRING_SETUP_RING_MSC_CFG_xxx defs
+ * valid only for HW_TO_SW_RING and SW_TO_HW_RING
+ * dword11 - b'0:14 - intr_batch_counter_th:
+ * batch counter threshold is in units of 4-byte words.
+ * HW internally maintains and increments batch count.
+ * (see the SRING spec for a detailed description).
+ * When batch count reaches threshold value, an interrupt
+ * is generated by HW.
+ * b'15 - sw_intr_mode:
+ * This configuration shall be static.
+ * Only programmed at power up.
+ * 0: generate pulse style sw interrupts
+ * 1: generate level style sw interrupts
+ * b'16:31 - intr_timer_th:
+ * The timer init value when the timer is idle or is
+ * initialized to start downcounting.
+ * In 8us units (to cover a range of 0 to 524 ms)
+ * dword12 - b'0:15 - intr_low_threshold:
+ * Used only by the consumer ring to generate ring_sw_int_p.
+ * Ring entries low threshold water mark, which is used
+ * in combination with the interrupt timer as well as
+ * the clearing of the level interrupt.
+ * b'16:18 - prefetch_timer_cfg:
+ * Used only by Consumer ring to set timer mode to
+ * support Application prefetch handling.
+ * The external tail offset/pointer will be updated
+ * at following intervals:
+ * 3'b000: (Prefetch feature disabled; used only for debug)
+ * 3'b001: 1 usec
+ * 3'b010: 4 usec
+ * 3'b011: 8 usec (default)
+ * 3'b100: 16 usec
+ * Others: Reserved
+ * b'19 - response_required:
+ * Host needs HTT_T2H_MSG_TYPE_SRING_SETUP_DONE as response
+ * b'20:31 - reserved: reserved for future use
+ */
+
+#define HTT_SRNG_SETUP_CMD_INFO0_MSG_TYPE GENMASK(7, 0)
+#define HTT_SRNG_SETUP_CMD_INFO0_PDEV_ID GENMASK(15, 8)
+#define HTT_SRNG_SETUP_CMD_INFO0_RING_ID GENMASK(23, 16)
+#define HTT_SRNG_SETUP_CMD_INFO0_RING_TYPE GENMASK(31, 24)
+
+#define HTT_SRNG_SETUP_CMD_INFO1_RING_SIZE GENMASK(15, 0)
+#define HTT_SRNG_SETUP_CMD_INFO1_RING_ENTRY_SIZE GENMASK(23, 16)
+#define HTT_SRNG_SETUP_CMD_INFO1_RING_LOOP_CNT_DIS BIT(25)
+#define HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_MSI_SWAP BIT(27)
+#define HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_HOST_FW_SWAP BIT(28)
+#define HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_TLV_SWAP BIT(29)
+
+#define HTT_SRNG_SETUP_CMD_INTR_INFO_BATCH_COUNTER_THRESH GENMASK(14, 0)
+#define HTT_SRNG_SETUP_CMD_INTR_INFO_SW_INTR_MODE BIT(15)
+#define HTT_SRNG_SETUP_CMD_INTR_INFO_INTR_TIMER_THRESH GENMASK(31, 16)
+
+#define HTT_SRNG_SETUP_CMD_INFO2_INTR_LOW_THRESH GENMASK(15, 0)
+#define HTT_SRNG_SETUP_CMD_INFO2_PRE_FETCH_TIMER_CFG BIT(16)
+#define HTT_SRNG_SETUP_CMD_INFO2_RESPONSE_REQUIRED BIT(19)
+
+struct htt_srng_setup_cmd {
+ u32 info0;
+ u32 ring_base_addr_lo;
+ u32 ring_base_addr_hi;
+ u32 info1;
+ u32 ring_head_off32_remote_addr_lo;
+ u32 ring_head_off32_remote_addr_hi;
+ u32 ring_tail_off32_remote_addr_lo;
+ u32 ring_tail_off32_remote_addr_hi;
+ u32 ring_msi_addr_lo;
+ u32 ring_msi_addr_hi;
+ u32 msi_data;
+ u32 intr_info;
+ u32 info2;
+} __packed;
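+
+/* Example (illustrative, hypothetical pdev_id/ring_id/ring_type values):
+ * packing dword0 of the SRING setup command with the masks above:
+ *
+ * cmd->info0 = FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_MSG_TYPE,
+ *                         HTT_H2T_MSG_TYPE_SRING_SETUP) |
+ *              FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_PDEV_ID, pdev_id) |
+ *              FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_RING_ID, ring_id) |
+ *              FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_RING_TYPE, ring_type);
+ */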
+
+/* host -> target FW PPDU_STATS config message
+ *
+ * @details
+ * The following field definitions describe the format of the HTT host
+ * to target FW PPDU_STATS_CFG msg.
+ * The message allows the host to configure the PPDU_STATS_IND messages
+ * produced by the target.
+ *
+ * |31 24|23 16|15 8|7 0|
+ * |-----------------------------------------------------------|
+ * | REQ bit mask | pdev_mask | msg type |
+ * |-----------------------------------------------------------|
+ * Header fields:
+ * - MSG_TYPE
+ * Bits 7:0
+ * Purpose: identifies this is a req to configure ppdu_stats_ind from target
+ * Value: 0x11
+ * - PDEV_MASK
+ * Bits 8:15
+ * Purpose: identifies which pdevs this PPDU stats configuration applies to
+ * Value: This is an overloaded field; refer to the usage and interpretation
+ * of PDEV in the interface document.
+ * Bit 8 : Reserved for SOC stats
+ * Bit 9 - 15 : Indicates PDEV_MASK in DBDC
+ * Indicates MACID_MASK in DBS
+ * - REQ_TLV_BIT_MASK
+ * Bits 16:31
+ * Purpose: each set bit indicates the corresponding PPDU stats TLV type
+ * needs to be included in the target's PPDU_STATS_IND messages.
+ * Value: refer to htt_ppdu_stats_tlv_tag_t
+ *
+ */
+
+struct htt_ppdu_stats_cfg_cmd {
+ u32 msg;
+} __packed;
+
+#define HTT_PPDU_STATS_CFG_MSG_TYPE GENMASK(7, 0)
+#define HTT_PPDU_STATS_CFG_SOC_STATS BIT(8)
+#define HTT_PPDU_STATS_CFG_PDEV_ID GENMASK(15, 9)
+#define HTT_PPDU_STATS_CFG_TLV_TYPE_BITMASK GENMASK(31, 16)
+
+enum htt_ppdu_stats_tag_type {
+ HTT_PPDU_STATS_TAG_COMMON,
+ HTT_PPDU_STATS_TAG_USR_COMMON,
+ HTT_PPDU_STATS_TAG_USR_RATE,
+ HTT_PPDU_STATS_TAG_USR_MPDU_ENQ_BITMAP_64,
+ HTT_PPDU_STATS_TAG_USR_MPDU_ENQ_BITMAP_256,
+ HTT_PPDU_STATS_TAG_SCH_CMD_STATUS,
+ HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON,
+ HTT_PPDU_STATS_TAG_USR_COMPLTN_BA_BITMAP_64,
+ HTT_PPDU_STATS_TAG_USR_COMPLTN_BA_BITMAP_256,
+ HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS,
+ HTT_PPDU_STATS_TAG_USR_COMPLTN_FLUSH,
+ HTT_PPDU_STATS_TAG_USR_COMMON_ARRAY,
+ HTT_PPDU_STATS_TAG_INFO,
+ HTT_PPDU_STATS_TAG_TX_MGMTCTRL_PAYLOAD,
+
+ /* New TLV's are added above to this line */
+ HTT_PPDU_STATS_TAG_MAX,
+};
+
+#define HTT_PPDU_STATS_TAG_DEFAULT (BIT(HTT_PPDU_STATS_TAG_COMMON) \
+ | BIT(HTT_PPDU_STATS_TAG_USR_COMMON) \
+ | BIT(HTT_PPDU_STATS_TAG_USR_RATE) \
+ | BIT(HTT_PPDU_STATS_TAG_SCH_CMD_STATUS) \
+ | BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON) \
+ | BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS) \
+ | BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_FLUSH) \
+ | BIT(HTT_PPDU_STATS_TAG_USR_COMMON_ARRAY))
+
+#define HTT_PPDU_STATS_TAG_PKTLOG (BIT(HTT_PPDU_STATS_TAG_USR_MPDU_ENQ_BITMAP_64) | \
+ BIT(HTT_PPDU_STATS_TAG_USR_MPDU_ENQ_BITMAP_256) | \
+ BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_BA_BITMAP_64) | \
+ BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_BA_BITMAP_256) | \
+ BIT(HTT_PPDU_STATS_TAG_INFO) | \
+ BIT(HTT_PPDU_STATS_TAG_TX_MGMTCTRL_PAYLOAD) | \
+ HTT_PPDU_STATS_TAG_DEFAULT)
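+
+/* Example (illustrative, hypothetical pdev_mask value): building the single
+ * msg word of the PPDU stats config command from the masks and tags above:
+ *
+ * cmd->msg = FIELD_PREP(HTT_PPDU_STATS_CFG_MSG_TYPE,
+ *                       HTT_H2T_MSG_TYPE_PPDU_STATS_CFG) |
+ *            FIELD_PREP(HTT_PPDU_STATS_CFG_PDEV_ID, pdev_mask) |
+ *            FIELD_PREP(HTT_PPDU_STATS_CFG_TLV_TYPE_BITMASK,
+ *                       HTT_PPDU_STATS_TAG_DEFAULT);
+ */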
+
+/* HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG Message
+ *
+ * details:
+ * HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG message is sent by host to
+ * configure RXDMA rings.
+ * The configuration is per-ring and includes both packet subtypes
+ * and PPDU/MPDU TLVs.
+ *
+ * The message would appear as follows:
+ *
+ * |31 26|25|24|23 16|15 8|7 0|
+ * |-----------------+----------------+----------------+---------------|
+ * | rsvd1 |PS|SS| ring_id | pdev_id | msg_type |
+ * |-------------------------------------------------------------------|
+ * | rsvd2 | ring_buffer_size |
+ * |-------------------------------------------------------------------|
+ * | packet_type_enable_flags_0 |
+ * |-------------------------------------------------------------------|
+ * | packet_type_enable_flags_1 |
+ * |-------------------------------------------------------------------|
+ * | packet_type_enable_flags_2 |
+ * |-------------------------------------------------------------------|
+ * | packet_type_enable_flags_3 |
+ * |-------------------------------------------------------------------|
+ * | tlv_filter_in_flags |
+ * |-------------------------------------------------------------------|
+ * Where:
+ * PS = pkt_swap
+ * SS = status_swap
+ * The message is interpreted as follows:
+ * dword0 - b'0:7 - msg_type: This will be set to
+ * HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG
+ * b'8:15 - pdev_id:
+ * 0 (for rings at SOC/UMAC level),
+ * 1/2/3 mac id (for rings at LMAC level)
+ * b'16:23 - ring_id: identifies the ring to configure.
+ * More details can be found in enum htt_srng_ring_id
+ * b'24 - status_swap: 1 is to swap status TLV
+ * b'25 - pkt_swap: 1 is to swap packet TLV
+ * b'26:31 - rsvd1: reserved for future use
+ * dword1 - b'0:15 - ring_buffer_size: size of buffers referenced by rx ring,
+ * in byte units.
+ * Valid only for HW_TO_SW_RING and SW_TO_HW_RING
+ * - b'16:31 - rsvd2: Reserved for future use
+ * dword2 - b'0:31 - packet_type_enable_flags_0:
+ * Enable MGMT packet from 0b0000 to 0b1001
+ * bits from low to high: FP, MD, MO - 3 bits
+ * FP: Filter_Pass
+ * MD: Monitor_Direct
+ * MO: Monitor_Other
+ * 10 mgmt subtypes * 3 bits -> 30 bits
+ * Refer to PKT_TYPE_ENABLE_FLAG0_xxx_MGMT_xxx defs
+ * dword3 - b'0:31 - packet_type_enable_flags_1:
+ * Enable MGMT packet from 0b1010 to 0b1111
+ * bits from low to high: FP, MD, MO - 3 bits
+ * Refer to PKT_TYPE_ENABLE_FLAG1_xxx_MGMT_xxx defs
+ * dword4 - b'0:31 - packet_type_enable_flags_2:
+ * Enable CTRL packet from 0b0000 to 0b1001
+ * bits from low to high: FP, MD, MO - 3 bits
+ * Refer to PKT_TYPE_ENABLE_FLAG2_xxx_CTRL_xxx defs
+ * dword5 - b'0:31 - packet_type_enable_flags_3:
+ * Enable CTRL packet from 0b1010 to 0b1111,
+ * MCAST_DATA, UCAST_DATA, NULL_DATA
+ * bits from low to high: FP, MD, MO - 3 bits
+ * Refer to PKT_TYPE_ENABLE_FLAG3_xxx_CTRL_xxx defs
+ * dword6 - b'0:31 - tlv_filter_in_flags:
+ * Filter in Attention/MPDU/PPDU/Header/User tlvs
+ * Refer to CFG_TLV_FILTER_IN_FLAG defs
+ */
+
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_MSG_TYPE GENMASK(7, 0)
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID GENMASK(15, 8)
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_RING_ID GENMASK(23, 16)
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_SS BIT(24)
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PS BIT(25)
+
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO1_BUF_SIZE GENMASK(15, 0)
+
+enum htt_rx_filter_tlv_flags {
+ HTT_RX_FILTER_TLV_FLAGS_MPDU_START = BIT(0),
+ HTT_RX_FILTER_TLV_FLAGS_MSDU_START = BIT(1),
+ HTT_RX_FILTER_TLV_FLAGS_RX_PACKET = BIT(2),
+ HTT_RX_FILTER_TLV_FLAGS_MSDU_END = BIT(3),
+ HTT_RX_FILTER_TLV_FLAGS_MPDU_END = BIT(4),
+ HTT_RX_FILTER_TLV_FLAGS_PACKET_HEADER = BIT(5),
+ HTT_RX_FILTER_TLV_FLAGS_PER_MSDU_HEADER = BIT(6),
+ HTT_RX_FILTER_TLV_FLAGS_ATTENTION = BIT(7),
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_START = BIT(8),
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END = BIT(9),
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS = BIT(10),
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS_EXT = BIT(11),
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE = BIT(12),
+};
+
+enum htt_rx_mgmt_pkt_filter_tlv_flags0 {
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_REQ = BIT(0),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_REQ = BIT(1),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_REQ = BIT(2),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_RESP = BIT(3),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_RESP = BIT(4),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_RESP = BIT(5),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_REQ = BIT(6),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_REQ = BIT(7),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_REQ = BIT(8),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_RESP = BIT(9),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_RESP = BIT(10),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_RESP = BIT(11),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_REQ = BIT(12),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_REQ = BIT(13),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_REQ = BIT(14),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_RESP = BIT(15),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_RESP = BIT(16),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_RESP = BIT(17),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_TIMING_ADV = BIT(18),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_TIMING_ADV = BIT(19),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_TIMING_ADV = BIT(20),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_RESERVED_7 = BIT(21),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_RESERVED_7 = BIT(22),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_RESERVED_7 = BIT(23),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_BEACON = BIT(24),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_BEACON = BIT(25),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_BEACON = BIT(26),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_ATIM = BIT(27),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_ATIM = BIT(28),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_ATIM = BIT(29),
+};
+
+enum htt_rx_mgmt_pkt_filter_tlv_flags1 {
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_DISASSOC = BIT(0),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_DISASSOC = BIT(1),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_DISASSOC = BIT(2),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_AUTH = BIT(3),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_AUTH = BIT(4),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_AUTH = BIT(5),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_DEAUTH = BIT(6),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_DEAUTH = BIT(7),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_DEAUTH = BIT(8),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION = BIT(9),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION = BIT(10),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION = BIT(11),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION_NOACK = BIT(12),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION_NOACK = BIT(13),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION_NOACK = BIT(14),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_RESERVED_15 = BIT(15),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_RESERVED_15 = BIT(16),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_RESERVED_15 = BIT(17),
+};
+
+enum htt_rx_ctrl_pkt_filter_tlv_flags2 {
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_1 = BIT(0),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_1 = BIT(1),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_1 = BIT(2),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_2 = BIT(3),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_2 = BIT(4),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_2 = BIT(5),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_TRIGGER = BIT(6),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_TRIGGER = BIT(7),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_TRIGGER = BIT(8),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_4 = BIT(9),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_4 = BIT(10),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_4 = BIT(11),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_BF_REP_POLL = BIT(12),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_BF_REP_POLL = BIT(13),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_BF_REP_POLL = BIT(14),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_VHT_NDP = BIT(15),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_VHT_NDP = BIT(16),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_VHT_NDP = BIT(17),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_FRAME_EXT = BIT(18),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_FRAME_EXT = BIT(19),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_FRAME_EXT = BIT(20),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_WRAPPER = BIT(21),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_WRAPPER = BIT(22),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_WRAPPER = BIT(23),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_BAR = BIT(24),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_BAR = BIT(25),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_BAR = BIT(26),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_BA = BIT(27),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_BA = BIT(28),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_BA = BIT(29),
+};
+
+enum htt_rx_ctrl_pkt_filter_tlv_flags3 {
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_PSPOLL = BIT(0),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_PSPOLL = BIT(1),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_PSPOLL = BIT(2),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_RTS = BIT(3),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_RTS = BIT(4),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_RTS = BIT(5),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_CTS = BIT(6),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_CTS = BIT(7),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_CTS = BIT(8),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_ACK = BIT(9),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_ACK = BIT(10),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_ACK = BIT(11),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND = BIT(12),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND = BIT(13),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND = BIT(14),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND_ACK = BIT(15),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND_ACK = BIT(16),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND_ACK = BIT(17),
+};
+
+enum htt_rx_data_pkt_filter_tlv_flasg3 {
+ HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_MCAST = BIT(18),
+ HTT_RX_MD_DATA_PKT_FILTER_TLV_FLASG3_MCAST = BIT(19),
+ HTT_RX_MO_DATA_PKT_FILTER_TLV_FLASG3_MCAST = BIT(20),
+ HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_UCAST = BIT(21),
+ HTT_RX_MD_DATA_PKT_FILTER_TLV_FLASG3_UCAST = BIT(22),
+ HTT_RX_MO_DATA_PKT_FILTER_TLV_FLASG3_UCAST = BIT(23),
+ HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA = BIT(24),
+ HTT_RX_MD_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA = BIT(25),
+ HTT_RX_MO_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA = BIT(26),
+};
+
+#define HTT_RX_FP_MGMT_FILTER_FLAGS0 \
+ (HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_REQ \
+ | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_RESP \
+ | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_REQ \
+ | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_RESP \
+ | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_REQ \
+ | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_RESP \
+ | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_TIMING_ADV \
+ | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_BEACON \
+ | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_ATIM)
+
+#define HTT_RX_MD_MGMT_FILTER_FLAGS0 \
+ (HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_REQ \
+ | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_RESP \
+ | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_REQ \
+ | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_RESP \
+ | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_REQ \
+ | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_RESP \
+ | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_TIMING_ADV \
+ | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_BEACON \
+ | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_ATIM)
+
+#define HTT_RX_MO_MGMT_FILTER_FLAGS0 \
+ (HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_REQ \
+ | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_RESP \
+ | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_REQ \
+ | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_RESP \
+ | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_REQ \
+ | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_RESP \
+ | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_TIMING_ADV \
+ | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_BEACON \
+ | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_ATIM)
+
+#define HTT_RX_FP_MGMT_FILTER_FLAGS1 (HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_DISASSOC \
+ | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_AUTH \
+ | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_DEAUTH \
+ | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION \
+ | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION_NOACK)
+
+#define HTT_RX_MD_MGMT_FILTER_FLAGS1 (HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_DISASSOC \
+ | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_AUTH \
+ | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_DEAUTH \
+ | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION \
+ | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION_NOACK)
+
+#define HTT_RX_MO_MGMT_FILTER_FLAGS1 (HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_DISASSOC \
+ | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_AUTH \
+ | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_DEAUTH \
+ | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION \
+ | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION_NOACK)
+
+#define HTT_RX_FP_CTRL_FILTER_FLASG2 (HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_WRAPPER \
+ | HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_BAR \
+ | HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_BA)
+
+#define HTT_RX_MD_CTRL_FILTER_FLASG2 (HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_WRAPPER \
+ | HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_BAR \
+ | HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_BA)
+
+#define HTT_RX_MO_CTRL_FILTER_FLASG2 (HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_WRAPPER \
+ | HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_BAR \
+ | HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_BA)
+
+#define HTT_RX_FP_CTRL_FILTER_FLASG3 (HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_PSPOLL \
+ | HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_RTS \
+ | HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_CTS \
+ | HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_ACK \
+ | HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND \
+ | HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND_ACK)
+
+#define HTT_RX_MD_CTRL_FILTER_FLASG3 (HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_PSPOLL \
+ | HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_RTS \
+ | HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_CTS \
+ | HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_ACK \
+ | HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND \
+ | HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND_ACK)
+
+#define HTT_RX_MO_CTRL_FILTER_FLASG3 (HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_PSPOLL \
+ | HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_RTS \
+ | HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_CTS \
+ | HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_ACK \
+ | HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND \
+ | HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND_ACK)
+
+#define HTT_RX_FP_DATA_FILTER_FLASG3 (HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_MCAST \
+ | HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_UCAST \
+ | HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA)
+
+#define HTT_RX_MD_DATA_FILTER_FLASG3 (HTT_RX_MD_DATA_PKT_FILTER_TLV_FLASG3_MCAST \
+ | HTT_RX_MD_DATA_PKT_FILTER_TLV_FLASG3_UCAST \
+ | HTT_RX_MD_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA)
+
+#define HTT_RX_MO_DATA_FILTER_FLASG3 (HTT_RX_MO_DATA_PKT_FILTER_TLV_FLASG3_MCAST \
+ | HTT_RX_MO_DATA_PKT_FILTER_TLV_FLASG3_UCAST \
+ | HTT_RX_MO_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA)
+
+#define HTT_RX_MON_FP_MGMT_FILTER_FLAGS0 \
+ (HTT_RX_FP_MGMT_FILTER_FLAGS0 | \
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_RESERVED_7)
+
+#define HTT_RX_MON_MO_MGMT_FILTER_FLAGS0 \
+ (HTT_RX_MO_MGMT_FILTER_FLAGS0 | \
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_RESERVED_7)
+
+#define HTT_RX_MON_FP_MGMT_FILTER_FLAGS1 \
+ (HTT_RX_FP_MGMT_FILTER_FLAGS1 | \
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_RESERVED_15)
+
+#define HTT_RX_MON_MO_MGMT_FILTER_FLAGS1 \
+ (HTT_RX_MO_MGMT_FILTER_FLAGS1 | \
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_RESERVED_15)
+
+#define HTT_RX_MON_FP_CTRL_FILTER_FLASG2 \
+ (HTT_RX_FP_CTRL_FILTER_FLASG2 | \
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_1 | \
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_2 | \
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_TRIGGER | \
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_4 | \
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_BF_REP_POLL | \
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_VHT_NDP | \
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_FRAME_EXT)
+
+#define HTT_RX_MON_MO_CTRL_FILTER_FLASG2 \
+ (HTT_RX_MO_CTRL_FILTER_FLASG2 | \
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_1 | \
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_2 | \
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_TRIGGER | \
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_4 | \
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_BF_REP_POLL | \
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_VHT_NDP | \
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_FRAME_EXT)
+
+#define HTT_RX_MON_FP_CTRL_FILTER_FLASG3 HTT_RX_FP_CTRL_FILTER_FLASG3
+
+#define HTT_RX_MON_MO_CTRL_FILTER_FLASG3 HTT_RX_MO_CTRL_FILTER_FLASG3
+
+#define HTT_RX_MON_FP_DATA_FILTER_FLASG3 HTT_RX_FP_DATA_FILTER_FLASG3
+
+#define HTT_RX_MON_MO_DATA_FILTER_FLASG3 HTT_RX_MO_DATA_FILTER_FLASG3
+
+#define HTT_RX_MON_FILTER_TLV_FLAGS \
+ (HTT_RX_FILTER_TLV_FLAGS_MPDU_START | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_START | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS_EXT | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE)
+
+#define HTT_RX_MON_FILTER_TLV_FLAGS_MON_STATUS_RING \
+ (HTT_RX_FILTER_TLV_FLAGS_MPDU_START | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_START | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS_EXT | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE)
+
+#define HTT_RX_MON_FILTER_TLV_FLAGS_MON_BUF_RING \
+ (HTT_RX_FILTER_TLV_FLAGS_MPDU_START | \
+ HTT_RX_FILTER_TLV_FLAGS_MSDU_START | \
+ HTT_RX_FILTER_TLV_FLAGS_RX_PACKET | \
+ HTT_RX_FILTER_TLV_FLAGS_MSDU_END | \
+ HTT_RX_FILTER_TLV_FLAGS_MPDU_END | \
+ HTT_RX_FILTER_TLV_FLAGS_PACKET_HEADER | \
+ HTT_RX_FILTER_TLV_FLAGS_PER_MSDU_HEADER | \
+ HTT_RX_FILTER_TLV_FLAGS_ATTENTION)
+
+struct htt_rx_ring_selection_cfg_cmd {
+ u32 info0;
+ u32 info1;
+ u32 pkt_type_en_flags0;
+ u32 pkt_type_en_flags1;
+ u32 pkt_type_en_flags2;
+ u32 pkt_type_en_flags3;
+ u32 rx_filter_tlv;
+} __packed;
+
+struct htt_rx_ring_tlv_filter {
+ u32 rx_filter; /* see htt_rx_filter_tlv_flags */
+ u32 pkt_filter_flags0; /* MGMT */
+ u32 pkt_filter_flags1; /* MGMT */
+ u32 pkt_filter_flags2; /* CTRL */
+ u32 pkt_filter_flags3; /* DATA */
+};
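+
+/* Illustrative sketch (not part of this patch): the groups of filter macros
+ * above are meant to be plugged into struct htt_rx_ring_tlv_filter. A
+ * hypothetical monitor-buffer-ring configuration passing filter-pass (FP)
+ * frames could look like:
+ *
+ *	struct htt_rx_ring_tlv_filter tlv_filter = {
+ *		.rx_filter = HTT_RX_MON_FILTER_TLV_FLAGS_MON_BUF_RING,
+ *		.pkt_filter_flags0 = HTT_RX_MON_FP_MGMT_FILTER_FLAGS0,
+ *		.pkt_filter_flags1 = HTT_RX_MON_FP_MGMT_FILTER_FLAGS1,
+ *		.pkt_filter_flags2 = HTT_RX_MON_FP_CTRL_FILTER_FLASG2,
+ *		.pkt_filter_flags3 = HTT_RX_MON_FP_DATA_FILTER_FLASG3,
+ *	};
+ */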
+
+#define HTT_RX_FULL_MON_MODE_CFG_CMD_INFO0_MSG_TYPE GENMASK(7, 0)
+#define HTT_RX_FULL_MON_MODE_CFG_CMD_INFO0_PDEV_ID GENMASK(15, 8)
+
+#define HTT_RX_FULL_MON_MODE_CFG_CMD_CFG_ENABLE BIT(0)
+#define HTT_RX_FULL_MON_MODE_CFG_CMD_CFG_ZERO_MPDUS_END BIT(1)
+#define HTT_RX_FULL_MON_MODE_CFG_CMD_CFG_NON_ZERO_MPDUS_END BIT(2)
+#define HTT_RX_FULL_MON_MODE_CFG_CMD_CFG_RELEASE_RING GENMASK(10, 3)
+
+/* Enumeration for full monitor mode destination ring select
+ * 0 - REO destination ring select
+ * 1 - FW destination ring select
+ * 2 - SW destination ring select
+ * 3 - Release destination ring select
+ */
+enum htt_rx_full_mon_release_ring {
+ HTT_RX_MON_RING_REO,
+ HTT_RX_MON_RING_FW,
+ HTT_RX_MON_RING_SW,
+ HTT_RX_MON_RING_RELEASE,
+};
+
+struct htt_rx_full_monitor_mode_cfg_cmd {
+ u32 info0;
+ u32 cfg;
+} __packed;
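+
+/* Illustrative sketch (not part of this patch): filling the full monitor
+ * mode command with the masks above. The H2T message type id for this
+ * command is defined elsewhere, so it appears here as an assumed variable
+ * msg_type:
+ *
+ *	cmd->info0 = FIELD_PREP(HTT_RX_FULL_MON_MODE_CFG_CMD_INFO0_MSG_TYPE,
+ *				msg_type) |
+ *		     FIELD_PREP(HTT_RX_FULL_MON_MODE_CFG_CMD_INFO0_PDEV_ID,
+ *				pdev_id);
+ *	cmd->cfg = HTT_RX_FULL_MON_MODE_CFG_CMD_CFG_ENABLE |
+ *		   FIELD_PREP(HTT_RX_FULL_MON_MODE_CFG_CMD_CFG_RELEASE_RING,
+ *			      HTT_RX_MON_RING_SW);
+ */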
+
+/* HTT message target->host */
+
+enum htt_t2h_msg_type {
+ HTT_T2H_MSG_TYPE_VERSION_CONF,
+ HTT_T2H_MSG_TYPE_PEER_MAP = 0x3,
+ HTT_T2H_MSG_TYPE_PEER_UNMAP = 0x4,
+ HTT_T2H_MSG_TYPE_RX_ADDBA = 0x5,
+ HTT_T2H_MSG_TYPE_PKTLOG = 0x8,
+ HTT_T2H_MSG_TYPE_SEC_IND = 0xb,
+ HTT_T2H_MSG_TYPE_PEER_MAP2 = 0x1e,
+ HTT_T2H_MSG_TYPE_PEER_UNMAP2 = 0x1f,
+ HTT_T2H_MSG_TYPE_PPDU_STATS_IND = 0x1d,
+ HTT_T2H_MSG_TYPE_EXT_STATS_CONF = 0x1c,
+ HTT_T2H_MSG_TYPE_BKPRESSURE_EVENT_IND = 0x24,
+};
+
+#define HTT_TARGET_VERSION_MAJOR 3
+
+#define HTT_T2H_MSG_TYPE GENMASK(7, 0)
+#define HTT_T2H_VERSION_CONF_MINOR GENMASK(15, 8)
+#define HTT_T2H_VERSION_CONF_MAJOR GENMASK(23, 16)
+
+struct htt_t2h_version_conf_msg {
+ u32 version;
+} __packed;
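+
+/* Illustrative sketch (not part of this patch): decoding the version
+ * confirmation word with the masks above:
+ *
+ *	u8 major = FIELD_GET(HTT_T2H_VERSION_CONF_MAJOR, msg->version);
+ *	u8 minor = FIELD_GET(HTT_T2H_VERSION_CONF_MINOR, msg->version);
+ *
+ * where the major version is expected to match HTT_TARGET_VERSION_MAJOR.
+ */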
+
+#define HTT_T2H_PEER_MAP_INFO_VDEV_ID GENMASK(15, 8)
+#define HTT_T2H_PEER_MAP_INFO_PEER_ID GENMASK(31, 16)
+#define HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16 GENMASK(15, 0)
+#define HTT_T2H_PEER_MAP_INFO1_HW_PEER_ID GENMASK(31, 16)
+#define HTT_T2H_PEER_MAP_INFO2_AST_HASH_VAL GENMASK(15, 0)
+#define HTT_T2H_PEER_MAP_INFO2_NEXT_HOP_M BIT(16)
+#define HTT_T2H_PEER_MAP_INFO2_NEXT_HOP_S 16
+
+struct htt_t2h_peer_map_event {
+ u32 info;
+ u32 mac_addr_l32;
+ u32 info1;
+ u32 info2;
+} __packed;
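+
+/* Illustrative sketch (not part of this patch): unpacking a peer map event.
+ * The MAC address is split across mac_addr_l32 and the low 16 bits of info1:
+ *
+ *	u8 vdev_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_VDEV_ID, ev->info);
+ *	u16 peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_PEER_ID, ev->info);
+ *	u16 mac_h16 = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16,
+ *				ev->info1);
+ */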
+
+#define HTT_T2H_PEER_UNMAP_INFO_VDEV_ID HTT_T2H_PEER_MAP_INFO_VDEV_ID
+#define HTT_T2H_PEER_UNMAP_INFO_PEER_ID HTT_T2H_PEER_MAP_INFO_PEER_ID
+#define HTT_T2H_PEER_UNMAP_INFO1_MAC_ADDR_H16 \
+ HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16
+#define HTT_T2H_PEER_MAP_INFO1_NEXT_HOP_M HTT_T2H_PEER_MAP_INFO2_NEXT_HOP_M
+#define HTT_T2H_PEER_MAP_INFO1_NEXT_HOP_S HTT_T2H_PEER_MAP_INFO2_NEXT_HOP_S
+
+struct htt_t2h_peer_unmap_event {
+ u32 info;
+ u32 mac_addr_l32;
+ u32 info1;
+} __packed;
+
+struct htt_resp_msg {
+ union {
+ struct htt_t2h_version_conf_msg version_msg;
+ struct htt_t2h_peer_map_event peer_map_ev;
+ struct htt_t2h_peer_unmap_event peer_unmap_ev;
+ };
+} __packed;
+
+#define HTT_BACKPRESSURE_EVENT_PDEV_ID_M GENMASK(15, 8)
+#define HTT_BACKPRESSURE_EVENT_RING_TYPE_M GENMASK(23, 16)
+#define HTT_BACKPRESSURE_EVENT_RING_ID_M GENMASK(31, 24)
+
+#define HTT_BACKPRESSURE_EVENT_HP_M GENMASK(15, 0)
+#define HTT_BACKPRESSURE_EVENT_TP_M GENMASK(31, 16)
+
+#define HTT_BACKPRESSURE_UMAC_RING_TYPE 0
+#define HTT_BACKPRESSURE_LMAC_RING_TYPE 1
+
+enum htt_backpressure_umac_ringid {
+ HTT_SW_RING_IDX_REO_REO2SW1_RING,
+ HTT_SW_RING_IDX_REO_REO2SW2_RING,
+ HTT_SW_RING_IDX_REO_REO2SW3_RING,
+ HTT_SW_RING_IDX_REO_REO2SW4_RING,
+ HTT_SW_RING_IDX_REO_WBM2REO_LINK_RING,
+ HTT_SW_RING_IDX_REO_REO2TCL_RING,
+ HTT_SW_RING_IDX_REO_REO2FW_RING,
+ HTT_SW_RING_IDX_REO_REO_RELEASE_RING,
+ HTT_SW_RING_IDX_WBM_PPE_RELEASE_RING,
+ HTT_SW_RING_IDX_TCL_TCL2TQM_RING,
+ HTT_SW_RING_IDX_WBM_TQM_RELEASE_RING,
+ HTT_SW_RING_IDX_WBM_REO_RELEASE_RING,
+ HTT_SW_RING_IDX_WBM_WBM2SW0_RELEASE_RING,
+ HTT_SW_RING_IDX_WBM_WBM2SW1_RELEASE_RING,
+ HTT_SW_RING_IDX_WBM_WBM2SW2_RELEASE_RING,
+ HTT_SW_RING_IDX_WBM_WBM2SW3_RELEASE_RING,
+ HTT_SW_RING_IDX_REO_REO_CMD_RING,
+ HTT_SW_RING_IDX_REO_REO_STATUS_RING,
+ HTT_SW_UMAC_RING_IDX_MAX,
+};
+
+enum htt_backpressure_lmac_ringid {
+ HTT_SW_RING_IDX_FW2RXDMA_BUF_RING,
+ HTT_SW_RING_IDX_FW2RXDMA_STATUS_RING,
+ HTT_SW_RING_IDX_FW2RXDMA_LINK_RING,
+ HTT_SW_RING_IDX_SW2RXDMA_BUF_RING,
+ HTT_SW_RING_IDX_WBM2RXDMA_LINK_RING,
+ HTT_SW_RING_IDX_RXDMA2FW_RING,
+ HTT_SW_RING_IDX_RXDMA2SW_RING,
+ HTT_SW_RING_IDX_RXDMA2RELEASE_RING,
+ HTT_SW_RING_IDX_RXDMA2REO_RING,
+ HTT_SW_RING_IDX_MONITOR_STATUS_RING,
+ HTT_SW_RING_IDX_MONITOR_BUF_RING,
+ HTT_SW_RING_IDX_MONITOR_DESC_RING,
+ HTT_SW_RING_IDX_MONITOR_DEST_RING,
+ HTT_SW_LMAC_RING_IDX_MAX,
+};
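+
+/* Illustrative sketch (not part of this patch): a backpressure event is
+ * assumed here to carry the header fields in its first word and the
+ * head/tail pointers in the following word:
+ *
+ *	u8 pdev_id = FIELD_GET(HTT_BACKPRESSURE_EVENT_PDEV_ID_M, *msg);
+ *	u8 ring_type = FIELD_GET(HTT_BACKPRESSURE_EVENT_RING_TYPE_M, *msg);
+ *	u8 ring_id = FIELD_GET(HTT_BACKPRESSURE_EVENT_RING_ID_M, *msg);
+ *	u16 hp = FIELD_GET(HTT_BACKPRESSURE_EVENT_HP_M, *(msg + 1));
+ *	u16 tp = FIELD_GET(HTT_BACKPRESSURE_EVENT_TP_M, *(msg + 1));
+ *
+ * ring_type then selects between the umac and lmac ring id enums above.
+ */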
+
+/* ppdu stats
+ *
+ * @details
+ * The following field definitions describe the format of the HTT target
+ * to host ppdu stats indication message.
+ *
+ *
+ * |31 16|15 12|11 10|9 8|7 0 |
+ * |----------------------------------------------------------------------|
+ * | payload_size | rsvd |pdev_id|mac_id | msg type |
+ * |----------------------------------------------------------------------|
+ * | ppdu_id |
+ * |----------------------------------------------------------------------|
+ * | Timestamp in us |
+ * |----------------------------------------------------------------------|
+ * | reserved |
+ * |----------------------------------------------------------------------|
+ * | type-specific stats info |
+ * | (see htt_ppdu_stats.h) |
+ * |----------------------------------------------------------------------|
+ * Header fields:
+ * - MSG_TYPE
+ * Bits 7:0
+ * Purpose: Identifies this as a PPDU STATS indication
+ * message.
+ * Value: 0x1d
+ * - mac_id
+ * Bits 9:8
+ * Purpose: mac_id of this ppdu_id
+ * Value: 0-3
+ * - pdev_id
+ * Bits 11:10
+ * Purpose: pdev_id of this ppdu_id
+ * Value: 0-3
+ * 0 for rings at SOC level,
+ * 1/2/3 map to PDEV 0/1/2
+ * - payload_size
+ * Bits 31:16
+ * Purpose: total tlv size
+ * Value: payload_size in bytes
+ */
+
+#define HTT_T2H_PPDU_STATS_INFO_PDEV_ID GENMASK(11, 10)
+#define HTT_T2H_PPDU_STATS_INFO_PAYLOAD_SIZE GENMASK(31, 16)
+
+struct ath11k_htt_ppdu_stats_msg {
+ u32 info;
+ u32 ppdu_id;
+ u32 timestamp;
+ u32 rsvd;
+ u8 data[];
+} __packed;
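+
+/* Illustrative sketch (not part of this patch): decoding the indication
+ * header word documented above:
+ *
+ *	u8 pdev_id = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PDEV_ID, msg->info);
+ *	u16 payload_size = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PAYLOAD_SIZE,
+ *				     msg->info);
+ *
+ * msg->data then holds payload_size bytes of TLV-encoded stats.
+ */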
+
+struct htt_tlv {
+ u32 header;
+ u8 value[];
+} __packed;
+
+#define HTT_TLV_TAG GENMASK(11, 0)
+#define HTT_TLV_LEN GENMASK(23, 12)
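+
+/* Illustrative sketch (not part of this patch): walking a TLV-encoded
+ * payload with the tag/len masks above. Whether the value needs padding to
+ * a 4-byte boundary is an assumption here:
+ *
+ *	struct htt_tlv *tlv = (struct htt_tlv *)ptr;
+ *	u16 tag = FIELD_GET(HTT_TLV_TAG, tlv->header);
+ *	u16 len = FIELD_GET(HTT_TLV_LEN, tlv->header);
+ *
+ *	ptr += sizeof(*tlv) + len;	/* advance to the next TLV *\/
+ */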
+
+enum HTT_PPDU_STATS_BW {
+ HTT_PPDU_STATS_BANDWIDTH_5MHZ = 0,
+ HTT_PPDU_STATS_BANDWIDTH_10MHZ = 1,
+ HTT_PPDU_STATS_BANDWIDTH_20MHZ = 2,
+ HTT_PPDU_STATS_BANDWIDTH_40MHZ = 3,
+ HTT_PPDU_STATS_BANDWIDTH_80MHZ = 4,
+ HTT_PPDU_STATS_BANDWIDTH_160MHZ = 5, /* includes 80+80 */
+ HTT_PPDU_STATS_BANDWIDTH_DYN = 6,
+};
+
+#define HTT_PPDU_STATS_CMN_FLAGS_FRAME_TYPE_M GENMASK(7, 0)
+#define HTT_PPDU_STATS_CMN_FLAGS_QUEUE_TYPE_M GENMASK(15, 8)
+/* bw - HTT_PPDU_STATS_BW */
+#define HTT_PPDU_STATS_CMN_FLAGS_BW_M GENMASK(19, 16)
+
+struct htt_ppdu_stats_common {
+ u32 ppdu_id;
+ u16 sched_cmdid;
+ u8 ring_id;
+ u8 num_users;
+ u32 flags; /* %HTT_PPDU_STATS_CMN_FLAGS_ */
+ u32 chain_mask;
+ u32 fes_duration_us; /* frame exchange sequence */
+ u32 ppdu_sch_eval_start_tstmp_us;
+ u32 ppdu_sch_end_tstmp_us;
+ u32 ppdu_start_tstmp_us;
+ /* BIT [15 : 0] - phy mode (WLAN_PHY_MODE) with which ppdu was transmitted
+ * BIT [31 : 16] - bandwidth (in MHz) with which ppdu was transmitted
+ */
+ u16 phy_mode;
+ u16 bw_mhz;
+} __packed;
+
+enum htt_ppdu_stats_gi {
+ HTT_PPDU_STATS_SGI_0_8_US,
+ HTT_PPDU_STATS_SGI_0_4_US,
+ HTT_PPDU_STATS_SGI_1_6_US,
+ HTT_PPDU_STATS_SGI_3_2_US,
+};
+
+#define HTT_PPDU_STATS_USER_RATE_INFO0_USER_POS_M GENMASK(3, 0)
+#define HTT_PPDU_STATS_USER_RATE_INFO0_MU_GROUP_ID_M GENMASK(11, 4)
+
+#define HTT_PPDU_STATS_USER_RATE_INFO1_RESP_TYPE_VALD_M BIT(0)
+#define HTT_PPDU_STATS_USER_RATE_INFO1_PPDU_TYPE_M GENMASK(5, 1)
+
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_LTF_SIZE_M GENMASK(1, 0)
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_STBC_M BIT(2)
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_HE_RE_M BIT(3)
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_TXBF_M GENMASK(7, 4)
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_BW_M GENMASK(11, 8)
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_NSS_M GENMASK(15, 12)
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_MCS_M GENMASK(19, 16)
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_PREAMBLE_M GENMASK(23, 20)
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_GI_M GENMASK(27, 24)
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_DCM_M BIT(28)
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_LDPC_M BIT(29)
+
+#define HTT_USR_RATE_PREAMBLE(_val) \
+ FIELD_GET(HTT_PPDU_STATS_USER_RATE_FLAGS_PREAMBLE_M, _val)
+#define HTT_USR_RATE_BW(_val) \
+ FIELD_GET(HTT_PPDU_STATS_USER_RATE_FLAGS_BW_M, _val)
+#define HTT_USR_RATE_NSS(_val) \
+ FIELD_GET(HTT_PPDU_STATS_USER_RATE_FLAGS_NSS_M, _val)
+#define HTT_USR_RATE_MCS(_val) \
+ FIELD_GET(HTT_PPDU_STATS_USER_RATE_FLAGS_MCS_M, _val)
+#define HTT_USR_RATE_GI(_val) \
+ FIELD_GET(HTT_PPDU_STATS_USER_RATE_FLAGS_GI_M, _val)
+#define HTT_USR_RATE_DCM(_val) \
+ FIELD_GET(HTT_PPDU_STATS_USER_RATE_FLAGS_DCM_M, _val)
+
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_LTF_SIZE_M GENMASK(1, 0)
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_STBC_M BIT(2)
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_HE_RE_M BIT(3)
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_TXBF_M GENMASK(7, 4)
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_BW_M GENMASK(11, 8)
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_NSS_M GENMASK(15, 12)
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_MCS_M GENMASK(19, 16)
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_PREAMBLE_M GENMASK(23, 20)
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_GI_M GENMASK(27, 24)
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_DCM_M BIT(28)
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_LDPC_M BIT(29)
+
+struct htt_ppdu_stats_user_rate {
+ u8 tid_num;
+ u8 reserved0;
+ u16 sw_peer_id;
+ u32 info0; /* %HTT_PPDU_STATS_USER_RATE_INFO0_ */
+ u16 ru_end;
+ u16 ru_start;
+ u16 resp_ru_end;
+ u16 resp_ru_start;
+ u32 info1; /* %HTT_PPDU_STATS_USER_RATE_INFO1_ */
+ u32 rate_flags; /* %HTT_PPDU_STATS_USER_RATE_FLAGS_ */
+ /* Note: resp_rate_flags is only valid if resp_type is UL */
+ u32 resp_rate_flags; /* %HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_ */
+} __packed;
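+
+/* Illustrative sketch (not part of this patch): extracting the per-user tx
+ * rate fields with the HTT_USR_RATE_* accessors above:
+ *
+ *	u8 mcs = HTT_USR_RATE_MCS(user_rate->rate_flags);
+ *	u8 nss = HTT_USR_RATE_NSS(user_rate->rate_flags);
+ *	u8 bw = HTT_USR_RATE_BW(user_rate->rate_flags);
+ *	u8 gi = HTT_USR_RATE_GI(user_rate->rate_flags);
+ *
+ * gi indexes enum htt_ppdu_stats_gi above.
+ */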
+
+#define HTT_PPDU_STATS_TX_INFO_FLAGS_RATECODE_M GENMASK(7, 0)
+#define HTT_PPDU_STATS_TX_INFO_FLAGS_IS_AMPDU_M BIT(8)
+#define HTT_PPDU_STATS_TX_INFO_FLAGS_BA_ACK_FAILED_M GENMASK(10, 9)
+#define HTT_PPDU_STATS_TX_INFO_FLAGS_BW_M GENMASK(13, 11)
+#define HTT_PPDU_STATS_TX_INFO_FLAGS_SGI_M BIT(14)
+#define HTT_PPDU_STATS_TX_INFO_FLAGS_PEERID_M GENMASK(31, 16)
+
+#define HTT_TX_INFO_IS_AMSDU(_flags) \
+ FIELD_GET(HTT_PPDU_STATS_TX_INFO_FLAGS_IS_AMPDU_M, _flags)
+#define HTT_TX_INFO_BA_ACK_FAILED(_flags) \
+ FIELD_GET(HTT_PPDU_STATS_TX_INFO_FLAGS_BA_ACK_FAILED_M, _flags)
+#define HTT_TX_INFO_RATECODE(_flags) \
+ FIELD_GET(HTT_PPDU_STATS_TX_INFO_FLAGS_RATECODE_M, _flags)
+#define HTT_TX_INFO_PEERID(_flags) \
+ FIELD_GET(HTT_PPDU_STATS_TX_INFO_FLAGS_PEERID_M, _flags)
+
+enum htt_ppdu_stats_usr_compln_status {
+ HTT_PPDU_STATS_USER_STATUS_OK,
+ HTT_PPDU_STATS_USER_STATUS_FILTERED,
+ HTT_PPDU_STATS_USER_STATUS_RESP_TIMEOUT,
+ HTT_PPDU_STATS_USER_STATUS_RESP_MISMATCH,
+ HTT_PPDU_STATS_USER_STATUS_ABORT,
+};
+
+#define HTT_PPDU_STATS_USR_CMPLTN_CMN_FLAGS_LONG_RETRY_M GENMASK(3, 0)
+#define HTT_PPDU_STATS_USR_CMPLTN_CMN_FLAGS_SHORT_RETRY_M GENMASK(7, 4)
+#define HTT_PPDU_STATS_USR_CMPLTN_CMN_FLAGS_IS_AMPDU_M BIT(8)
+#define HTT_PPDU_STATS_USR_CMPLTN_CMN_FLAGS_RESP_TYPE_M GENMASK(12, 9)
+
+#define HTT_USR_CMPLTN_IS_AMPDU(_val) \
+ FIELD_GET(HTT_PPDU_STATS_USR_CMPLTN_CMN_FLAGS_IS_AMPDU_M, _val)
+#define HTT_USR_CMPLTN_LONG_RETRY(_val) \
+ FIELD_GET(HTT_PPDU_STATS_USR_CMPLTN_CMN_FLAGS_LONG_RETRY_M, _val)
+#define HTT_USR_CMPLTN_SHORT_RETRY(_val) \
+ FIELD_GET(HTT_PPDU_STATS_USR_CMPLTN_CMN_FLAGS_SHORT_RETRY_M, _val)
+
+struct htt_ppdu_stats_usr_cmpltn_cmn {
+ u8 status;
+ u8 tid_num;
+ u16 sw_peer_id;
+ /* RSSI value of last ack packet (units = dB above noise floor) */
+ u32 ack_rssi;
+ u16 mpdu_tried;
+ u16 mpdu_success;
+ u32 flags; /* %HTT_PPDU_STATS_USR_CMPLTN_CMN_FLAGS_ */
+} __packed;
+
+#define HTT_PPDU_STATS_ACK_BA_INFO_NUM_MPDU_M GENMASK(8, 0)
+#define HTT_PPDU_STATS_ACK_BA_INFO_NUM_MSDU_M GENMASK(24, 9)
+#define HTT_PPDU_STATS_ACK_BA_INFO_TID_NUM GENMASK(31, 25)
+
+#define HTT_PPDU_STATS_NON_QOS_TID 16
+
+struct htt_ppdu_stats_usr_cmpltn_ack_ba_status {
+ u32 ppdu_id;
+ u16 sw_peer_id;
+ u16 reserved0;
+ u32 info; /* %HTT_PPDU_STATS_ACK_BA_INFO_ */
+ u16 current_seq;
+ u16 start_seq;
+ u32 success_bytes;
+} __packed;
+
+struct htt_ppdu_user_stats {
+ u16 peer_id;
+ u32 tlv_flags;
+ bool is_valid_peer_id;
+ struct htt_ppdu_stats_user_rate rate;
+ struct htt_ppdu_stats_usr_cmpltn_cmn cmpltn_cmn;
+ struct htt_ppdu_stats_usr_cmpltn_ack_ba_status ack_ba;
+};
+
+#define HTT_PPDU_STATS_MAX_USERS 8
+#define HTT_PPDU_DESC_MAX_DEPTH 16
+
+struct htt_ppdu_stats {
+ struct htt_ppdu_stats_common common;
+ struct htt_ppdu_user_stats user_stats[HTT_PPDU_STATS_MAX_USERS];
+};
+
+struct htt_ppdu_stats_info {
+ u32 ppdu_id;
+ struct htt_ppdu_stats ppdu_stats;
+ struct list_head list;
+};
+
+/* @brief target -> host packet log message
+ *
+ * @details
+ * The following field definitions describe the format of the packet log
+ * message sent from the target to the host.
+ * The message consists of a 4-octet header, followed by a variable number
+ * of 32-bit character values.
+ *
+ * |31 16|15 12|11 10|9 8|7 0|
+ * |------------------------------------------------------------------|
+ * | payload_size | rsvd |pdev_id|mac_id| msg type |
+ * |------------------------------------------------------------------|
+ * | payload |
+ * |------------------------------------------------------------------|
+ * - MSG_TYPE
+ * Bits 7:0
+ * Purpose: identifies this as a pktlog message
+ * Value: HTT_T2H_MSG_TYPE_PKTLOG
+ * - mac_id
+ * Bits 9:8
+ * Purpose: identifies which MAC/PHY instance generated this pktlog info
+ * Value: 0-3
+ * - pdev_id
+ * Bits 11:10
+ * Purpose: pdev_id
+ * Value: 0-3
+ * 0 for rings at SOC level,
+ * 1/2/3 map to PDEV 0/1/2
+ * - payload_size
+ * Bits 31:16
+ * Purpose: explicitly specify the payload size
+ * Value: payload size in bytes (payload size is a multiple of 4 bytes)
+ */
+struct htt_pktlog_msg {
+ u32 hdr;
+ u8 payload[];
+};
+
+/* @brief host -> target FW extended statistics retrieve
+ *
+ * @details
+ * The following field definitions describe the format of the HTT host
+ * to target FW extended stats retrieve message.
+ * The message specifies the type of stats the host wants to retrieve.
+ *
+ * |31 24|23 16|15 8|7 0|
+ * |-----------------------------------------------------------|
+ * | reserved | stats type | pdev_mask | msg type |
+ * |-----------------------------------------------------------|
+ * | config param [0] |
+ * |-----------------------------------------------------------|
+ * | config param [1] |
+ * |-----------------------------------------------------------|
+ * | config param [2] |
+ * |-----------------------------------------------------------|
+ * | config param [3] |
+ * |-----------------------------------------------------------|
+ * | reserved |
+ * |-----------------------------------------------------------|
+ * | cookie LSBs |
+ * |-----------------------------------------------------------|
+ * | cookie MSBs |
+ * |-----------------------------------------------------------|
+ * Header fields:
+ * - MSG_TYPE
+ * Bits 7:0
+ * Purpose: identifies this as an extended stats upload request message
+ * Value: 0x10
+ * - PDEV_MASK
+ * Bits 15:8
+ * Purpose: identifies the mask of PDEVs to retrieve stats from
+ * Value: This is an overloaded field; refer to the usage and interpretation
+ * of PDEV in the interface document.
+ * Bit 8 : Reserved for SOC stats
+ * Bits 9 - 15 : Indicates PDEV_MASK in DBDC
+ * Indicates MACID_MASK in DBS
+ * - STATS_TYPE
+ * Bits 23:16
+ * Purpose: identifies which FW statistics to upload
+ * Value: Defined by htt_dbg_ext_stats_type (see htt_stats.h)
+ * - Reserved
+ * Bits 31:24
+ * - CONFIG_PARAM [0]
+ * Bits 31:0
+ * Purpose: give an opaque configuration value to the specified stats type
+ * Value: stats-type specific configuration value
+ * Refer to htt_stats.h for interpretation for each stats sub_type
+ * - CONFIG_PARAM [1]
+ * Bits 31:0
+ * Purpose: give an opaque configuration value to the specified stats type
+ * Value: stats-type specific configuration value
+ * Refer to htt_stats.h for interpretation for each stats sub_type
+ * - CONFIG_PARAM [2]
+ * Bits 31:0
+ * Purpose: give an opaque configuration value to the specified stats type
+ * Value: stats-type specific configuration value
+ * Refer to htt_stats.h for interpretation for each stats sub_type
+ * - CONFIG_PARAM [3]
+ * Bits 31:0
+ * Purpose: give an opaque configuration value to the specified stats type
+ * Value: stats-type specific configuration value
+ * Refer to htt_stats.h for interpretation for each stats sub_type
+ * - Reserved [31:0] for future use.
+ * - COOKIE_LSBS
+ * Bits 31:0
+ * Purpose: Provide a mechanism to match a target->host stats confirmation
+ * message with its preceding host->target stats request message.
+ * Value: LSBs of the opaque cookie specified by the host-side requestor
+ * - COOKIE_MSBS
+ * Bits 31:0
+ * Purpose: Provide a mechanism to match a target->host stats confirmation
+ * message with its preceding host->target stats request message.
+ * Value: MSBs of the opaque cookie specified by the host-side requestor
+ */
+
+struct htt_ext_stats_cfg_hdr {
+ u8 msg_type;
+ u8 pdev_mask;
+ u8 stats_type;
+ u8 reserved;
+} __packed;
+
+struct htt_ext_stats_cfg_cmd {
+ struct htt_ext_stats_cfg_hdr hdr;
+ u32 cfg_param0;
+ u32 cfg_param1;
+ u32 cfg_param2;
+ u32 cfg_param3;
+ u32 reserved;
+ u32 cookie_lsb;
+ u32 cookie_msb;
+} __packed;
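+
+/* Illustrative sketch (not part of this patch): filling the ext stats
+ * request per the message format documented above. The stats type value
+ * comes from htt_dbg_ext_stats_type in htt_stats.h; the msg type constant
+ * name and the pdev_mask bit mapping are assumptions here:
+ *
+ *	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_EXT_STATS_CFG;	assumed name
+ *	cmd->hdr.pdev_mask = 1 << (pdev_idx + 1);		assumed mapping
+ *	cmd->hdr.stats_type = type;
+ *	cmd->cfg_param0 = cfg0;
+ *	cmd->cookie_lsb = lower_32_bits(cookie);
+ *	cmd->cookie_msb = upper_32_bits(cookie);
+ */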
+
+/* htt stats config default params */
+#define HTT_STAT_DEFAULT_RESET_START_OFFSET 0
+#define HTT_STAT_DEFAULT_CFG0_ALL_HWQS 0xffffffff
+#define HTT_STAT_DEFAULT_CFG0_ALL_TXQS 0xffffffff
+#define HTT_STAT_DEFAULT_CFG0_ALL_CMDQS 0xffff
+#define HTT_STAT_DEFAULT_CFG0_ALL_RINGS 0xffff
+#define HTT_STAT_DEFAULT_CFG0_ACTIVE_PEERS 0xff
+#define HTT_STAT_DEFAULT_CFG0_CCA_CUMULATIVE 0x00
+#define HTT_STAT_DEFAULT_CFG0_ACTIVE_VDEVS 0x00
+
+/* HTT_DBG_EXT_STATS_PEER_INFO
+ * PARAMS:
+ * @config_param0:
+ * [Bit0] - [0] for sw_peer_id, [1] for mac_addr based request
+ * [Bit15 : Bit 1] htt_peer_stats_req_mode_t
+ * [Bit31 : Bit16] sw_peer_id
+ * @config_param1:
+ * peer_stats_req_type_mask:32 (enum htt_peer_stats_tlv_enum)
+ * 0 bit htt_peer_stats_cmn_tlv
+ * 1 bit htt_peer_details_tlv
+ * 2 bit htt_tx_peer_rate_stats_tlv
+ * 3 bit htt_rx_peer_rate_stats_tlv
+ * 4 bit htt_tx_tid_stats_tlv/htt_tx_tid_stats_v1_tlv
+ * 5 bit htt_rx_tid_stats_tlv
+ * 6 bit htt_msdu_flow_stats_tlv
+ * @config_param2: [Bit31 : Bit0] mac_addr31to0
+ * @config_param3: [Bit15 : Bit0] mac_addr47to32
+ * [Bit31 : Bit16] reserved
+ */
+#define HTT_STAT_PEER_INFO_MAC_ADDR BIT(0)
+#define HTT_STAT_DEFAULT_PEER_REQ_TYPE 0x7f
+
+/* Used to set different configs to the specified stats type. */
+struct htt_ext_stats_cfg_params {
+ u32 cfg0;
+ u32 cfg1;
+ u32 cfg2;
+ u32 cfg3;
+};
+
+/* @brief target -> host extended statistics upload
+ *
+ * @details
+ * The following field definitions describe the format of the HTT target
+ * to host stats upload confirmation message.
+ * The message contains a cookie echoed from the HTT host->target stats
+ * upload request, which identifies which request the confirmation is
+ * for. A single stats request can span multiple HTT stats indications
+ * due to the HTT message size limitation, so every HTT ext stats indication
+ * carries tag-length-value stats information elements.
+ * The tag-length header for each HTT stats IND message also includes a
+ * status field, to indicate whether the request for the stat type in
+ * question was fully met, partially met, unable to be met, or invalid
+ * (if the stat type in question is disabled in the target).
+ * A Done bit set to 1 indicates the end of the stats info elements.
+ *
+ *
+ * |31 16|15 12|11|10 8|7 5|4 0|
+ * |--------------------------------------------------------------|
+ * | reserved | msg type |
+ * |--------------------------------------------------------------|
+ * | cookie LSBs |
+ * |--------------------------------------------------------------|
+ * | cookie MSBs |
+ * |--------------------------------------------------------------|
+ * | stats entry length | rsvd | D| S | stat type |
+ * |--------------------------------------------------------------|
+ * | type-specific stats info |
+ * | (see htt_stats.h) |
+ * |--------------------------------------------------------------|
+ * Header fields:
+ * - MSG_TYPE
+ * Bits 7:0
+ * Purpose: Identifies this as an extended statistics upload confirmation
+ * message.
+ * Value: 0x1c
+ * - COOKIE_LSBS
+ * Bits 31:0
+ * Purpose: Provide a mechanism to match a target->host stats confirmation
+ * message with its preceding host->target stats request message.
+ * Value: LSBs of the opaque cookie specified by the host-side requestor
+ * - COOKIE_MSBS
+ * Bits 31:0
+ * Purpose: Provide a mechanism to match a target->host stats confirmation
+ * message with its preceding host->target stats request message.
+ * Value: MSBs of the opaque cookie specified by the host-side requestor
+ *
+ * Stats Information Element tag-length header fields:
+ * - STAT_TYPE
+ * Bits 7:0
+ * Purpose: identifies the type of statistics info held in the
+ * following information element
+ * Value: htt_dbg_ext_stats_type
+ * - STATUS
+ * Bits 10:8
+ * Purpose: indicate whether the requested stats are present
+ * Value: htt_dbg_ext_stats_status
+ * - DONE
+ * Bit 11
+ * Purpose:
+ * Indicates the completion of the stats entry; this will be the last
+ * stats conf HTT segment for the requested stats type.
+ * Value:
+ * 0 -> the stats retrieval is ongoing
+ * 1 -> the stats retrieval is complete
+ * - LENGTH
+ * Bits 31:16
+ * Purpose: indicate the stats information size
+ * Value: This field specifies the number of bytes of stats information
+ * that follows the element tag-length header.
+ * It is expected but not required that this length is a multiple of
+ * 4 bytes.
+ */
+
+#define HTT_T2H_EXT_STATS_INFO1_DONE BIT(11)
+#define HTT_T2H_EXT_STATS_INFO1_LENGTH GENMASK(31, 16)
+
+struct ath11k_htt_extd_stats_msg {
+ u32 info0;
+ u64 cookie;
+ u32 info1;
+ u8 data[];
+} __packed;
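+
+/* Illustrative sketch (not part of this patch): consuming one confirmation
+ * segment and checking the Done bit described above:
+ *
+ *	u16 len = FIELD_GET(HTT_T2H_EXT_STATS_INFO1_LENGTH, msg->info1);
+ *	bool done = !!(msg->info1 & HTT_T2H_EXT_STATS_INFO1_DONE);
+ *
+ * msg->data holds len bytes of stats TLVs; segments for the same request
+ * are matched up via the echoed 64-bit cookie.
+ */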
+
+#define HTT_MAC_ADDR_L32_0 GENMASK(7, 0)
+#define HTT_MAC_ADDR_L32_1 GENMASK(15, 8)
+#define HTT_MAC_ADDR_L32_2 GENMASK(23, 16)
+#define HTT_MAC_ADDR_L32_3 GENMASK(31, 24)
+#define HTT_MAC_ADDR_H16_0 GENMASK(7, 0)
+#define HTT_MAC_ADDR_H16_1 GENMASK(15, 8)
+
+struct htt_mac_addr {
+ u32 mac_addr_l32;
+ u32 mac_addr_h16;
+};
+
+static inline void ath11k_dp_get_mac_addr(u32 addr_l32, u16 addr_h16, u8 *addr)
+{
+ if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) {
+ addr_l32 = swab32(addr_l32);
+ addr_h16 = swab16(addr_h16);
+ }
+
+ memcpy(addr, &addr_l32, 4);
+ memcpy(addr + 4, &addr_h16, ETH_ALEN - 4);
+}
+
+int ath11k_dp_service_srng(struct ath11k_base *ab,
+ struct ath11k_ext_irq_grp *irq_grp,
+ int budget);
+int ath11k_dp_htt_connect(struct ath11k_dp *dp);
+void ath11k_dp_vdev_tx_attach(struct ath11k *ar, struct ath11k_vif *arvif);
+void ath11k_dp_free(struct ath11k_base *ab);
+int ath11k_dp_alloc(struct ath11k_base *ab);
+int ath11k_dp_pdev_alloc(struct ath11k_base *ab);
+void ath11k_dp_pdev_pre_alloc(struct ath11k_base *ab);
+void ath11k_dp_pdev_free(struct ath11k_base *ab);
+int ath11k_dp_tx_htt_srng_setup(struct ath11k_base *ab, u32 ring_id,
+ int mac_id, enum hal_ring_type ring_type);
+int ath11k_dp_peer_setup(struct ath11k *ar, int vdev_id, const u8 *addr);
+void ath11k_dp_peer_cleanup(struct ath11k *ar, int vdev_id, const u8 *addr);
+void ath11k_dp_srng_cleanup(struct ath11k_base *ab, struct dp_srng *ring);
+int ath11k_dp_srng_setup(struct ath11k_base *ab, struct dp_srng *ring,
+ enum hal_ring_type type, int ring_num,
+ int mac_id, int num_entries);
+void ath11k_dp_link_desc_cleanup(struct ath11k_base *ab,
+ struct dp_link_desc_bank *desc_bank,
+ u32 ring_type, struct dp_srng *ring);
+int ath11k_dp_link_desc_setup(struct ath11k_base *ab,
+ struct dp_link_desc_bank *link_desc_banks,
+ u32 ring_type, struct hal_srng *srng,
+ u32 n_link_desc);
+void ath11k_dp_shadow_start_timer(struct ath11k_base *ab,
+ struct hal_srng *srng,
+ struct ath11k_hp_update_timer *update_timer);
+void ath11k_dp_shadow_stop_timer(struct ath11k_base *ab,
+ struct ath11k_hp_update_timer *update_timer);
+void ath11k_dp_shadow_init_timer(struct ath11k_base *ab,
+ struct ath11k_hp_update_timer *update_timer,
+ u32 interval, u32 ring_id);
+void ath11k_dp_stop_shadow_timers(struct ath11k_base *ab);
+
+#endif
diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c
new file mode 100644
index 000000000000..b9e976ddcbbf
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/dp_rx.c
@@ -0,0 +1,5794 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/ieee80211.h>
+#include <linux/kernel.h>
+#include <linux/skbuff.h>
+#include <crypto/hash.h>
+#include "core.h"
+#include "debug.h"
+#include "debugfs_htt_stats.h"
+#include "debugfs_sta.h"
+#include "hal_desc.h"
+#include "hw.h"
+#include "dp_rx.h"
+#include "hal_rx.h"
+#include "dp_tx.h"
+#include "peer.h"
+
+#define ATH11K_DP_RX_FRAGMENT_TIMEOUT_MS (2 * HZ)
+
+static inline
+u8 *ath11k_dp_rx_h_80211_hdr(struct ath11k_base *ab, struct hal_rx_desc *desc)
+{
+ return ab->hw_params.hw_ops->rx_desc_get_hdr_status(desc);
+}
+
+static inline
+enum hal_encrypt_type ath11k_dp_rx_h_mpdu_start_enctype(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ if (!ab->hw_params.hw_ops->rx_desc_encrypt_valid(desc))
+ return HAL_ENCRYPT_TYPE_OPEN;
+
+ return ab->hw_params.hw_ops->rx_desc_get_encrypt_type(desc);
+}
+
+static inline u8 ath11k_dp_rx_h_msdu_start_decap_type(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hw_params.hw_ops->rx_desc_get_decap_type(desc);
+}
+
+static inline
+bool ath11k_dp_rx_h_msdu_start_ldpc_support(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hw_params.hw_ops->rx_desc_get_ldpc_support(desc);
+}
+
+static inline
+u8 ath11k_dp_rx_h_msdu_start_mesh_ctl_present(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hw_params.hw_ops->rx_desc_get_mesh_ctl(desc);
+}
+
+static inline
+bool ath11k_dp_rx_h_mpdu_start_seq_ctrl_valid(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hw_params.hw_ops->rx_desc_get_mpdu_seq_ctl_vld(desc);
+}
+
+static inline bool ath11k_dp_rx_h_mpdu_start_fc_valid(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hw_params.hw_ops->rx_desc_get_mpdu_fc_valid(desc);
+}
+
+static inline bool ath11k_dp_rx_h_mpdu_start_more_frags(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr;
+
+ hdr = (struct ieee80211_hdr *)(skb->data + ab->hw_params.hal_desc_sz);
+ return ieee80211_has_morefrags(hdr->frame_control);
+}
+
+static inline u16 ath11k_dp_rx_h_mpdu_start_frag_no(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr;
+
+ hdr = (struct ieee80211_hdr *)(skb->data + ab->hw_params.hal_desc_sz);
+ return le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
+}
+
+static inline u16 ath11k_dp_rx_h_mpdu_start_seq_no(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hw_params.hw_ops->rx_desc_get_mpdu_start_seq_no(desc);
+}
+
+static inline void *ath11k_dp_rx_get_attention(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hw_params.hw_ops->rx_desc_get_attention(desc);
+}
+
+static inline bool ath11k_dp_rx_h_attn_msdu_done(struct rx_attention *attn)
+{
+ return !!FIELD_GET(RX_ATTENTION_INFO2_MSDU_DONE,
+ __le32_to_cpu(attn->info2));
+}
+
+static inline bool ath11k_dp_rx_h_attn_l4_cksum_fail(struct rx_attention *attn)
+{
+ return !!FIELD_GET(RX_ATTENTION_INFO1_TCP_UDP_CKSUM_FAIL,
+ __le32_to_cpu(attn->info1));
+}
+
+static inline bool ath11k_dp_rx_h_attn_ip_cksum_fail(struct rx_attention *attn)
+{
+ return !!FIELD_GET(RX_ATTENTION_INFO1_IP_CKSUM_FAIL,
+ __le32_to_cpu(attn->info1));
+}
+
+static inline bool ath11k_dp_rx_h_attn_is_decrypted(struct rx_attention *attn)
+{
+ return (FIELD_GET(RX_ATTENTION_INFO2_DCRYPT_STATUS_CODE,
+ __le32_to_cpu(attn->info2)) ==
+ RX_DESC_DECRYPT_STATUS_CODE_OK);
+}
+
+static u32 ath11k_dp_rx_h_attn_mpdu_err(struct rx_attention *attn)
+{
+ u32 info = __le32_to_cpu(attn->info1);
+ u32 errmap = 0;
+
+ if (info & RX_ATTENTION_INFO1_FCS_ERR)
+ errmap |= DP_RX_MPDU_ERR_FCS;
+
+ if (info & RX_ATTENTION_INFO1_DECRYPT_ERR)
+ errmap |= DP_RX_MPDU_ERR_DECRYPT;
+
+ if (info & RX_ATTENTION_INFO1_TKIP_MIC_ERR)
+ errmap |= DP_RX_MPDU_ERR_TKIP_MIC;
+
+ if (info & RX_ATTENTION_INFO1_A_MSDU_ERROR)
+ errmap |= DP_RX_MPDU_ERR_AMSDU_ERR;
+
+ if (info & RX_ATTENTION_INFO1_OVERFLOW_ERR)
+ errmap |= DP_RX_MPDU_ERR_OVERFLOW;
+
+ if (info & RX_ATTENTION_INFO1_MSDU_LEN_ERR)
+ errmap |= DP_RX_MPDU_ERR_MSDU_LEN;
+
+ if (info & RX_ATTENTION_INFO1_MPDU_LEN_ERR)
+ errmap |= DP_RX_MPDU_ERR_MPDU_LEN;
+
+ return errmap;
+}
+
+static bool ath11k_dp_rx_h_attn_msdu_len_err(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ struct rx_attention *rx_attention;
+ u32 errmap;
+
+ rx_attention = ath11k_dp_rx_get_attention(ab, desc);
+ errmap = ath11k_dp_rx_h_attn_mpdu_err(rx_attention);
+
+ return errmap & DP_RX_MPDU_ERR_MSDU_LEN;
+}
+
+static inline u16 ath11k_dp_rx_h_msdu_start_msdu_len(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hw_params.hw_ops->rx_desc_get_msdu_len(desc);
+}
+
+static inline u8 ath11k_dp_rx_h_msdu_start_sgi(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hw_params.hw_ops->rx_desc_get_msdu_sgi(desc);
+}
+
+static inline u8 ath11k_dp_rx_h_msdu_start_rate_mcs(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hw_params.hw_ops->rx_desc_get_msdu_rate_mcs(desc);
+}
+
+static inline u8 ath11k_dp_rx_h_msdu_start_rx_bw(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hw_params.hw_ops->rx_desc_get_msdu_rx_bw(desc);
+}
+
+static inline u32 ath11k_dp_rx_h_msdu_start_freq(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hw_params.hw_ops->rx_desc_get_msdu_freq(desc);
+}
+
+static inline u8 ath11k_dp_rx_h_msdu_start_pkt_type(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hw_params.hw_ops->rx_desc_get_msdu_pkt_type(desc);
+}
+
+static inline u8 ath11k_dp_rx_h_msdu_start_nss(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return hweight8(ab->hw_params.hw_ops->rx_desc_get_msdu_nss(desc));
+}
+
+static inline u8 ath11k_dp_rx_h_mpdu_start_tid(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hw_params.hw_ops->rx_desc_get_mpdu_tid(desc);
+}
+
+static inline u16 ath11k_dp_rx_h_mpdu_start_peer_id(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hw_params.hw_ops->rx_desc_get_mpdu_peer_id(desc);
+}
+
+static inline u8 ath11k_dp_rx_h_msdu_end_l3pad(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hw_params.hw_ops->rx_desc_get_l3_pad_bytes(desc);
+}
+
+static inline bool ath11k_dp_rx_h_msdu_end_first_msdu(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hw_params.hw_ops->rx_desc_get_first_msdu(desc);
+}
+
+static bool ath11k_dp_rx_h_msdu_end_last_msdu(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hw_params.hw_ops->rx_desc_get_last_msdu(desc);
+}
+
+static void ath11k_dp_rx_desc_end_tlv_copy(struct ath11k_base *ab,
+ struct hal_rx_desc *fdesc,
+ struct hal_rx_desc *ldesc)
+{
+ ab->hw_params.hw_ops->rx_desc_copy_attn_end_tlv(fdesc, ldesc);
+}
+
+static inline u32 ath11k_dp_rxdesc_get_mpdulen_err(struct rx_attention *attn)
+{
+ return FIELD_GET(RX_ATTENTION_INFO1_MPDU_LEN_ERR,
+ __le32_to_cpu(attn->info1));
+}
+
+static inline u8 *ath11k_dp_rxdesc_get_80211hdr(struct ath11k_base *ab,
+ struct hal_rx_desc *rx_desc)
+{
+ u8 *rx_pkt_hdr;
+
+ rx_pkt_hdr = ab->hw_params.hw_ops->rx_desc_get_msdu_payload(rx_desc);
+
+ return rx_pkt_hdr;
+}
+
+static inline bool ath11k_dp_rxdesc_mpdu_valid(struct ath11k_base *ab,
+ struct hal_rx_desc *rx_desc)
+{
+ u32 tlv_tag;
+
+ tlv_tag = ab->hw_params.hw_ops->rx_desc_get_mpdu_start_tag(rx_desc);
+
+ return tlv_tag == HAL_RX_MPDU_START;
+}
+
+static inline u32 ath11k_dp_rxdesc_get_ppduid(struct ath11k_base *ab,
+ struct hal_rx_desc *rx_desc)
+{
+ return ab->hw_params.hw_ops->rx_desc_get_mpdu_ppdu_id(rx_desc);
+}
+
+static inline void ath11k_dp_rxdesc_set_msdu_len(struct ath11k_base *ab,
+ struct hal_rx_desc *desc,
+ u16 len)
+{
+ ab->hw_params.hw_ops->rx_desc_set_msdu_len(desc, len);
+}
+
+static bool ath11k_dp_rx_h_attn_is_mcbc(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ struct rx_attention *attn = ath11k_dp_rx_get_attention(ab, desc);
+
+ return ath11k_dp_rx_h_msdu_end_first_msdu(ab, desc) &&
+ (!!FIELD_GET(RX_ATTENTION_INFO1_MCAST_BCAST,
+ __le32_to_cpu(attn->info1)));
+}
+
+static bool ath11k_dp_rxdesc_mac_addr2_valid(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hw_params.hw_ops->rx_desc_mac_addr2_valid(desc);
+}
+
+static u8 *ath11k_dp_rxdesc_mpdu_start_addr2(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hw_params.hw_ops->rx_desc_mpdu_start_addr2(desc);
+}
+
+static void ath11k_dp_service_mon_ring(struct timer_list *t)
+{
+ struct ath11k_base *ab = timer_container_of(ab, t, mon_reap_timer);
+ int i;
+
+ for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++)
+ ath11k_dp_rx_process_mon_rings(ab, i, NULL, DP_MON_SERVICE_BUDGET);
+
+ mod_timer(&ab->mon_reap_timer, jiffies +
+ msecs_to_jiffies(ATH11K_MON_TIMER_INTERVAL));
+}
+
+static int ath11k_dp_purge_mon_ring(struct ath11k_base *ab)
+{
+ int i, reaped = 0;
+ unsigned long timeout = jiffies + msecs_to_jiffies(DP_MON_PURGE_TIMEOUT_MS);
+
+ do {
+ for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++)
+ reaped += ath11k_dp_rx_process_mon_rings(ab, i,
+ NULL,
+ DP_MON_SERVICE_BUDGET);
+
+ /* nothing more to reap */
+ if (reaped < DP_MON_SERVICE_BUDGET)
+ return 0;
+
+ } while (time_before(jiffies, timeout));
+
+ ath11k_warn(ab, "dp mon ring purge timeout");
+
+ return -ETIMEDOUT;
+}
+
+/* Returns number of Rx buffers replenished */
+int ath11k_dp_rxbufs_replenish(struct ath11k_base *ab, int mac_id,
+ struct dp_rxdma_ring *rx_ring,
+ int req_entries,
+ enum hal_rx_buf_return_buf_manager mgr)
+{
+ struct hal_srng *srng;
+ u32 *desc;
+ struct sk_buff *skb;
+ int num_free;
+ int num_remain;
+ int buf_id;
+ u32 cookie;
+ dma_addr_t paddr;
+
+ req_entries = min(req_entries, rx_ring->bufs_max);
+
+ srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];
+
+ spin_lock_bh(&srng->lock);
+
+ ath11k_hal_srng_access_begin(ab, srng);
+
+ num_free = ath11k_hal_srng_src_num_free(ab, srng, true);
+ if (!req_entries && (num_free > (rx_ring->bufs_max * 3) / 4))
+ req_entries = num_free;
+
+ req_entries = min(num_free, req_entries);
+ num_remain = req_entries;
+
+ while (num_remain > 0) {
+ skb = dev_alloc_skb(DP_RX_BUFFER_SIZE +
+ DP_RX_BUFFER_ALIGN_SIZE);
+ if (!skb)
+ break;
+
+ if (!IS_ALIGNED((unsigned long)skb->data,
+ DP_RX_BUFFER_ALIGN_SIZE)) {
+ skb_pull(skb,
+ PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) -
+ skb->data);
+ }
+
+ paddr = dma_map_single(ab->dev, skb->data,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(ab->dev, paddr))
+ goto fail_free_skb;
+
+ spin_lock_bh(&rx_ring->idr_lock);
+ buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 1,
+ (rx_ring->bufs_max * 3) + 1, GFP_ATOMIC);
+ spin_unlock_bh(&rx_ring->idr_lock);
+ if (buf_id <= 0)
+ goto fail_dma_unmap;
+
+ desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
+ if (!desc)
+ goto fail_idr_remove;
+
+ ATH11K_SKB_RXCB(skb)->paddr = paddr;
+
+ cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) |
+ FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);
+
+ num_remain--;
+
+ ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr);
+ }
+
+ ath11k_hal_srng_access_end(ab, srng);
+
+ spin_unlock_bh(&srng->lock);
+
+ return req_entries - num_remain;
+
+fail_idr_remove:
+ spin_lock_bh(&rx_ring->idr_lock);
+ idr_remove(&rx_ring->bufs_idr, buf_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+fail_dma_unmap:
+ dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+fail_free_skb:
+ dev_kfree_skb_any(skb);
+
+ ath11k_hal_srng_access_end(ab, srng);
+
+ spin_unlock_bh(&srng->lock);
+
+ return req_entries - num_remain;
+}
+
+static int ath11k_dp_rxdma_buf_ring_free(struct ath11k *ar,
+ struct dp_rxdma_ring *rx_ring)
+{
+ struct sk_buff *skb;
+ int buf_id;
+
+ spin_lock_bh(&rx_ring->idr_lock);
+ idr_for_each_entry(&rx_ring->bufs_idr, skb, buf_id) {
+ idr_remove(&rx_ring->bufs_idr, buf_id);
+ /* TODO: Understand where internal driver does this dma_unmap
+ * of rxdma_buffer.
+ */
+ dma_unmap_single(ar->ab->dev, ATH11K_SKB_RXCB(skb)->paddr,
+ skb->len + skb_tailroom(skb), DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
+ }
+
+ idr_destroy(&rx_ring->bufs_idr);
+ spin_unlock_bh(&rx_ring->idr_lock);
+
+ return 0;
+}
+
+static int ath11k_dp_rxdma_pdev_buf_free(struct ath11k *ar)
+{
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ struct ath11k_base *ab = ar->ab;
+ struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
+ int i;
+
+ ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);
+
+ rx_ring = &dp->rxdma_mon_buf_ring;
+ ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);
+
+ for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
+ rx_ring = &dp->rx_mon_status_refill_ring[i];
+ ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);
+ }
+
+ return 0;
+}
+
+static int ath11k_dp_rxdma_ring_buf_setup(struct ath11k *ar,
+ struct dp_rxdma_ring *rx_ring,
+ u32 ringtype)
+{
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ int num_entries;
+
+ num_entries = rx_ring->refill_buf_ring.size /
+ ath11k_hal_srng_get_entrysize(ar->ab, ringtype);
+
+ rx_ring->bufs_max = num_entries;
+ ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id, rx_ring, num_entries,
+ ar->ab->hw_params.hal_params->rx_buf_rbm);
+ return 0;
+}
+
+static int ath11k_dp_rxdma_pdev_buf_setup(struct ath11k *ar)
+{
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ struct ath11k_base *ab = ar->ab;
+ struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
+ int i;
+
+ ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_BUF);
+
+ if (ar->ab->hw_params.rxdma1_enable) {
+ rx_ring = &dp->rxdma_mon_buf_ring;
+ ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_MONITOR_BUF);
+ }
+
+ for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
+ rx_ring = &dp->rx_mon_status_refill_ring[i];
+ ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_MONITOR_STATUS);
+ }
+
+ return 0;
+}
+
+static void ath11k_dp_rx_pdev_srng_free(struct ath11k *ar)
+{
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ struct ath11k_base *ab = ar->ab;
+ int i;
+
+ ath11k_dp_srng_cleanup(ab, &dp->rx_refill_buf_ring.refill_buf_ring);
+
+ for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
+ if (ab->hw_params.rx_mac_buf_ring)
+ ath11k_dp_srng_cleanup(ab, &dp->rx_mac_buf_ring[i]);
+
+ ath11k_dp_srng_cleanup(ab, &dp->rxdma_err_dst_ring[i]);
+ ath11k_dp_srng_cleanup(ab,
+ &dp->rx_mon_status_refill_ring[i].refill_buf_ring);
+ }
+
+ ath11k_dp_srng_cleanup(ab, &dp->rxdma_mon_buf_ring.refill_buf_ring);
+}
+
+void ath11k_dp_pdev_reo_cleanup(struct ath11k_base *ab)
+{
+ struct ath11k_dp *dp = &ab->dp;
+ int i;
+
+ for (i = 0; i < DP_REO_DST_RING_MAX; i++)
+ ath11k_dp_srng_cleanup(ab, &dp->reo_dst_ring[i]);
+}
+
+int ath11k_dp_pdev_reo_setup(struct ath11k_base *ab)
+{
+ struct ath11k_dp *dp = &ab->dp;
+ int ret;
+ int i;
+
+ for (i = 0; i < DP_REO_DST_RING_MAX; i++) {
+ ret = ath11k_dp_srng_setup(ab, &dp->reo_dst_ring[i],
+ HAL_REO_DST, i, 0,
+ DP_REO_DST_RING_SIZE);
+ if (ret) {
+ ath11k_warn(ab, "failed to setup reo_dst_ring\n");
+ goto err_reo_cleanup;
+ }
+ }
+
+ return 0;
+
+err_reo_cleanup:
+ ath11k_dp_pdev_reo_cleanup(ab);
+
+ return ret;
+}
+
+static int ath11k_dp_rx_pdev_srng_alloc(struct ath11k *ar)
+{
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ struct ath11k_base *ab = ar->ab;
+ struct dp_srng *srng = NULL;
+ int i;
+ int ret;
+
+ ret = ath11k_dp_srng_setup(ar->ab,
+ &dp->rx_refill_buf_ring.refill_buf_ring,
+ HAL_RXDMA_BUF, 0,
+ dp->mac_id, DP_RXDMA_BUF_RING_SIZE);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to setup rx_refill_buf_ring\n");
+ return ret;
+ }
+
+ if (ar->ab->hw_params.rx_mac_buf_ring) {
+ for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
+ ret = ath11k_dp_srng_setup(ar->ab,
+ &dp->rx_mac_buf_ring[i],
+ HAL_RXDMA_BUF, 1,
+ dp->mac_id + i, 1024);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to setup rx_mac_buf_ring %d\n",
+ i);
+ return ret;
+ }
+ }
+ }
+
+ for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
+ ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_err_dst_ring[i],
+ HAL_RXDMA_DST, 0, dp->mac_id + i,
+ DP_RXDMA_ERR_DST_RING_SIZE);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to setup rxdma_err_dst_ring %d\n", i);
+ return ret;
+ }
+ }
+
+ for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
+ srng = &dp->rx_mon_status_refill_ring[i].refill_buf_ring;
+ ret = ath11k_dp_srng_setup(ar->ab,
+ srng,
+ HAL_RXDMA_MONITOR_STATUS, 0, dp->mac_id + i,
+ DP_RXDMA_MON_STATUS_RING_SIZE);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to setup rx_mon_status_refill_ring %d\n", i);
+ return ret;
+ }
+ }
+
+ /* If rxdma1_enable is false, there is no need to set up
+ * rxdma_mon_buf_ring, rxdma_mon_dst_ring and rxdma_mon_desc_ring;
+ * only init the reap timer for QCA6390.
+ */
+ if (!ar->ab->hw_params.rxdma1_enable) {
+ /* init mon status buffer reap timer */
+ timer_setup(&ar->ab->mon_reap_timer,
+ ath11k_dp_service_mon_ring, 0);
+ return 0;
+ }
+
+ ret = ath11k_dp_srng_setup(ar->ab,
+ &dp->rxdma_mon_buf_ring.refill_buf_ring,
+ HAL_RXDMA_MONITOR_BUF, 0, dp->mac_id,
+ DP_RXDMA_MONITOR_BUF_RING_SIZE);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to setup HAL_RXDMA_MONITOR_BUF\n");
+ return ret;
+ }
+
+ ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_mon_dst_ring,
+ HAL_RXDMA_MONITOR_DST, 0, dp->mac_id,
+ DP_RXDMA_MONITOR_DST_RING_SIZE);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to setup HAL_RXDMA_MONITOR_DST\n");
+ return ret;
+ }
+
+ ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_mon_desc_ring,
+ HAL_RXDMA_MONITOR_DESC, 0, dp->mac_id,
+ DP_RXDMA_MONITOR_DESC_RING_SIZE);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to setup HAL_RXDMA_MONITOR_DESC\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+void ath11k_dp_reo_cmd_list_cleanup(struct ath11k_base *ab)
+{
+ struct ath11k_dp *dp = &ab->dp;
+ struct dp_reo_cmd *cmd, *tmp;
+ struct dp_reo_cache_flush_elem *cmd_cache, *tmp_cache;
+ struct dp_rx_tid *rx_tid;
+
+ spin_lock_bh(&dp->reo_cmd_lock);
+ list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
+ list_del(&cmd->list);
+ rx_tid = &cmd->data;
+ if (rx_tid->vaddr_unaligned) {
+ dma_free_noncoherent(ab->dev, rx_tid->unaligned_size,
+ rx_tid->vaddr_unaligned,
+ rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL);
+ rx_tid->vaddr_unaligned = NULL;
+ }
+ kfree(cmd);
+ }
+
+ list_for_each_entry_safe(cmd_cache, tmp_cache,
+ &dp->reo_cmd_cache_flush_list, list) {
+ list_del(&cmd_cache->list);
+ dp->reo_cmd_cache_flush_count--;
+ rx_tid = &cmd_cache->data;
+ if (rx_tid->vaddr_unaligned) {
+ dma_free_noncoherent(ab->dev, rx_tid->unaligned_size,
+ rx_tid->vaddr_unaligned,
+ rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL);
+ rx_tid->vaddr_unaligned = NULL;
+ }
+ kfree(cmd_cache);
+ }
+ spin_unlock_bh(&dp->reo_cmd_lock);
+}
+
+static void ath11k_dp_reo_cmd_free(struct ath11k_dp *dp, void *ctx,
+ enum hal_reo_cmd_status status)
+{
+ struct dp_rx_tid *rx_tid = ctx;
+
+ if (status != HAL_REO_CMD_SUCCESS)
+ ath11k_warn(dp->ab, "failed to flush rx tid hw desc, tid %d status %d\n",
+ rx_tid->tid, status);
+ if (rx_tid->vaddr_unaligned) {
+ dma_free_noncoherent(dp->ab->dev, rx_tid->unaligned_size,
+ rx_tid->vaddr_unaligned,
+ rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL);
+ rx_tid->vaddr_unaligned = NULL;
+ }
+}
+
+static void ath11k_dp_reo_cache_flush(struct ath11k_base *ab,
+ struct dp_rx_tid *rx_tid)
+{
+ struct ath11k_hal_reo_cmd cmd = {};
+ unsigned long tot_desc_sz, desc_sz;
+ int ret;
+
+ tot_desc_sz = rx_tid->size;
+ desc_sz = ath11k_hal_reo_qdesc_size(0, HAL_DESC_REO_NON_QOS_TID);
+
+ while (tot_desc_sz > desc_sz) {
+ tot_desc_sz -= desc_sz;
+ cmd.addr_lo = lower_32_bits(rx_tid->paddr + tot_desc_sz);
+ cmd.addr_hi = upper_32_bits(rx_tid->paddr);
+ ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid,
+ HAL_REO_CMD_FLUSH_CACHE, &cmd,
+ NULL);
+ if (ret)
+ ath11k_warn(ab,
+ "failed to send HAL_REO_CMD_FLUSH_CACHE, tid %d (%d)\n",
+ rx_tid->tid, ret);
+ }
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.addr_lo = lower_32_bits(rx_tid->paddr);
+ cmd.addr_hi = upper_32_bits(rx_tid->paddr);
+ cmd.flag |= HAL_REO_CMD_FLG_NEED_STATUS;
+ ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid,
+ HAL_REO_CMD_FLUSH_CACHE,
+ &cmd, ath11k_dp_reo_cmd_free);
+ if (ret) {
+ ath11k_err(ab, "failed to send HAL_REO_CMD_FLUSH_CACHE cmd, tid %d (%d)\n",
+ rx_tid->tid, ret);
+ dma_free_noncoherent(ab->dev, rx_tid->unaligned_size,
+ rx_tid->vaddr_unaligned,
+ rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL);
+ rx_tid->vaddr_unaligned = NULL;
+ }
+}
+
+static void ath11k_dp_rx_tid_del_func(struct ath11k_dp *dp, void *ctx,
+ enum hal_reo_cmd_status status)
+{
+ struct ath11k_base *ab = dp->ab;
+ struct dp_rx_tid *rx_tid = ctx;
+ struct dp_reo_cache_flush_elem *elem, *tmp;
+
+ if (status == HAL_REO_CMD_DRAIN) {
+ goto free_desc;
+ } else if (status != HAL_REO_CMD_SUCCESS) {
+ /* Shouldn't happen! Cleanup in case of other failure? */
+ ath11k_warn(ab, "failed to delete rx tid %d hw descriptor %d\n",
+ rx_tid->tid, status);
+ return;
+ }
+
+ elem = kzalloc(sizeof(*elem), GFP_ATOMIC);
+ if (!elem)
+ goto free_desc;
+
+ elem->ts = jiffies;
+ memcpy(&elem->data, rx_tid, sizeof(*rx_tid));
+
+ spin_lock_bh(&dp->reo_cmd_lock);
+ list_add_tail(&elem->list, &dp->reo_cmd_cache_flush_list);
+ dp->reo_cmd_cache_flush_count++;
+
+ /* Flush and invalidate aged REO desc from HW cache */
+ list_for_each_entry_safe(elem, tmp, &dp->reo_cmd_cache_flush_list,
+ list) {
+ if (dp->reo_cmd_cache_flush_count > DP_REO_DESC_FREE_THRESHOLD ||
+ time_after(jiffies, elem->ts +
+ msecs_to_jiffies(DP_REO_DESC_FREE_TIMEOUT_MS))) {
+ list_del(&elem->list);
+ dp->reo_cmd_cache_flush_count--;
+ spin_unlock_bh(&dp->reo_cmd_lock);
+
+ ath11k_dp_reo_cache_flush(ab, &elem->data);
+ kfree(elem);
+ spin_lock_bh(&dp->reo_cmd_lock);
+ }
+ }
+ spin_unlock_bh(&dp->reo_cmd_lock);
+
+ return;
+free_desc:
+ dma_free_noncoherent(ab->dev, rx_tid->unaligned_size,
+ rx_tid->vaddr_unaligned,
+ rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL);
+ rx_tid->vaddr_unaligned = NULL;
+}
+
+void ath11k_peer_rx_tid_delete(struct ath11k *ar,
+ struct ath11k_peer *peer, u8 tid)
+{
+ struct ath11k_hal_reo_cmd cmd = {};
+ struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
+ int ret;
+
+ if (!rx_tid->active)
+ return;
+
+ rx_tid->active = false;
+
+ cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
+ cmd.addr_lo = lower_32_bits(rx_tid->paddr);
+ cmd.addr_hi = upper_32_bits(rx_tid->paddr);
+ cmd.upd0 |= HAL_REO_CMD_UPD0_VLD;
+ ret = ath11k_dp_tx_send_reo_cmd(ar->ab, rx_tid,
+ HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
+ ath11k_dp_rx_tid_del_func);
+ if (ret) {
+ if (ret != -ESHUTDOWN)
+ ath11k_err(ar->ab, "failed to send HAL_REO_CMD_UPDATE_RX_QUEUE cmd, tid %d (%d)\n",
+ tid, ret);
+ dma_free_noncoherent(ar->ab->dev, rx_tid->unaligned_size,
+ rx_tid->vaddr_unaligned,
+ rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL);
+ rx_tid->vaddr_unaligned = NULL;
+ }
+
+ rx_tid->paddr = 0;
+ rx_tid->paddr_unaligned = 0;
+ rx_tid->size = 0;
+ rx_tid->unaligned_size = 0;
+}
+
+static int ath11k_dp_rx_link_desc_return(struct ath11k_base *ab,
+ u32 *link_desc,
+ enum hal_wbm_rel_bm_act action)
+{
+ struct ath11k_dp *dp = &ab->dp;
+ struct hal_srng *srng;
+ u32 *desc;
+ int ret = 0;
+
+ srng = &ab->hal.srng_list[dp->wbm_desc_rel_ring.ring_id];
+
+ spin_lock_bh(&srng->lock);
+
+ ath11k_hal_srng_access_begin(ab, srng);
+
+ desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
+ if (!desc) {
+ ret = -ENOBUFS;
+ goto exit;
+ }
+
+ ath11k_hal_rx_msdu_link_desc_set(ab, (void *)desc, (void *)link_desc,
+ action);
+
+exit:
+ ath11k_hal_srng_access_end(ab, srng);
+
+ spin_unlock_bh(&srng->lock);
+
+ return ret;
+}
+
+static void ath11k_dp_rx_frags_cleanup(struct dp_rx_tid *rx_tid, bool rel_link_desc)
+{
+ struct ath11k_base *ab = rx_tid->ab;
+
+ lockdep_assert_held(&ab->base_lock);
+
+ if (rx_tid->dst_ring_desc) {
+ if (rel_link_desc)
+ ath11k_dp_rx_link_desc_return(ab, (u32 *)rx_tid->dst_ring_desc,
+ HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
+ kfree(rx_tid->dst_ring_desc);
+ rx_tid->dst_ring_desc = NULL;
+ }
+
+ rx_tid->cur_sn = 0;
+ rx_tid->last_frag_no = 0;
+ rx_tid->rx_frag_bitmap = 0;
+ __skb_queue_purge(&rx_tid->rx_frags);
+}
+
+void ath11k_peer_frags_flush(struct ath11k *ar, struct ath11k_peer *peer)
+{
+ struct dp_rx_tid *rx_tid;
+ int i;
+
+ lockdep_assert_held(&ar->ab->base_lock);
+
+ for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
+ rx_tid = &peer->rx_tid[i];
+
+ spin_unlock_bh(&ar->ab->base_lock);
+ timer_delete_sync(&rx_tid->frag_timer);
+ spin_lock_bh(&ar->ab->base_lock);
+
+ ath11k_dp_rx_frags_cleanup(rx_tid, true);
+ }
+}
+
+void ath11k_peer_rx_tid_cleanup(struct ath11k *ar, struct ath11k_peer *peer)
+{
+ struct dp_rx_tid *rx_tid;
+ int i;
+
+ lockdep_assert_held(&ar->ab->base_lock);
+
+ for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
+ rx_tid = &peer->rx_tid[i];
+
+ ath11k_peer_rx_tid_delete(ar, peer, i);
+ ath11k_dp_rx_frags_cleanup(rx_tid, true);
+
+ spin_unlock_bh(&ar->ab->base_lock);
+ timer_delete_sync(&rx_tid->frag_timer);
+ spin_lock_bh(&ar->ab->base_lock);
+ }
+}
+
+static int ath11k_peer_rx_tid_reo_update(struct ath11k *ar,
+ struct ath11k_peer *peer,
+ struct dp_rx_tid *rx_tid,
+ u32 ba_win_sz, u16 ssn,
+ bool update_ssn)
+{
+ struct ath11k_hal_reo_cmd cmd = {};
+ int ret;
+
+ cmd.addr_lo = lower_32_bits(rx_tid->paddr);
+ cmd.addr_hi = upper_32_bits(rx_tid->paddr);
+ cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
+ cmd.upd0 = HAL_REO_CMD_UPD0_BA_WINDOW_SIZE;
+ cmd.ba_window_size = ba_win_sz;
+
+ if (update_ssn) {
+ cmd.upd0 |= HAL_REO_CMD_UPD0_SSN;
+ cmd.upd2 = FIELD_PREP(HAL_REO_CMD_UPD2_SSN, ssn);
+ }
+
+ ret = ath11k_dp_tx_send_reo_cmd(ar->ab, rx_tid,
+ HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
+ NULL);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to update rx tid queue, tid %d (%d)\n",
+ rx_tid->tid, ret);
+ return ret;
+ }
+
+ rx_tid->ba_win_sz = ba_win_sz;
+
+ return 0;
+}
+
+static void ath11k_dp_rx_tid_mem_free(struct ath11k_base *ab,
+ const u8 *peer_mac, int vdev_id, u8 tid)
+{
+ struct ath11k_peer *peer;
+ struct dp_rx_tid *rx_tid;
+
+ spin_lock_bh(&ab->base_lock);
+
+ peer = ath11k_peer_find(ab, vdev_id, peer_mac);
+ if (!peer) {
+ ath11k_warn(ab, "failed to find the peer to free up rx tid mem\n");
+ goto unlock_exit;
+ }
+
+ rx_tid = &peer->rx_tid[tid];
+ if (!rx_tid->active)
+ goto unlock_exit;
+
+ dma_free_noncoherent(ab->dev, rx_tid->unaligned_size, rx_tid->vaddr_unaligned,
+ rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL);
+ rx_tid->vaddr_unaligned = NULL;
+
+ rx_tid->active = false;
+
+unlock_exit:
+ spin_unlock_bh(&ab->base_lock);
+}
+
+int ath11k_peer_rx_tid_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id,
+ u8 tid, u32 ba_win_sz, u16 ssn,
+ enum hal_pn_type pn_type)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_peer *peer;
+ struct dp_rx_tid *rx_tid;
+ u32 hw_desc_sz, *vaddr;
+ void *vaddr_unaligned;
+ dma_addr_t paddr;
+ int ret;
+
+ spin_lock_bh(&ab->base_lock);
+
+ peer = ath11k_peer_find(ab, vdev_id, peer_mac);
+ if (!peer) {
+ ath11k_warn(ab, "failed to find the peer %pM to set up rx tid\n",
+ peer_mac);
+ spin_unlock_bh(&ab->base_lock);
+ return -ENOENT;
+ }
+
+ rx_tid = &peer->rx_tid[tid];
+ /* Update the tid queue if it is already setup */
+ if (rx_tid->active) {
+ paddr = rx_tid->paddr;
+ ret = ath11k_peer_rx_tid_reo_update(ar, peer, rx_tid,
+ ba_win_sz, ssn, true);
+ spin_unlock_bh(&ab->base_lock);
+ if (ret) {
+ ath11k_warn(ab, "failed to update reo for peer %pM rx tid %d\n: %d",
+ peer_mac, tid, ret);
+ return ret;
+ }
+
+ ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id,
+ peer_mac, paddr,
+ tid, 1, ba_win_sz);
+ if (ret)
+ ath11k_warn(ab, "failed to send wmi rx reorder queue for peer %pM tid %d: %d\n",
+ peer_mac, tid, ret);
+ return ret;
+ }
+
+ rx_tid->tid = tid;
+
+ rx_tid->ba_win_sz = ba_win_sz;
+
+ /* TODO: Optimize the memory allocation for qos tid based on
+ * the actual BA window size in REO tid update path.
+ */
+ if (tid == HAL_DESC_REO_NON_QOS_TID)
+ hw_desc_sz = ath11k_hal_reo_qdesc_size(ba_win_sz, tid);
+ else
+ hw_desc_sz = ath11k_hal_reo_qdesc_size(DP_BA_WIN_SZ_MAX, tid);
+
+ rx_tid->unaligned_size = hw_desc_sz + HAL_LINK_DESC_ALIGN - 1;
+ vaddr_unaligned = dma_alloc_noncoherent(ab->dev, rx_tid->unaligned_size, &paddr,
+ DMA_BIDIRECTIONAL, GFP_ATOMIC);
+ if (!vaddr_unaligned) {
+ spin_unlock_bh(&ab->base_lock);
+ return -ENOMEM;
+ }
+
+ rx_tid->vaddr_unaligned = vaddr_unaligned;
+ vaddr = PTR_ALIGN(vaddr_unaligned, HAL_LINK_DESC_ALIGN);
+ rx_tid->paddr_unaligned = paddr;
+ rx_tid->paddr = rx_tid->paddr_unaligned + ((unsigned long)vaddr -
+ (unsigned long)rx_tid->vaddr_unaligned);
+ ath11k_hal_reo_qdesc_setup(vaddr, tid, ba_win_sz, ssn, pn_type);
+ rx_tid->size = hw_desc_sz;
+ rx_tid->active = true;
+
+ /* After dma_alloc_noncoherent, the reo qdesc setup writes to vaddr on the
+ * CPU side. Since these writes are not yet visible to the device, the
+ * driver now needs to explicitly call dma_sync_single_for_device.
+ */
+ dma_sync_single_for_device(ab->dev, rx_tid->paddr,
+ rx_tid->size,
+ DMA_TO_DEVICE);
+ spin_unlock_bh(&ab->base_lock);
+
+ ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, peer_mac, rx_tid->paddr,
+ tid, 1, ba_win_sz);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to setup rx reorder queue for peer %pM tid %d: %d\n",
+ peer_mac, tid, ret);
+ ath11k_dp_rx_tid_mem_free(ab, peer_mac, vdev_id, tid);
+ }
+
+ return ret;
+}
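+
+/* Illustrative sketch (not driver code) of the alignment bookkeeping in
+ * ath11k_peer_rx_tid_setup() above, with assumed example values of
+ * hw_desc_sz = 1024, HAL_LINK_DESC_ALIGN = 128 and
+ * vaddr_unaligned = 0x...0f40 returned by dma_alloc_noncoherent():
+ *
+ * unaligned_size = 1024 + 128 - 1 = 1151
+ * vaddr = PTR_ALIGN(0x...0f40, 128) = 0x...0f80
+ * rx_tid->paddr = paddr_unaligned + (0x...0f80 - 0x...0f40)
+ *
+ * The CPU and DMA addresses are advanced by the same offset, so the
+ * device sees a HAL_LINK_DESC_ALIGN-aligned REO queue descriptor.
+ */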
+
+int ath11k_dp_rx_ampdu_start(struct ath11k *ar,
+ struct ieee80211_ampdu_params *params)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_sta *arsta = ath11k_sta_to_arsta(params->sta);
+ int vdev_id = arsta->arvif->vdev_id;
+ int ret;
+
+ ret = ath11k_peer_rx_tid_setup(ar, params->sta->addr, vdev_id,
+ params->tid, params->buf_size,
+ params->ssn, arsta->pn_type);
+ if (ret)
+ ath11k_warn(ab, "failed to setup rx tid %d\n", ret);
+
+ return ret;
+}
+
+int ath11k_dp_rx_ampdu_stop(struct ath11k *ar,
+ struct ieee80211_ampdu_params *params)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_peer *peer;
+ struct ath11k_sta *arsta = ath11k_sta_to_arsta(params->sta);
+ int vdev_id = arsta->arvif->vdev_id;
+ dma_addr_t paddr;
+ bool active;
+ int ret;
+
+ spin_lock_bh(&ab->base_lock);
+
+ peer = ath11k_peer_find(ab, vdev_id, params->sta->addr);
+ if (!peer) {
+ ath11k_warn(ab, "failed to find the peer to stop rx aggregation\n");
+ spin_unlock_bh(&ab->base_lock);
+ return -ENOENT;
+ }
+
+ paddr = peer->rx_tid[params->tid].paddr;
+ active = peer->rx_tid[params->tid].active;
+
+ if (!active) {
+ spin_unlock_bh(&ab->base_lock);
+ return 0;
+ }
+
+ ret = ath11k_peer_rx_tid_reo_update(ar, peer, peer->rx_tid, 1, 0, false);
+ spin_unlock_bh(&ab->base_lock);
+ if (ret) {
+ ath11k_warn(ab, "failed to update reo for rx tid %d: %d\n",
+ params->tid, ret);
+ return ret;
+ }
+
+ ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id,
+ params->sta->addr, paddr,
+ params->tid, 1, 1);
+ if (ret)
+ ath11k_warn(ab, "failed to send wmi to delete rx tid %d\n",
+ ret);
+
+ return ret;
+}
+
+int ath11k_dp_peer_rx_pn_replay_config(struct ath11k_vif *arvif,
+ const u8 *peer_addr,
+ enum set_key_cmd key_cmd,
+ struct ieee80211_key_conf *key)
+{
+ struct ath11k *ar = arvif->ar;
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_hal_reo_cmd cmd = {};
+ struct ath11k_peer *peer;
+ struct dp_rx_tid *rx_tid;
+ u8 tid;
+ int ret = 0;
+
+ /* NOTE: Enable PN/TSC replay check offload only for unicast frames.
+ * We use mac80211 PN/TSC replay check functionality for bcast/mcast
+ * for now.
+ */
+ if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+ return 0;
+
+ cmd.flag |= HAL_REO_CMD_FLG_NEED_STATUS;
+ cmd.upd0 |= HAL_REO_CMD_UPD0_PN |
+ HAL_REO_CMD_UPD0_PN_SIZE |
+ HAL_REO_CMD_UPD0_PN_VALID |
+ HAL_REO_CMD_UPD0_PN_CHECK |
+ HAL_REO_CMD_UPD0_SVLD;
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_TKIP:
+ case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_CCMP_256:
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ if (key_cmd == SET_KEY) {
+ cmd.upd1 |= HAL_REO_CMD_UPD1_PN_CHECK;
+ cmd.pn_size = 48;
+ }
+ break;
+ default:
+ break;
+ }
+
+ spin_lock_bh(&ab->base_lock);
+
+ peer = ath11k_peer_find(ab, arvif->vdev_id, peer_addr);
+ if (!peer) {
+ ath11k_warn(ab, "failed to find the peer to configure pn replay detection\n");
+ spin_unlock_bh(&ab->base_lock);
+ return -ENOENT;
+ }
+
+ for (tid = 0; tid <= IEEE80211_NUM_TIDS; tid++) {
+ rx_tid = &peer->rx_tid[tid];
+ if (!rx_tid->active)
+ continue;
+ cmd.addr_lo = lower_32_bits(rx_tid->paddr);
+ cmd.addr_hi = upper_32_bits(rx_tid->paddr);
+ ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid,
+ HAL_REO_CMD_UPDATE_RX_QUEUE,
+ &cmd, NULL);
+ if (ret) {
+ ath11k_warn(ab, "failed to configure rx tid %d queue for pn replay detection %d\n",
+ tid, ret);
+ break;
+ }
+ }
+
+ spin_unlock_bh(&ab->base_lock);
+
+ return ret;
+}
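+
+/* CCMP/GCMP ciphers use a 48-bit packet number, hence cmd.pn_size = 48
+ * as configured above; the same UPDATE_RX_QUEUE command is then
+ * replayed for every active TID so that each REO queue descriptor
+ * picks up the new PN-check configuration.
+ */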
+
+static inline int ath11k_get_ppdu_user_index(struct htt_ppdu_stats *ppdu_stats,
+ u16 peer_id)
+{
+ int i;
+
+ for (i = 0; i < HTT_PPDU_STATS_MAX_USERS - 1; i++) {
+ if (ppdu_stats->user_stats[i].is_valid_peer_id) {
+ if (peer_id == ppdu_stats->user_stats[i].peer_id)
+ return i;
+ } else {
+ return i;
+ }
+ }
+
+ return -EINVAL;
+}
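+
+/* For example, with user_stats[] = { {peer 5}, {peer 9}, <invalid>, ... },
+ * looking up peer 9 returns index 1, while looking up an unseen peer
+ * returns the first invalid slot, index 2. -EINVAL is returned only
+ * when all HTT_PPDU_STATS_MAX_USERS - 1 slots are held by other peers.
+ */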
+
+static int ath11k_htt_tlv_ppdu_stats_parse(struct ath11k_base *ab,
+ u16 tag, u16 len, const void *ptr,
+ void *data)
+{
+ struct htt_ppdu_stats_info *ppdu_info;
+ struct htt_ppdu_user_stats *user_stats;
+ int cur_user;
+ u16 peer_id;
+
+ ppdu_info = data;
+
+ switch (tag) {
+ case HTT_PPDU_STATS_TAG_COMMON:
+ if (len < sizeof(struct htt_ppdu_stats_common)) {
+ ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
+ len, tag);
+ return -EINVAL;
+ }
+ memcpy((void *)&ppdu_info->ppdu_stats.common, ptr,
+ sizeof(struct htt_ppdu_stats_common));
+ break;
+ case HTT_PPDU_STATS_TAG_USR_RATE:
+ if (len < sizeof(struct htt_ppdu_stats_user_rate)) {
+ ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
+ len, tag);
+ return -EINVAL;
+ }
+
+ peer_id = ((struct htt_ppdu_stats_user_rate *)ptr)->sw_peer_id;
+ cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
+ peer_id);
+ if (cur_user < 0)
+ return -EINVAL;
+ user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
+ user_stats->peer_id = peer_id;
+ user_stats->is_valid_peer_id = true;
+ memcpy((void *)&user_stats->rate, ptr,
+ sizeof(struct htt_ppdu_stats_user_rate));
+ user_stats->tlv_flags |= BIT(tag);
+ break;
+ case HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON:
+ if (len < sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn)) {
+ ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
+ len, tag);
+ return -EINVAL;
+ }
+
+ peer_id = ((struct htt_ppdu_stats_usr_cmpltn_cmn *)ptr)->sw_peer_id;
+ cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
+ peer_id);
+ if (cur_user < 0)
+ return -EINVAL;
+ user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
+ user_stats->peer_id = peer_id;
+ user_stats->is_valid_peer_id = true;
+ memcpy((void *)&user_stats->cmpltn_cmn, ptr,
+ sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn));
+ user_stats->tlv_flags |= BIT(tag);
+ break;
+ case HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS:
+ if (len <
+ sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status)) {
+ ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
+ len, tag);
+ return -EINVAL;
+ }
+
+ peer_id =
+ ((struct htt_ppdu_stats_usr_cmpltn_ack_ba_status *)ptr)->sw_peer_id;
+ cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
+ peer_id);
+ if (cur_user < 0)
+ return -EINVAL;
+ user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
+ user_stats->peer_id = peer_id;
+ user_stats->is_valid_peer_id = true;
+ memcpy((void *)&user_stats->ack_ba, ptr,
+ sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status));
+ user_stats->tlv_flags |= BIT(tag);
+ break;
+ }
+ return 0;
+}
+
+int ath11k_dp_htt_tlv_iter(struct ath11k_base *ab, const void *ptr, size_t len,
+ int (*iter)(struct ath11k_base *ar, u16 tag, u16 len,
+ const void *ptr, void *data),
+ void *data)
+{
+ const struct htt_tlv *tlv;
+ const void *begin = ptr;
+ u16 tlv_tag, tlv_len;
+ int ret = -EINVAL;
+
+ while (len > 0) {
+ if (len < sizeof(*tlv)) {
+ ath11k_err(ab, "htt tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n",
+ ptr - begin, len, sizeof(*tlv));
+ return -EINVAL;
+ }
+ tlv = (struct htt_tlv *)ptr;
+ tlv_tag = FIELD_GET(HTT_TLV_TAG, tlv->header);
+ tlv_len = FIELD_GET(HTT_TLV_LEN, tlv->header);
+ ptr += sizeof(*tlv);
+ len -= sizeof(*tlv);
+
+ if (tlv_len > len) {
+ ath11k_err(ab, "htt tlv parse failure of tag %u at byte %zd (%zu bytes left, %u expected)\n",
+ tlv_tag, ptr - begin, len, tlv_len);
+ return -EINVAL;
+ }
+ ret = iter(ab, tlv_tag, tlv_len, ptr, data);
+ if (ret == -ENOMEM)
+ return ret;
+
+ ptr += tlv_len;
+ len -= tlv_len;
+ }
+ return 0;
+}
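+
+/* A minimal sketch of the buffer layout walked above, assuming the HTT
+ * definition of a 4-byte header word carrying the tag/len bitfields
+ * followed by tlv_len bytes of payload:
+ *
+ * | header (tag, len) | payload[tlv_len] | header | payload | ...
+ *
+ * Each iteration consumes sizeof(struct htt_tlv) + tlv_len bytes, so a
+ * well-formed buffer is consumed exactly and the loop returns 0.
+ */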
+
+static void
+ath11k_update_per_peer_tx_stats(struct ath11k *ar,
+ struct htt_ppdu_stats *ppdu_stats, u8 user)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_peer *peer;
+ struct ieee80211_sta *sta;
+ struct ath11k_sta *arsta;
+ struct htt_ppdu_stats_user_rate *user_rate;
+ struct ath11k_per_peer_tx_stats *peer_stats = &ar->peer_tx_stats;
+ struct htt_ppdu_user_stats *usr_stats = &ppdu_stats->user_stats[user];
+ struct htt_ppdu_stats_common *common = &ppdu_stats->common;
+ int ret;
+ u8 flags, mcs, nss, bw, sgi, dcm, rate_idx = 0;
+ u32 succ_bytes = 0;
+ u16 rate = 0, succ_pkts = 0;
+ u32 tx_duration = 0;
+ u8 tid = HTT_PPDU_STATS_NON_QOS_TID;
+ bool is_ampdu = false;
+
+ if (!(usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_RATE)))
+ return;
+
+ if (usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON))
+ is_ampdu =
+ HTT_USR_CMPLTN_IS_AMPDU(usr_stats->cmpltn_cmn.flags);
+
+ if (usr_stats->tlv_flags &
+ BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS)) {
+ succ_bytes = usr_stats->ack_ba.success_bytes;
+ succ_pkts = FIELD_GET(HTT_PPDU_STATS_ACK_BA_INFO_NUM_MSDU_M,
+ usr_stats->ack_ba.info);
+ tid = FIELD_GET(HTT_PPDU_STATS_ACK_BA_INFO_TID_NUM,
+ usr_stats->ack_ba.info);
+ }
+
+ if (common->fes_duration_us)
+ tx_duration = common->fes_duration_us;
+
+ user_rate = &usr_stats->rate;
+ flags = HTT_USR_RATE_PREAMBLE(user_rate->rate_flags);
+ bw = HTT_USR_RATE_BW(user_rate->rate_flags) - 2;
+ nss = HTT_USR_RATE_NSS(user_rate->rate_flags) + 1;
+ mcs = HTT_USR_RATE_MCS(user_rate->rate_flags);
+ sgi = HTT_USR_RATE_GI(user_rate->rate_flags);
+ dcm = HTT_USR_RATE_DCM(user_rate->rate_flags);
+
+ /* Note: If the host configured fixed rates, and in some other special
+ * cases, broadcast/management frames are sent at different rates.
+ * Should firmware rate control be skipped for these?
+ */
+
+ if (flags == WMI_RATE_PREAMBLE_HE && mcs > ATH11K_HE_MCS_MAX) {
+ ath11k_warn(ab, "Invalid HE mcs %d peer stats", mcs);
+ return;
+ }
+
+ if (flags == WMI_RATE_PREAMBLE_VHT && mcs > ATH11K_VHT_MCS_MAX) {
+ ath11k_warn(ab, "Invalid VHT mcs %d peer stats", mcs);
+ return;
+ }
+
+ if (flags == WMI_RATE_PREAMBLE_HT && (mcs > ATH11K_HT_MCS_MAX || nss < 1)) {
+ ath11k_warn(ab, "Invalid HT mcs %d nss %d peer stats",
+ mcs, nss);
+ return;
+ }
+
+ if (flags == WMI_RATE_PREAMBLE_CCK || flags == WMI_RATE_PREAMBLE_OFDM) {
+ ret = ath11k_mac_hw_ratecode_to_legacy_rate(mcs,
+ flags,
+ &rate_idx,
+ &rate);
+ if (ret < 0)
+ return;
+ }
+
+ rcu_read_lock();
+ spin_lock_bh(&ab->base_lock);
+ peer = ath11k_peer_find_by_id(ab, usr_stats->peer_id);
+
+ if (!peer || !peer->sta) {
+ spin_unlock_bh(&ab->base_lock);
+ rcu_read_unlock();
+ return;
+ }
+
+ sta = peer->sta;
+ arsta = ath11k_sta_to_arsta(sta);
+
+ memset(&arsta->txrate, 0, sizeof(arsta->txrate));
+
+ switch (flags) {
+ case WMI_RATE_PREAMBLE_OFDM:
+ arsta->txrate.legacy = rate;
+ break;
+ case WMI_RATE_PREAMBLE_CCK:
+ arsta->txrate.legacy = rate;
+ break;
+ case WMI_RATE_PREAMBLE_HT:
+ arsta->txrate.mcs = mcs + 8 * (nss - 1);
+ arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
+ if (sgi)
+ arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
+ break;
+ case WMI_RATE_PREAMBLE_VHT:
+ arsta->txrate.mcs = mcs;
+ arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
+ if (sgi)
+ arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
+ break;
+ case WMI_RATE_PREAMBLE_HE:
+ arsta->txrate.mcs = mcs;
+ arsta->txrate.flags = RATE_INFO_FLAGS_HE_MCS;
+ arsta->txrate.he_dcm = dcm;
+ arsta->txrate.he_gi = ath11k_mac_he_gi_to_nl80211_he_gi(sgi);
+ arsta->txrate.he_ru_alloc = ath11k_mac_phy_he_ru_to_nl80211_he_ru_alloc
+ ((user_rate->ru_end -
+ user_rate->ru_start) + 1);
+ break;
+ }
+
+ arsta->txrate.nss = nss;
+
+ arsta->txrate.bw = ath11k_mac_bw_to_mac80211_bw(bw);
+ arsta->tx_duration += tx_duration;
+ memcpy(&arsta->last_txrate, &arsta->txrate, sizeof(struct rate_info));
+
+ /* PPDU stats reported for a mgmt packet don't carry valid tx bytes,
+ * so skip the peer stats update for mgmt packets.
+ */
+ if (tid < HTT_PPDU_STATS_NON_QOS_TID) {
+ memset(peer_stats, 0, sizeof(*peer_stats));
+ peer_stats->succ_pkts = succ_pkts;
+ peer_stats->succ_bytes = succ_bytes;
+ peer_stats->is_ampdu = is_ampdu;
+ peer_stats->duration = tx_duration;
+ peer_stats->ba_fails =
+ HTT_USR_CMPLTN_LONG_RETRY(usr_stats->cmpltn_cmn.flags) +
+ HTT_USR_CMPLTN_SHORT_RETRY(usr_stats->cmpltn_cmn.flags);
+
+ if (ath11k_debugfs_is_extd_tx_stats_enabled(ar))
+ ath11k_debugfs_sta_add_tx_stats(arsta, peer_stats, rate_idx);
+ }
+
+ spin_unlock_bh(&ab->base_lock);
+ rcu_read_unlock();
+}
+
+static void ath11k_htt_update_ppdu_stats(struct ath11k *ar,
+ struct htt_ppdu_stats *ppdu_stats)
+{
+ u8 user;
+
+ for (user = 0; user < HTT_PPDU_STATS_MAX_USERS - 1; user++)
+ ath11k_update_per_peer_tx_stats(ar, ppdu_stats, user);
+}
+
+static
+struct htt_ppdu_stats_info *ath11k_dp_htt_get_ppdu_desc(struct ath11k *ar,
+ u32 ppdu_id)
+{
+ struct htt_ppdu_stats_info *ppdu_info;
+
+ lockdep_assert_held(&ar->data_lock);
+
+ if (!list_empty(&ar->ppdu_stats_info)) {
+ list_for_each_entry(ppdu_info, &ar->ppdu_stats_info, list) {
+ if (ppdu_info->ppdu_id == ppdu_id)
+ return ppdu_info;
+ }
+
+ if (ar->ppdu_stat_list_depth > HTT_PPDU_DESC_MAX_DEPTH) {
+ ppdu_info = list_first_entry(&ar->ppdu_stats_info,
+ typeof(*ppdu_info), list);
+ list_del(&ppdu_info->list);
+ ar->ppdu_stat_list_depth--;
+ ath11k_htt_update_ppdu_stats(ar, &ppdu_info->ppdu_stats);
+ kfree(ppdu_info);
+ }
+ }
+
+ ppdu_info = kzalloc(sizeof(*ppdu_info), GFP_ATOMIC);
+ if (!ppdu_info)
+ return NULL;
+
+ list_add_tail(&ppdu_info->list, &ar->ppdu_stats_info);
+ ar->ppdu_stat_list_depth++;
+
+ return ppdu_info;
+}
+
+static int ath11k_htt_pull_ppdu_stats(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ struct ath11k_htt_ppdu_stats_msg *msg;
+ struct htt_ppdu_stats_info *ppdu_info;
+ struct ath11k *ar;
+ int ret;
+ u8 pdev_id;
+ u32 ppdu_id, len;
+
+ msg = (struct ath11k_htt_ppdu_stats_msg *)skb->data;
+ len = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PAYLOAD_SIZE, msg->info);
+ pdev_id = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PDEV_ID, msg->info);
+ ppdu_id = msg->ppdu_id;
+
+ rcu_read_lock();
+ ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id);
+ if (!ar) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (ath11k_debugfs_is_pktlog_lite_mode_enabled(ar))
+ trace_ath11k_htt_ppdu_stats(ar, skb->data, len);
+
+ spin_lock_bh(&ar->data_lock);
+ ppdu_info = ath11k_dp_htt_get_ppdu_desc(ar, ppdu_id);
+ if (!ppdu_info) {
+ ret = -EINVAL;
+ goto out_unlock_data;
+ }
+
+ ppdu_info->ppdu_id = ppdu_id;
+ ret = ath11k_dp_htt_tlv_iter(ab, msg->data, len,
+ ath11k_htt_tlv_ppdu_stats_parse,
+ (void *)ppdu_info);
+ if (ret) {
+ ath11k_warn(ab, "Failed to parse tlv %d\n", ret);
+ goto out_unlock_data;
+ }
+
+out_unlock_data:
+ spin_unlock_bh(&ar->data_lock);
+
+out:
+ rcu_read_unlock();
+
+ return ret;
+}
+
+static void ath11k_htt_pktlog(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ struct htt_pktlog_msg *data = (struct htt_pktlog_msg *)skb->data;
+ struct ath_pktlog_hdr *hdr = (struct ath_pktlog_hdr *)data;
+ struct ath11k *ar;
+ u8 pdev_id;
+
+ pdev_id = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PDEV_ID, data->hdr);
+
+ rcu_read_lock();
+
+ ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id);
+ if (!ar) {
+ ath11k_warn(ab, "invalid pdev id %d on htt pktlog\n", pdev_id);
+ goto out;
+ }
+
+ trace_ath11k_htt_pktlog(ar, data->payload, hdr->size,
+ ar->ab->pktlog_defs_checksum);
+
+out:
+ rcu_read_unlock();
+}
+
+static void ath11k_htt_backpressure_event_handler(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ u32 *data = (u32 *)skb->data;
+ u8 pdev_id, ring_type, ring_id, pdev_idx;
+ u16 hp, tp;
+ u32 backpressure_time;
+ struct ath11k_bp_stats *bp_stats;
+
+ pdev_id = FIELD_GET(HTT_BACKPRESSURE_EVENT_PDEV_ID_M, *data);
+ ring_type = FIELD_GET(HTT_BACKPRESSURE_EVENT_RING_TYPE_M, *data);
+ ring_id = FIELD_GET(HTT_BACKPRESSURE_EVENT_RING_ID_M, *data);
+ ++data;
+
+ hp = FIELD_GET(HTT_BACKPRESSURE_EVENT_HP_M, *data);
+ tp = FIELD_GET(HTT_BACKPRESSURE_EVENT_TP_M, *data);
+ ++data;
+
+ backpressure_time = *data;
+
+ ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "backpressure event, pdev %d, ring type %d,ring id %d, hp %d tp %d, backpressure time %d\n",
+ pdev_id, ring_type, ring_id, hp, tp, backpressure_time);
+
+ if (ring_type == HTT_BACKPRESSURE_UMAC_RING_TYPE) {
+ if (ring_id >= HTT_SW_UMAC_RING_IDX_MAX)
+ return;
+
+ bp_stats = &ab->soc_stats.bp_stats.umac_ring_bp_stats[ring_id];
+ } else if (ring_type == HTT_BACKPRESSURE_LMAC_RING_TYPE) {
+ pdev_idx = DP_HW2SW_MACID(pdev_id);
+
+ if (ring_id >= HTT_SW_LMAC_RING_IDX_MAX || pdev_idx >= MAX_RADIOS)
+ return;
+
+ bp_stats = &ab->soc_stats.bp_stats.lmac_ring_bp_stats[ring_id][pdev_idx];
+ } else {
+ ath11k_warn(ab, "unknown ring type received in htt bp event %d\n",
+ ring_type);
+ return;
+ }
+
+ spin_lock_bh(&ab->base_lock);
+ bp_stats->hp = hp;
+ bp_stats->tp = tp;
+ bp_stats->count++;
+ bp_stats->jiffies = jiffies;
+ spin_unlock_bh(&ab->base_lock);
+}
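+
+/* The backpressure event payload parsed above is three consecutive u32
+ * words: word0 packs pdev id/ring type/ring id, word1 packs the head
+ * and tail pointers, and word2 carries the backpressure time, which is
+ * why the parser advances data twice before the final read.
+ */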
+
+void ath11k_dp_htt_htc_t2h_msg_handler(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ struct ath11k_dp *dp = &ab->dp;
+ struct htt_resp_msg *resp = (struct htt_resp_msg *)skb->data;
+ enum htt_t2h_msg_type type = FIELD_GET(HTT_T2H_MSG_TYPE, *(u32 *)resp);
+ u16 peer_id;
+ u8 vdev_id;
+ u8 mac_addr[ETH_ALEN];
+ u16 peer_mac_h16;
+ u16 ast_hash;
+ u16 hw_peer_id;
+
+ ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "dp_htt rx msg type :0x%0x\n", type);
+
+ switch (type) {
+ case HTT_T2H_MSG_TYPE_VERSION_CONF:
+ dp->htt_tgt_ver_major = FIELD_GET(HTT_T2H_VERSION_CONF_MAJOR,
+ resp->version_msg.version);
+ dp->htt_tgt_ver_minor = FIELD_GET(HTT_T2H_VERSION_CONF_MINOR,
+ resp->version_msg.version);
+ complete(&dp->htt_tgt_version_received);
+ break;
+ case HTT_T2H_MSG_TYPE_PEER_MAP:
+ vdev_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_VDEV_ID,
+ resp->peer_map_ev.info);
+ peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_PEER_ID,
+ resp->peer_map_ev.info);
+ peer_mac_h16 = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16,
+ resp->peer_map_ev.info1);
+ ath11k_dp_get_mac_addr(resp->peer_map_ev.mac_addr_l32,
+ peer_mac_h16, mac_addr);
+ ath11k_peer_map_event(ab, vdev_id, peer_id, mac_addr, 0, 0);
+ break;
+ case HTT_T2H_MSG_TYPE_PEER_MAP2:
+ vdev_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_VDEV_ID,
+ resp->peer_map_ev.info);
+ peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_PEER_ID,
+ resp->peer_map_ev.info);
+ peer_mac_h16 = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16,
+ resp->peer_map_ev.info1);
+ ath11k_dp_get_mac_addr(resp->peer_map_ev.mac_addr_l32,
+ peer_mac_h16, mac_addr);
+ ast_hash = FIELD_GET(HTT_T2H_PEER_MAP_INFO2_AST_HASH_VAL,
+ resp->peer_map_ev.info2);
+ hw_peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_HW_PEER_ID,
+ resp->peer_map_ev.info1);
+ ath11k_peer_map_event(ab, vdev_id, peer_id, mac_addr, ast_hash,
+ hw_peer_id);
+ break;
+ case HTT_T2H_MSG_TYPE_PEER_UNMAP:
+ case HTT_T2H_MSG_TYPE_PEER_UNMAP2:
+ peer_id = FIELD_GET(HTT_T2H_PEER_UNMAP_INFO_PEER_ID,
+ resp->peer_unmap_ev.info);
+ ath11k_peer_unmap_event(ab, peer_id);
+ break;
+ case HTT_T2H_MSG_TYPE_PPDU_STATS_IND:
+ ath11k_htt_pull_ppdu_stats(ab, skb);
+ break;
+ case HTT_T2H_MSG_TYPE_EXT_STATS_CONF:
+ ath11k_debugfs_htt_ext_stats_handler(ab, skb);
+ break;
+ case HTT_T2H_MSG_TYPE_PKTLOG:
+ ath11k_htt_pktlog(ab, skb);
+ break;
+ case HTT_T2H_MSG_TYPE_BKPRESSURE_EVENT_IND:
+ ath11k_htt_backpressure_event_handler(ab, skb);
+ break;
+ default:
+ ath11k_warn(ab, "htt event %d not handled\n", type);
+ break;
+ }
+
+ dev_kfree_skb_any(skb);
+}
+
+static int ath11k_dp_rx_msdu_coalesce(struct ath11k *ar,
+ struct sk_buff_head *msdu_list,
+ struct sk_buff *first, struct sk_buff *last,
+ u8 l3pad_bytes, int msdu_len)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct sk_buff *skb;
+ struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(first);
+ int buf_first_hdr_len, buf_first_len;
+ struct hal_rx_desc *ldesc;
+ int space_extra, rem_len, buf_len;
+ u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
+
+ /* As the msdu is spread across multiple rx buffers,
+ * find the offset to the start of msdu for computing
+ * the length of the msdu in the first buffer.
+ */
+ buf_first_hdr_len = hal_rx_desc_sz + l3pad_bytes;
+ buf_first_len = DP_RX_BUFFER_SIZE - buf_first_hdr_len;
+
+ if (WARN_ON_ONCE(msdu_len <= buf_first_len)) {
+ skb_put(first, buf_first_hdr_len + msdu_len);
+ skb_pull(first, buf_first_hdr_len);
+ return 0;
+ }
+
+ ldesc = (struct hal_rx_desc *)last->data;
+ rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(ab, ldesc);
+ rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(ab, ldesc);
+
+ /* The MSDU spans multiple buffers because its length exceeds
+ * DP_RX_BUFFER_SIZE - hal_rx_desc_sz. So assume the data in the
+ * first buffer is of length DP_RX_BUFFER_SIZE - hal_rx_desc_sz.
+ */
+ skb_put(first, DP_RX_BUFFER_SIZE);
+ skb_pull(first, buf_first_hdr_len);
+
+ /* When an MSDU is spread over multiple buffers, the ATTENTION,
+ * MSDU_END and MPDU_END tlvs are valid only in the last buffer.
+ * Copy those tlvs.
+ */
+ ath11k_dp_rx_desc_end_tlv_copy(ab, rxcb->rx_desc, ldesc);
+
+ space_extra = msdu_len - (buf_first_len + skb_tailroom(first));
+ if (space_extra > 0 &&
+ (pskb_expand_head(first, 0, space_extra, GFP_ATOMIC) < 0)) {
+ /* Free up all buffers of the MSDU */
+ while ((skb = __skb_dequeue(msdu_list)) != NULL) {
+ rxcb = ATH11K_SKB_RXCB(skb);
+ if (!rxcb->is_continuation) {
+ dev_kfree_skb_any(skb);
+ break;
+ }
+ dev_kfree_skb_any(skb);
+ }
+ return -ENOMEM;
+ }
+
+ rem_len = msdu_len - buf_first_len;
+ while ((skb = __skb_dequeue(msdu_list)) != NULL && rem_len > 0) {
+ rxcb = ATH11K_SKB_RXCB(skb);
+ if (rxcb->is_continuation)
+ buf_len = DP_RX_BUFFER_SIZE - hal_rx_desc_sz;
+ else
+ buf_len = rem_len;
+
+ if (buf_len > (DP_RX_BUFFER_SIZE - hal_rx_desc_sz)) {
+ WARN_ON_ONCE(1);
+ dev_kfree_skb_any(skb);
+ return -EINVAL;
+ }
+
+ skb_put(skb, buf_len + hal_rx_desc_sz);
+ skb_pull(skb, hal_rx_desc_sz);
+ skb_copy_from_linear_data(skb, skb_put(first, buf_len),
+ buf_len);
+ dev_kfree_skb_any(skb);
+
+ rem_len -= buf_len;
+ if (!rxcb->is_continuation)
+ break;
+ }
+
+ return 0;
+}
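+
+/* Worked example of the coalescing arithmetic above (all sizes are
+ * assumed, illustrative values): with DP_RX_BUFFER_SIZE = 2048,
+ * hal_rx_desc_sz = 372, l3pad_bytes = 2 and msdu_len = 3000:
+ *
+ * buf_first_hdr_len = 372 + 2 = 374
+ * buf_first_len = 2048 - 374 = 1674
+ * rem_len = 3000 - 1674 = 1326
+ *
+ * The second buffer is the last one (is_continuation clear), so it
+ * contributes the remaining 1326 bytes and the loop exits with
+ * rem_len = 0.
+ */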
+
+static struct sk_buff *ath11k_dp_rx_get_msdu_last_buf(struct sk_buff_head *msdu_list,
+ struct sk_buff *first)
+{
+ struct sk_buff *skb;
+ struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(first);
+
+ if (!rxcb->is_continuation)
+ return first;
+
+ skb_queue_walk(msdu_list, skb) {
+ rxcb = ATH11K_SKB_RXCB(skb);
+ if (!rxcb->is_continuation)
+ return skb;
+ }
+
+ return NULL;
+}
+
+static void ath11k_dp_rx_h_csum_offload(struct ath11k *ar, struct sk_buff *msdu)
+{
+ struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
+ struct rx_attention *rx_attention;
+ bool ip_csum_fail, l4_csum_fail;
+
+ rx_attention = ath11k_dp_rx_get_attention(ar->ab, rxcb->rx_desc);
+ ip_csum_fail = ath11k_dp_rx_h_attn_ip_cksum_fail(rx_attention);
+ l4_csum_fail = ath11k_dp_rx_h_attn_l4_cksum_fail(rx_attention);
+
+ msdu->ip_summed = (ip_csum_fail || l4_csum_fail) ?
+ CHECKSUM_NONE : CHECKSUM_UNNECESSARY;
+}
+
+int ath11k_dp_rx_crypto_mic_len(struct ath11k *ar, enum hal_encrypt_type enctype)
+{
+ switch (enctype) {
+ case HAL_ENCRYPT_TYPE_OPEN:
+ case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
+ case HAL_ENCRYPT_TYPE_TKIP_MIC:
+ return 0;
+ case HAL_ENCRYPT_TYPE_CCMP_128:
+ return IEEE80211_CCMP_MIC_LEN;
+ case HAL_ENCRYPT_TYPE_CCMP_256:
+ return IEEE80211_CCMP_256_MIC_LEN;
+ case HAL_ENCRYPT_TYPE_GCMP_128:
+ case HAL_ENCRYPT_TYPE_AES_GCMP_256:
+ return IEEE80211_GCMP_MIC_LEN;
+ case HAL_ENCRYPT_TYPE_WEP_40:
+ case HAL_ENCRYPT_TYPE_WEP_104:
+ case HAL_ENCRYPT_TYPE_WEP_128:
+ case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
+ case HAL_ENCRYPT_TYPE_WAPI:
+ break;
+ }
+
+ ath11k_warn(ar->ab, "unsupported encryption type %d for mic len\n", enctype);
+ return 0;
+}
+
+static int ath11k_dp_rx_crypto_param_len(struct ath11k *ar,
+ enum hal_encrypt_type enctype)
+{
+ switch (enctype) {
+ case HAL_ENCRYPT_TYPE_OPEN:
+ return 0;
+ case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
+ case HAL_ENCRYPT_TYPE_TKIP_MIC:
+ return IEEE80211_TKIP_IV_LEN;
+ case HAL_ENCRYPT_TYPE_CCMP_128:
+ return IEEE80211_CCMP_HDR_LEN;
+ case HAL_ENCRYPT_TYPE_CCMP_256:
+ return IEEE80211_CCMP_256_HDR_LEN;
+ case HAL_ENCRYPT_TYPE_GCMP_128:
+ case HAL_ENCRYPT_TYPE_AES_GCMP_256:
+ return IEEE80211_GCMP_HDR_LEN;
+ case HAL_ENCRYPT_TYPE_WEP_40:
+ case HAL_ENCRYPT_TYPE_WEP_104:
+ case HAL_ENCRYPT_TYPE_WEP_128:
+ case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
+ case HAL_ENCRYPT_TYPE_WAPI:
+ break;
+ }
+
+ ath11k_warn(ar->ab, "unsupported encryption type %d\n", enctype);
+ return 0;
+}
+
+static int ath11k_dp_rx_crypto_icv_len(struct ath11k *ar,
+ enum hal_encrypt_type enctype)
+{
+ switch (enctype) {
+ case HAL_ENCRYPT_TYPE_OPEN:
+ case HAL_ENCRYPT_TYPE_CCMP_128:
+ case HAL_ENCRYPT_TYPE_CCMP_256:
+ case HAL_ENCRYPT_TYPE_GCMP_128:
+ case HAL_ENCRYPT_TYPE_AES_GCMP_256:
+ return 0;
+ case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
+ case HAL_ENCRYPT_TYPE_TKIP_MIC:
+ return IEEE80211_TKIP_ICV_LEN;
+ case HAL_ENCRYPT_TYPE_WEP_40:
+ case HAL_ENCRYPT_TYPE_WEP_104:
+ case HAL_ENCRYPT_TYPE_WEP_128:
+ case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
+ case HAL_ENCRYPT_TYPE_WAPI:
+ break;
+ }
+
+ ath11k_warn(ar->ab, "unsupported encryption type %d\n", enctype);
+ return 0;
+}
+
+static void ath11k_dp_rx_h_undecap_nwifi(struct ath11k *ar,
+ struct sk_buff *msdu,
+ u8 *first_hdr,
+ enum hal_encrypt_type enctype,
+ struct ieee80211_rx_status *status)
+{
+ struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
+ u8 decap_hdr[DP_MAX_NWIFI_HDR_LEN];
+ struct ieee80211_hdr *hdr;
+ size_t hdr_len;
+ u8 da[ETH_ALEN];
+ u8 sa[ETH_ALEN];
+ u16 qos_ctl = 0;
+ u8 *qos;
+
+ /* copy SA & DA and pull decapped header */
+ hdr = (struct ieee80211_hdr *)msdu->data;
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+ ether_addr_copy(da, ieee80211_get_DA(hdr));
+ ether_addr_copy(sa, ieee80211_get_SA(hdr));
+ skb_pull(msdu, ieee80211_hdrlen(hdr->frame_control));
+
+ if (rxcb->is_first_msdu) {
+ /* original 802.11 header is valid for the first msdu
+ * hence we can reuse the same header
+ */
+ hdr = (struct ieee80211_hdr *)first_hdr;
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+
+ /* Each A-MSDU subframe will be reported as a separate MSDU,
+ * so strip the A-MSDU bit from QoS Ctl.
+ */
+ if (ieee80211_is_data_qos(hdr->frame_control)) {
+ qos = ieee80211_get_qos_ctl(hdr);
+ qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
+ }
+ } else {
+ /* Rebuild qos header if this is a middle/last msdu */
+ hdr->frame_control |= __cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
+
+ /* Reset the order bit as the HT_Control header is stripped */
+ hdr->frame_control &= ~(__cpu_to_le16(IEEE80211_FCTL_ORDER));
+
+ qos_ctl = rxcb->tid;
+
+ if (ath11k_dp_rx_h_msdu_start_mesh_ctl_present(ar->ab, rxcb->rx_desc))
+ qos_ctl |= IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT;
+
+ /* TODO Add other QoS ctl fields when required */
+
+ /* copy decap header before overwriting for reuse below */
+ memcpy(decap_hdr, (uint8_t *)hdr, hdr_len);
+ }
+
+ if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
+ memcpy(skb_push(msdu,
+ ath11k_dp_rx_crypto_param_len(ar, enctype)),
+ (void *)hdr + hdr_len,
+ ath11k_dp_rx_crypto_param_len(ar, enctype));
+ }
+
+ if (!rxcb->is_first_msdu) {
+ memcpy(skb_push(msdu,
+ IEEE80211_QOS_CTL_LEN), &qos_ctl,
+ IEEE80211_QOS_CTL_LEN);
+ memcpy(skb_push(msdu, hdr_len), decap_hdr, hdr_len);
+ return;
+ }
+
+ memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
+
+ /* original 802.11 header has a different DA and in
+ * case of 4addr it may also have different SA
+ */
+ hdr = (struct ieee80211_hdr *)msdu->data;
+ ether_addr_copy(ieee80211_get_DA(hdr), da);
+ ether_addr_copy(ieee80211_get_SA(hdr), sa);
+}
+
+static void ath11k_dp_rx_h_undecap_raw(struct ath11k *ar, struct sk_buff *msdu,
+ enum hal_encrypt_type enctype,
+ struct ieee80211_rx_status *status,
+ bool decrypted)
+{
+ struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
+ struct ieee80211_hdr *hdr;
+ size_t hdr_len;
+ size_t crypto_len;
+
+ if (!rxcb->is_first_msdu ||
+ !(rxcb->is_first_msdu && rxcb->is_last_msdu)) {
+ WARN_ON_ONCE(1);
+ return;
+ }
+
+ skb_trim(msdu, msdu->len - FCS_LEN);
+
+ if (!decrypted)
+ return;
+
+ hdr = (void *)msdu->data;
+
+ /* Tail */
+ if (status->flag & RX_FLAG_IV_STRIPPED) {
+ skb_trim(msdu, msdu->len -
+ ath11k_dp_rx_crypto_mic_len(ar, enctype));
+
+ skb_trim(msdu, msdu->len -
+ ath11k_dp_rx_crypto_icv_len(ar, enctype));
+ } else {
+ /* MIC */
+ if (status->flag & RX_FLAG_MIC_STRIPPED)
+ skb_trim(msdu, msdu->len -
+ ath11k_dp_rx_crypto_mic_len(ar, enctype));
+
+ /* ICV */
+ if (status->flag & RX_FLAG_ICV_STRIPPED)
+ skb_trim(msdu, msdu->len -
+ ath11k_dp_rx_crypto_icv_len(ar, enctype));
+ }
+
+ /* MMIC */
+ if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
+ !ieee80211_has_morefrags(hdr->frame_control) &&
+ enctype == HAL_ENCRYPT_TYPE_TKIP_MIC)
+ skb_trim(msdu, msdu->len - IEEE80211_CCMP_MIC_LEN);
+
+ /* Head */
+ if (status->flag & RX_FLAG_IV_STRIPPED) {
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+ crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype);
+
+ memmove((void *)msdu->data + crypto_len,
+ (void *)msdu->data, hdr_len);
+ skb_pull(msdu, crypto_len);
+ }
+}
+
+static void *ath11k_dp_rx_h_find_rfc1042(struct ath11k *ar,
+ struct sk_buff *msdu,
+ enum hal_encrypt_type enctype)
+{
+ struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
+ struct ieee80211_hdr *hdr;
+ size_t hdr_len, crypto_len;
+ void *rfc1042;
+ bool is_amsdu;
+
+ is_amsdu = !(rxcb->is_first_msdu && rxcb->is_last_msdu);
+ hdr = (struct ieee80211_hdr *)ath11k_dp_rx_h_80211_hdr(ar->ab, rxcb->rx_desc);
+ rfc1042 = hdr;
+
+ if (rxcb->is_first_msdu) {
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+ crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype);
+
+ rfc1042 += hdr_len + crypto_len;
+ }
+
+ if (is_amsdu)
+ rfc1042 += sizeof(struct ath11k_dp_amsdu_subframe_hdr);
+
+ return rfc1042;
+}
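+
+/* Sketch of the pointer arithmetic above for a first MSDU with a QoS
+ * data header and CCMP-128 (example sizes, for illustration only):
+ *
+ * rfc1042 = hdr + ieee80211_hdrlen(fc) (e.g. 26 bytes)
+ * + crypto_param_len(enctype) (e.g. 8 bytes)
+ *
+ * For an A-MSDU subframe, a further sizeof(struct
+ * ath11k_dp_amsdu_subframe_hdr) bytes (DA + SA + length) are skipped
+ * before the LLC/SNAP header.
+ */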
+
+static void ath11k_dp_rx_h_undecap_eth(struct ath11k *ar,
+ struct sk_buff *msdu,
+ u8 *first_hdr,
+ enum hal_encrypt_type enctype,
+ struct ieee80211_rx_status *status)
+{
+ struct ieee80211_hdr *hdr;
+ struct ethhdr *eth;
+ size_t hdr_len;
+ u8 da[ETH_ALEN];
+ u8 sa[ETH_ALEN];
+ void *rfc1042;
+
+ rfc1042 = ath11k_dp_rx_h_find_rfc1042(ar, msdu, enctype);
+ if (WARN_ON_ONCE(!rfc1042))
+ return;
+
+ /* pull decapped header and copy SA & DA */
+ eth = (struct ethhdr *)msdu->data;
+ ether_addr_copy(da, eth->h_dest);
+ ether_addr_copy(sa, eth->h_source);
+ skb_pull(msdu, sizeof(struct ethhdr));
+
+ /* push rfc1042/llc/snap */
+ memcpy(skb_push(msdu, sizeof(struct ath11k_dp_rfc1042_hdr)), rfc1042,
+ sizeof(struct ath11k_dp_rfc1042_hdr));
+
+ /* push original 802.11 header */
+ hdr = (struct ieee80211_hdr *)first_hdr;
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+
+ if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
+ memcpy(skb_push(msdu,
+ ath11k_dp_rx_crypto_param_len(ar, enctype)),
+ (void *)hdr + hdr_len,
+ ath11k_dp_rx_crypto_param_len(ar, enctype));
+ }
+
+ memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
+
+ /* original 802.11 header has a different DA and in
+ * case of 4addr it may also have different SA
+ */
+ hdr = (struct ieee80211_hdr *)msdu->data;
+ ether_addr_copy(ieee80211_get_DA(hdr), da);
+ ether_addr_copy(ieee80211_get_SA(hdr), sa);
+}
+
+static void ath11k_dp_rx_h_undecap(struct ath11k *ar, struct sk_buff *msdu,
+ struct hal_rx_desc *rx_desc,
+ enum hal_encrypt_type enctype,
+ struct ieee80211_rx_status *status,
+ bool decrypted)
+{
+ u8 *first_hdr;
+ u8 decap;
+ struct ethhdr *ehdr;
+
+ first_hdr = ath11k_dp_rx_h_80211_hdr(ar->ab, rx_desc);
+ decap = ath11k_dp_rx_h_msdu_start_decap_type(ar->ab, rx_desc);
+
+ switch (decap) {
+ case DP_RX_DECAP_TYPE_NATIVE_WIFI:
+ ath11k_dp_rx_h_undecap_nwifi(ar, msdu, first_hdr,
+ enctype, status);
+ break;
+ case DP_RX_DECAP_TYPE_RAW:
+ ath11k_dp_rx_h_undecap_raw(ar, msdu, enctype, status,
+ decrypted);
+ break;
+ case DP_RX_DECAP_TYPE_ETHERNET2_DIX:
+ ehdr = (struct ethhdr *)msdu->data;
+
+ /* mac80211 allows fast path only for authorized STA */
+ if (ehdr->h_proto == cpu_to_be16(ETH_P_PAE)) {
+ ATH11K_SKB_RXCB(msdu)->is_eapol = true;
+ ath11k_dp_rx_h_undecap_eth(ar, msdu, first_hdr,
+ enctype, status);
+ break;
+ }
+
+ /* PN for mcast packets will be validated in mac80211;
+ * remove eth header and add 802.11 header.
+ */
+ if (ATH11K_SKB_RXCB(msdu)->is_mcbc && decrypted)
+ ath11k_dp_rx_h_undecap_eth(ar, msdu, first_hdr,
+ enctype, status);
+ break;
+ case DP_RX_DECAP_TYPE_8023:
+ /* TODO: Handle undecap for these formats */
+ break;
+ }
+}
+
+static struct ath11k_peer *
+ath11k_dp_rx_h_find_peer(struct ath11k_base *ab, struct sk_buff *msdu)
+{
+ struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
+ struct hal_rx_desc *rx_desc = rxcb->rx_desc;
+ struct ath11k_peer *peer = NULL;
+
+ lockdep_assert_held(&ab->base_lock);
+
+ if (rxcb->peer_id)
+ peer = ath11k_peer_find_by_id(ab, rxcb->peer_id);
+
+ if (peer)
+ return peer;
+
+ if (!rx_desc || !(ath11k_dp_rxdesc_mac_addr2_valid(ab, rx_desc)))
+ return NULL;
+
+ peer = ath11k_peer_find_by_addr(ab,
+ ath11k_dp_rxdesc_mpdu_start_addr2(ab, rx_desc));
+ return peer;
+}
+
+static void ath11k_dp_rx_h_mpdu(struct ath11k *ar,
+ struct sk_buff *msdu,
+ struct hal_rx_desc *rx_desc,
+ struct ieee80211_rx_status *rx_status)
+{
+ bool fill_crypto_hdr;
+ enum hal_encrypt_type enctype;
+ bool is_decrypted = false;
+ struct ath11k_skb_rxcb *rxcb;
+ struct ieee80211_hdr *hdr;
+ struct ath11k_peer *peer;
+ struct rx_attention *rx_attention;
+ u32 err_bitmap;
+
+ /* PN for multicast packets will be checked in mac80211 */
+ rxcb = ATH11K_SKB_RXCB(msdu);
+ fill_crypto_hdr = ath11k_dp_rx_h_attn_is_mcbc(ar->ab, rx_desc);
+ rxcb->is_mcbc = fill_crypto_hdr;
+
+ if (rxcb->is_mcbc) {
+ rxcb->peer_id = ath11k_dp_rx_h_mpdu_start_peer_id(ar->ab, rx_desc);
+ rxcb->seq_no = ath11k_dp_rx_h_mpdu_start_seq_no(ar->ab, rx_desc);
+ }
+
+ spin_lock_bh(&ar->ab->base_lock);
+ peer = ath11k_dp_rx_h_find_peer(ar->ab, msdu);
+ if (peer) {
+ if (rxcb->is_mcbc)
+ enctype = peer->sec_type_grp;
+ else
+ enctype = peer->sec_type;
+ } else {
+ enctype = ath11k_dp_rx_h_mpdu_start_enctype(ar->ab, rx_desc);
+ }
+ spin_unlock_bh(&ar->ab->base_lock);
+
+ rx_attention = ath11k_dp_rx_get_attention(ar->ab, rx_desc);
+ err_bitmap = ath11k_dp_rx_h_attn_mpdu_err(rx_attention);
+ if (enctype != HAL_ENCRYPT_TYPE_OPEN && !err_bitmap)
+ is_decrypted = ath11k_dp_rx_h_attn_is_decrypted(rx_attention);
+
+ /* Clear per-MPDU flags while leaving per-PPDU flags intact */
+ rx_status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
+ RX_FLAG_MMIC_ERROR |
+ RX_FLAG_DECRYPTED |
+ RX_FLAG_IV_STRIPPED |
+ RX_FLAG_MMIC_STRIPPED);
+
+ if (err_bitmap & DP_RX_MPDU_ERR_FCS)
+ rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
+ if (err_bitmap & DP_RX_MPDU_ERR_TKIP_MIC)
+ rx_status->flag |= RX_FLAG_MMIC_ERROR;
+
+ if (is_decrypted) {
+ rx_status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MMIC_STRIPPED;
+
+ if (fill_crypto_hdr)
+ rx_status->flag |= RX_FLAG_MIC_STRIPPED |
+ RX_FLAG_ICV_STRIPPED;
+ else
+ rx_status->flag |= RX_FLAG_IV_STRIPPED |
+ RX_FLAG_PN_VALIDATED;
+ }
+
+ ath11k_dp_rx_h_csum_offload(ar, msdu);
+ ath11k_dp_rx_h_undecap(ar, msdu, rx_desc,
+ enctype, rx_status, is_decrypted);
+
+ if (!is_decrypted || fill_crypto_hdr)
+ return;
+
+ if (ath11k_dp_rx_h_msdu_start_decap_type(ar->ab, rx_desc) !=
+ DP_RX_DECAP_TYPE_ETHERNET2_DIX) {
+ hdr = (void *)msdu->data;
+ hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
+ }
+}
+
+static void ath11k_dp_rx_h_rate(struct ath11k *ar, struct hal_rx_desc *rx_desc,
+ struct ieee80211_rx_status *rx_status)
+{
+ struct ieee80211_supported_band *sband;
+ enum rx_msdu_start_pkt_type pkt_type;
+ u8 bw;
+ u8 rate_mcs, nss;
+ u8 sgi;
+ bool is_cck, is_ldpc;
+
+ pkt_type = ath11k_dp_rx_h_msdu_start_pkt_type(ar->ab, rx_desc);
+ bw = ath11k_dp_rx_h_msdu_start_rx_bw(ar->ab, rx_desc);
+ rate_mcs = ath11k_dp_rx_h_msdu_start_rate_mcs(ar->ab, rx_desc);
+ nss = ath11k_dp_rx_h_msdu_start_nss(ar->ab, rx_desc);
+ sgi = ath11k_dp_rx_h_msdu_start_sgi(ar->ab, rx_desc);
+
+ switch (pkt_type) {
+ case RX_MSDU_START_PKT_TYPE_11A:
+ case RX_MSDU_START_PKT_TYPE_11B:
+ is_cck = (pkt_type == RX_MSDU_START_PKT_TYPE_11B);
+ sband = &ar->mac.sbands[rx_status->band];
+ rx_status->rate_idx = ath11k_mac_hw_rate_to_idx(sband, rate_mcs,
+ is_cck);
+ break;
+ case RX_MSDU_START_PKT_TYPE_11N:
+ rx_status->encoding = RX_ENC_HT;
+ if (rate_mcs > ATH11K_HT_MCS_MAX) {
+ ath11k_warn(ar->ab,
+ "Received with invalid mcs in HT mode %d\n",
+ rate_mcs);
+ break;
+ }
+ rx_status->rate_idx = rate_mcs + (8 * (nss - 1));
+ if (sgi)
+ rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+ rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw);
+ break;
+ case RX_MSDU_START_PKT_TYPE_11AC:
+ rx_status->encoding = RX_ENC_VHT;
+ rx_status->rate_idx = rate_mcs;
+ if (rate_mcs > ATH11K_VHT_MCS_MAX) {
+ ath11k_warn(ar->ab,
+ "Received with invalid mcs in VHT mode %d\n",
+ rate_mcs);
+ break;
+ }
+ rx_status->nss = nss;
+ if (sgi)
+ rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+ rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw);
+ is_ldpc = ath11k_dp_rx_h_msdu_start_ldpc_support(ar->ab, rx_desc);
+ if (is_ldpc)
+ rx_status->enc_flags |= RX_ENC_FLAG_LDPC;
+ break;
+ case RX_MSDU_START_PKT_TYPE_11AX:
+ rx_status->rate_idx = rate_mcs;
+ if (rate_mcs > ATH11K_HE_MCS_MAX) {
+ ath11k_warn(ar->ab,
+ "Received with invalid mcs in HE mode %d\n",
+ rate_mcs);
+ break;
+ }
+ rx_status->encoding = RX_ENC_HE;
+ rx_status->nss = nss;
+ rx_status->he_gi = ath11k_mac_he_gi_to_nl80211_he_gi(sgi);
+ rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw);
+ break;
+ }
+}
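+
+/* For HT, NSS is folded into the rate index above (rate_idx =
+ * mcs + 8 * (nss - 1)), matching mac80211's convention that e.g. MCS 7
+ * at two spatial streams is reported as index 15; VHT and HE instead
+ * report nss separately in rx_status->nss.
+ */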
+
+static void ath11k_dp_rx_h_ppdu(struct ath11k *ar, struct hal_rx_desc *rx_desc,
+ struct ieee80211_rx_status *rx_status)
+{
+ u8 channel_num;
+ u32 center_freq, meta_data;
+ struct ieee80211_channel *channel;
+
+ rx_status->freq = 0;
+ rx_status->rate_idx = 0;
+ rx_status->nss = 0;
+ rx_status->encoding = RX_ENC_LEGACY;
+ rx_status->bw = RATE_INFO_BW_20;
+
+ rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
+
+ meta_data = ath11k_dp_rx_h_msdu_start_freq(ar->ab, rx_desc);
+ channel_num = meta_data;
+ center_freq = meta_data >> 16;
+
+ if (center_freq >= ATH11K_MIN_6G_FREQ &&
+ center_freq <= ATH11K_MAX_6G_FREQ) {
+ rx_status->band = NL80211_BAND_6GHZ;
+ rx_status->freq = center_freq;
+ } else if (channel_num >= 1 && channel_num <= 14) {
+ rx_status->band = NL80211_BAND_2GHZ;
+ } else if (channel_num >= 36 && channel_num <= 177) {
+ rx_status->band = NL80211_BAND_5GHZ;
+ } else {
+ spin_lock_bh(&ar->data_lock);
+ channel = ar->rx_channel;
+ if (channel) {
+ rx_status->band = channel->band;
+ channel_num =
+ ieee80211_frequency_to_channel(channel->center_freq);
+ }
+ spin_unlock_bh(&ar->data_lock);
+ ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "rx_desc: ",
+ rx_desc, sizeof(struct hal_rx_desc));
+ }
+
+ if (rx_status->band != NL80211_BAND_6GHZ)
+ rx_status->freq = ieee80211_channel_to_frequency(channel_num,
+ rx_status->band);
+
+ ath11k_dp_rx_h_rate(ar, rx_desc, rx_status);
+}
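+
+/* The msdu_start freq word decoded above packs the channel number in
+ * the low 16 bits and the center frequency in the upper 16 bits; for
+ * example a 6 GHz PPDU on 5955 MHz decodes as center_freq =
+ * meta_data >> 16 = 5955, which selects NL80211_BAND_6GHZ directly
+ * without a channel-to-frequency conversion.
+ */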
+
+static void ath11k_dp_rx_deliver_msdu(struct ath11k *ar, struct napi_struct *napi,
+ struct sk_buff *msdu,
+ struct ieee80211_rx_status *status)
+{
+ static const struct ieee80211_radiotap_he known = {
+ .data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN),
+ .data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN),
+ };
+ struct ieee80211_rx_status *rx_status;
+ struct ieee80211_radiotap_he *he = NULL;
+ struct ieee80211_sta *pubsta = NULL;
+ struct ath11k_peer *peer;
+ struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
+ u8 decap = DP_RX_DECAP_TYPE_RAW;
+ bool is_mcbc = rxcb->is_mcbc;
+ bool is_eapol = rxcb->is_eapol;
+
+ if (status->encoding == RX_ENC_HE &&
+ !(status->flag & RX_FLAG_RADIOTAP_HE) &&
+ !(status->flag & RX_FLAG_SKIP_MONITOR)) {
+ he = skb_push(msdu, sizeof(known));
+ memcpy(he, &known, sizeof(known));
+ status->flag |= RX_FLAG_RADIOTAP_HE;
+ }
+
+ if (!(status->flag & RX_FLAG_ONLY_MONITOR))
+ decap = ath11k_dp_rx_h_msdu_start_decap_type(ar->ab, rxcb->rx_desc);
+
+ spin_lock_bh(&ar->ab->base_lock);
+ peer = ath11k_dp_rx_h_find_peer(ar->ab, msdu);
+ if (peer && peer->sta)
+ pubsta = peer->sta;
+ spin_unlock_bh(&ar->ab->base_lock);
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
+ "rx skb %p len %u peer %pM %d %s sn %u %s%s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
+ msdu,
+ msdu->len,
+ peer ? peer->addr : NULL,
+ rxcb->tid,
+ is_mcbc ? "mcast" : "ucast",
+ rxcb->seq_no,
+ (status->encoding == RX_ENC_LEGACY) ? "legacy" : "",
+ (status->encoding == RX_ENC_HT) ? "ht" : "",
+ (status->encoding == RX_ENC_VHT) ? "vht" : "",
+ (status->encoding == RX_ENC_HE) ? "he" : "",
+ (status->bw == RATE_INFO_BW_40) ? "40" : "",
+ (status->bw == RATE_INFO_BW_80) ? "80" : "",
+ (status->bw == RATE_INFO_BW_160) ? "160" : "",
+ status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "",
+ status->rate_idx,
+ status->nss,
+ status->freq,
+ status->band, status->flag,
+ !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
+ !!(status->flag & RX_FLAG_MMIC_ERROR),
+ !!(status->flag & RX_FLAG_AMSDU_MORE));
+
+ ath11k_dbg_dump(ar->ab, ATH11K_DBG_DP_RX, NULL, "dp rx msdu: ",
+ msdu->data, msdu->len);
+
+ rx_status = IEEE80211_SKB_RXCB(msdu);
+ *rx_status = *status;
+
+ /* TODO: trace rx packet */
+
+ /* PN for multicast packets is not validated in HW,
+ * so skip the 802.3 rx path.
+ * Also, fast_rx expects the STA to be authorized, hence
+ * EAPOL packets are sent via the slow path.
+ */
+ if (decap == DP_RX_DECAP_TYPE_ETHERNET2_DIX && !is_eapol &&
+ !(is_mcbc && rx_status->flag & RX_FLAG_DECRYPTED))
+ rx_status->flag |= RX_FLAG_8023;
+
+ ieee80211_rx_napi(ar->hw, pubsta, msdu, napi);
+}
+
+static int ath11k_dp_rx_process_msdu(struct ath11k *ar,
+ struct sk_buff *msdu,
+ struct sk_buff_head *msdu_list,
+ struct ieee80211_rx_status *rx_status)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct hal_rx_desc *rx_desc, *lrx_desc;
+ struct rx_attention *rx_attention;
+ struct ath11k_skb_rxcb *rxcb;
+ struct sk_buff *last_buf;
+ u8 l3_pad_bytes;
+ u8 *hdr_status;
+ u16 msdu_len;
+ int ret;
+ u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
+
+ last_buf = ath11k_dp_rx_get_msdu_last_buf(msdu_list, msdu);
+ if (!last_buf) {
+ ath11k_warn(ab,
+ "No valid Rx buffer to access Atten/MSDU_END/MPDU_END tlvs\n");
+ ret = -EIO;
+ goto free_out;
+ }
+
+ rx_desc = (struct hal_rx_desc *)msdu->data;
+ if (ath11k_dp_rx_h_attn_msdu_len_err(ab, rx_desc)) {
+ ath11k_warn(ar->ab, "msdu len not valid\n");
+ ret = -EIO;
+ goto free_out;
+ }
+
+ lrx_desc = (struct hal_rx_desc *)last_buf->data;
+ rx_attention = ath11k_dp_rx_get_attention(ab, lrx_desc);
+ if (!ath11k_dp_rx_h_attn_msdu_done(rx_attention)) {
+ ath11k_warn(ab, "msdu_done bit in attention is not set\n");
+ ret = -EIO;
+ goto free_out;
+ }
+
+ rxcb = ATH11K_SKB_RXCB(msdu);
+ rxcb->rx_desc = rx_desc;
+ msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(ab, rx_desc);
+ l3_pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(ab, lrx_desc);
+
+ if (rxcb->is_frag) {
+ skb_pull(msdu, hal_rx_desc_sz);
+ } else if (!rxcb->is_continuation) {
+ if ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE) {
+ hdr_status = ath11k_dp_rx_h_80211_hdr(ab, rx_desc);
+ ret = -EINVAL;
+ ath11k_warn(ab, "invalid msdu len %u\n", msdu_len);
+ ath11k_dbg_dump(ab, ATH11K_DBG_DATA, NULL, "", hdr_status,
+ sizeof(struct ieee80211_hdr));
+ ath11k_dbg_dump(ab, ATH11K_DBG_DATA, NULL, "", rx_desc,
+ sizeof(struct hal_rx_desc));
+ goto free_out;
+ }
+ skb_put(msdu, hal_rx_desc_sz + l3_pad_bytes + msdu_len);
+ skb_pull(msdu, hal_rx_desc_sz + l3_pad_bytes);
+ } else {
+ ret = ath11k_dp_rx_msdu_coalesce(ar, msdu_list,
+ msdu, last_buf,
+ l3_pad_bytes, msdu_len);
+ if (ret) {
+ ath11k_warn(ab,
+ "failed to coalesce msdu rx buffer%d\n", ret);
+ goto free_out;
+ }
+ }
+
+ ath11k_dp_rx_h_ppdu(ar, rx_desc, rx_status);
+ ath11k_dp_rx_h_mpdu(ar, msdu, rx_desc, rx_status);
+
+ rx_status->flag |= RX_FLAG_SKIP_MONITOR | RX_FLAG_DUP_VALIDATED;
+
+ return 0;
+
+free_out:
+ return ret;
+}
+
+static void ath11k_dp_rx_process_received_packets(struct ath11k_base *ab,
+ struct napi_struct *napi,
+ struct sk_buff_head *msdu_list,
+ int mac_id)
+{
+ struct sk_buff *msdu;
+ struct ath11k *ar;
+ struct ieee80211_rx_status rx_status = {};
+ int ret;
+
+ if (skb_queue_empty(msdu_list))
+ return;
+
+ if (unlikely(!rcu_access_pointer(ab->pdevs_active[mac_id]))) {
+ __skb_queue_purge(msdu_list);
+ return;
+ }
+
+ ar = ab->pdevs[mac_id].ar;
+ if (unlikely(test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags))) {
+ __skb_queue_purge(msdu_list);
+ return;
+ }
+
+ while ((msdu = __skb_dequeue(msdu_list))) {
+ ret = ath11k_dp_rx_process_msdu(ar, msdu, msdu_list, &rx_status);
+ if (unlikely(ret)) {
+ ath11k_dbg(ab, ATH11K_DBG_DATA,
+ "Unable to process msdu %d", ret);
+ dev_kfree_skb_any(msdu);
+ continue;
+ }
+
+ ath11k_dp_rx_deliver_msdu(ar, napi, msdu, &rx_status);
+ }
+}
+
+int ath11k_dp_process_rx(struct ath11k_base *ab, int ring_id,
+ struct napi_struct *napi, int budget)
+{
+ struct ath11k_dp *dp = &ab->dp;
+ struct dp_rxdma_ring *rx_ring;
+ int num_buffs_reaped[MAX_RADIOS] = {};
+ struct sk_buff_head msdu_list[MAX_RADIOS];
+ struct ath11k_skb_rxcb *rxcb;
+ int total_msdu_reaped = 0;
+ struct hal_srng *srng;
+ struct sk_buff *msdu;
+ bool done = false;
+ int buf_id, mac_id;
+ struct ath11k *ar;
+ struct hal_reo_dest_ring *desc;
+ enum hal_reo_dest_ring_push_reason push_reason;
+ u32 cookie;
+ int i;
+
+ for (i = 0; i < MAX_RADIOS; i++)
+ __skb_queue_head_init(&msdu_list[i]);
+
+ srng = &ab->hal.srng_list[dp->reo_dst_ring[ring_id].ring_id];
+
+ spin_lock_bh(&srng->lock);
+
+try_again:
+ ath11k_hal_srng_access_begin(ab, srng);
+
+ while (likely(desc =
+ (struct hal_reo_dest_ring *)ath11k_hal_srng_dst_get_next_entry(ab,
+ srng))) {
+ cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
+ desc->buf_addr_info.info1);
+ buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
+ cookie);
+ mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, cookie);
+
+ if (unlikely(buf_id == 0))
+ continue;
+
+ ar = ab->pdevs[mac_id].ar;
+ rx_ring = &ar->dp.rx_refill_buf_ring;
+ spin_lock_bh(&rx_ring->idr_lock);
+ msdu = idr_find(&rx_ring->bufs_idr, buf_id);
+ if (unlikely(!msdu)) {
+ ath11k_warn(ab, "frame rx with invalid buf_id %d\n",
+ buf_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+ continue;
+ }
+
+ idr_remove(&rx_ring->bufs_idr, buf_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+
+ rxcb = ATH11K_SKB_RXCB(msdu);
+ dma_unmap_single(ab->dev, rxcb->paddr,
+ msdu->len + skb_tailroom(msdu),
+ DMA_FROM_DEVICE);
+
+ num_buffs_reaped[mac_id]++;
+
+ push_reason = FIELD_GET(HAL_REO_DEST_RING_INFO0_PUSH_REASON,
+ desc->info0);
+ if (unlikely(push_reason !=
+ HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION)) {
+ dev_kfree_skb_any(msdu);
+ ab->soc_stats.hal_reo_error[ring_id]++;
+ continue;
+ }
+
+ rxcb->is_first_msdu = !!(desc->rx_msdu_info.info0 &
+ RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU);
+ rxcb->is_last_msdu = !!(desc->rx_msdu_info.info0 &
+ RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU);
+ rxcb->is_continuation = !!(desc->rx_msdu_info.info0 &
+ RX_MSDU_DESC_INFO0_MSDU_CONTINUATION);
+ rxcb->peer_id = FIELD_GET(RX_MPDU_DESC_META_DATA_PEER_ID,
+ desc->rx_mpdu_info.meta_data);
+ rxcb->seq_no = FIELD_GET(RX_MPDU_DESC_INFO0_SEQ_NUM,
+ desc->rx_mpdu_info.info0);
+ rxcb->tid = FIELD_GET(HAL_REO_DEST_RING_INFO0_RX_QUEUE_NUM,
+ desc->info0);
+
+ rxcb->mac_id = mac_id;
+ __skb_queue_tail(&msdu_list[mac_id], msdu);
+
+ if (rxcb->is_continuation) {
+ done = false;
+ } else {
+ total_msdu_reaped++;
+ done = true;
+ }
+
+ if (total_msdu_reaped >= budget)
+ break;
+ }
+
+ /* HW might have updated the head pointer after we cached it.
+ * In this case, even though there are entries in the ring, we'll
+ * get a NULL descriptor. Give the read another try with the updated
+ * cached head pointer so that we can reap complete MPDUs in the
+ * current rx processing.
+ */
+ if (unlikely(!done && ath11k_hal_srng_dst_num_free(ab, srng, true))) {
+ ath11k_hal_srng_access_end(ab, srng);
+ goto try_again;
+ }
+
+ ath11k_hal_srng_access_end(ab, srng);
+
+ spin_unlock_bh(&srng->lock);
+
+ if (unlikely(!total_msdu_reaped))
+ goto exit;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ if (!num_buffs_reaped[i])
+ continue;
+
+ ath11k_dp_rx_process_received_packets(ab, napi, &msdu_list[i], i);
+
+ ar = ab->pdevs[i].ar;
+ rx_ring = &ar->dp.rx_refill_buf_ring;
+
+ ath11k_dp_rxbufs_replenish(ab, i, rx_ring, num_buffs_reaped[i],
+ ab->hw_params.hal_params->rx_buf_rbm);
+ }
+exit:
+ return total_msdu_reaped;
+}
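+
+/* Each REO destination entry carries a SW cookie packing the pdev and
+ * the refill-ring IDR slot, i.e.
+ * cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) |
+ * FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id),
+ * so the reap loop above can recover the skb with idr_find() and later
+ * replenish exactly num_buffs_reaped[i] buffers per radio.
+ */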
+
+static void ath11k_dp_rx_update_peer_stats(struct ath11k_sta *arsta,
+ struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+ struct ath11k_rx_peer_stats *rx_stats = arsta->rx_stats;
+ u32 num_msdu;
+ int i;
+
+ if (!rx_stats)
+ return;
+
+ arsta->rssi_comb = ppdu_info->rssi_comb;
+ ewma_avg_rssi_add(&arsta->avg_rssi, ppdu_info->rssi_comb);
+
+ num_msdu = ppdu_info->tcp_msdu_count + ppdu_info->tcp_ack_msdu_count +
+ ppdu_info->udp_msdu_count + ppdu_info->other_msdu_count;
+
+ rx_stats->num_msdu += num_msdu;
+ rx_stats->tcp_msdu_count += ppdu_info->tcp_msdu_count +
+ ppdu_info->tcp_ack_msdu_count;
+ rx_stats->udp_msdu_count += ppdu_info->udp_msdu_count;
+ rx_stats->other_msdu_count += ppdu_info->other_msdu_count;
+
+ if (ppdu_info->preamble_type == HAL_RX_PREAMBLE_11A ||
+ ppdu_info->preamble_type == HAL_RX_PREAMBLE_11B) {
+ ppdu_info->nss = 1;
+ ppdu_info->mcs = HAL_RX_MAX_MCS;
+ ppdu_info->tid = IEEE80211_NUM_TIDS;
+ }
+
+ if (ppdu_info->nss > 0 && ppdu_info->nss <= HAL_RX_MAX_NSS)
+ rx_stats->nss_count[ppdu_info->nss - 1] += num_msdu;
+
+ if (ppdu_info->mcs <= HAL_RX_MAX_MCS)
+ rx_stats->mcs_count[ppdu_info->mcs] += num_msdu;
+
+ if (ppdu_info->gi < HAL_RX_GI_MAX)
+ rx_stats->gi_count[ppdu_info->gi] += num_msdu;
+
+ if (ppdu_info->bw < HAL_RX_BW_MAX)
+ rx_stats->bw_count[ppdu_info->bw] += num_msdu;
+
+ if (ppdu_info->ldpc < HAL_RX_SU_MU_CODING_MAX)
+ rx_stats->coding_count[ppdu_info->ldpc] += num_msdu;
+
+ if (ppdu_info->tid <= IEEE80211_NUM_TIDS)
+ rx_stats->tid_count[ppdu_info->tid] += num_msdu;
+
+ if (ppdu_info->preamble_type < HAL_RX_PREAMBLE_MAX)
+ rx_stats->pream_cnt[ppdu_info->preamble_type] += num_msdu;
+
+ if (ppdu_info->reception_type < HAL_RX_RECEPTION_TYPE_MAX)
+ rx_stats->reception_type[ppdu_info->reception_type] += num_msdu;
+
+ if (ppdu_info->is_stbc)
+ rx_stats->stbc_count += num_msdu;
+
+ if (ppdu_info->beamformed)
+ rx_stats->beamformed_count += num_msdu;
+
+ if (ppdu_info->num_mpdu_fcs_ok > 1)
+ rx_stats->ampdu_msdu_count += num_msdu;
+ else
+ rx_stats->non_ampdu_msdu_count += num_msdu;
+
+ rx_stats->num_mpdu_fcs_ok += ppdu_info->num_mpdu_fcs_ok;
+ rx_stats->num_mpdu_fcs_err += ppdu_info->num_mpdu_fcs_err;
+ rx_stats->dcm_count += ppdu_info->dcm;
+ rx_stats->ru_alloc_cnt[ppdu_info->ru_alloc] += num_msdu;
+
+ BUILD_BUG_ON(ARRAY_SIZE(arsta->chain_signal) >
+ ARRAY_SIZE(ppdu_info->rssi_chain_pri20));
+
+ for (i = 0; i < ARRAY_SIZE(arsta->chain_signal); i++)
+ arsta->chain_signal[i] = ppdu_info->rssi_chain_pri20[i];
+
+ rx_stats->rx_duration += ppdu_info->rx_duration;
+ arsta->rx_duration = rx_stats->rx_duration;
+}
+
+static struct sk_buff *ath11k_dp_rx_alloc_mon_status_buf(struct ath11k_base *ab,
+ struct dp_rxdma_ring *rx_ring,
+ int *buf_id)
+{
+ struct sk_buff *skb;
+ dma_addr_t paddr;
+
+ skb = dev_alloc_skb(DP_RX_BUFFER_SIZE +
+ DP_RX_BUFFER_ALIGN_SIZE);
+
+ if (!skb)
+ goto fail_alloc_skb;
+
+ if (!IS_ALIGNED((unsigned long)skb->data,
+ DP_RX_BUFFER_ALIGN_SIZE)) {
+ skb_pull(skb, PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) -
+ skb->data);
+ }
+
+ paddr = dma_map_single(ab->dev, skb->data,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(ab->dev, paddr)))
+ goto fail_free_skb;
+
+ spin_lock_bh(&rx_ring->idr_lock);
+ *buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0,
+ rx_ring->bufs_max, GFP_ATOMIC);
+ spin_unlock_bh(&rx_ring->idr_lock);
+ if (*buf_id < 0)
+ goto fail_dma_unmap;
+
+ ATH11K_SKB_RXCB(skb)->paddr = paddr;
+ return skb;
+
+fail_dma_unmap:
+ dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+fail_free_skb:
+ dev_kfree_skb_any(skb);
+fail_alloc_skb:
+ return NULL;
+}
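+
+/* The skb_pull() above skips at most DP_RX_BUFFER_ALIGN_SIZE - 1 bytes
+ * to realign skb->data; e.g. (illustrative addresses) with
+ * skb->data = ...0f44 and an alignment of 128, PTR_ALIGN() yields
+ * ...0f80 and 60 bytes are dropped. The extra DP_RX_BUFFER_ALIGN_SIZE
+ * in the allocation guarantees DP_RX_BUFFER_SIZE bytes still remain
+ * after the pull.
+ */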
+
+int ath11k_dp_rx_mon_status_bufs_replenish(struct ath11k_base *ab, int mac_id,
+ struct dp_rxdma_ring *rx_ring,
+ int req_entries,
+ enum hal_rx_buf_return_buf_manager mgr)
+{
+ struct hal_srng *srng;
+ u32 *desc;
+ struct sk_buff *skb;
+ int num_free;
+ int num_remain;
+ int buf_id;
+ u32 cookie;
+ dma_addr_t paddr;
+
+ req_entries = min(req_entries, rx_ring->bufs_max);
+
+ srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];
+
+ spin_lock_bh(&srng->lock);
+
+ ath11k_hal_srng_access_begin(ab, srng);
+
+ num_free = ath11k_hal_srng_src_num_free(ab, srng, true);
+
+ req_entries = min(num_free, req_entries);
+ num_remain = req_entries;
+
+ while (num_remain > 0) {
+ skb = ath11k_dp_rx_alloc_mon_status_buf(ab, rx_ring,
+ &buf_id);
+ if (!skb)
+ break;
+ paddr = ATH11K_SKB_RXCB(skb)->paddr;
+
+ desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
+ if (!desc)
+ goto fail_desc_get;
+
+ cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) |
+ FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);
+
+ num_remain--;
+
+ ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr);
+ }
+
+ ath11k_hal_srng_access_end(ab, srng);
+
+ spin_unlock_bh(&srng->lock);
+
+ return req_entries - num_remain;
+
+fail_desc_get:
+ spin_lock_bh(&rx_ring->idr_lock);
+ idr_remove(&rx_ring->bufs_idr, buf_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+ dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
+ ath11k_hal_srng_access_end(ab, srng);
+ spin_unlock_bh(&srng->lock);
+
+ return req_entries - num_remain;
+}
+
+#define ATH11K_DP_RX_FULL_MON_PPDU_ID_WRAP 32535
+
+static void
+ath11k_dp_rx_mon_update_status_buf_state(struct ath11k_mon_data *pmon,
+ struct hal_tlv_hdr *tlv)
+{
+ struct hal_rx_ppdu_start *ppdu_start;
+ u16 ppdu_id_diff, ppdu_id, tlv_len;
+ u8 *ptr;
+
+ /* The PPDU id is part of the second tlv; move ptr to the second tlv */
+ tlv_len = FIELD_GET(HAL_TLV_HDR_LEN, tlv->tl);
+ ptr = (u8 *)tlv;
+ ptr += sizeof(*tlv) + tlv_len;
+ tlv = (struct hal_tlv_hdr *)ptr;
+
+ if (FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl) != HAL_RX_PPDU_START)
+ return;
+
+ ptr += sizeof(*tlv);
+ ppdu_start = (struct hal_rx_ppdu_start *)ptr;
+ ppdu_id = FIELD_GET(HAL_RX_PPDU_START_INFO0_PPDU_ID,
+ __le32_to_cpu(ppdu_start->info0));
+
+ if (pmon->sw_mon_entries.ppdu_id < ppdu_id) {
+ pmon->buf_state = DP_MON_STATUS_LEAD;
+ ppdu_id_diff = ppdu_id - pmon->sw_mon_entries.ppdu_id;
+ if (ppdu_id_diff > ATH11K_DP_RX_FULL_MON_PPDU_ID_WRAP)
+ pmon->buf_state = DP_MON_STATUS_LAG;
+ } else if (pmon->sw_mon_entries.ppdu_id > ppdu_id) {
+ pmon->buf_state = DP_MON_STATUS_LAG;
+ ppdu_id_diff = pmon->sw_mon_entries.ppdu_id - ppdu_id;
+ if (ppdu_id_diff > ATH11K_DP_RX_FULL_MON_PPDU_ID_WRAP)
+ pmon->buf_state = DP_MON_STATUS_LEAD;
+ }
+}
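+
+/* The WRAP threshold above handles 16-bit PPDU id rollover: e.g. going
+ * from a stored ppdu_id of 65500 to a new id of 20 gives a "backwards"
+ * diff of 65480, well above ATH11K_DP_RX_FULL_MON_PPDU_ID_WRAP, so the
+ * state flips to DP_MON_STATUS_LEAD instead of LAG; small genuine
+ * reorderings stay below the threshold and keep their natural
+ * direction.
+ */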
+
+static enum dp_mon_status_buf_state
+ath11k_dp_rx_mon_buf_done(struct ath11k_base *ab, struct hal_srng *srng,
+ struct dp_rxdma_ring *rx_ring)
+{
+ struct ath11k_skb_rxcb *rxcb;
+ struct hal_tlv_hdr *tlv;
+ struct sk_buff *skb;
+ void *status_desc;
+ dma_addr_t paddr;
+ u32 cookie;
+ int buf_id;
+ u8 rbm;
+
+ status_desc = ath11k_hal_srng_src_next_peek(ab, srng);
+ if (!status_desc)
+ return DP_MON_STATUS_NO_DMA;
+
+ ath11k_hal_rx_buf_addr_info_get(status_desc, &paddr, &cookie, &rbm);
+
+ buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, cookie);
+
+ spin_lock_bh(&rx_ring->idr_lock);
+ skb = idr_find(&rx_ring->bufs_idr, buf_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+
+ if (!skb)
+ return DP_MON_STATUS_NO_DMA;
+
+ rxcb = ATH11K_SKB_RXCB(skb);
+ dma_sync_single_for_cpu(ab->dev, rxcb->paddr,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+
+ tlv = (struct hal_tlv_hdr *)skb->data;
+ if (FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl) != HAL_RX_STATUS_BUFFER_DONE)
+ return DP_MON_STATUS_NO_DMA;
+
+ return DP_MON_STATUS_REPLINISH;
+}
+
+static int ath11k_dp_rx_reap_mon_status_ring(struct ath11k_base *ab, int mac_id,
+ int *budget, struct sk_buff_head *skb_list)
+{
+ struct ath11k *ar;
+ const struct ath11k_hw_hal_params *hal_params;
+ enum dp_mon_status_buf_state reap_status;
+ struct ath11k_pdev_dp *dp;
+ struct dp_rxdma_ring *rx_ring;
+ struct ath11k_mon_data *pmon;
+ struct hal_srng *srng;
+ void *rx_mon_status_desc;
+ struct sk_buff *skb;
+ struct ath11k_skb_rxcb *rxcb;
+ struct hal_tlv_hdr *tlv;
+ u32 cookie;
+ int buf_id, srng_id;
+ dma_addr_t paddr;
+ u8 rbm;
+ int num_buffs_reaped = 0;
+
+ ar = ab->pdevs[ath11k_hw_mac_id_to_pdev_id(&ab->hw_params, mac_id)].ar;
+ dp = &ar->dp;
+ pmon = &dp->mon_data;
+ srng_id = ath11k_hw_mac_id_to_srng_id(&ab->hw_params, mac_id);
+ rx_ring = &dp->rx_mon_status_refill_ring[srng_id];
+
+ srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];
+
+ spin_lock_bh(&srng->lock);
+
+ ath11k_hal_srng_access_begin(ab, srng);
+ while (*budget) {
+ *budget -= 1;
+ rx_mon_status_desc =
+ ath11k_hal_srng_src_peek(ab, srng);
+ if (!rx_mon_status_desc) {
+ pmon->buf_state = DP_MON_STATUS_REPLINISH;
+ break;
+ }
+
+ ath11k_hal_rx_buf_addr_info_get(rx_mon_status_desc, &paddr,
+ &cookie, &rbm);
+ if (paddr) {
+ buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, cookie);
+
+ spin_lock_bh(&rx_ring->idr_lock);
+ skb = idr_find(&rx_ring->bufs_idr, buf_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+
+ if (!skb) {
+ ath11k_warn(ab, "rx monitor status with invalid buf_id %d\n",
+ buf_id);
+ pmon->buf_state = DP_MON_STATUS_REPLINISH;
+ goto move_next;
+ }
+
+ rxcb = ATH11K_SKB_RXCB(skb);
+
+ dma_sync_single_for_cpu(ab->dev, rxcb->paddr,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+
+ tlv = (struct hal_tlv_hdr *)skb->data;
+ if (FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl) !=
+ HAL_RX_STATUS_BUFFER_DONE) {
+ ath11k_warn(ab, "mon status DONE not set %lx, buf_id %d\n",
+ FIELD_GET(HAL_TLV_HDR_TAG,
+ tlv->tl), buf_id);
+				/* The RxDMA "status done" bit may not be set
+				 * even though the HW has already moved the
+				 * tail pointer. If the done status is missing:
+				 * 1. As per the MAC team's suggestion, peek
+				 *    the HP + 1 entry; if its DMA is not done
+				 *    but the HP + 2 entry's DMA done is set,
+				 *    skip the HP + 1 entry and start
+				 *    processing it in the next interrupt.
+				 * 2. If the HP + 2 entry's DMA done is not
+				 *    set either, keep polling the HP + 1
+				 *    entry until its DMA done is set,
+				 *    re-checking the same buffer on the next
+				 *    call of dp_rx_mon_status_srng_process.
+				 */
+
+ reap_status = ath11k_dp_rx_mon_buf_done(ab, srng,
+ rx_ring);
+ if (reap_status == DP_MON_STATUS_NO_DMA)
+ continue;
+
+ spin_lock_bh(&rx_ring->idr_lock);
+ idr_remove(&rx_ring->bufs_idr, buf_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+
+ dma_unmap_single(ab->dev, rxcb->paddr,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+
+ dev_kfree_skb_any(skb);
+ pmon->buf_state = DP_MON_STATUS_REPLINISH;
+ goto move_next;
+ }
+
+ spin_lock_bh(&rx_ring->idr_lock);
+ idr_remove(&rx_ring->bufs_idr, buf_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+ if (ab->hw_params.full_monitor_mode) {
+ ath11k_dp_rx_mon_update_status_buf_state(pmon, tlv);
+ if (paddr == pmon->mon_status_paddr)
+ pmon->buf_state = DP_MON_STATUS_MATCH;
+ }
+
+ dma_unmap_single(ab->dev, rxcb->paddr,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+
+ __skb_queue_tail(skb_list, skb);
+ } else {
+ pmon->buf_state = DP_MON_STATUS_REPLINISH;
+ }
+move_next:
+ skb = ath11k_dp_rx_alloc_mon_status_buf(ab, rx_ring,
+ &buf_id);
+
+ if (!skb) {
+ hal_params = ab->hw_params.hal_params;
+ ath11k_hal_rx_buf_addr_info_set(rx_mon_status_desc, 0, 0,
+ hal_params->rx_buf_rbm);
+ num_buffs_reaped++;
+ break;
+ }
+ rxcb = ATH11K_SKB_RXCB(skb);
+
+ cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) |
+ FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);
+
+ ath11k_hal_rx_buf_addr_info_set(rx_mon_status_desc, rxcb->paddr,
+ cookie,
+ ab->hw_params.hal_params->rx_buf_rbm);
+ ath11k_hal_srng_src_get_next_entry(ab, srng);
+ num_buffs_reaped++;
+ }
+ ath11k_hal_srng_access_end(ab, srng);
+ spin_unlock_bh(&srng->lock);
+
+ return num_buffs_reaped;
+}
+
+static void ath11k_dp_rx_frag_timer(struct timer_list *timer)
+{
+ struct dp_rx_tid *rx_tid = timer_container_of(rx_tid, timer,
+ frag_timer);
+
+ spin_lock_bh(&rx_tid->ab->base_lock);
+ if (rx_tid->last_frag_no &&
+ rx_tid->rx_frag_bitmap == GENMASK(rx_tid->last_frag_no, 0)) {
+ spin_unlock_bh(&rx_tid->ab->base_lock);
+ return;
+ }
+ ath11k_dp_rx_frags_cleanup(rx_tid, true);
+ spin_unlock_bh(&rx_tid->ab->base_lock);
+}
+
+int ath11k_peer_rx_frag_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct crypto_shash *tfm;
+ struct ath11k_peer *peer;
+ struct dp_rx_tid *rx_tid;
+ int i;
+
+ tfm = crypto_alloc_shash("michael_mic", 0, 0);
+ if (IS_ERR(tfm)) {
+ ath11k_warn(ab, "failed to allocate michael_mic shash: %ld\n",
+ PTR_ERR(tfm));
+ return PTR_ERR(tfm);
+ }
+
+ spin_lock_bh(&ab->base_lock);
+
+ peer = ath11k_peer_find(ab, vdev_id, peer_mac);
+ if (!peer) {
+ ath11k_warn(ab, "failed to find the peer to set up fragment info\n");
+ spin_unlock_bh(&ab->base_lock);
+ crypto_free_shash(tfm);
+ return -ENOENT;
+ }
+
+ for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
+ rx_tid = &peer->rx_tid[i];
+ rx_tid->ab = ab;
+ timer_setup(&rx_tid->frag_timer, ath11k_dp_rx_frag_timer, 0);
+ skb_queue_head_init(&rx_tid->rx_frags);
+ }
+
+ peer->tfm_mmic = tfm;
+ peer->dp_setup_done = true;
+ spin_unlock_bh(&ab->base_lock);
+
+ return 0;
+}
+
+static int ath11k_dp_rx_h_michael_mic(struct crypto_shash *tfm, u8 *key,
+ struct ieee80211_hdr *hdr, u8 *data,
+ size_t data_len, u8 *mic)
+{
+ SHASH_DESC_ON_STACK(desc, tfm);
+ u8 mic_hdr[16] = {};
+ u8 tid = 0;
+ int ret;
+
+ if (!tfm)
+ return -EINVAL;
+
+ desc->tfm = tfm;
+
+ ret = crypto_shash_setkey(tfm, key, 8);
+ if (ret)
+ goto out;
+
+ ret = crypto_shash_init(desc);
+ if (ret)
+ goto out;
+
+	/* TKIP Michael MIC pseudo-header: DA (6), SA (6),
+	 * priority/TID (1) and three zero padding bytes.
+	 */
+ memcpy(mic_hdr, ieee80211_get_DA(hdr), ETH_ALEN);
+ memcpy(mic_hdr + ETH_ALEN, ieee80211_get_SA(hdr), ETH_ALEN);
+ if (ieee80211_is_data_qos(hdr->frame_control))
+ tid = ieee80211_get_tid(hdr);
+ mic_hdr[12] = tid;
+
+ ret = crypto_shash_update(desc, mic_hdr, 16);
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(desc, data, data_len);
+ if (ret)
+ goto out;
+ ret = crypto_shash_final(desc, mic);
+out:
+ shash_desc_zero(desc);
+ return ret;
+}
+
+static int ath11k_dp_rx_h_verify_tkip_mic(struct ath11k *ar, struct ath11k_peer *peer,
+ struct sk_buff *msdu)
+{
+ struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)msdu->data;
+ struct ieee80211_rx_status *rxs = IEEE80211_SKB_RXCB(msdu);
+ struct ieee80211_key_conf *key_conf;
+ struct ieee80211_hdr *hdr;
+ u8 mic[IEEE80211_CCMP_MIC_LEN];
+ int head_len, tail_len, ret;
+ size_t data_len;
+ u32 hdr_len, hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
+ u8 *key, *data;
+ u8 key_idx;
+
+ if (ath11k_dp_rx_h_mpdu_start_enctype(ar->ab, rx_desc) !=
+ HAL_ENCRYPT_TYPE_TKIP_MIC)
+ return 0;
+
+ hdr = (struct ieee80211_hdr *)(msdu->data + hal_rx_desc_sz);
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+ head_len = hdr_len + hal_rx_desc_sz + IEEE80211_TKIP_IV_LEN;
+ tail_len = IEEE80211_CCMP_MIC_LEN + IEEE80211_TKIP_ICV_LEN + FCS_LEN;
+
+ if (!is_multicast_ether_addr(hdr->addr1))
+ key_idx = peer->ucast_keyidx;
+ else
+ key_idx = peer->mcast_keyidx;
+
+ key_conf = peer->keys[key_idx];
+
+ data = msdu->data + head_len;
+ data_len = msdu->len - head_len - tail_len;
+ key = &key_conf->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY];
+
+ ret = ath11k_dp_rx_h_michael_mic(peer->tfm_mmic, key, hdr, data, data_len, mic);
+ if (ret || memcmp(mic, data + data_len, IEEE80211_CCMP_MIC_LEN))
+ goto mic_fail;
+
+ return 0;
+
+mic_fail:
+ (ATH11K_SKB_RXCB(msdu))->is_first_msdu = true;
+ (ATH11K_SKB_RXCB(msdu))->is_last_msdu = true;
+
+ rxs->flag |= RX_FLAG_MMIC_ERROR | RX_FLAG_MMIC_STRIPPED |
+ RX_FLAG_IV_STRIPPED | RX_FLAG_DECRYPTED;
+ skb_pull(msdu, hal_rx_desc_sz);
+
+ ath11k_dp_rx_h_ppdu(ar, rx_desc, rxs);
+ ath11k_dp_rx_h_undecap(ar, msdu, rx_desc,
+ HAL_ENCRYPT_TYPE_TKIP_MIC, rxs, true);
+ ieee80211_rx(ar->hw, msdu);
+ return -EINVAL;
+}
+
+static void ath11k_dp_rx_h_undecap_frag(struct ath11k *ar, struct sk_buff *msdu,
+ enum hal_encrypt_type enctype, u32 flags)
+{
+ struct ieee80211_hdr *hdr;
+ size_t hdr_len;
+ size_t crypto_len;
+ u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
+
+ if (!flags)
+ return;
+
+ hdr = (struct ieee80211_hdr *)(msdu->data + hal_rx_desc_sz);
+
+ if (flags & RX_FLAG_MIC_STRIPPED)
+ skb_trim(msdu, msdu->len -
+ ath11k_dp_rx_crypto_mic_len(ar, enctype));
+
+ if (flags & RX_FLAG_ICV_STRIPPED)
+ skb_trim(msdu, msdu->len -
+ ath11k_dp_rx_crypto_icv_len(ar, enctype));
+
+ if (flags & RX_FLAG_IV_STRIPPED) {
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+ crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype);
+
+ memmove((void *)msdu->data + hal_rx_desc_sz + crypto_len,
+ (void *)msdu->data + hal_rx_desc_sz, hdr_len);
+ skb_pull(msdu, crypto_len);
+ }
+}
+
+static int ath11k_dp_rx_h_defrag(struct ath11k *ar,
+ struct ath11k_peer *peer,
+ struct dp_rx_tid *rx_tid,
+ struct sk_buff **defrag_skb)
+{
+ struct hal_rx_desc *rx_desc;
+ struct sk_buff *skb, *first_frag, *last_frag;
+ struct ieee80211_hdr *hdr;
+ struct rx_attention *rx_attention;
+ enum hal_encrypt_type enctype;
+ bool is_decrypted = false;
+ int msdu_len = 0;
+ int extra_space;
+ u32 flags, hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
+
+ first_frag = skb_peek(&rx_tid->rx_frags);
+ last_frag = skb_peek_tail(&rx_tid->rx_frags);
+
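+	/* Per fragment: trim the FCS (except on the last fragment), strip
+	 * crypto MIC/ICV/IV as indicated by the flags, and drop the rx
+	 * descriptor plus 802.11 header from all but the first fragment so
+	 * that the payloads can be appended to first_frag below.
+	 */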
+ skb_queue_walk(&rx_tid->rx_frags, skb) {
+ flags = 0;
+ rx_desc = (struct hal_rx_desc *)skb->data;
+ hdr = (struct ieee80211_hdr *)(skb->data + hal_rx_desc_sz);
+
+ enctype = ath11k_dp_rx_h_mpdu_start_enctype(ar->ab, rx_desc);
+ if (enctype != HAL_ENCRYPT_TYPE_OPEN) {
+ rx_attention = ath11k_dp_rx_get_attention(ar->ab, rx_desc);
+ is_decrypted = ath11k_dp_rx_h_attn_is_decrypted(rx_attention);
+ }
+
+ if (is_decrypted) {
+ if (skb != first_frag)
+ flags |= RX_FLAG_IV_STRIPPED;
+ if (skb != last_frag)
+ flags |= RX_FLAG_ICV_STRIPPED |
+ RX_FLAG_MIC_STRIPPED;
+ }
+
+ /* RX fragments are always raw packets */
+ if (skb != last_frag)
+ skb_trim(skb, skb->len - FCS_LEN);
+ ath11k_dp_rx_h_undecap_frag(ar, skb, enctype, flags);
+
+ if (skb != first_frag)
+ skb_pull(skb, hal_rx_desc_sz +
+ ieee80211_hdrlen(hdr->frame_control));
+ msdu_len += skb->len;
+ }
+
+ extra_space = msdu_len - (DP_RX_BUFFER_SIZE + skb_tailroom(first_frag));
+ if (extra_space > 0 &&
+ (pskb_expand_head(first_frag, 0, extra_space, GFP_ATOMIC) < 0))
+ return -ENOMEM;
+
+ __skb_unlink(first_frag, &rx_tid->rx_frags);
+ while ((skb = __skb_dequeue(&rx_tid->rx_frags))) {
+ skb_put_data(first_frag, skb->data, skb->len);
+ dev_kfree_skb_any(skb);
+ }
+
+ hdr = (struct ieee80211_hdr *)(first_frag->data + hal_rx_desc_sz);
+ hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_MOREFRAGS);
+ ATH11K_SKB_RXCB(first_frag)->is_frag = 1;
+
+ if (ath11k_dp_rx_h_verify_tkip_mic(ar, peer, first_frag))
+ first_frag = NULL;
+
+ *defrag_skb = first_frag;
+ return 0;
+}
+
+static int ath11k_dp_rx_h_defrag_reo_reinject(struct ath11k *ar, struct dp_rx_tid *rx_tid,
+ struct sk_buff *defrag_skb)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ struct dp_rxdma_ring *rx_refill_ring = &dp->rx_refill_buf_ring;
+ struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)defrag_skb->data;
+ struct hal_reo_entrance_ring *reo_ent_ring;
+ struct hal_reo_dest_ring *reo_dest_ring;
+ struct dp_link_desc_bank *link_desc_banks;
+ struct hal_rx_msdu_link *msdu_link;
+ struct hal_rx_msdu_details *msdu0;
+ struct hal_srng *srng;
+ dma_addr_t paddr;
+ u32 desc_bank, msdu_info, mpdu_info;
+ u32 dst_idx, cookie, hal_rx_desc_sz;
+ int ret, buf_id;
+
+ hal_rx_desc_sz = ab->hw_params.hal_desc_sz;
+ link_desc_banks = ab->dp.link_desc_banks;
+ reo_dest_ring = rx_tid->dst_ring_desc;
+
+ ath11k_hal_rx_reo_ent_paddr_get(ab, reo_dest_ring, &paddr, &desc_bank);
+ msdu_link = (struct hal_rx_msdu_link *)(link_desc_banks[desc_bank].vaddr +
+ (paddr - link_desc_banks[desc_bank].paddr));
+ msdu0 = &msdu_link->msdu_link[0];
+ dst_idx = FIELD_GET(RX_MSDU_DESC_INFO0_REO_DEST_IND, msdu0->rx_msdu_info.info0);
+ memset(msdu0, 0, sizeof(*msdu0));
+
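+	/* Rebuild the sole MSDU descriptor of the link desc so that it
+	 * describes the defragmented skb, preserving the original REO
+	 * destination indication.
+	 */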
+ msdu_info = FIELD_PREP(RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU, 1) |
+ FIELD_PREP(RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU, 1) |
+ FIELD_PREP(RX_MSDU_DESC_INFO0_MSDU_CONTINUATION, 0) |
+ FIELD_PREP(RX_MSDU_DESC_INFO0_MSDU_LENGTH,
+ defrag_skb->len - hal_rx_desc_sz) |
+ FIELD_PREP(RX_MSDU_DESC_INFO0_REO_DEST_IND, dst_idx) |
+ FIELD_PREP(RX_MSDU_DESC_INFO0_VALID_SA, 1) |
+ FIELD_PREP(RX_MSDU_DESC_INFO0_VALID_DA, 1);
+ msdu0->rx_msdu_info.info0 = msdu_info;
+
+ /* change msdu len in hal rx desc */
+ ath11k_dp_rxdesc_set_msdu_len(ab, rx_desc, defrag_skb->len - hal_rx_desc_sz);
+
+ paddr = dma_map_single(ab->dev, defrag_skb->data,
+ defrag_skb->len + skb_tailroom(defrag_skb),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(ab->dev, paddr))
+ return -ENOMEM;
+
+ spin_lock_bh(&rx_refill_ring->idr_lock);
+ buf_id = idr_alloc(&rx_refill_ring->bufs_idr, defrag_skb, 0,
+ rx_refill_ring->bufs_max * 3, GFP_ATOMIC);
+ spin_unlock_bh(&rx_refill_ring->idr_lock);
+ if (buf_id < 0) {
+ ret = -ENOMEM;
+ goto err_unmap_dma;
+ }
+
+ ATH11K_SKB_RXCB(defrag_skb)->paddr = paddr;
+ cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, dp->mac_id) |
+ FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);
+
+ ath11k_hal_rx_buf_addr_info_set(msdu0, paddr, cookie,
+ ab->hw_params.hal_params->rx_buf_rbm);
+
+ /* Fill mpdu details into reo entrance ring */
+ srng = &ab->hal.srng_list[ab->dp.reo_reinject_ring.ring_id];
+
+ spin_lock_bh(&srng->lock);
+ ath11k_hal_srng_access_begin(ab, srng);
+
+ reo_ent_ring = (struct hal_reo_entrance_ring *)
+ ath11k_hal_srng_src_get_next_entry(ab, srng);
+ if (!reo_ent_ring) {
+ ath11k_hal_srng_access_end(ab, srng);
+ spin_unlock_bh(&srng->lock);
+ ret = -ENOSPC;
+ goto err_free_idr;
+ }
+ memset(reo_ent_ring, 0, sizeof(*reo_ent_ring));
+
+ ath11k_hal_rx_reo_ent_paddr_get(ab, reo_dest_ring, &paddr, &desc_bank);
+ ath11k_hal_rx_buf_addr_info_set(reo_ent_ring, paddr, desc_bank,
+ HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST);
+
+ mpdu_info = FIELD_PREP(RX_MPDU_DESC_INFO0_MSDU_COUNT, 1) |
+ FIELD_PREP(RX_MPDU_DESC_INFO0_SEQ_NUM, rx_tid->cur_sn) |
+ FIELD_PREP(RX_MPDU_DESC_INFO0_FRAG_FLAG, 0) |
+ FIELD_PREP(RX_MPDU_DESC_INFO0_VALID_SA, 1) |
+ FIELD_PREP(RX_MPDU_DESC_INFO0_VALID_DA, 1) |
+ FIELD_PREP(RX_MPDU_DESC_INFO0_RAW_MPDU, 1) |
+ FIELD_PREP(RX_MPDU_DESC_INFO0_VALID_PN, 1);
+
+ reo_ent_ring->rx_mpdu_info.info0 = mpdu_info;
+ reo_ent_ring->rx_mpdu_info.meta_data = reo_dest_ring->rx_mpdu_info.meta_data;
+ reo_ent_ring->queue_addr_lo = reo_dest_ring->queue_addr_lo;
+ reo_ent_ring->info0 = FIELD_PREP(HAL_REO_ENTR_RING_INFO0_QUEUE_ADDR_HI,
+ FIELD_GET(HAL_REO_DEST_RING_INFO0_QUEUE_ADDR_HI,
+ reo_dest_ring->info0)) |
+ FIELD_PREP(HAL_REO_ENTR_RING_INFO0_DEST_IND, dst_idx);
+ ath11k_hal_srng_access_end(ab, srng);
+ spin_unlock_bh(&srng->lock);
+
+ return 0;
+
+err_free_idr:
+ spin_lock_bh(&rx_refill_ring->idr_lock);
+ idr_remove(&rx_refill_ring->bufs_idr, buf_id);
+ spin_unlock_bh(&rx_refill_ring->idr_lock);
+err_unmap_dma:
+ dma_unmap_single(ab->dev, paddr, defrag_skb->len + skb_tailroom(defrag_skb),
+ DMA_TO_DEVICE);
+ return ret;
+}
+
+static int ath11k_dp_rx_h_cmp_frags(struct ath11k *ar,
+ struct sk_buff *a, struct sk_buff *b)
+{
+ int frag1, frag2;
+
+ frag1 = ath11k_dp_rx_h_mpdu_start_frag_no(ar->ab, a);
+ frag2 = ath11k_dp_rx_h_mpdu_start_frag_no(ar->ab, b);
+
+ return frag1 - frag2;
+}
+
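+/* Insert cur_frag into frag_list, keeping the list ordered by fragment
+ * number (a simple insertion sort; fragment lists are short).
+ */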
+static void ath11k_dp_rx_h_sort_frags(struct ath11k *ar,
+ struct sk_buff_head *frag_list,
+ struct sk_buff *cur_frag)
+{
+ struct sk_buff *skb;
+ int cmp;
+
+ skb_queue_walk(frag_list, skb) {
+ cmp = ath11k_dp_rx_h_cmp_frags(ar, skb, cur_frag);
+ if (cmp < 0)
+ continue;
+ __skb_queue_before(frag_list, skb, cur_frag);
+ return;
+ }
+ __skb_queue_tail(frag_list, cur_frag);
+}
+
+static u64 ath11k_dp_rx_h_get_pn(struct ath11k *ar, struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr;
+ u64 pn = 0;
+ u8 *ehdr;
+ u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
+
+ hdr = (struct ieee80211_hdr *)(skb->data + hal_rx_desc_sz);
+ ehdr = skb->data + hal_rx_desc_sz + ieee80211_hdrlen(hdr->frame_control);
+
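+	/* CCMP/GCMP IV layout: PN0, PN1, rsvd, key id, PN2, PN3, PN4, PN5;
+	 * bytes 2 and 3 are therefore skipped below.
+	 */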
+ pn = ehdr[0];
+ pn |= (u64)ehdr[1] << 8;
+ pn |= (u64)ehdr[4] << 16;
+ pn |= (u64)ehdr[5] << 24;
+ pn |= (u64)ehdr[6] << 32;
+ pn |= (u64)ehdr[7] << 40;
+
+ return pn;
+}
+
+static bool
+ath11k_dp_rx_h_defrag_validate_incr_pn(struct ath11k *ar, struct dp_rx_tid *rx_tid)
+{
+ enum hal_encrypt_type encrypt_type;
+ struct sk_buff *first_frag, *skb;
+ struct hal_rx_desc *desc;
+ u64 last_pn;
+ u64 cur_pn;
+
+ first_frag = skb_peek(&rx_tid->rx_frags);
+ desc = (struct hal_rx_desc *)first_frag->data;
+
+ encrypt_type = ath11k_dp_rx_h_mpdu_start_enctype(ar->ab, desc);
+ if (encrypt_type != HAL_ENCRYPT_TYPE_CCMP_128 &&
+ encrypt_type != HAL_ENCRYPT_TYPE_CCMP_256 &&
+ encrypt_type != HAL_ENCRYPT_TYPE_GCMP_128 &&
+ encrypt_type != HAL_ENCRYPT_TYPE_AES_GCMP_256)
+ return true;
+
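+	/* Fragments of a single CCMP/GCMP MPDU must carry strictly
+	 * consecutive packet numbers; anything else indicates a replayed
+	 * or forged fragment, and the whole sequence is dropped.
+	 */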
+ last_pn = ath11k_dp_rx_h_get_pn(ar, first_frag);
+ skb_queue_walk(&rx_tid->rx_frags, skb) {
+ if (skb == first_frag)
+ continue;
+
+ cur_pn = ath11k_dp_rx_h_get_pn(ar, skb);
+ if (cur_pn != last_pn + 1)
+ return false;
+ last_pn = cur_pn;
+ }
+ return true;
+}
+
+static int ath11k_dp_rx_frag_h_mpdu(struct ath11k *ar,
+ struct sk_buff *msdu,
+ u32 *ring_desc)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct hal_rx_desc *rx_desc;
+ struct ath11k_peer *peer;
+ struct dp_rx_tid *rx_tid;
+ struct sk_buff *defrag_skb = NULL;
+ u32 peer_id;
+ u16 seqno, frag_no;
+ u8 tid;
+ int ret = 0;
+ bool more_frags;
+ bool is_mcbc;
+
+ rx_desc = (struct hal_rx_desc *)msdu->data;
+ peer_id = ath11k_dp_rx_h_mpdu_start_peer_id(ar->ab, rx_desc);
+ tid = ath11k_dp_rx_h_mpdu_start_tid(ar->ab, rx_desc);
+ seqno = ath11k_dp_rx_h_mpdu_start_seq_no(ar->ab, rx_desc);
+ frag_no = ath11k_dp_rx_h_mpdu_start_frag_no(ar->ab, msdu);
+ more_frags = ath11k_dp_rx_h_mpdu_start_more_frags(ar->ab, msdu);
+ is_mcbc = ath11k_dp_rx_h_attn_is_mcbc(ar->ab, rx_desc);
+
+ /* Multicast/Broadcast fragments are not expected */
+ if (is_mcbc)
+ return -EINVAL;
+
+ if (!ath11k_dp_rx_h_mpdu_start_seq_ctrl_valid(ar->ab, rx_desc) ||
+ !ath11k_dp_rx_h_mpdu_start_fc_valid(ar->ab, rx_desc) ||
+ tid > IEEE80211_NUM_TIDS)
+ return -EINVAL;
+
+	/* An unfragmented packet was received in the reo
+	 * exception ring; this should not happen, since such
+	 * packets normally arrive via the reo2sw srngs.
+	 */
+ if (WARN_ON_ONCE(!frag_no && !more_frags))
+ return -EINVAL;
+
+ spin_lock_bh(&ab->base_lock);
+ peer = ath11k_peer_find_by_id(ab, peer_id);
+ if (!peer) {
+ ath11k_warn(ab, "failed to find the peer to de-fragment received fragment peer_id %d\n",
+ peer_id);
+ ret = -ENOENT;
+ goto out_unlock;
+ }
+ if (!peer->dp_setup_done) {
+ ath11k_warn(ab, "The peer %pM [%d] has uninitialized datapath\n",
+ peer->addr, peer_id);
+ ret = -ENOENT;
+ goto out_unlock;
+ }
+
+ rx_tid = &peer->rx_tid[tid];
+
+ if ((!skb_queue_empty(&rx_tid->rx_frags) && seqno != rx_tid->cur_sn) ||
+ skb_queue_empty(&rx_tid->rx_frags)) {
+ /* Flush stored fragments and start a new sequence */
+ ath11k_dp_rx_frags_cleanup(rx_tid, true);
+ rx_tid->cur_sn = seqno;
+ }
+
+ if (rx_tid->rx_frag_bitmap & BIT(frag_no)) {
+ /* Fragment already present */
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ if (!rx_tid->rx_frag_bitmap || (frag_no > __fls(rx_tid->rx_frag_bitmap)))
+ __skb_queue_tail(&rx_tid->rx_frags, msdu);
+ else
+ ath11k_dp_rx_h_sort_frags(ar, &rx_tid->rx_frags, msdu);
+
+ rx_tid->rx_frag_bitmap |= BIT(frag_no);
+ if (!more_frags)
+ rx_tid->last_frag_no = frag_no;
+
+ if (frag_no == 0) {
+ rx_tid->dst_ring_desc = kmemdup(ring_desc,
+ sizeof(*rx_tid->dst_ring_desc),
+ GFP_ATOMIC);
+ if (!rx_tid->dst_ring_desc) {
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
+ } else {
+ ath11k_dp_rx_link_desc_return(ab, ring_desc,
+ HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
+ }
+
+ if (!rx_tid->last_frag_no ||
+ rx_tid->rx_frag_bitmap != GENMASK(rx_tid->last_frag_no, 0)) {
+ mod_timer(&rx_tid->frag_timer, jiffies +
+ ATH11K_DP_RX_FRAGMENT_TIMEOUT_MS);
+ goto out_unlock;
+ }
+
+ spin_unlock_bh(&ab->base_lock);
+ timer_delete_sync(&rx_tid->frag_timer);
+ spin_lock_bh(&ab->base_lock);
+
+ peer = ath11k_peer_find_by_id(ab, peer_id);
+ if (!peer)
+ goto err_frags_cleanup;
+
+ if (!ath11k_dp_rx_h_defrag_validate_incr_pn(ar, rx_tid))
+ goto err_frags_cleanup;
+
+ if (ath11k_dp_rx_h_defrag(ar, peer, rx_tid, &defrag_skb))
+ goto err_frags_cleanup;
+
+ if (!defrag_skb)
+ goto err_frags_cleanup;
+
+ if (ath11k_dp_rx_h_defrag_reo_reinject(ar, rx_tid, defrag_skb))
+ goto err_frags_cleanup;
+
+ ath11k_dp_rx_frags_cleanup(rx_tid, false);
+ goto out_unlock;
+
+err_frags_cleanup:
+ dev_kfree_skb_any(defrag_skb);
+ ath11k_dp_rx_frags_cleanup(rx_tid, true);
+out_unlock:
+ spin_unlock_bh(&ab->base_lock);
+ return ret;
+}
+
+static int
+ath11k_dp_process_rx_err_buf(struct ath11k *ar, u32 *ring_desc, int buf_id, bool drop)
+{
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
+ struct sk_buff *msdu;
+ struct ath11k_skb_rxcb *rxcb;
+ struct hal_rx_desc *rx_desc;
+ u8 *hdr_status;
+ u16 msdu_len;
+ u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
+
+ spin_lock_bh(&rx_ring->idr_lock);
+ msdu = idr_find(&rx_ring->bufs_idr, buf_id);
+ if (!msdu) {
+ ath11k_warn(ar->ab, "rx err buf with invalid buf_id %d\n",
+ buf_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+ return -EINVAL;
+ }
+
+ idr_remove(&rx_ring->bufs_idr, buf_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+
+ rxcb = ATH11K_SKB_RXCB(msdu);
+ dma_unmap_single(ar->ab->dev, rxcb->paddr,
+ msdu->len + skb_tailroom(msdu),
+ DMA_FROM_DEVICE);
+
+ if (drop) {
+ dev_kfree_skb_any(msdu);
+ return 0;
+ }
+
+ rcu_read_lock();
+ if (!rcu_dereference(ar->ab->pdevs_active[ar->pdev_idx])) {
+ dev_kfree_skb_any(msdu);
+ goto exit;
+ }
+
+ if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) {
+ dev_kfree_skb_any(msdu);
+ goto exit;
+ }
+
+ rx_desc = (struct hal_rx_desc *)msdu->data;
+ msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(ar->ab, rx_desc);
+ if ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE) {
+ hdr_status = ath11k_dp_rx_h_80211_hdr(ar->ab, rx_desc);
+ ath11k_warn(ar->ab, "invalid msdu leng %u", msdu_len);
+ ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "", hdr_status,
+ sizeof(struct ieee80211_hdr));
+ ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "", rx_desc,
+ sizeof(struct hal_rx_desc));
+ dev_kfree_skb_any(msdu);
+ goto exit;
+ }
+
+ skb_put(msdu, hal_rx_desc_sz + msdu_len);
+
+ if (ath11k_dp_rx_frag_h_mpdu(ar, msdu, ring_desc)) {
+ dev_kfree_skb_any(msdu);
+ ath11k_dp_rx_link_desc_return(ar->ab, ring_desc,
+ HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
+ }
+exit:
+ rcu_read_unlock();
+ return 0;
+}
+
+int ath11k_dp_process_rx_err(struct ath11k_base *ab, struct napi_struct *napi,
+ int budget)
+{
+ u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC];
+ struct dp_link_desc_bank *link_desc_banks;
+ enum hal_rx_buf_return_buf_manager rbm;
+ int tot_n_bufs_reaped, quota, ret, i;
+ int n_bufs_reaped[MAX_RADIOS] = {};
+ struct dp_rxdma_ring *rx_ring;
+ struct dp_srng *reo_except;
+ u32 desc_bank, num_msdus;
+ struct hal_srng *srng;
+ struct ath11k_dp *dp;
+ void *link_desc_va;
+ int buf_id, mac_id;
+ struct ath11k *ar;
+ dma_addr_t paddr;
+ u32 *desc;
+ bool is_frag;
+ u8 drop = 0;
+
+ tot_n_bufs_reaped = 0;
+ quota = budget;
+
+ dp = &ab->dp;
+ reo_except = &dp->reo_except_ring;
+ link_desc_banks = dp->link_desc_banks;
+
+ srng = &ab->hal.srng_list[reo_except->ring_id];
+
+ spin_lock_bh(&srng->lock);
+
+ ath11k_hal_srng_access_begin(ab, srng);
+
+ while (budget &&
+ (desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {
+ struct hal_reo_dest_ring *reo_desc = (struct hal_reo_dest_ring *)desc;
+
+ ab->soc_stats.err_ring_pkts++;
+ ret = ath11k_hal_desc_reo_parse_err(ab, desc, &paddr,
+ &desc_bank);
+ if (ret) {
+ ath11k_warn(ab, "failed to parse error reo desc %d\n",
+ ret);
+ continue;
+ }
+ link_desc_va = link_desc_banks[desc_bank].vaddr +
+ (paddr - link_desc_banks[desc_bank].paddr);
+ ath11k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, msdu_cookies,
+ &rbm);
+ if (rbm != HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST &&
+ rbm != HAL_RX_BUF_RBM_SW1_BM &&
+ rbm != HAL_RX_BUF_RBM_SW3_BM) {
+ ab->soc_stats.invalid_rbm++;
+ ath11k_warn(ab, "invalid return buffer manager %d\n", rbm);
+ ath11k_dp_rx_link_desc_return(ab, desc,
+ HAL_WBM_REL_BM_ACT_REL_MSDU);
+ continue;
+ }
+
+ is_frag = !!(reo_desc->rx_mpdu_info.info0 & RX_MPDU_DESC_INFO0_FRAG_FLAG);
+
+		/* Below, process only rx fragments carrying a single msdu
+		 * per link desc; drop msdus that are flagged with error
+		 * reasons.
+		 */
+ if (!is_frag || num_msdus > 1) {
+ drop = 1;
+ /* Return the link desc back to wbm idle list */
+ ath11k_dp_rx_link_desc_return(ab, desc,
+ HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
+ }
+
+ for (i = 0; i < num_msdus; i++) {
+ buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
+ msdu_cookies[i]);
+
+ mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID,
+ msdu_cookies[i]);
+
+ ar = ab->pdevs[mac_id].ar;
+
+ if (!ath11k_dp_process_rx_err_buf(ar, desc, buf_id, drop)) {
+ n_bufs_reaped[mac_id]++;
+ tot_n_bufs_reaped++;
+ }
+ }
+
+ if (tot_n_bufs_reaped >= quota) {
+ tot_n_bufs_reaped = quota;
+ goto exit;
+ }
+
+ budget = quota - tot_n_bufs_reaped;
+ }
+
+exit:
+ ath11k_hal_srng_access_end(ab, srng);
+
+ spin_unlock_bh(&srng->lock);
+
+ for (i = 0; i < ab->num_radios; i++) {
+ if (!n_bufs_reaped[i])
+ continue;
+
+ ar = ab->pdevs[i].ar;
+ rx_ring = &ar->dp.rx_refill_buf_ring;
+
+ ath11k_dp_rxbufs_replenish(ab, i, rx_ring, n_bufs_reaped[i],
+ ab->hw_params.hal_params->rx_buf_rbm);
+ }
+
+ return tot_n_bufs_reaped;
+}
+
+static void ath11k_dp_rx_null_q_desc_sg_drop(struct ath11k *ar,
+ int msdu_len,
+ struct sk_buff_head *msdu_list)
+{
+ struct sk_buff *skb, *tmp;
+ struct ath11k_skb_rxcb *rxcb;
+ int n_buffs;
+
+ n_buffs = DIV_ROUND_UP(msdu_len,
+ (DP_RX_BUFFER_SIZE - ar->ab->hw_params.hal_desc_sz));
+
+ skb_queue_walk_safe(msdu_list, skb, tmp) {
+ rxcb = ATH11K_SKB_RXCB(skb);
+ if (rxcb->err_rel_src == HAL_WBM_REL_SRC_MODULE_REO &&
+ rxcb->err_code == HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO) {
+ if (!n_buffs)
+ break;
+ __skb_unlink(skb, msdu_list);
+ dev_kfree_skb_any(skb);
+ n_buffs--;
+ }
+ }
+}
+
+static int ath11k_dp_rx_h_null_q_desc(struct ath11k *ar, struct sk_buff *msdu,
+ struct ieee80211_rx_status *status,
+ struct sk_buff_head *msdu_list)
+{
+ u16 msdu_len;
+ struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
+ struct rx_attention *rx_attention;
+ u8 l3pad_bytes;
+ struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
+ u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
+
+ msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(ar->ab, desc);
+
+ if (!rxcb->is_frag && ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE)) {
+		/* First buffer will be freed by the caller, so deduct its length */
+ msdu_len = msdu_len - (DP_RX_BUFFER_SIZE - hal_rx_desc_sz);
+ ath11k_dp_rx_null_q_desc_sg_drop(ar, msdu_len, msdu_list);
+ return -EINVAL;
+ }
+
+ rx_attention = ath11k_dp_rx_get_attention(ar->ab, desc);
+ if (!ath11k_dp_rx_h_attn_msdu_done(rx_attention)) {
+ ath11k_warn(ar->ab,
+ "msdu_done bit not set in null_q_des processing\n");
+ __skb_queue_purge(msdu_list);
+ return -EIO;
+ }
+
+	/* Handle NULL queue descriptor violations arising out of a missing
+	 * REO queue for a given peer or TID. This typically happens if a
+	 * packet is received on a QoS-enabled TID before the ADDBA
+	 * negotiation for that TID has set up the TID queue. It may also
+	 * happen for MC/BC frames if they are not routed to the non-QoS TID
+	 * queue in the absence of any other default TID queue. This error
+	 * can show up in both a REO destination ring and the WBM release
+	 * ring.
+	 */
+
+ rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(ar->ab, desc);
+ rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(ar->ab, desc);
+
+ if (rxcb->is_frag) {
+ skb_pull(msdu, hal_rx_desc_sz);
+ } else {
+ l3pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, desc);
+
+ if ((hal_rx_desc_sz + l3pad_bytes + msdu_len) > DP_RX_BUFFER_SIZE)
+ return -EINVAL;
+
+ skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len);
+ skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes);
+ }
+ ath11k_dp_rx_h_ppdu(ar, desc, status);
+
+ ath11k_dp_rx_h_mpdu(ar, msdu, desc, status);
+
+ rxcb->tid = ath11k_dp_rx_h_mpdu_start_tid(ar->ab, desc);
+
+	/* Note that the caller still has access to the msdu and completes
+	 * rx with mac80211, so there is no need to clean up the amsdu_list.
+	 */
+
+ return 0;
+}
+
+static bool ath11k_dp_rx_h_reo_err(struct ath11k *ar, struct sk_buff *msdu,
+ struct ieee80211_rx_status *status,
+ struct sk_buff_head *msdu_list)
+{
+ struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
+ bool drop = false;
+
+ ar->ab->soc_stats.reo_error[rxcb->err_code]++;
+
+ switch (rxcb->err_code) {
+ case HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO:
+ if (ath11k_dp_rx_h_null_q_desc(ar, msdu, status, msdu_list))
+ drop = true;
+ break;
+ case HAL_REO_DEST_RING_ERROR_CODE_PN_CHECK_FAILED:
+ /* TODO: Do not drop PN failed packets in the driver;
+ * instead, it is good to drop such packets in mac80211
+ * after incrementing the replay counters.
+ */
+ fallthrough;
+ default:
+		/* TODO: Review other errors and report them to mac80211
+		 * as appropriate.
+		 */
+ drop = true;
+ break;
+ }
+
+ return drop;
+}
+
+static void ath11k_dp_rx_h_tkip_mic_err(struct ath11k *ar, struct sk_buff *msdu,
+ struct ieee80211_rx_status *status)
+{
+ u16 msdu_len;
+ struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
+ u8 l3pad_bytes;
+ struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
+ u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
+
+ rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(ar->ab, desc);
+ rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(ar->ab, desc);
+
+ l3pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, desc);
+ msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(ar->ab, desc);
+ skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len);
+ skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes);
+
+ ath11k_dp_rx_h_ppdu(ar, desc, status);
+
+ status->flag |= (RX_FLAG_MMIC_STRIPPED | RX_FLAG_MMIC_ERROR |
+ RX_FLAG_DECRYPTED);
+
+ ath11k_dp_rx_h_undecap(ar, msdu, desc,
+ HAL_ENCRYPT_TYPE_TKIP_MIC, status, false);
+}
+
+static bool ath11k_dp_rx_h_rxdma_err(struct ath11k *ar, struct sk_buff *msdu,
+ struct ieee80211_rx_status *status)
+{
+ struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
+ bool drop = false;
+
+ ar->ab->soc_stats.rxdma_error[rxcb->err_code]++;
+
+ switch (rxcb->err_code) {
+ case HAL_REO_ENTR_RING_RXDMA_ECODE_TKIP_MIC_ERR:
+ ath11k_dp_rx_h_tkip_mic_err(ar, msdu, status);
+ break;
+ default:
+		/* TODO: Review other rxdma error codes to check if anything
+		 * is worth reporting to mac80211.
+		 */
+ drop = true;
+ break;
+ }
+
+ return drop;
+}
+
+static void ath11k_dp_rx_wbm_err(struct ath11k *ar,
+ struct napi_struct *napi,
+ struct sk_buff *msdu,
+ struct sk_buff_head *msdu_list)
+{
+ struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
+ struct ieee80211_rx_status rxs = {};
+ bool drop = true;
+
+ switch (rxcb->err_rel_src) {
+ case HAL_WBM_REL_SRC_MODULE_REO:
+ drop = ath11k_dp_rx_h_reo_err(ar, msdu, &rxs, msdu_list);
+ break;
+ case HAL_WBM_REL_SRC_MODULE_RXDMA:
+ drop = ath11k_dp_rx_h_rxdma_err(ar, msdu, &rxs);
+ break;
+ default:
+ /* msdu will get freed */
+ break;
+ }
+
+ if (drop) {
+ dev_kfree_skb_any(msdu);
+ return;
+ }
+
+ ath11k_dp_rx_deliver_msdu(ar, napi, msdu, &rxs);
+}
+
+int ath11k_dp_rx_process_wbm_err(struct ath11k_base *ab,
+ struct napi_struct *napi, int budget)
+{
+ struct ath11k *ar;
+ struct ath11k_dp *dp = &ab->dp;
+ struct dp_rxdma_ring *rx_ring;
+ struct hal_rx_wbm_rel_info err_info;
+ struct hal_srng *srng;
+ struct sk_buff *msdu;
+ struct sk_buff_head msdu_list[MAX_RADIOS];
+ struct ath11k_skb_rxcb *rxcb;
+ u32 *rx_desc;
+ int buf_id, mac_id;
+ int num_buffs_reaped[MAX_RADIOS] = {};
+ int total_num_buffs_reaped = 0;
+ int ret, i;
+
+ for (i = 0; i < ab->num_radios; i++)
+ __skb_queue_head_init(&msdu_list[i]);
+
+ srng = &ab->hal.srng_list[dp->rx_rel_ring.ring_id];
+
+ spin_lock_bh(&srng->lock);
+
+ ath11k_hal_srng_access_begin(ab, srng);
+
+ while (budget) {
+ rx_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng);
+ if (!rx_desc)
+ break;
+
+ ret = ath11k_hal_wbm_desc_parse_err(ab, rx_desc, &err_info);
+ if (ret) {
+ ath11k_warn(ab,
+ "failed to parse rx error in wbm_rel ring desc %d\n",
+ ret);
+ continue;
+ }
+
+ buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, err_info.cookie);
+ mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, err_info.cookie);
+
+ ar = ab->pdevs[mac_id].ar;
+ rx_ring = &ar->dp.rx_refill_buf_ring;
+
+ spin_lock_bh(&rx_ring->idr_lock);
+ msdu = idr_find(&rx_ring->bufs_idr, buf_id);
+ if (!msdu) {
+ ath11k_warn(ab, "frame rx with invalid buf_id %d pdev %d\n",
+ buf_id, mac_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+ continue;
+ }
+
+ idr_remove(&rx_ring->bufs_idr, buf_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+
+ rxcb = ATH11K_SKB_RXCB(msdu);
+ dma_unmap_single(ab->dev, rxcb->paddr,
+ msdu->len + skb_tailroom(msdu),
+ DMA_FROM_DEVICE);
+
+ num_buffs_reaped[mac_id]++;
+ total_num_buffs_reaped++;
+ budget--;
+
+ if (err_info.push_reason !=
+ HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) {
+ dev_kfree_skb_any(msdu);
+ continue;
+ }
+
+ rxcb->err_rel_src = err_info.err_rel_src;
+ rxcb->err_code = err_info.err_code;
+ rxcb->rx_desc = (struct hal_rx_desc *)msdu->data;
+ __skb_queue_tail(&msdu_list[mac_id], msdu);
+ }
+
+ ath11k_hal_srng_access_end(ab, srng);
+
+ spin_unlock_bh(&srng->lock);
+
+ if (!total_num_buffs_reaped)
+ goto done;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ if (!num_buffs_reaped[i])
+ continue;
+
+ ar = ab->pdevs[i].ar;
+ rx_ring = &ar->dp.rx_refill_buf_ring;
+
+ ath11k_dp_rxbufs_replenish(ab, i, rx_ring, num_buffs_reaped[i],
+ ab->hw_params.hal_params->rx_buf_rbm);
+ }
+
+ rcu_read_lock();
+ for (i = 0; i < ab->num_radios; i++) {
+ if (!rcu_dereference(ab->pdevs_active[i])) {
+ __skb_queue_purge(&msdu_list[i]);
+ continue;
+ }
+
+ ar = ab->pdevs[i].ar;
+
+ if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) {
+ __skb_queue_purge(&msdu_list[i]);
+ continue;
+ }
+
+ while ((msdu = __skb_dequeue(&msdu_list[i])) != NULL)
+ ath11k_dp_rx_wbm_err(ar, napi, msdu, &msdu_list[i]);
+ }
+ rcu_read_unlock();
+done:
+ return total_num_buffs_reaped;
+}
+
+int ath11k_dp_process_rxdma_err(struct ath11k_base *ab, int mac_id, int budget)
+{
+ struct ath11k *ar;
+ struct dp_srng *err_ring;
+ struct dp_rxdma_ring *rx_ring;
+ struct dp_link_desc_bank *link_desc_banks = ab->dp.link_desc_banks;
+ struct hal_srng *srng;
+ u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC];
+ enum hal_rx_buf_return_buf_manager rbm;
+ enum hal_reo_entr_rxdma_ecode rxdma_err_code;
+ struct ath11k_skb_rxcb *rxcb;
+ struct sk_buff *skb;
+ struct hal_reo_entrance_ring *entr_ring;
+ void *desc;
+ int num_buf_freed = 0;
+ int quota = budget;
+ dma_addr_t paddr;
+ u32 desc_bank;
+ void *link_desc_va;
+ int num_msdus;
+ int i;
+ int buf_id;
+
+ ar = ab->pdevs[ath11k_hw_mac_id_to_pdev_id(&ab->hw_params, mac_id)].ar;
+ err_ring = &ar->dp.rxdma_err_dst_ring[ath11k_hw_mac_id_to_srng_id(&ab->hw_params,
+ mac_id)];
+ rx_ring = &ar->dp.rx_refill_buf_ring;
+
+ srng = &ab->hal.srng_list[err_ring->ring_id];
+
+ spin_lock_bh(&srng->lock);
+
+ ath11k_hal_srng_access_begin(ab, srng);
+
+ while (quota-- &&
+ (desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {
+ ath11k_hal_rx_reo_ent_paddr_get(ab, desc, &paddr, &desc_bank);
+
+ entr_ring = (struct hal_reo_entrance_ring *)desc;
+ rxdma_err_code =
+ FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE,
+ entr_ring->info1);
+ ab->soc_stats.rxdma_error[rxdma_err_code]++;
+
+ link_desc_va = link_desc_banks[desc_bank].vaddr +
+ (paddr - link_desc_banks[desc_bank].paddr);
+ ath11k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus,
+ msdu_cookies, &rbm);
+
+ for (i = 0; i < num_msdus; i++) {
+ buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
+ msdu_cookies[i]);
+
+ spin_lock_bh(&rx_ring->idr_lock);
+ skb = idr_find(&rx_ring->bufs_idr, buf_id);
+ if (!skb) {
+ ath11k_warn(ab, "rxdma error with invalid buf_id %d\n",
+ buf_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+ continue;
+ }
+
+ idr_remove(&rx_ring->bufs_idr, buf_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+
+ rxcb = ATH11K_SKB_RXCB(skb);
+ dma_unmap_single(ab->dev, rxcb->paddr,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
+
+ num_buf_freed++;
+ }
+
+ ath11k_dp_rx_link_desc_return(ab, desc,
+ HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
+ }
+
+ ath11k_hal_srng_access_end(ab, srng);
+
+ spin_unlock_bh(&srng->lock);
+
+ if (num_buf_freed)
+ ath11k_dp_rxbufs_replenish(ab, mac_id, rx_ring, num_buf_freed,
+ ab->hw_params.hal_params->rx_buf_rbm);
+
+ return budget - quota;
+}
+
+void ath11k_dp_process_reo_status(struct ath11k_base *ab)
+{
+ struct ath11k_dp *dp = &ab->dp;
+ struct hal_srng *srng;
+ struct dp_reo_cmd *cmd, *tmp;
+ bool found = false;
+ u32 *reo_desc;
+ u16 tag;
+ struct hal_reo_status reo_status;
+
+ srng = &ab->hal.srng_list[dp->reo_status_ring.ring_id];
+
+ memset(&reo_status, 0, sizeof(reo_status));
+
+ spin_lock_bh(&srng->lock);
+
+ ath11k_hal_srng_access_begin(ab, srng);
+
+ while ((reo_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {
+ tag = FIELD_GET(HAL_SRNG_TLV_HDR_TAG, *reo_desc);
+
+ switch (tag) {
+ case HAL_REO_GET_QUEUE_STATS_STATUS:
+ ath11k_hal_reo_status_queue_stats(ab, reo_desc,
+ &reo_status);
+ break;
+ case HAL_REO_FLUSH_QUEUE_STATUS:
+ ath11k_hal_reo_flush_queue_status(ab, reo_desc,
+ &reo_status);
+ break;
+ case HAL_REO_FLUSH_CACHE_STATUS:
+ ath11k_hal_reo_flush_cache_status(ab, reo_desc,
+ &reo_status);
+ break;
+ case HAL_REO_UNBLOCK_CACHE_STATUS:
+ ath11k_hal_reo_unblk_cache_status(ab, reo_desc,
+ &reo_status);
+ break;
+ case HAL_REO_FLUSH_TIMEOUT_LIST_STATUS:
+ ath11k_hal_reo_flush_timeout_list_status(ab, reo_desc,
+ &reo_status);
+ break;
+ case HAL_REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS:
+ ath11k_hal_reo_desc_thresh_reached_status(ab, reo_desc,
+ &reo_status);
+ break;
+ case HAL_REO_UPDATE_RX_REO_QUEUE_STATUS:
+ ath11k_hal_reo_update_rx_reo_queue_status(ab, reo_desc,
+ &reo_status);
+ break;
+ default:
+ ath11k_warn(ab, "Unknown reo status type %d\n", tag);
+ continue;
+ }
+
+ spin_lock_bh(&dp->reo_cmd_lock);
+ list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
+ if (reo_status.uniform_hdr.cmd_num == cmd->cmd_num) {
+ found = true;
+ list_del(&cmd->list);
+ break;
+ }
+ }
+ spin_unlock_bh(&dp->reo_cmd_lock);
+
+ if (found) {
+ cmd->handler(dp, (void *)&cmd->data,
+ reo_status.uniform_hdr.cmd_status);
+ kfree(cmd);
+ }
+
+ found = false;
+ }
+
+ ath11k_hal_srng_access_end(ab, srng);
+
+ spin_unlock_bh(&srng->lock);
+}
+
+void ath11k_dp_rx_pdev_free(struct ath11k_base *ab, int mac_id)
+{
+ struct ath11k *ar = ab->pdevs[mac_id].ar;
+
+ ath11k_dp_rx_pdev_srng_free(ar);
+ ath11k_dp_rxdma_pdev_buf_free(ar);
+}
+
+int ath11k_dp_rx_pdev_alloc(struct ath11k_base *ab, int mac_id)
+{
+ struct ath11k *ar = ab->pdevs[mac_id].ar;
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ u32 ring_id;
+ int i;
+ int ret;
+
+ ret = ath11k_dp_rx_pdev_srng_alloc(ar);
+ if (ret) {
+ ath11k_warn(ab, "failed to setup rx srngs\n");
+ return ret;
+ }
+
+ ret = ath11k_dp_rxdma_pdev_buf_setup(ar);
+ if (ret) {
+ ath11k_warn(ab, "failed to setup rxdma ring\n");
+ return ret;
+ }
+
+ ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id;
+ ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id, HAL_RXDMA_BUF);
+ if (ret) {
+ ath11k_warn(ab, "failed to configure rx_refill_buf_ring %d\n",
+ ret);
+ return ret;
+ }
+
+ if (ab->hw_params.rx_mac_buf_ring) {
+ for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
+ ring_id = dp->rx_mac_buf_ring[i].ring_id;
+ ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id,
+ mac_id + i, HAL_RXDMA_BUF);
+ if (ret) {
+ ath11k_warn(ab, "failed to configure rx_mac_buf_ring%d %d\n",
+ i, ret);
+ return ret;
+ }
+ }
+ }
+
+ for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
+ ring_id = dp->rxdma_err_dst_ring[i].ring_id;
+ ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id,
+ mac_id + i, HAL_RXDMA_DST);
+ if (ret) {
+ ath11k_warn(ab, "failed to configure rxdma_err_dest_ring%d %d\n",
+ i, ret);
+ return ret;
+ }
+ }
+
+ if (!ab->hw_params.rxdma1_enable)
+ goto config_refill_ring;
+
+ ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id;
+ ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id,
+ mac_id, HAL_RXDMA_MONITOR_BUF);
+ if (ret) {
+ ath11k_warn(ab, "failed to configure rxdma_mon_buf_ring %d\n",
+ ret);
+ return ret;
+ }
+ ret = ath11k_dp_tx_htt_srng_setup(ab,
+ dp->rxdma_mon_dst_ring.ring_id,
+ mac_id, HAL_RXDMA_MONITOR_DST);
+ if (ret) {
+ ath11k_warn(ab, "failed to configure rxdma_mon_dst_ring %d\n",
+ ret);
+ return ret;
+ }
+ ret = ath11k_dp_tx_htt_srng_setup(ab,
+ dp->rxdma_mon_desc_ring.ring_id,
+ mac_id, HAL_RXDMA_MONITOR_DESC);
+ if (ret) {
+ ath11k_warn(ab, "failed to configure rxdma_mon_dst_ring %d\n",
+ ret);
+ return ret;
+ }
+
+config_refill_ring:
+ for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
+ ring_id = dp->rx_mon_status_refill_ring[i].refill_buf_ring.ring_id;
+ ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id + i,
+ HAL_RXDMA_MONITOR_STATUS);
+ if (ret) {
+ ath11k_warn(ab,
+ "failed to configure mon_status_refill_ring%d %d\n",
+ i, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
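+/* Split a (possibly multi-buffer) MSDU length into the part consumed by
+ * the current rx buffer (*frag_len) and the part still outstanding
+ * (*total_len), accounting for the hal_rx_desc at the head of each buffer.
+ */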
+static void ath11k_dp_mon_set_frag_len(u32 *total_len, u32 *frag_len)
+{
+ if (*total_len >= (DP_RX_BUFFER_SIZE - sizeof(struct hal_rx_desc))) {
+ *frag_len = DP_RX_BUFFER_SIZE - sizeof(struct hal_rx_desc);
+ *total_len -= *frag_len;
+ } else {
+ *frag_len = *total_len;
+ *total_len = 0;
+ }
+}
+
+static
+int ath11k_dp_rx_monitor_link_desc_return(struct ath11k *ar,
+ void *p_last_buf_addr_info,
+ u8 mac_id)
+{
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ struct dp_srng *dp_srng;
+ void *hal_srng;
+ void *src_srng_desc;
+ int ret = 0;
+
+ if (ar->ab->hw_params.rxdma1_enable) {
+ dp_srng = &dp->rxdma_mon_desc_ring;
+ hal_srng = &ar->ab->hal.srng_list[dp_srng->ring_id];
+ } else {
+ dp_srng = &ar->ab->dp.wbm_desc_rel_ring;
+ hal_srng = &ar->ab->hal.srng_list[dp_srng->ring_id];
+ }
+
+ ath11k_hal_srng_access_begin(ar->ab, hal_srng);
+
+ src_srng_desc = ath11k_hal_srng_src_get_next_entry(ar->ab, hal_srng);
+
+ if (src_srng_desc) {
+ struct ath11k_buffer_addr *src_desc = src_srng_desc;
+
+ *src_desc = *((struct ath11k_buffer_addr *)p_last_buf_addr_info);
+ } else {
+ ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
+ "Monitor Link Desc Ring %d Full", mac_id);
+ ret = -ENOMEM;
+ }
+
+ ath11k_hal_srng_access_end(ar->ab, hal_srng);
+ return ret;
+}
+
+static
+void ath11k_dp_rx_mon_next_link_desc_get(void *rx_msdu_link_desc,
+ dma_addr_t *paddr, u32 *sw_cookie,
+ u8 *rbm,
+ void **pp_buf_addr_info)
+{
+ struct hal_rx_msdu_link *msdu_link = rx_msdu_link_desc;
+ struct ath11k_buffer_addr *buf_addr_info;
+
+ buf_addr_info = (struct ath11k_buffer_addr *)&msdu_link->buf_addr_info;
+
+ ath11k_hal_rx_buf_addr_info_get(buf_addr_info, paddr, sw_cookie, rbm);
+
+ *pp_buf_addr_info = (void *)buf_addr_info;
+}
+
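+/* Force the skb to exactly @len bytes: trim if it is longer, otherwise
+ * grow it (expanding the head when tailroom is insufficient). Frees the
+ * skb and returns -ENOMEM if the expansion fails.
+ */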
+static int ath11k_dp_pkt_set_pktlen(struct sk_buff *skb, u32 len)
+{
+ if (skb->len > len) {
+ skb_trim(skb, len);
+ } else {
+ if (skb_tailroom(skb) < len - skb->len) {
+ if ((pskb_expand_head(skb, 0,
+ len - skb->len - skb_tailroom(skb),
+ GFP_ATOMIC))) {
+ dev_kfree_skb_any(skb);
+ return -ENOMEM;
+ }
+ }
+ skb_put(skb, (len - skb->len));
+ }
+ return 0;
+}
+
+static void ath11k_hal_rx_msdu_list_get(struct ath11k *ar,
+ void *msdu_link_desc,
+ struct hal_rx_msdu_list *msdu_list,
+ u16 *num_msdus)
+{
+ struct hal_rx_msdu_details *msdu_details = NULL;
+ struct rx_msdu_desc *msdu_desc_info = NULL;
+ struct hal_rx_msdu_link *msdu_link = NULL;
+ int i;
+ u32 last = FIELD_PREP(RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU, 1);
+ u32 first = FIELD_PREP(RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU, 1);
+ u8 tmp = 0;
+
+ msdu_link = msdu_link_desc;
+ msdu_details = &msdu_link->msdu_link[0];
+
+ for (i = 0; i < HAL_RX_NUM_MSDU_DESC; i++) {
+ if (FIELD_GET(BUFFER_ADDR_INFO0_ADDR,
+ msdu_details[i].buf_addr_info.info0) == 0) {
+ msdu_desc_info = &msdu_details[i - 1].rx_msdu_info;
+ msdu_desc_info->info0 |= last;
+ break;
+ }
+ msdu_desc_info = &msdu_details[i].rx_msdu_info;
+
+ if (!i)
+ msdu_desc_info->info0 |= first;
+ else if (i == (HAL_RX_NUM_MSDU_DESC - 1))
+ msdu_desc_info->info0 |= last;
+ msdu_list->msdu_info[i].msdu_flags = msdu_desc_info->info0;
+ msdu_list->msdu_info[i].msdu_len =
+ HAL_RX_MSDU_PKT_LENGTH_GET(msdu_desc_info->info0);
+ msdu_list->sw_cookie[i] =
+ FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
+ msdu_details[i].buf_addr_info.info1);
+ tmp = FIELD_GET(BUFFER_ADDR_INFO1_RET_BUF_MGR,
+ msdu_details[i].buf_addr_info.info1);
+ msdu_list->rbm[i] = tmp;
+ }
+ *num_msdus = i;
+}
+
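+/* Compare the PPDU ID seen on the destination ring against the PPDU ID
+ * tracked from the status ring, tolerating counter wrap-around. Returns
+ * a non-zero PPDU ID when the caller should resync to msdu_ppdu_id.
+ */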
+static u32 ath11k_dp_rx_mon_comp_ppduid(u32 msdu_ppdu_id, u32 *ppdu_id,
+ u32 *rx_bufs_used)
+{
+ u32 ret = 0;
+
+ if ((*ppdu_id < msdu_ppdu_id) &&
+ ((msdu_ppdu_id - *ppdu_id) < DP_NOT_PPDU_ID_WRAP_AROUND)) {
+ *ppdu_id = msdu_ppdu_id;
+ ret = msdu_ppdu_id;
+ } else if ((*ppdu_id > msdu_ppdu_id) &&
+ ((*ppdu_id - msdu_ppdu_id) > DP_NOT_PPDU_ID_WRAP_AROUND)) {
+		/* mon_dst is behind mon_status; skip this dst_ring entry
+		 * and free it.
+		 */
+ *rx_bufs_used += 1;
+ *ppdu_id = msdu_ppdu_id;
+ ret = msdu_ppdu_id;
+ }
+ return ret;
+}
+
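+/* Derive the usable length of the current rx buffer from the MSDU
+ * descriptor: buffers carrying the CONTINUATION flag belong to an MSDU
+ * spanning several buffers, tracked via *is_frag/*total_len; the final
+ * buffer of each MSDU decrements *msdu_cnt.
+ */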
+static void ath11k_dp_mon_get_buf_len(struct hal_rx_msdu_desc_info *info,
+ bool *is_frag, u32 *total_len,
+ u32 *frag_len, u32 *msdu_cnt)
+{
+ if (info->msdu_flags & RX_MSDU_DESC_INFO0_MSDU_CONTINUATION) {
+ if (!*is_frag) {
+ *total_len = info->msdu_len;
+ *is_frag = true;
+ }
+		ath11k_dp_mon_set_frag_len(total_len, frag_len);
+ } else {
+ if (*is_frag) {
+			ath11k_dp_mon_set_frag_len(total_len, frag_len);
+ } else {
+ *frag_len = info->msdu_len;
+ }
+ *is_frag = false;
+ *msdu_cnt -= 1;
+ }
+}
+
+/* clang stack usage explodes if this is inlined */
+static noinline_for_stack
+u32 ath11k_dp_rx_mon_mpdu_pop(struct ath11k *ar, int mac_id,
+ void *ring_entry, struct sk_buff **head_msdu,
+ struct sk_buff **tail_msdu, u32 *npackets,
+ u32 *ppdu_id)
+{
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
+ struct dp_rxdma_ring *rx_ring = &dp->rxdma_mon_buf_ring;
+ struct sk_buff *msdu = NULL, *last = NULL;
+ struct hal_rx_msdu_list msdu_list;
+ void *p_buf_addr_info, *p_last_buf_addr_info;
+ struct hal_rx_desc *rx_desc;
+ void *rx_msdu_link_desc;
+ dma_addr_t paddr;
+ u16 num_msdus = 0;
+ u32 rx_buf_size, rx_pkt_offset, sw_cookie;
+ u32 rx_bufs_used = 0, i = 0;
+ u32 msdu_ppdu_id = 0, msdu_cnt = 0;
+ u32 total_len = 0, frag_len = 0;
+ bool is_frag, is_first_msdu;
+ bool drop_mpdu = false;
+ struct ath11k_skb_rxcb *rxcb;
+ struct hal_reo_entrance_ring *ent_desc = ring_entry;
+ int buf_id;
+ u32 rx_link_buf_info[2];
+ u8 rbm;
+
+ if (!ar->ab->hw_params.rxdma1_enable)
+ rx_ring = &dp->rx_refill_buf_ring;
+
+ ath11k_hal_rx_reo_ent_buf_paddr_get(ring_entry, &paddr,
+ &sw_cookie,
+ &p_last_buf_addr_info, &rbm,
+ &msdu_cnt);
+
+ if (FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_PUSH_REASON,
+ ent_desc->info1) ==
+ HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) {
+ u8 rxdma_err =
+ FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE,
+ ent_desc->info1);
+ if (rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_FLUSH_REQUEST_ERR ||
+ rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_MPDU_LEN_ERR ||
+ rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_OVERFLOW_ERR) {
+ drop_mpdu = true;
+ pmon->rx_mon_stats.dest_mpdu_drop++;
+ }
+ }
+
+ is_frag = false;
+ is_first_msdu = true;
+
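+	/* Walk the chain of MSDU link descriptors belonging to this PPDU,
+	 * popping each buffered MSDU and chaining it into a head/tail skb
+	 * list, until the chain ends or msdu_cnt MSDUs have been consumed.
+	 */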
+ do {
+ if (pmon->mon_last_linkdesc_paddr == paddr) {
+ pmon->rx_mon_stats.dup_mon_linkdesc_cnt++;
+ return rx_bufs_used;
+ }
+
+ if (ar->ab->hw_params.rxdma1_enable)
+ rx_msdu_link_desc =
+ (void *)pmon->link_desc_banks[sw_cookie].vaddr +
+ (paddr - pmon->link_desc_banks[sw_cookie].paddr);
+ else
+ rx_msdu_link_desc =
+ (void *)ar->ab->dp.link_desc_banks[sw_cookie].vaddr +
+ (paddr - ar->ab->dp.link_desc_banks[sw_cookie].paddr);
+
+ ath11k_hal_rx_msdu_list_get(ar, rx_msdu_link_desc, &msdu_list,
+ &num_msdus);
+
+ for (i = 0; i < num_msdus; i++) {
+ u32 l2_hdr_offset;
+
+ if (pmon->mon_last_buf_cookie == msdu_list.sw_cookie[i]) {
+ ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
+ "i %d last_cookie %d is same\n",
+ i, pmon->mon_last_buf_cookie);
+ drop_mpdu = true;
+ pmon->rx_mon_stats.dup_mon_buf_cnt++;
+ continue;
+ }
+ buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
+ msdu_list.sw_cookie[i]);
+
+ spin_lock_bh(&rx_ring->idr_lock);
+ msdu = idr_find(&rx_ring->bufs_idr, buf_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+ if (!msdu) {
+ ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
+ "msdu_pop: invalid buf_id %d\n", buf_id);
+ goto next_msdu;
+ }
+ rxcb = ATH11K_SKB_RXCB(msdu);
+ if (!rxcb->unmapped) {
+ dma_unmap_single(ar->ab->dev, rxcb->paddr,
+ msdu->len +
+ skb_tailroom(msdu),
+ DMA_FROM_DEVICE);
+ rxcb->unmapped = 1;
+ }
+ if (drop_mpdu) {
+ ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
+ "i %d drop msdu %p *ppdu_id %x\n",
+ i, msdu, *ppdu_id);
+ dev_kfree_skb_any(msdu);
+ msdu = NULL;
+ goto next_msdu;
+ }
+
+ rx_desc = (struct hal_rx_desc *)msdu->data;
+
+ rx_pkt_offset = sizeof(struct hal_rx_desc);
+ l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, rx_desc);
+
+ if (is_first_msdu) {
+ if (!ath11k_dp_rxdesc_mpdu_valid(ar->ab, rx_desc)) {
+ drop_mpdu = true;
+ dev_kfree_skb_any(msdu);
+ msdu = NULL;
+ pmon->mon_last_linkdesc_paddr = paddr;
+ goto next_msdu;
+ }
+
+ msdu_ppdu_id =
+ ath11k_dp_rxdesc_get_ppduid(ar->ab, rx_desc);
+
+ if (ath11k_dp_rx_mon_comp_ppduid(msdu_ppdu_id,
+ ppdu_id,
+ &rx_bufs_used)) {
+ if (rx_bufs_used) {
+ drop_mpdu = true;
+ dev_kfree_skb_any(msdu);
+ msdu = NULL;
+ goto next_msdu;
+ }
+ return rx_bufs_used;
+ }
+ pmon->mon_last_linkdesc_paddr = paddr;
+ is_first_msdu = false;
+ }
+ ath11k_dp_mon_get_buf_len(&msdu_list.msdu_info[i],
+ &is_frag, &total_len,
+ &frag_len, &msdu_cnt);
+ rx_buf_size = rx_pkt_offset + l2_hdr_offset + frag_len;
+
+			if (ath11k_dp_pkt_set_pktlen(msdu, rx_buf_size)) {
+				msdu = NULL; /* skb was already freed on failure */
+				goto next_msdu;
+			}
+
+ if (!(*head_msdu))
+ *head_msdu = msdu;
+ else if (last)
+ last->next = msdu;
+
+ last = msdu;
+next_msdu:
+ pmon->mon_last_buf_cookie = msdu_list.sw_cookie[i];
+ rx_bufs_used++;
+ spin_lock_bh(&rx_ring->idr_lock);
+ idr_remove(&rx_ring->bufs_idr, buf_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+ }
+
+ ath11k_hal_rx_buf_addr_info_set(rx_link_buf_info, paddr, sw_cookie, rbm);
+
+ ath11k_dp_rx_mon_next_link_desc_get(rx_msdu_link_desc, &paddr,
+ &sw_cookie, &rbm,
+ &p_buf_addr_info);
+
+ if (ar->ab->hw_params.rxdma1_enable) {
+ if (ath11k_dp_rx_monitor_link_desc_return(ar,
+ p_last_buf_addr_info,
+ dp->mac_id))
+ ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
+ "dp_rx_monitor_link_desc_return failed");
+ } else {
+ ath11k_dp_rx_link_desc_return(ar->ab, rx_link_buf_info,
+ HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
+ }
+
+ p_last_buf_addr_info = p_buf_addr_info;
+
+ } while (paddr && msdu_cnt);
+
+ if (last)
+ last->next = NULL;
+
+ *tail_msdu = msdu;
+
+ if (msdu_cnt == 0)
+ *npackets = 1;
+
+ return rx_bufs_used;
+}
+
+static void ath11k_dp_rx_msdus_set_payload(struct ath11k *ar, struct sk_buff *msdu)
+{
+ u32 rx_pkt_offset, l2_hdr_offset;
+
+ rx_pkt_offset = ar->ab->hw_params.hal_desc_sz;
+ l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab,
+ (struct hal_rx_desc *)msdu->data);
+ skb_pull(msdu, rx_pkt_offset + l2_hdr_offset);
+}
+
+static struct sk_buff *
+ath11k_dp_rx_mon_merg_msdus(struct ath11k *ar,
+ u32 mac_id, struct sk_buff *head_msdu,
+ struct sk_buff *last_msdu,
+ struct ieee80211_rx_status *rxs, bool *fcs_err)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct sk_buff *msdu, *prev_buf;
+ struct hal_rx_desc *rx_desc;
+ char *hdr_desc;
+ u8 *dest, decap_format;
+ struct ieee80211_hdr_3addr *wh;
+ struct rx_attention *rx_attention;
+ u32 err_bitmap;
+
+ if (!head_msdu)
+ goto err_merge_fail;
+
+ rx_desc = (struct hal_rx_desc *)head_msdu->data;
+ rx_attention = ath11k_dp_rx_get_attention(ab, rx_desc);
+ err_bitmap = ath11k_dp_rx_h_attn_mpdu_err(rx_attention);
+
+ if (err_bitmap & DP_RX_MPDU_ERR_FCS)
+ *fcs_err = true;
+
+ if (ath11k_dp_rxdesc_get_mpdulen_err(rx_attention))
+ return NULL;
+
+ decap_format = ath11k_dp_rx_h_msdu_start_decap_type(ab, rx_desc);
+
+ ath11k_dp_rx_h_ppdu(ar, rx_desc, rxs);
+
+ if (decap_format == DP_RX_DECAP_TYPE_RAW) {
+ ath11k_dp_rx_msdus_set_payload(ar, head_msdu);
+
+ prev_buf = head_msdu;
+ msdu = head_msdu->next;
+
+ while (msdu) {
+ ath11k_dp_rx_msdus_set_payload(ar, msdu);
+
+ prev_buf = msdu;
+ msdu = msdu->next;
+ }
+
+ prev_buf->next = NULL;
+
+ skb_trim(prev_buf, prev_buf->len - HAL_RX_FCS_LEN);
+ } else if (decap_format == DP_RX_DECAP_TYPE_NATIVE_WIFI) {
+ u8 qos_pkt = 0;
+
+ rx_desc = (struct hal_rx_desc *)head_msdu->data;
+ hdr_desc = ath11k_dp_rxdesc_get_80211hdr(ab, rx_desc);
+
+		/* Base 3-address 802.11 header */
+ wh = (struct ieee80211_hdr_3addr *)hdr_desc;
+
+ if (ieee80211_is_data_qos(wh->frame_control))
+ qos_pkt = 1;
+
+ msdu = head_msdu;
+
+ while (msdu) {
+ ath11k_dp_rx_msdus_set_payload(ar, msdu);
+ if (qos_pkt) {
+ dest = skb_push(msdu, sizeof(__le16));
+ if (!dest)
+ goto err_merge_fail;
+ memcpy(dest, hdr_desc, sizeof(struct ieee80211_qos_hdr));
+ }
+ prev_buf = msdu;
+ msdu = msdu->next;
+ }
+ dest = skb_put(prev_buf, HAL_RX_FCS_LEN);
+ if (!dest)
+ goto err_merge_fail;
+
+ ath11k_dbg(ab, ATH11K_DBG_DATA,
+ "mpdu_buf %p mpdu_buf->len %u",
+ prev_buf, prev_buf->len);
+ } else {
+ ath11k_dbg(ab, ATH11K_DBG_DATA,
+ "decap format %d is not supported!\n",
+ decap_format);
+ goto err_merge_fail;
+ }
+
+ return head_msdu;
+
+err_merge_fail:
+ return NULL;
+}
+
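+/* Serialize the HE radiotap fields in the layout of struct
+ * ieee80211_radiotap_he: six little-endian 16-bit data words.
+ */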
+static void
+ath11k_dp_rx_update_radiotap_he(struct hal_rx_mon_ppdu_info *rx_status,
+ u8 *rtap_buf)
+{
+ u32 rtap_len = 0;
+
+ put_unaligned_le16(rx_status->he_data1, &rtap_buf[rtap_len]);
+ rtap_len += 2;
+
+ put_unaligned_le16(rx_status->he_data2, &rtap_buf[rtap_len]);
+ rtap_len += 2;
+
+ put_unaligned_le16(rx_status->he_data3, &rtap_buf[rtap_len]);
+ rtap_len += 2;
+
+ put_unaligned_le16(rx_status->he_data4, &rtap_buf[rtap_len]);
+ rtap_len += 2;
+
+ put_unaligned_le16(rx_status->he_data5, &rtap_buf[rtap_len]);
+ rtap_len += 2;
+
+ put_unaligned_le16(rx_status->he_data6, &rtap_buf[rtap_len]);
+}
+
+static void
+ath11k_dp_rx_update_radiotap_he_mu(struct hal_rx_mon_ppdu_info *rx_status,
+ u8 *rtap_buf)
+{
+ u32 rtap_len = 0;
+
+ put_unaligned_le16(rx_status->he_flags1, &rtap_buf[rtap_len]);
+ rtap_len += 2;
+
+ put_unaligned_le16(rx_status->he_flags2, &rtap_buf[rtap_len]);
+ rtap_len += 2;
+
+ rtap_buf[rtap_len] = rx_status->he_RU[0];
+ rtap_len += 1;
+
+ rtap_buf[rtap_len] = rx_status->he_RU[1];
+ rtap_len += 1;
+
+ rtap_buf[rtap_len] = rx_status->he_RU[2];
+ rtap_len += 1;
+
+ rtap_buf[rtap_len] = rx_status->he_RU[3];
+}
+
+static void ath11k_update_radiotap(struct ath11k *ar,
+ struct hal_rx_mon_ppdu_info *ppduinfo,
+ struct sk_buff *mon_skb,
+ struct ieee80211_rx_status *rxs)
+{
+ struct ieee80211_supported_band *sband;
+ u8 *ptr = NULL;
+
+ rxs->flag |= RX_FLAG_MACTIME_START;
+ rxs->signal = ppduinfo->rssi_comb + ATH11K_DEFAULT_NOISE_FLOOR;
+
+ if (ppduinfo->nss)
+ rxs->nss = ppduinfo->nss;
+
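+	/* For HE and HE-MU PPDUs, mac80211 expects the radiotap HE(-MU)
+	 * payload to be prepended to the frame when the corresponding
+	 * RX_FLAG_RADIOTAP_* flag is set.
+	 */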
+ if (ppduinfo->he_mu_flags) {
+ rxs->flag |= RX_FLAG_RADIOTAP_HE_MU;
+ rxs->encoding = RX_ENC_HE;
+ ptr = skb_push(mon_skb, sizeof(struct ieee80211_radiotap_he_mu));
+ ath11k_dp_rx_update_radiotap_he_mu(ppduinfo, ptr);
+ } else if (ppduinfo->he_flags) {
+ rxs->flag |= RX_FLAG_RADIOTAP_HE;
+ rxs->encoding = RX_ENC_HE;
+ ptr = skb_push(mon_skb, sizeof(struct ieee80211_radiotap_he));
+ ath11k_dp_rx_update_radiotap_he(ppduinfo, ptr);
+ rxs->rate_idx = ppduinfo->rate;
+ } else if (ppduinfo->vht_flags) {
+ rxs->encoding = RX_ENC_VHT;
+ rxs->rate_idx = ppduinfo->rate;
+ } else if (ppduinfo->ht_flags) {
+ rxs->encoding = RX_ENC_HT;
+ rxs->rate_idx = ppduinfo->rate;
+ } else {
+ rxs->encoding = RX_ENC_LEGACY;
+ sband = &ar->mac.sbands[rxs->band];
+ rxs->rate_idx = ath11k_mac_hw_rate_to_idx(sband, ppduinfo->rate,
+ ppduinfo->cck_flag);
+ }
+
+ rxs->mactime = ppduinfo->tsft;
+}
+
+static int ath11k_dp_rx_mon_deliver(struct ath11k *ar, u32 mac_id,
+ struct sk_buff *head_msdu,
+ struct hal_rx_mon_ppdu_info *ppduinfo,
+ struct sk_buff *tail_msdu,
+ struct napi_struct *napi)
+{
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ struct sk_buff *mon_skb, *skb_next, *header;
+ struct ieee80211_rx_status *rxs = &dp->rx_status;
+ bool fcs_err = false;
+
+ mon_skb = ath11k_dp_rx_mon_merg_msdus(ar, mac_id, head_msdu,
+ tail_msdu, rxs, &fcs_err);
+
+ if (!mon_skb)
+ goto mon_deliver_fail;
+
+ header = mon_skb;
+
+ rxs->flag = 0;
+
+ if (fcs_err)
+ rxs->flag = RX_FLAG_FAILED_FCS_CRC;
+
+ do {
+ skb_next = mon_skb->next;
+ if (!skb_next)
+ rxs->flag &= ~RX_FLAG_AMSDU_MORE;
+ else
+ rxs->flag |= RX_FLAG_AMSDU_MORE;
+
+ if (mon_skb == header) {
+ header = NULL;
+ rxs->flag &= ~RX_FLAG_ALLOW_SAME_PN;
+ } else {
+ rxs->flag |= RX_FLAG_ALLOW_SAME_PN;
+ }
+ rxs->flag |= RX_FLAG_ONLY_MONITOR;
+ ath11k_update_radiotap(ar, ppduinfo, mon_skb, rxs);
+
+ ath11k_dp_rx_deliver_msdu(ar, napi, mon_skb, rxs);
+ mon_skb = skb_next;
+ } while (mon_skb);
+ rxs->flag = 0;
+
+ return 0;
+
+mon_deliver_fail:
+ mon_skb = head_msdu;
+ while (mon_skb) {
+ skb_next = mon_skb->next;
+ dev_kfree_skb_any(mon_skb);
+ mon_skb = skb_next;
+ }
+ return -EINVAL;
+}
+
+/* The destination ring processing is considered stuck if the destination
+ * ring does not move while the status ring advances by 16 PPDUs. As a
+ * workaround, the destination ring processing skips that PPDU.
+ */
+#define MON_DEST_RING_STUCK_MAX_CNT 16
+
+static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, int mac_id,
+ u32 quota, struct napi_struct *napi)
+{
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
+ const struct ath11k_hw_hal_params *hal_params;
+ void *ring_entry;
+ struct hal_srng *mon_dst_srng;
+ u32 ppdu_id;
+ u32 rx_bufs_used;
+ u32 ring_id;
+ struct ath11k_pdev_mon_stats *rx_mon_stats;
+ u32 npackets = 0;
+ u32 mpdu_rx_bufs_used;
+
+ if (ar->ab->hw_params.rxdma1_enable)
+ ring_id = dp->rxdma_mon_dst_ring.ring_id;
+ else
+ ring_id = dp->rxdma_err_dst_ring[mac_id].ring_id;
+
+ mon_dst_srng = &ar->ab->hal.srng_list[ring_id];
+
+ spin_lock_bh(&pmon->mon_lock);
+
+ spin_lock_bh(&mon_dst_srng->lock);
+ ath11k_hal_srng_access_begin(ar->ab, mon_dst_srng);
+
+ ppdu_id = pmon->mon_ppdu_info.ppdu_id;
+ rx_bufs_used = 0;
+ rx_mon_stats = &pmon->rx_mon_stats;
+
+ while ((ring_entry = ath11k_hal_srng_dst_peek(ar->ab, mon_dst_srng))) {
+ struct sk_buff *head_msdu, *tail_msdu;
+
+ head_msdu = NULL;
+ tail_msdu = NULL;
+
+ mpdu_rx_bufs_used = ath11k_dp_rx_mon_mpdu_pop(ar, mac_id, ring_entry,
+ &head_msdu,
+ &tail_msdu,
+ &npackets, &ppdu_id);
+
+ rx_bufs_used += mpdu_rx_bufs_used;
+
+ if (mpdu_rx_bufs_used) {
+ dp->mon_dest_ring_stuck_cnt = 0;
+ } else {
+ dp->mon_dest_ring_stuck_cnt++;
+ rx_mon_stats->dest_mon_not_reaped++;
+ }
+
+ if (dp->mon_dest_ring_stuck_cnt > MON_DEST_RING_STUCK_MAX_CNT) {
+ rx_mon_stats->dest_mon_stuck++;
+ ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
+ "status ring ppdu_id=%d dest ring ppdu_id=%d mon_dest_ring_stuck_cnt=%d dest_mon_not_reaped=%u dest_mon_stuck=%u\n",
+ pmon->mon_ppdu_info.ppdu_id, ppdu_id,
+ dp->mon_dest_ring_stuck_cnt,
+ rx_mon_stats->dest_mon_not_reaped,
+ rx_mon_stats->dest_mon_stuck);
+ pmon->mon_ppdu_info.ppdu_id = ppdu_id;
+ continue;
+ }
+
+ if (ppdu_id != pmon->mon_ppdu_info.ppdu_id) {
+ pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
+ ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
+ "dest_rx: new ppdu_id %x != status ppdu_id %x dest_mon_not_reaped = %u dest_mon_stuck = %u\n",
+ ppdu_id, pmon->mon_ppdu_info.ppdu_id,
+ rx_mon_stats->dest_mon_not_reaped,
+ rx_mon_stats->dest_mon_stuck);
+ break;
+ }
+ if (head_msdu && tail_msdu) {
+ ath11k_dp_rx_mon_deliver(ar, dp->mac_id, head_msdu,
+ &pmon->mon_ppdu_info,
+ tail_msdu, napi);
+ rx_mon_stats->dest_mpdu_done++;
+ }
+
+ ring_entry = ath11k_hal_srng_dst_get_next_entry(ar->ab,
+ mon_dst_srng);
+ }
+ ath11k_hal_srng_access_end(ar->ab, mon_dst_srng);
+ spin_unlock_bh(&mon_dst_srng->lock);
+
+ spin_unlock_bh(&pmon->mon_lock);
+
+ if (rx_bufs_used) {
+ rx_mon_stats->dest_ppdu_done++;
+ hal_params = ar->ab->hw_params.hal_params;
+
+ if (ar->ab->hw_params.rxdma1_enable)
+ ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id,
+ &dp->rxdma_mon_buf_ring,
+ rx_bufs_used,
+ hal_params->rx_buf_rbm);
+ else
+ ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id,
+ &dp->rx_refill_buf_ring,
+ rx_bufs_used,
+ hal_params->rx_buf_rbm);
+ }
+}
+
+int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id,
+ struct napi_struct *napi, int budget)
+{
+ struct ath11k *ar = ath11k_ab_to_ar(ab, mac_id);
+ enum hal_rx_mon_status hal_status;
+ struct sk_buff *skb;
+ struct sk_buff_head skb_list;
+ struct ath11k_peer *peer;
+ struct ath11k_sta *arsta;
+ int num_buffs_reaped = 0;
+ u32 rx_buf_sz;
+ u16 log_type;
+ struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&ar->dp.mon_data;
+ struct ath11k_pdev_mon_stats *rx_mon_stats = &pmon->rx_mon_stats;
+ struct hal_rx_mon_ppdu_info *ppdu_info = &pmon->mon_ppdu_info;
+
+ __skb_queue_head_init(&skb_list);
+
+ num_buffs_reaped = ath11k_dp_rx_reap_mon_status_ring(ab, mac_id, &budget,
+ &skb_list);
+ if (!num_buffs_reaped)
+ goto exit;
+
+ memset(ppdu_info, 0, sizeof(*ppdu_info));
+ ppdu_info->peer_id = HAL_INVALID_PEERID;
+
+ while ((skb = __skb_dequeue(&skb_list))) {
+ if (ath11k_debugfs_is_pktlog_lite_mode_enabled(ar)) {
+ log_type = ATH11K_PKTLOG_TYPE_LITE_RX;
+ rx_buf_sz = DP_RX_BUFFER_SIZE_LITE;
+ } else if (ath11k_debugfs_is_pktlog_rx_stats_enabled(ar)) {
+ log_type = ATH11K_PKTLOG_TYPE_RX_STATBUF;
+ rx_buf_sz = DP_RX_BUFFER_SIZE;
+ } else {
+ log_type = ATH11K_PKTLOG_TYPE_INVALID;
+ rx_buf_sz = 0;
+ }
+
+ if (log_type != ATH11K_PKTLOG_TYPE_INVALID)
+ trace_ath11k_htt_rxdesc(ar, skb->data, log_type, rx_buf_sz);
+
+ memset(ppdu_info, 0, sizeof(*ppdu_info));
+ ppdu_info->peer_id = HAL_INVALID_PEERID;
+ hal_status = ath11k_hal_rx_parse_mon_status(ab, ppdu_info, skb);
+
+ if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags) &&
+ pmon->mon_ppdu_status == DP_PPDU_STATUS_START &&
+ hal_status == HAL_TLV_STATUS_PPDU_DONE) {
+ rx_mon_stats->status_ppdu_done++;
+ pmon->mon_ppdu_status = DP_PPDU_STATUS_DONE;
+ if (!ab->hw_params.full_monitor_mode) {
+ ath11k_dp_rx_mon_dest_process(ar, mac_id,
+ budget, napi);
+ pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
+ }
+ }
+
+ if (ppdu_info->peer_id == HAL_INVALID_PEERID ||
+ hal_status != HAL_RX_MON_STATUS_PPDU_DONE) {
+ dev_kfree_skb_any(skb);
+ continue;
+ }
+
+ rcu_read_lock();
+ spin_lock_bh(&ab->base_lock);
+ peer = ath11k_peer_find_by_id(ab, ppdu_info->peer_id);
+
+ if (!peer || !peer->sta) {
+ ath11k_dbg(ab, ATH11K_DBG_DATA,
+ "failed to find the peer with peer_id %d\n",
+ ppdu_info->peer_id);
+ goto next_skb;
+ }
+
+ arsta = ath11k_sta_to_arsta(peer->sta);
+ ath11k_dp_rx_update_peer_stats(arsta, ppdu_info);
+
+ if (ath11k_debugfs_is_pktlog_peer_valid(ar, peer->addr))
+ trace_ath11k_htt_rxdesc(ar, skb->data, log_type, rx_buf_sz);
+
+next_skb:
+ spin_unlock_bh(&ab->base_lock);
+ rcu_read_unlock();
+
+ dev_kfree_skb_any(skb);
+ memset(ppdu_info, 0, sizeof(*ppdu_info));
+ ppdu_info->peer_id = HAL_INVALID_PEERID;
+ }
+exit:
+ return num_buffs_reaped;
+}
+
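+/* Pop the MSDUs of one MPDU from a SW monitor ring entry: walk the
+ * MSDU link descriptors, unmap each buffer and chain the buffers into
+ * a head/tail list. Returns the number of rx buffers consumed; end of
+ * PPDU is reported via sw_mon_entries.
+ */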
+static u32
+ath11k_dp_rx_full_mon_mpdu_pop(struct ath11k *ar,
+ void *ring_entry, struct sk_buff **head_msdu,
+ struct sk_buff **tail_msdu,
+ struct hal_sw_mon_ring_entries *sw_mon_entries)
+{
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ struct ath11k_mon_data *pmon = &dp->mon_data;
+ struct dp_rxdma_ring *rx_ring = &dp->rxdma_mon_buf_ring;
+ struct sk_buff *msdu = NULL, *last = NULL;
+ struct hal_sw_monitor_ring *sw_desc = ring_entry;
+ struct hal_rx_msdu_list msdu_list;
+ struct hal_rx_desc *rx_desc;
+ struct ath11k_skb_rxcb *rxcb;
+ void *rx_msdu_link_desc;
+ void *p_buf_addr_info, *p_last_buf_addr_info;
+ int buf_id, i = 0;
+ u32 rx_buf_size, rx_pkt_offset, l2_hdr_offset;
+ u32 rx_bufs_used = 0, msdu_cnt = 0;
+ u32 total_len = 0, frag_len = 0, sw_cookie;
+ u16 num_msdus = 0;
+ u8 rxdma_err, rbm;
+ bool is_frag, is_first_msdu;
+ bool drop_mpdu = false;
+
+ ath11k_hal_rx_sw_mon_ring_buf_paddr_get(ring_entry, sw_mon_entries);
+
+ sw_cookie = sw_mon_entries->mon_dst_sw_cookie;
+ sw_mon_entries->end_of_ppdu = false;
+ sw_mon_entries->drop_ppdu = false;
+ p_last_buf_addr_info = sw_mon_entries->dst_buf_addr_info;
+ msdu_cnt = sw_mon_entries->msdu_cnt;
+
+ sw_mon_entries->end_of_ppdu =
+ FIELD_GET(HAL_SW_MON_RING_INFO0_END_OF_PPDU, sw_desc->info0);
+ if (sw_mon_entries->end_of_ppdu)
+ return rx_bufs_used;
+
+ if (FIELD_GET(HAL_SW_MON_RING_INFO0_RXDMA_PUSH_REASON,
+ sw_desc->info0) ==
+ HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) {
+ rxdma_err =
+ FIELD_GET(HAL_SW_MON_RING_INFO0_RXDMA_ERROR_CODE,
+ sw_desc->info0);
+ if (rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_FLUSH_REQUEST_ERR ||
+ rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_MPDU_LEN_ERR ||
+ rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_OVERFLOW_ERR) {
+ pmon->rx_mon_stats.dest_mpdu_drop++;
+ drop_mpdu = true;
+ }
+ }
+
+ is_frag = false;
+ is_first_msdu = true;
+
+ do {
+ rx_msdu_link_desc =
+ (u8 *)pmon->link_desc_banks[sw_cookie].vaddr +
+ (sw_mon_entries->mon_dst_paddr -
+ pmon->link_desc_banks[sw_cookie].paddr);
+
+ ath11k_hal_rx_msdu_list_get(ar, rx_msdu_link_desc, &msdu_list,
+ &num_msdus);
+
+ for (i = 0; i < num_msdus; i++) {
+ buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
+ msdu_list.sw_cookie[i]);
+
+ spin_lock_bh(&rx_ring->idr_lock);
+ msdu = idr_find(&rx_ring->bufs_idr, buf_id);
+ if (!msdu) {
+ ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
+ "full mon msdu_pop: invalid buf_id %d\n",
+ buf_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+ goto next_msdu;
+ }
+ idr_remove(&rx_ring->bufs_idr, buf_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+
+ rxcb = ATH11K_SKB_RXCB(msdu);
+ if (!rxcb->unmapped) {
+ dma_unmap_single(ar->ab->dev, rxcb->paddr,
+ msdu->len +
+ skb_tailroom(msdu),
+ DMA_FROM_DEVICE);
+ rxcb->unmapped = 1;
+ }
+ if (drop_mpdu) {
+ ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
+ "full mon: i %d drop msdu %p *ppdu_id %x\n",
+ i, msdu, sw_mon_entries->ppdu_id);
+ dev_kfree_skb_any(msdu);
+ msdu_cnt--;
+ goto next_msdu;
+ }
+
+ rx_desc = (struct hal_rx_desc *)msdu->data;
+
+ rx_pkt_offset = sizeof(struct hal_rx_desc);
+ l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, rx_desc);
+
+ if (is_first_msdu) {
+ if (!ath11k_dp_rxdesc_mpdu_valid(ar->ab, rx_desc)) {
+ drop_mpdu = true;
+ dev_kfree_skb_any(msdu);
+ msdu = NULL;
+ goto next_msdu;
+ }
+ is_first_msdu = false;
+ }
+
+ ath11k_dp_mon_get_buf_len(&msdu_list.msdu_info[i],
+ &is_frag, &total_len,
+ &frag_len, &msdu_cnt);
+
+ rx_buf_size = rx_pkt_offset + l2_hdr_offset + frag_len;
+
+ ath11k_dp_pkt_set_pktlen(msdu, rx_buf_size);
+
+ if (!(*head_msdu))
+ *head_msdu = msdu;
+ else if (last)
+ last->next = msdu;
+
+ last = msdu;
+next_msdu:
+ rx_bufs_used++;
+ }
+
+ ath11k_dp_rx_mon_next_link_desc_get(rx_msdu_link_desc,
+ &sw_mon_entries->mon_dst_paddr,
+ &sw_mon_entries->mon_dst_sw_cookie,
+ &rbm,
+ &p_buf_addr_info);
+
+ if (ath11k_dp_rx_monitor_link_desc_return(ar,
+ p_last_buf_addr_info,
+ dp->mac_id))
+ ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
+ "full mon: dp_rx_monitor_link_desc_return failed\n");
+
+ p_last_buf_addr_info = p_buf_addr_info;
+
+ } while (sw_mon_entries->mon_dst_paddr && msdu_cnt);
+
+ if (last)
+ last->next = NULL;
+
+ *tail_msdu = msdu;
+
+ return rx_bufs_used;
+}
+
+static int ath11k_dp_rx_full_mon_prepare_mpdu(struct ath11k_dp *dp,
+ struct dp_full_mon_mpdu *mon_mpdu,
+ struct sk_buff *head,
+ struct sk_buff *tail)
+{
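+ /* The passed mon_mpdu is not reused; a fresh node is allocated
+ * and queued on the full monitor MPDU list.
+ */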
+ mon_mpdu = kzalloc(sizeof(*mon_mpdu), GFP_ATOMIC);
+ if (!mon_mpdu)
+ return -ENOMEM;
+
+ list_add_tail(&mon_mpdu->list, &dp->dp_full_mon_mpdu_list);
+ mon_mpdu->head = head;
+ mon_mpdu->tail = tail;
+
+ return 0;
+}
+
+static void ath11k_dp_rx_full_mon_drop_ppdu(struct ath11k_dp *dp,
+ struct dp_full_mon_mpdu *mon_mpdu)
+{
+ struct dp_full_mon_mpdu *tmp;
+ struct sk_buff *tmp_msdu, *skb_next;
+
+ if (list_empty(&dp->dp_full_mon_mpdu_list))
+ return;
+
+ list_for_each_entry_safe(mon_mpdu, tmp, &dp->dp_full_mon_mpdu_list, list) {
+ list_del(&mon_mpdu->list);
+
+ tmp_msdu = mon_mpdu->head;
+ while (tmp_msdu) {
+ skb_next = tmp_msdu->next;
+ dev_kfree_skb_any(tmp_msdu);
+ tmp_msdu = skb_next;
+ }
+
+ kfree(mon_mpdu);
+ }
+}
+
+static int ath11k_dp_rx_full_mon_deliver_ppdu(struct ath11k *ar,
+ int mac_id,
+ struct ath11k_mon_data *pmon,
+ struct napi_struct *napi)
+{
+ struct ath11k_pdev_mon_stats *rx_mon_stats;
+ struct dp_full_mon_mpdu *tmp;
+ struct dp_full_mon_mpdu *mon_mpdu = pmon->mon_mpdu;
+ struct sk_buff *head_msdu, *tail_msdu;
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_dp *dp = &ab->dp;
+ int ret = 0;
+
+ rx_mon_stats = &pmon->rx_mon_stats;
+
+ list_for_each_entry_safe(mon_mpdu, tmp, &dp->dp_full_mon_mpdu_list, list) {
+ list_del(&mon_mpdu->list);
+ head_msdu = mon_mpdu->head;
+ tail_msdu = mon_mpdu->tail;
+ if (head_msdu && tail_msdu) {
+ ret = ath11k_dp_rx_mon_deliver(ar, mac_id, head_msdu,
+ &pmon->mon_ppdu_info,
+ tail_msdu, napi);
+ rx_mon_stats->dest_mpdu_done++;
+ ath11k_dbg(ar->ab, ATH11K_DBG_DATA, "full mon: deliver ppdu\n");
+ }
+ kfree(mon_mpdu);
+ }
+
+ return ret;
+}
+
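+/* While the destination ring is held, reap the status ring one buffer
+ * at a time until the status buffer of the held PPDU is found. On a
+ * match, deliver the queued MPDUs; if the status ring runs ahead or no
+ * status buffer address is available, mark the PPDU to be dropped and
+ * release the hold.
+ */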
+static int
+ath11k_dp_rx_process_full_mon_status_ring(struct ath11k_base *ab, int mac_id,
+ struct napi_struct *napi, int budget)
+{
+ struct ath11k *ar = ab->pdevs[mac_id].ar;
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ struct ath11k_mon_data *pmon = &dp->mon_data;
+ struct hal_sw_mon_ring_entries *sw_mon_entries;
+ int quota = 0, work = 0, count;
+
+ sw_mon_entries = &pmon->sw_mon_entries;
+
+ while (pmon->hold_mon_dst_ring) {
+ quota = ath11k_dp_rx_process_mon_status(ab, mac_id,
+ napi, 1);
+ if (pmon->buf_state == DP_MON_STATUS_MATCH) {
+ count = sw_mon_entries->status_buf_count;
+ if (count > 1) {
+ quota += ath11k_dp_rx_process_mon_status(ab, mac_id,
+ napi, count);
+ }
+
+ ath11k_dp_rx_full_mon_deliver_ppdu(ar, dp->mac_id,
+ pmon, napi);
+ pmon->hold_mon_dst_ring = false;
+ } else if (!pmon->mon_status_paddr ||
+ pmon->buf_state == DP_MON_STATUS_LEAD) {
+ sw_mon_entries->drop_ppdu = true;
+ pmon->hold_mon_dst_ring = false;
+ }
+
+ if (!quota)
+ break;
+
+ work += quota;
+ }
+
+ if (sw_mon_entries->drop_ppdu)
+ ath11k_dp_rx_full_mon_drop_ppdu(&ab->dp, pmon->mon_mpdu);
+
+ return work;
+}
+
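+/* Full monitor mode reaps the destination ring first: MPDUs are queued
+ * up until the end-of-PPDU entry is seen, then the destination ring is
+ * held while the status ring is reaped for the matching PPDU.
+ */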
+static int ath11k_dp_full_mon_process_rx(struct ath11k_base *ab, int mac_id,
+ struct napi_struct *napi, int budget)
+{
+ struct ath11k *ar = ab->pdevs[mac_id].ar;
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ struct ath11k_mon_data *pmon = &dp->mon_data;
+ struct hal_sw_mon_ring_entries *sw_mon_entries;
+ struct ath11k_pdev_mon_stats *rx_mon_stats;
+ struct sk_buff *head_msdu, *tail_msdu;
+ struct hal_srng *mon_dst_srng;
+ void *ring_entry;
+ u32 rx_bufs_used = 0, mpdu_rx_bufs_used;
+ int quota = 0, ret;
+ bool break_dst_ring = false;
+
+ spin_lock_bh(&pmon->mon_lock);
+
+ sw_mon_entries = &pmon->sw_mon_entries;
+ rx_mon_stats = &pmon->rx_mon_stats;
+
+ if (pmon->hold_mon_dst_ring) {
+ spin_unlock_bh(&pmon->mon_lock);
+ goto reap_status_ring;
+ }
+
+ mon_dst_srng = &ar->ab->hal.srng_list[dp->rxdma_mon_dst_ring.ring_id];
+ spin_lock_bh(&mon_dst_srng->lock);
+
+ ath11k_hal_srng_access_begin(ar->ab, mon_dst_srng);
+ while ((ring_entry = ath11k_hal_srng_dst_peek(ar->ab, mon_dst_srng))) {
+ head_msdu = NULL;
+ tail_msdu = NULL;
+
+ mpdu_rx_bufs_used = ath11k_dp_rx_full_mon_mpdu_pop(ar, ring_entry,
+ &head_msdu,
+ &tail_msdu,
+ sw_mon_entries);
+ rx_bufs_used += mpdu_rx_bufs_used;
+
+ if (!sw_mon_entries->end_of_ppdu) {
+ if (head_msdu) {
+ ret = ath11k_dp_rx_full_mon_prepare_mpdu(&ab->dp,
+ pmon->mon_mpdu,
+ head_msdu,
+ tail_msdu);
+ if (ret)
+ break_dst_ring = true;
+ }
+
+ goto next_entry;
+ } else {
+ if (!sw_mon_entries->ppdu_id &&
+ !sw_mon_entries->mon_status_paddr) {
+ break_dst_ring = true;
+ goto next_entry;
+ }
+ }
+
+ rx_mon_stats->dest_ppdu_done++;
+ pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
+ pmon->buf_state = DP_MON_STATUS_LAG;
+ pmon->mon_status_paddr = sw_mon_entries->mon_status_paddr;
+ pmon->hold_mon_dst_ring = true;
+next_entry:
+ ring_entry = ath11k_hal_srng_dst_get_next_entry(ar->ab,
+ mon_dst_srng);
+ if (break_dst_ring)
+ break;
+ }
+
+ ath11k_hal_srng_access_end(ar->ab, mon_dst_srng);
+ spin_unlock_bh(&mon_dst_srng->lock);
+ spin_unlock_bh(&pmon->mon_lock);
+
+ if (rx_bufs_used) {
+ ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id,
+ &dp->rxdma_mon_buf_ring,
+ rx_bufs_used,
+ HAL_RX_BUF_RBM_SW3_BM);
+ }
+
+reap_status_ring:
+ quota = ath11k_dp_rx_process_full_mon_status_ring(ab, mac_id,
+ napi, budget);
+
+ return quota;
+}
+
+int ath11k_dp_rx_process_mon_rings(struct ath11k_base *ab, int mac_id,
+ struct napi_struct *napi, int budget)
+{
+ struct ath11k *ar = ath11k_ab_to_ar(ab, mac_id);
+ int ret = 0;
+
+ if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags) &&
+ ab->hw_params.full_monitor_mode)
+ ret = ath11k_dp_full_mon_process_rx(ab, mac_id, napi, budget);
+ else
+ ret = ath11k_dp_rx_process_mon_status(ab, mac_id, napi, budget);
+
+ return ret;
+}
+
+static int ath11k_dp_rx_pdev_mon_status_attach(struct ath11k *ar)
+{
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
+
+ pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
+
+ memset(&pmon->rx_mon_stats, 0,
+ sizeof(pmon->rx_mon_stats));
+ return 0;
+}
+
+int ath11k_dp_rx_pdev_mon_attach(struct ath11k *ar)
+{
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ struct ath11k_mon_data *pmon = &dp->mon_data;
+ struct hal_srng *mon_desc_srng = NULL;
+ struct dp_srng *dp_srng;
+ int ret = 0;
+ u32 n_link_desc = 0;
+
+ ret = ath11k_dp_rx_pdev_mon_status_attach(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "pdev_mon_status_attach() failed");
+ return ret;
+ }
+
+ /* if rxdma1_enable is false, no need to setup
+ * rxdma_mon_desc_ring.
+ */
+ if (!ar->ab->hw_params.rxdma1_enable)
+ return 0;
+
+ dp_srng = &dp->rxdma_mon_desc_ring;
+ n_link_desc = dp_srng->size /
+ ath11k_hal_srng_get_entrysize(ar->ab, HAL_RXDMA_MONITOR_DESC);
+ mon_desc_srng =
+ &ar->ab->hal.srng_list[dp->rxdma_mon_desc_ring.ring_id];
+
+ ret = ath11k_dp_link_desc_setup(ar->ab, pmon->link_desc_banks,
+ HAL_RXDMA_MONITOR_DESC, mon_desc_srng,
+ n_link_desc);
+ if (ret) {
+ ath11k_warn(ar->ab, "mon_link_desc_pool_setup() failed");
+ return ret;
+ }
+ pmon->mon_last_linkdesc_paddr = 0;
+ pmon->mon_last_buf_cookie = DP_RX_DESC_COOKIE_MAX + 1;
+ spin_lock_init(&pmon->mon_lock);
+
+ return 0;
+}
+
+static int ath11k_dp_mon_link_free(struct ath11k *ar)
+{
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ struct ath11k_mon_data *pmon = &dp->mon_data;
+
+ ath11k_dp_link_desc_cleanup(ar->ab, pmon->link_desc_banks,
+ HAL_RXDMA_MONITOR_DESC,
+ &dp->rxdma_mon_desc_ring);
+ return 0;
+}
+
+int ath11k_dp_rx_pdev_mon_detach(struct ath11k *ar)
+{
+ ath11k_dp_mon_link_free(ar);
+ return 0;
+}
+
+int ath11k_dp_rx_pktlog_start(struct ath11k_base *ab)
+{
+ /* start reap timer */
+ mod_timer(&ab->mon_reap_timer,
+ jiffies + msecs_to_jiffies(ATH11K_MON_TIMER_INTERVAL));
+
+ return 0;
+}
+
+int ath11k_dp_rx_pktlog_stop(struct ath11k_base *ab, bool stop_timer)
+{
+ int ret;
+
+ if (stop_timer)
+ timer_delete_sync(&ab->mon_reap_timer);
+
+ /* reap all the monitor related rings */
+ ret = ath11k_dp_purge_mon_ring(ab);
+ if (ret) {
+ ath11k_warn(ab, "failed to purge dp mon ring: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.h b/drivers/net/wireless/ath/ath11k/dp_rx.h
new file mode 100644
index 000000000000..c322e30caa96
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/dp_rx.h
@@ -0,0 +1,101 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+#ifndef ATH11K_DP_RX_H
+#define ATH11K_DP_RX_H
+
+#include "core.h"
+#include "rx_desc.h"
+#include "debug.h"
+
+#define DP_MAX_NWIFI_HDR_LEN 30
+
+#define DP_RX_MPDU_ERR_FCS BIT(0)
+#define DP_RX_MPDU_ERR_DECRYPT BIT(1)
+#define DP_RX_MPDU_ERR_TKIP_MIC BIT(2)
+#define DP_RX_MPDU_ERR_AMSDU_ERR BIT(3)
+#define DP_RX_MPDU_ERR_OVERFLOW BIT(4)
+#define DP_RX_MPDU_ERR_MSDU_LEN BIT(5)
+#define DP_RX_MPDU_ERR_MPDU_LEN BIT(6)
+#define DP_RX_MPDU_ERR_UNENCRYPTED_FRAME BIT(7)
+
+enum dp_rx_decap_type {
+ DP_RX_DECAP_TYPE_RAW,
+ DP_RX_DECAP_TYPE_NATIVE_WIFI,
+ DP_RX_DECAP_TYPE_ETHERNET2_DIX,
+ DP_RX_DECAP_TYPE_8023,
+};
+
+struct ath11k_dp_amsdu_subframe_hdr {
+ u8 dst[ETH_ALEN];
+ u8 src[ETH_ALEN];
+ __be16 len;
+} __packed;
+
+struct ath11k_dp_rfc1042_hdr {
+ u8 llc_dsap;
+ u8 llc_ssap;
+ u8 llc_ctrl;
+ u8 snap_oui[3];
+ __be16 snap_type;
+} __packed;
+
+int ath11k_dp_rx_ampdu_start(struct ath11k *ar,
+ struct ieee80211_ampdu_params *params);
+int ath11k_dp_rx_ampdu_stop(struct ath11k *ar,
+ struct ieee80211_ampdu_params *params);
+int ath11k_dp_peer_rx_pn_replay_config(struct ath11k_vif *arvif,
+ const u8 *peer_addr,
+ enum set_key_cmd key_cmd,
+ struct ieee80211_key_conf *key);
+void ath11k_peer_frags_flush(struct ath11k *ar, struct ath11k_peer *peer);
+void ath11k_peer_rx_tid_cleanup(struct ath11k *ar, struct ath11k_peer *peer);
+void ath11k_peer_rx_tid_delete(struct ath11k *ar,
+ struct ath11k_peer *peer, u8 tid);
+int ath11k_peer_rx_tid_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id,
+ u8 tid, u32 ba_win_sz, u16 ssn,
+ enum hal_pn_type pn_type);
+void ath11k_dp_htt_htc_t2h_msg_handler(struct ath11k_base *ab,
+ struct sk_buff *skb);
+int ath11k_dp_pdev_reo_setup(struct ath11k_base *ab);
+void ath11k_dp_pdev_reo_cleanup(struct ath11k_base *ab);
+int ath11k_dp_rx_pdev_alloc(struct ath11k_base *ab, int pdev_idx);
+void ath11k_dp_rx_pdev_free(struct ath11k_base *ab, int pdev_idx);
+void ath11k_dp_reo_cmd_list_cleanup(struct ath11k_base *ab);
+void ath11k_dp_process_reo_status(struct ath11k_base *ab);
+int ath11k_dp_process_rxdma_err(struct ath11k_base *ab, int mac_id, int budget);
+int ath11k_dp_rx_process_wbm_err(struct ath11k_base *ab,
+ struct napi_struct *napi, int budget);
+int ath11k_dp_process_rx_err(struct ath11k_base *ab, struct napi_struct *napi,
+ int budget);
+int ath11k_dp_process_rx(struct ath11k_base *ab, int mac_id,
+ struct napi_struct *napi,
+ int budget);
+int ath11k_dp_rxbufs_replenish(struct ath11k_base *ab, int mac_id,
+ struct dp_rxdma_ring *rx_ring,
+ int req_entries,
+ enum hal_rx_buf_return_buf_manager mgr);
+int ath11k_dp_htt_tlv_iter(struct ath11k_base *ab, const void *ptr, size_t len,
+ int (*iter)(struct ath11k_base *ar, u16 tag, u16 len,
+ const void *ptr, void *data),
+ void *data);
+int ath11k_dp_rx_process_mon_rings(struct ath11k_base *ab, int mac_id,
+ struct napi_struct *napi, int budget);
+int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id,
+ struct napi_struct *napi, int budget);
+int ath11k_dp_rx_mon_status_bufs_replenish(struct ath11k_base *ab, int mac_id,
+ struct dp_rxdma_ring *rx_ring,
+ int req_entries,
+ enum hal_rx_buf_return_buf_manager mgr);
+int ath11k_dp_rx_pdev_mon_detach(struct ath11k *ar);
+int ath11k_dp_rx_pdev_mon_attach(struct ath11k *ar);
+int ath11k_peer_rx_frag_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id);
+
+int ath11k_dp_rx_pktlog_start(struct ath11k_base *ab);
+int ath11k_dp_rx_pktlog_stop(struct ath11k_base *ab, bool stop_timer);
+
+int ath11k_dp_rx_crypto_mic_len(struct ath11k *ar, enum hal_encrypt_type enctype);
+
+#endif /* ATH11K_DP_RX_H */
diff --git a/drivers/net/wireless/ath/ath11k/dp_tx.c b/drivers/net/wireless/ath/ath11k/dp_tx.c
new file mode 100644
index 000000000000..562aba66582f
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/dp_tx.c
@@ -0,0 +1,1331 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#include "core.h"
+#include "dp_tx.h"
+#include "debug.h"
+#include "debugfs_sta.h"
+#include "hw.h"
+#include "peer.h"
+#include "mac.h"
+
+static enum hal_tcl_encap_type
+ath11k_dp_tx_get_encap_type(struct ath11k_vif *arvif, struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+ struct ath11k_base *ab = arvif->ar->ab;
+
+ if (test_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags))
+ return HAL_TCL_ENCAP_TYPE_RAW;
+
+ if (tx_info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP)
+ return HAL_TCL_ENCAP_TYPE_ETHERNET;
+
+ return HAL_TCL_ENCAP_TYPE_NATIVE_WIFI;
+}
+
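+/* Convert an 802.11 QoS data frame to native wifi format by stripping
+ * the QoS control field and clearing the QoS-data subtype bit, as
+ * expected for HAL_TCL_ENCAP_TYPE_NATIVE_WIFI.
+ */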
+static void ath11k_dp_tx_encap_nwifi(struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+ u8 *qos_ctl;
+
+ if (!ieee80211_is_data_qos(hdr->frame_control))
+ return;
+
+ qos_ctl = ieee80211_get_qos_ctl(hdr);
+ memmove(skb->data + IEEE80211_QOS_CTL_LEN,
+ skb->data, (void *)qos_ctl - (void *)skb->data);
+ skb_pull(skb, IEEE80211_QOS_CTL_LEN);
+
+ hdr = (void *)skb->data;
+ hdr->frame_control &= ~__cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
+}
+
+static u8 ath11k_dp_tx_get_tid(struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+ struct ath11k_skb_cb *cb = ATH11K_SKB_CB(skb);
+
+ if (cb->flags & ATH11K_SKB_HW_80211_ENCAP)
+ return skb->priority & IEEE80211_QOS_CTL_TID_MASK;
+ else if (!ieee80211_is_data_qos(hdr->frame_control))
+ return HAL_DESC_REO_NON_QOS_TID;
+ else
+ return skb->priority & IEEE80211_QOS_CTL_TID_MASK;
+}
+
+enum hal_encrypt_type ath11k_dp_tx_get_encrypt_type(u32 cipher)
+{
+ switch (cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ return HAL_ENCRYPT_TYPE_WEP_40;
+ case WLAN_CIPHER_SUITE_WEP104:
+ return HAL_ENCRYPT_TYPE_WEP_104;
+ case WLAN_CIPHER_SUITE_TKIP:
+ return HAL_ENCRYPT_TYPE_TKIP_MIC;
+ case WLAN_CIPHER_SUITE_CCMP:
+ return HAL_ENCRYPT_TYPE_CCMP_128;
+ case WLAN_CIPHER_SUITE_CCMP_256:
+ return HAL_ENCRYPT_TYPE_CCMP_256;
+ case WLAN_CIPHER_SUITE_GCMP:
+ return HAL_ENCRYPT_TYPE_GCMP_128;
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ return HAL_ENCRYPT_TYPE_AES_GCMP_256;
+ default:
+ return HAL_ENCRYPT_TYPE_OPEN;
+ }
+}
+
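+/* Transmit one MSDU: pick a TCL ring via the hw ring selector, reserve
+ * an idr slot for completion lookup, build the TCL command descriptor
+ * and enqueue it to the hardware. If the chosen ring or its idr is
+ * exhausted, retry on the next ring until all rings have been tried.
+ */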
+int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif,
+ struct ath11k_sta *arsta, struct sk_buff *skb)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_dp *dp = &ab->dp;
+ struct hal_tx_info ti = {};
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ath11k_skb_cb *skb_cb = ATH11K_SKB_CB(skb);
+ struct hal_srng *tcl_ring;
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+ struct dp_tx_ring *tx_ring;
+ void *hal_tcl_desc;
+ u8 pool_id;
+ u8 hal_ring_id;
+ int ret;
+ u32 ring_selector = 0;
+ u8 ring_map = 0;
+ bool tcl_ring_retry;
+
+ if (unlikely(test_bit(ATH11K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags)))
+ return -ESHUTDOWN;
+
+ if (unlikely(!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
+ !ieee80211_is_data(hdr->frame_control)))
+ return -EOPNOTSUPP;
+
+ pool_id = skb_get_queue_mapping(skb) & (ATH11K_HW_MAX_QUEUES - 1);
+
+ ring_selector = ab->hw_params.hw_ops->get_ring_selector(skb);
+
+tcl_ring_sel:
+ tcl_ring_retry = false;
+
+ ti.ring_id = ring_selector % ab->hw_params.max_tx_ring;
+ ti.rbm_id = ab->hw_params.hal_params->tcl2wbm_rbm_map[ti.ring_id].rbm_id;
+
+ ring_map |= BIT(ti.ring_id);
+
+ tx_ring = &dp->tx_ring[ti.ring_id];
+
+ spin_lock_bh(&tx_ring->tx_idr_lock);
+ ret = idr_alloc(&tx_ring->txbuf_idr, skb, 0,
+ DP_TX_IDR_SIZE - 1, GFP_ATOMIC);
+ spin_unlock_bh(&tx_ring->tx_idr_lock);
+
+ if (unlikely(ret < 0)) {
+ if (ring_map == (BIT(ab->hw_params.max_tx_ring) - 1) ||
+ !ab->hw_params.tcl_ring_retry) {
+ atomic_inc(&ab->soc_stats.tx_err.misc_fail);
+ return -ENOSPC;
+ }
+
+ /* Check if the next ring is available */
+ ring_selector++;
+ goto tcl_ring_sel;
+ }
+
+ ti.desc_id = FIELD_PREP(DP_TX_DESC_ID_MAC_ID, ar->pdev_idx) |
+ FIELD_PREP(DP_TX_DESC_ID_MSDU_ID, ret) |
+ FIELD_PREP(DP_TX_DESC_ID_POOL_ID, pool_id);
+ ti.encap_type = ath11k_dp_tx_get_encap_type(arvif, skb);
+
+ if (ieee80211_has_a4(hdr->frame_control) &&
+ is_multicast_ether_addr(hdr->addr3) && arsta &&
+ arsta->use_4addr_set) {
+ ti.meta_data_flags = arsta->tcl_metadata;
+ ti.flags0 |= FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_TO_FW, 1);
+ } else {
+ ti.meta_data_flags = arvif->tcl_metadata;
+ }
+
+ if (unlikely(ti.encap_type == HAL_TCL_ENCAP_TYPE_RAW)) {
+ if (skb_cb->flags & ATH11K_SKB_CIPHER_SET) {
+ ti.encrypt_type =
+ ath11k_dp_tx_get_encrypt_type(skb_cb->cipher);
+
+ if (ieee80211_has_protected(hdr->frame_control))
+ skb_put(skb, IEEE80211_CCMP_MIC_LEN);
+ } else {
+ ti.encrypt_type = HAL_ENCRYPT_TYPE_OPEN;
+ }
+ }
+
+ ti.addr_search_flags = arvif->hal_addr_search_flags;
+ ti.search_type = arvif->search_type;
+ ti.type = HAL_TCL_DESC_TYPE_BUFFER;
+ ti.pkt_offset = 0;
+ ti.lmac_id = ar->lmac_id;
+ ti.bss_ast_hash = arvif->ast_hash;
+ ti.bss_ast_idx = arvif->ast_idx;
+ ti.dscp_tid_tbl_idx = 0;
+
+ if (likely(skb->ip_summed == CHECKSUM_PARTIAL &&
+ ti.encap_type != HAL_TCL_ENCAP_TYPE_RAW)) {
+ ti.flags0 |= FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_IP4_CKSUM_EN, 1) |
+ FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_UDP4_CKSUM_EN, 1) |
+ FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_UDP6_CKSUM_EN, 1) |
+ FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_TCP4_CKSUM_EN, 1) |
+ FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_TCP6_CKSUM_EN, 1);
+ }
+
+ if (ieee80211_vif_is_mesh(arvif->vif))
+ ti.enable_mesh = true;
+
+ ti.flags1 |= FIELD_PREP(HAL_TCL_DATA_CMD_INFO2_TID_OVERWRITE, 1);
+
+ ti.tid = ath11k_dp_tx_get_tid(skb);
+
+ switch (ti.encap_type) {
+ case HAL_TCL_ENCAP_TYPE_NATIVE_WIFI:
+ ath11k_dp_tx_encap_nwifi(skb);
+ break;
+ case HAL_TCL_ENCAP_TYPE_RAW:
+ if (!test_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags)) {
+ ret = -EINVAL;
+ goto fail_remove_idr;
+ }
+ break;
+ case HAL_TCL_ENCAP_TYPE_ETHERNET:
+ /* no need to encap */
+ break;
+ case HAL_TCL_ENCAP_TYPE_802_3:
+ default:
+ /* TODO: Take care of other encap modes as well */
+ ret = -EINVAL;
+ atomic_inc(&ab->soc_stats.tx_err.misc_fail);
+ goto fail_remove_idr;
+ }
+
+ ti.paddr = dma_map_single(ab->dev, skb->data, skb->len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(ab->dev, ti.paddr))) {
+ atomic_inc(&ab->soc_stats.tx_err.misc_fail);
+ ath11k_warn(ab, "failed to DMA map data Tx buffer\n");
+ ret = -ENOMEM;
+ goto fail_remove_idr;
+ }
+
+ ti.data_len = skb->len;
+ skb_cb->paddr = ti.paddr;
+ skb_cb->vif = arvif->vif;
+ skb_cb->ar = ar;
+
+ hal_ring_id = tx_ring->tcl_data_ring.ring_id;
+ tcl_ring = &ab->hal.srng_list[hal_ring_id];
+
+ spin_lock_bh(&tcl_ring->lock);
+
+ ath11k_hal_srng_access_begin(ab, tcl_ring);
+
+ hal_tcl_desc = (void *)ath11k_hal_srng_src_get_next_entry(ab, tcl_ring);
+ if (unlikely(!hal_tcl_desc)) {
+ /* NOTE: It is highly unlikely we'll run out of tcl_ring
+ * descriptors, because each one is enqueued directly onto the hw queue.
+ */
+ ath11k_hal_srng_access_end(ab, tcl_ring);
+ ab->soc_stats.tx_err.desc_na[ti.ring_id]++;
+ spin_unlock_bh(&tcl_ring->lock);
+ ret = -ENOMEM;
+
+ /* Checking another ring for an available tcl descriptor now,
+ * after a failure due to a full tcl ring, is better than
+ * checking this ring earlier for each pkt tx.
+ * Restart ring selection if some rings have not been checked yet.
+ */
+ if (unlikely(ring_map != (BIT(ab->hw_params.max_tx_ring) - 1)) &&
+ ab->hw_params.tcl_ring_retry && ab->hw_params.max_tx_ring > 1) {
+ tcl_ring_retry = true;
+ ring_selector++;
+ }
+
+ goto fail_unmap_dma;
+ }
+
+ ath11k_hal_tx_cmd_desc_setup(ab, hal_tcl_desc +
+ sizeof(struct hal_tlv_hdr), &ti);
+
+ ath11k_hal_srng_access_end(ab, tcl_ring);
+
+ ath11k_dp_shadow_start_timer(ab, tcl_ring, &dp->tx_ring_timer[ti.ring_id]);
+
+ spin_unlock_bh(&tcl_ring->lock);
+
+ ath11k_dbg_dump(ab, ATH11K_DBG_DP_TX, NULL, "dp tx msdu: ",
+ skb->data, skb->len);
+
+ atomic_inc(&ar->dp.num_tx_pending);
+
+ return 0;
+
+fail_unmap_dma:
+ dma_unmap_single(ab->dev, ti.paddr, ti.data_len, DMA_TO_DEVICE);
+
+fail_remove_idr:
+ spin_lock_bh(&tx_ring->tx_idr_lock);
+ idr_remove(&tx_ring->txbuf_idr,
+ FIELD_GET(DP_TX_DESC_ID_MSDU_ID, ti.desc_id));
+ spin_unlock_bh(&tx_ring->tx_idr_lock);
+
+ if (tcl_ring_retry)
+ goto tcl_ring_sel;
+
+ return ret;
+}
+
+static void ath11k_dp_tx_free_txbuf(struct ath11k_base *ab, u8 mac_id,
+ int msdu_id,
+ struct dp_tx_ring *tx_ring)
+{
+ struct ath11k *ar;
+ struct sk_buff *msdu;
+ struct ath11k_skb_cb *skb_cb;
+
+ spin_lock(&tx_ring->tx_idr_lock);
+ msdu = idr_remove(&tx_ring->txbuf_idr, msdu_id);
+ spin_unlock(&tx_ring->tx_idr_lock);
+
+ if (unlikely(!msdu)) {
+ ath11k_warn(ab, "tx completion for unknown msdu_id %d\n",
+ msdu_id);
+ return;
+ }
+
+ skb_cb = ATH11K_SKB_CB(msdu);
+
+ dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
+ dev_kfree_skb_any(msdu);
+
+ ar = ab->pdevs[mac_id].ar;
+ if (atomic_dec_and_test(&ar->dp.num_tx_pending))
+ wake_up(&ar->dp.tx_empty_waitq);
+}
+
+static void
+ath11k_dp_tx_htt_tx_complete_buf(struct ath11k_base *ab,
+ struct dp_tx_ring *tx_ring,
+ struct ath11k_dp_htt_wbm_tx_status *ts)
+{
+ struct ieee80211_tx_status status = {};
+ struct sk_buff *msdu;
+ struct ieee80211_tx_info *info;
+ struct ath11k_skb_cb *skb_cb;
+ struct ath11k *ar;
+ struct ath11k_peer *peer;
+
+ spin_lock(&tx_ring->tx_idr_lock);
+ msdu = idr_remove(&tx_ring->txbuf_idr, ts->msdu_id);
+ spin_unlock(&tx_ring->tx_idr_lock);
+
+ if (unlikely(!msdu)) {
+ ath11k_warn(ab, "htt tx completion for unknown msdu_id %d\n",
+ ts->msdu_id);
+ return;
+ }
+
+ skb_cb = ATH11K_SKB_CB(msdu);
+ info = IEEE80211_SKB_CB(msdu);
+
+ ar = skb_cb->ar;
+
+ if (atomic_dec_and_test(&ar->dp.num_tx_pending))
+ wake_up(&ar->dp.tx_empty_waitq);
+
+ dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
+
+ if (!skb_cb->vif) {
+ ieee80211_free_txskb(ar->hw, msdu);
+ return;
+ }
+
+ memset(&info->status, 0, sizeof(info->status));
+
+ if (ts->acked) {
+ if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
+ info->flags |= IEEE80211_TX_STAT_ACK;
+ info->status.ack_signal = ts->ack_rssi;
+
+ if (!test_bit(WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT,
+ ab->wmi_ab.svc_map))
+ info->status.ack_signal += ATH11K_DEFAULT_NOISE_FLOOR;
+
+ info->status.flags |=
+ IEEE80211_TX_STATUS_ACK_SIGNAL_VALID;
+ } else {
+ info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
+ }
+ }
+
+ spin_lock_bh(&ab->base_lock);
+ peer = ath11k_peer_find_by_id(ab, ts->peer_id);
+ if (!peer || !peer->sta) {
+ ath11k_dbg(ab, ATH11K_DBG_DATA,
+ "dp_tx: failed to find the peer with peer_id %d\n",
+ ts->peer_id);
+ spin_unlock_bh(&ab->base_lock);
+ ieee80211_free_txskb(ar->hw, msdu);
+ return;
+ }
+ spin_unlock_bh(&ab->base_lock);
+
+ status.sta = peer->sta;
+ status.info = info;
+ status.skb = msdu;
+
+ ieee80211_tx_status_ext(ar->hw, &status);
+}
+
+static void
+ath11k_dp_tx_process_htt_tx_complete(struct ath11k_base *ab,
+ void *desc, u8 mac_id,
+ u32 msdu_id, struct dp_tx_ring *tx_ring)
+{
+ struct htt_tx_wbm_completion *status_desc;
+ struct ath11k_dp_htt_wbm_tx_status ts = {};
+ enum hal_wbm_htt_tx_comp_status wbm_status;
+
+ status_desc = desc + HTT_TX_WBM_COMP_STATUS_OFFSET;
+
+ wbm_status = FIELD_GET(HTT_TX_WBM_COMP_INFO0_STATUS,
+ status_desc->info0);
+ switch (wbm_status) {
+ case HAL_WBM_REL_HTT_TX_COMP_STATUS_OK:
+ case HAL_WBM_REL_HTT_TX_COMP_STATUS_DROP:
+ case HAL_WBM_REL_HTT_TX_COMP_STATUS_TTL:
+ ts.acked = (wbm_status == HAL_WBM_REL_HTT_TX_COMP_STATUS_OK);
+ ts.msdu_id = msdu_id;
+ ts.ack_rssi = FIELD_GET(HTT_TX_WBM_COMP_INFO1_ACK_RSSI,
+ status_desc->info1);
+
+ if (FIELD_GET(HTT_TX_WBM_COMP_INFO2_VALID, status_desc->info2))
+ ts.peer_id = FIELD_GET(HTT_TX_WBM_COMP_INFO2_SW_PEER_ID,
+ status_desc->info2);
+ else
+ ts.peer_id = HTT_INVALID_PEER_ID;
+
+ ath11k_dp_tx_htt_tx_complete_buf(ab, tx_ring, &ts);
+
+ break;
+ case HAL_WBM_REL_HTT_TX_COMP_STATUS_REINJ:
+ case HAL_WBM_REL_HTT_TX_COMP_STATUS_INSPECT:
+ ath11k_dp_tx_free_txbuf(ab, mac_id, msdu_id, tx_ring);
+ break;
+ case HAL_WBM_REL_HTT_TX_COMP_STATUS_MEC_NOTIFY:
+ /* This event is to be handled only when the driver decides to
+ * use WDS offload functionality.
+ */
+ break;
+ default:
+ ath11k_warn(ab, "Unknown htt tx status %d\n", wbm_status);
+ break;
+ }
+}
+
+static void ath11k_dp_tx_cache_peer_stats(struct ath11k *ar,
+ struct sk_buff *msdu,
+ struct hal_tx_status *ts)
+{
+ struct ath11k_per_peer_tx_stats *peer_stats = &ar->cached_stats;
+
+ if (ts->try_cnt > 1) {
+ peer_stats->retry_pkts += ts->try_cnt - 1;
+ peer_stats->retry_bytes += (ts->try_cnt - 1) * msdu->len;
+
+ if (ts->status != HAL_WBM_TQM_REL_REASON_FRAME_ACKED) {
+ peer_stats->failed_pkts += 1;
+ peer_stats->failed_bytes += msdu->len;
+ }
+ }
+}
+
+void ath11k_dp_tx_update_txcompl(struct ath11k *ar, struct hal_tx_status *ts)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_per_peer_tx_stats *peer_stats = &ar->cached_stats;
+ enum hal_tx_rate_stats_pkt_type pkt_type;
+ enum hal_tx_rate_stats_sgi sgi;
+ enum hal_tx_rate_stats_bw bw;
+ struct ath11k_peer *peer;
+ struct ath11k_sta *arsta;
+ struct ieee80211_sta *sta;
+ u16 rate, ru_tones;
+ u8 mcs, rate_idx = 0, ofdma;
+ int ret;
+
+ spin_lock_bh(&ab->base_lock);
+ peer = ath11k_peer_find_by_id(ab, ts->peer_id);
+ if (!peer || !peer->sta) {
+ ath11k_dbg(ab, ATH11K_DBG_DP_TX,
+ "failed to find the peer by id %u\n", ts->peer_id);
+ goto err_out;
+ }
+
+ sta = peer->sta;
+ arsta = ath11k_sta_to_arsta(sta);
+
+ memset(&arsta->txrate, 0, sizeof(arsta->txrate));
+ pkt_type = FIELD_GET(HAL_TX_RATE_STATS_INFO0_PKT_TYPE,
+ ts->rate_stats);
+ mcs = FIELD_GET(HAL_TX_RATE_STATS_INFO0_MCS,
+ ts->rate_stats);
+ sgi = FIELD_GET(HAL_TX_RATE_STATS_INFO0_SGI,
+ ts->rate_stats);
+ bw = FIELD_GET(HAL_TX_RATE_STATS_INFO0_BW, ts->rate_stats);
+ ru_tones = FIELD_GET(HAL_TX_RATE_STATS_INFO0_TONES_IN_RU, ts->rate_stats);
+ ofdma = FIELD_GET(HAL_TX_RATE_STATS_INFO0_OFDMA_TX, ts->rate_stats);
+
+ /* Prefer the real NSS value from arsta->last_txrate.nss; if it is
+ * invalid, fall back to the NSS value negotiated during assoc.
+ */
+ if (arsta->last_txrate.nss)
+ arsta->txrate.nss = arsta->last_txrate.nss;
+ else
+ arsta->txrate.nss = arsta->peer_nss;
+
+ if (pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11A ||
+ pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11B) {
+ ret = ath11k_mac_hw_ratecode_to_legacy_rate(mcs,
+ pkt_type,
+ &rate_idx,
+ &rate);
+ if (ret < 0)
+ goto err_out;
+ arsta->txrate.legacy = rate;
+ } else if (pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11N) {
+ if (mcs > 7) {
+ ath11k_warn(ab, "Invalid HT mcs index %d\n", mcs);
+ goto err_out;
+ }
+
+ if (arsta->txrate.nss != 0)
+ arsta->txrate.mcs = mcs + 8 * (arsta->txrate.nss - 1);
+ arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
+ if (sgi)
+ arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
+ } else if (pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11AC) {
+ if (mcs > 9) {
+ ath11k_warn(ab, "Invalid VHT mcs index %d\n", mcs);
+ goto err_out;
+ }
+
+ arsta->txrate.mcs = mcs;
+ arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
+ if (sgi)
+ arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
+ } else if (pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11AX) {
+ if (mcs > 11) {
+ ath11k_warn(ab, "Invalid HE mcs index %d\n", mcs);
+ goto err_out;
+ }
+
+ arsta->txrate.mcs = mcs;
+ arsta->txrate.flags = RATE_INFO_FLAGS_HE_MCS;
+ arsta->txrate.he_gi = ath11k_mac_he_gi_to_nl80211_he_gi(sgi);
+ }
+
+ arsta->txrate.bw = ath11k_mac_bw_to_mac80211_bw(bw);
+ if (ofdma && pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11AX) {
+ arsta->txrate.bw = RATE_INFO_BW_HE_RU;
+ arsta->txrate.he_ru_alloc =
+ ath11k_mac_he_ru_tones_to_nl80211_he_ru_alloc(ru_tones);
+ }
+
+ if (ath11k_debugfs_is_extd_tx_stats_enabled(ar))
+ ath11k_debugfs_sta_add_tx_stats(arsta, peer_stats, rate_idx);
+
+err_out:
+ spin_unlock_bh(&ab->base_lock);
+}
+
+static void ath11k_dp_tx_complete_msdu(struct ath11k *ar,
+ struct sk_buff *msdu,
+ struct hal_tx_status *ts)
+{
+ struct ieee80211_tx_status status = {};
+ struct ieee80211_rate_status status_rate = {};
+ struct ath11k_base *ab = ar->ab;
+ struct ieee80211_tx_info *info;
+ struct ath11k_skb_cb *skb_cb;
+ struct ath11k_peer *peer;
+ struct ath11k_sta *arsta;
+ struct rate_info rate;
+
+ if (WARN_ON_ONCE(ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_TQM)) {
+ /* Must not happen */
+ return;
+ }
+
+ skb_cb = ATH11K_SKB_CB(msdu);
+
+ dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
+
+ if (unlikely(!rcu_access_pointer(ab->pdevs_active[ar->pdev_idx]))) {
+ ieee80211_free_txskb(ar->hw, msdu);
+ return;
+ }
+
+ if (unlikely(!skb_cb->vif)) {
+ ieee80211_free_txskb(ar->hw, msdu);
+ return;
+ }
+
+ info = IEEE80211_SKB_CB(msdu);
+ memset(&info->status, 0, sizeof(info->status));
+
+ /* skip tx rate update from ieee80211_status */
+ info->status.rates[0].idx = -1;
+
+ if (ts->status == HAL_WBM_TQM_REL_REASON_FRAME_ACKED &&
+ !(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
+ info->flags |= IEEE80211_TX_STAT_ACK;
+ info->status.ack_signal = ts->ack_rssi;
+
+ if (!test_bit(WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT,
+ ab->wmi_ab.svc_map))
+ info->status.ack_signal += ATH11K_DEFAULT_NOISE_FLOOR;
+
+ info->status.flags |= IEEE80211_TX_STATUS_ACK_SIGNAL_VALID;
+ }
+
+ if (ts->status == HAL_WBM_TQM_REL_REASON_CMD_REMOVE_TX &&
+ (info->flags & IEEE80211_TX_CTL_NO_ACK))
+ info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
+
+ if (unlikely(ath11k_debugfs_is_extd_tx_stats_enabled(ar)) ||
+ ab->hw_params.single_pdev_only) {
+ if (ts->flags & HAL_TX_STATUS_FLAGS_FIRST_MSDU) {
+ if (ar->last_ppdu_id == 0) {
+ ar->last_ppdu_id = ts->ppdu_id;
+ } else if (ar->last_ppdu_id == ts->ppdu_id ||
+ ar->cached_ppdu_id == ar->last_ppdu_id) {
+ ar->cached_ppdu_id = ar->last_ppdu_id;
+ ar->cached_stats.is_ampdu = true;
+ ath11k_dp_tx_update_txcompl(ar, ts);
+ memset(&ar->cached_stats, 0,
+ sizeof(struct ath11k_per_peer_tx_stats));
+ } else {
+ ar->cached_stats.is_ampdu = false;
+ ath11k_dp_tx_update_txcompl(ar, ts);
+ memset(&ar->cached_stats, 0,
+ sizeof(struct ath11k_per_peer_tx_stats));
+ }
+ ar->last_ppdu_id = ts->ppdu_id;
+ }
+
+ ath11k_dp_tx_cache_peer_stats(ar, msdu, ts);
+ }
+
+ spin_lock_bh(&ab->base_lock);
+ peer = ath11k_peer_find_by_id(ab, ts->peer_id);
+ if (!peer || !peer->sta) {
+ ath11k_dbg(ab, ATH11K_DBG_DATA,
+ "dp_tx: failed to find the peer with peer_id %d\n",
+ ts->peer_id);
+ spin_unlock_bh(&ab->base_lock);
+ ieee80211_free_txskb(ar->hw, msdu);
+ return;
+ }
+ arsta = ath11k_sta_to_arsta(peer->sta);
+ status.sta = peer->sta;
+ status.skb = msdu;
+ status.info = info;
+ rate = arsta->last_txrate;
+
+ status_rate.rate_idx = rate;
+ status_rate.try_count = 1;
+
+ status.rates = &status_rate;
+ status.n_rates = 1;
+
+ spin_unlock_bh(&ab->base_lock);
+
+ ieee80211_tx_status_ext(ar->hw, &status);
+}
+
+static inline void ath11k_dp_tx_status_parse(struct ath11k_base *ab,
+ struct hal_wbm_release_ring *desc,
+ struct hal_tx_status *ts)
+{
+ ts->buf_rel_source =
+ FIELD_GET(HAL_WBM_RELEASE_INFO0_REL_SRC_MODULE, desc->info0);
+ if (unlikely(ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_FW &&
+ ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_TQM))
+ return;
+
+ if (unlikely(ts->buf_rel_source == HAL_WBM_REL_SRC_MODULE_FW))
+ return;
+
+ ts->status = FIELD_GET(HAL_WBM_RELEASE_INFO0_TQM_RELEASE_REASON,
+ desc->info0);
+ ts->ppdu_id = FIELD_GET(HAL_WBM_RELEASE_INFO1_TQM_STATUS_NUMBER,
+ desc->info1);
+ ts->try_cnt = FIELD_GET(HAL_WBM_RELEASE_INFO1_TRANSMIT_COUNT,
+ desc->info1);
+ ts->ack_rssi = FIELD_GET(HAL_WBM_RELEASE_INFO2_ACK_FRAME_RSSI,
+ desc->info2);
+ if (desc->info2 & HAL_WBM_RELEASE_INFO2_FIRST_MSDU)
+ ts->flags |= HAL_TX_STATUS_FLAGS_FIRST_MSDU;
+ ts->peer_id = FIELD_GET(HAL_WBM_RELEASE_INFO3_PEER_ID, desc->info3);
+ ts->tid = FIELD_GET(HAL_WBM_RELEASE_INFO3_TID, desc->info3);
+ if (desc->rate_stats.info0 & HAL_TX_RATE_STATS_INFO0_VALID)
+ ts->rate_stats = desc->rate_stats.info0;
+ else
+ ts->rate_stats = 0;
+}
+
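+/* Tx completion runs in two phases: WBM release descriptors are first
+ * copied into the local tx_status FIFO under the srng lock, and then
+ * each entry is parsed and the corresponding msdu is completed outside
+ * that lock.
+ */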
+void ath11k_dp_tx_completion_handler(struct ath11k_base *ab, int ring_id)
+{
+ struct ath11k *ar;
+ struct ath11k_dp *dp = &ab->dp;
+ int hal_ring_id = dp->tx_ring[ring_id].tcl_comp_ring.ring_id;
+ struct hal_srng *status_ring = &ab->hal.srng_list[hal_ring_id];
+ struct sk_buff *msdu;
+ struct hal_tx_status ts = {};
+ struct dp_tx_ring *tx_ring = &dp->tx_ring[ring_id];
+ u32 *desc;
+ u32 msdu_id;
+ u8 mac_id;
+
+ spin_lock_bh(&status_ring->lock);
+
+ ath11k_hal_srng_access_begin(ab, status_ring);
+
+ while ((ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_head) !=
+ tx_ring->tx_status_tail) &&
+ (desc = ath11k_hal_srng_dst_get_next_entry(ab, status_ring))) {
+ memcpy(&tx_ring->tx_status[tx_ring->tx_status_head],
+ desc, sizeof(struct hal_wbm_release_ring));
+ tx_ring->tx_status_head =
+ ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_head);
+ }
+
+ if (unlikely((ath11k_hal_srng_dst_peek(ab, status_ring) != NULL) &&
+ (ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_head) ==
+ tx_ring->tx_status_tail))) {
+ /* TODO: Process pending tx_status messages when kfifo_is_full() */
+ ath11k_warn(ab, "Unable to process some of the tx_status ring desc because status_fifo is full\n");
+ }
+
+ ath11k_hal_srng_access_end(ab, status_ring);
+
+ spin_unlock_bh(&status_ring->lock);
+
+ while (ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_tail) != tx_ring->tx_status_head) {
+ struct hal_wbm_release_ring *tx_status;
+ u32 desc_id;
+
+ tx_ring->tx_status_tail =
+ ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_tail);
+ tx_status = &tx_ring->tx_status[tx_ring->tx_status_tail];
+ ath11k_dp_tx_status_parse(ab, tx_status, &ts);
+
+ desc_id = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
+ tx_status->buf_addr_info.info1);
+ mac_id = FIELD_GET(DP_TX_DESC_ID_MAC_ID, desc_id);
+ msdu_id = FIELD_GET(DP_TX_DESC_ID_MSDU_ID, desc_id);
+
+ if (unlikely(ts.buf_rel_source == HAL_WBM_REL_SRC_MODULE_FW)) {
+ ath11k_dp_tx_process_htt_tx_complete(ab,
+ (void *)tx_status,
+ mac_id, msdu_id,
+ tx_ring);
+ continue;
+ }
+
+ spin_lock(&tx_ring->tx_idr_lock);
+ msdu = idr_remove(&tx_ring->txbuf_idr, msdu_id);
+ if (unlikely(!msdu)) {
+ ath11k_warn(ab, "tx completion for unknown msdu_id %d\n",
+ msdu_id);
+ spin_unlock(&tx_ring->tx_idr_lock);
+ continue;
+ }
+
+ spin_unlock(&tx_ring->tx_idr_lock);
+
+ ar = ab->pdevs[mac_id].ar;
+
+ if (atomic_dec_and_test(&ar->dp.num_tx_pending))
+ wake_up(&ar->dp.tx_empty_waitq);
+
+ ath11k_dp_tx_complete_msdu(ar, msdu, &ts);
+ }
+}
+
+int ath11k_dp_tx_send_reo_cmd(struct ath11k_base *ab, struct dp_rx_tid *rx_tid,
+ enum hal_reo_cmd_type type,
+ struct ath11k_hal_reo_cmd *cmd,
+ void (*cb)(struct ath11k_dp *, void *,
+ enum hal_reo_cmd_status))
+{
+ struct ath11k_dp *dp = &ab->dp;
+ struct dp_reo_cmd *dp_cmd;
+ struct hal_srng *cmd_ring;
+ int cmd_num;
+
+ if (test_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags))
+ return -ESHUTDOWN;
+
+ cmd_ring = &ab->hal.srng_list[dp->reo_cmd_ring.ring_id];
+ cmd_num = ath11k_hal_reo_cmd_send(ab, cmd_ring, type, cmd);
+
+ /* cmd_num should start from 1; on failure, return the error code */
+ if (cmd_num < 0)
+ return cmd_num;
+
+ /* reo cmd ring descriptors have cmd_num starting from 1 */
+ if (cmd_num == 0)
+ return -EINVAL;
+
+ if (!cb)
+ return 0;
+
+ /* Can this be optimized so that we keep the pending command list only
+ * for tid delete command to free up the resource on the command status
+ * indication?
+ */
+ dp_cmd = kzalloc(sizeof(*dp_cmd), GFP_ATOMIC);
+
+ if (!dp_cmd)
+ return -ENOMEM;
+
+ memcpy(&dp_cmd->data, rx_tid, sizeof(struct dp_rx_tid));
+ dp_cmd->cmd_num = cmd_num;
+ dp_cmd->handler = cb;
+
+ spin_lock_bh(&dp->reo_cmd_lock);
+ list_add_tail(&dp_cmd->list, &dp->reo_cmd_list);
+ spin_unlock_bh(&dp->reo_cmd_lock);
+
+ return 0;
+}
+
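+/* Map a HAL ring type (and, for rxdma buf rings, the ring id) to the
+ * HTT ring id/type pair used in the HTT ring setup messages.
+ */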
+static int
+ath11k_dp_tx_get_ring_id_type(struct ath11k_base *ab,
+ int mac_id, u32 ring_id,
+ enum hal_ring_type ring_type,
+ enum htt_srng_ring_type *htt_ring_type,
+ enum htt_srng_ring_id *htt_ring_id)
+{
+ int lmac_ring_id_offset = 0;
+ int ret = 0;
+
+ switch (ring_type) {
+ case HAL_RXDMA_BUF:
+ lmac_ring_id_offset = mac_id * HAL_SRNG_RINGS_PER_LMAC;
+
+ /* For QCA6390, the host provides rx buffers to the fw, and the
+ * fw fills the rxbuf ring for each rxdma
+ */
+ if (!ab->hw_params.rx_mac_buf_ring) {
+ if (!(ring_id == (HAL_SRNG_RING_ID_WMAC1_SW2RXDMA0_BUF +
+ lmac_ring_id_offset) ||
+ ring_id == (HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_BUF +
+ lmac_ring_id_offset))) {
+ ret = -EINVAL;
+ }
+ *htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
+ *htt_ring_type = HTT_SW_TO_HW_RING;
+ } else {
+ if (ring_id == HAL_SRNG_RING_ID_WMAC1_SW2RXDMA0_BUF) {
+ *htt_ring_id = HTT_HOST1_TO_FW_RXBUF_RING;
+ *htt_ring_type = HTT_SW_TO_SW_RING;
+ } else {
+ *htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
+ *htt_ring_type = HTT_SW_TO_HW_RING;
+ }
+ }
+ break;
+ case HAL_RXDMA_DST:
+ *htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING;
+ *htt_ring_type = HTT_HW_TO_SW_RING;
+ break;
+ case HAL_RXDMA_MONITOR_BUF:
+ *htt_ring_id = HTT_RXDMA_MONITOR_BUF_RING;
+ *htt_ring_type = HTT_SW_TO_HW_RING;
+ break;
+ case HAL_RXDMA_MONITOR_STATUS:
+ *htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING;
+ *htt_ring_type = HTT_SW_TO_HW_RING;
+ break;
+ case HAL_RXDMA_MONITOR_DST:
+ *htt_ring_id = HTT_RXDMA_MONITOR_DEST_RING;
+ *htt_ring_type = HTT_HW_TO_SW_RING;
+ break;
+ case HAL_RXDMA_MONITOR_DESC:
+ *htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
+ *htt_ring_type = HTT_SW_TO_HW_RING;
+ break;
+ default:
+ ath11k_warn(ab, "Unsupported ring type in DP :%d\n", ring_type);
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
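+/* Send an HTT SRING_SETUP message describing the host SRNG (base
+ * address, entry size, head/tail pointer addresses, MSI and interrupt
+ * thresholds) so that the target can attach to the ring.
+ */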
+int ath11k_dp_tx_htt_srng_setup(struct ath11k_base *ab, u32 ring_id,
+ int mac_id, enum hal_ring_type ring_type)
+{
+ struct htt_srng_setup_cmd *cmd;
+ struct hal_srng *srng = &ab->hal.srng_list[ring_id];
+ struct hal_srng_params params;
+ struct sk_buff *skb;
+ u32 ring_entry_sz;
+ int len = sizeof(*cmd);
+ dma_addr_t hp_addr, tp_addr;
+ enum htt_srng_ring_type htt_ring_type;
+ enum htt_srng_ring_id htt_ring_id;
+ int ret;
+
+ skb = ath11k_htc_alloc_skb(ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ memset(&params, 0, sizeof(params));
+ ath11k_hal_srng_get_params(ab, srng, &params);
+
+ hp_addr = ath11k_hal_srng_get_hp_addr(ab, srng);
+ tp_addr = ath11k_hal_srng_get_tp_addr(ab, srng);
+
+ ret = ath11k_dp_tx_get_ring_id_type(ab, mac_id, ring_id,
+ ring_type, &htt_ring_type,
+ &htt_ring_id);
+ if (ret)
+ goto err_free;
+
+ skb_put(skb, len);
+ cmd = (struct htt_srng_setup_cmd *)skb->data;
+ cmd->info0 = FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_MSG_TYPE,
+ HTT_H2T_MSG_TYPE_SRING_SETUP);
+ if (htt_ring_type == HTT_SW_TO_HW_RING ||
+ htt_ring_type == HTT_HW_TO_SW_RING)
+ cmd->info0 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_PDEV_ID,
+ DP_SW2HW_MACID(mac_id));
+ else
+ cmd->info0 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_PDEV_ID,
+ mac_id);
+ cmd->info0 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_RING_TYPE,
+ htt_ring_type);
+ cmd->info0 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_RING_ID, htt_ring_id);
+
+ cmd->ring_base_addr_lo = params.ring_base_paddr &
+ HAL_ADDR_LSB_REG_MASK;
+
+ cmd->ring_base_addr_hi = (u64)params.ring_base_paddr >>
+ HAL_ADDR_MSB_REG_SHIFT;
+
+ ret = ath11k_hal_srng_get_entrysize(ab, ring_type);
+ if (ret < 0)
+ goto err_free;
+
+ ring_entry_sz = ret;
+
+ ring_entry_sz >>= 2;
+ cmd->info1 = FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO1_RING_ENTRY_SIZE,
+ ring_entry_sz);
+ cmd->info1 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO1_RING_SIZE,
+ params.num_entries * ring_entry_sz);
+ cmd->info1 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_MSI_SWAP,
+ !!(params.flags & HAL_SRNG_FLAGS_MSI_SWAP));
+ cmd->info1 |= FIELD_PREP(
+ HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_TLV_SWAP,
+ !!(params.flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP));
+ cmd->info1 |= FIELD_PREP(
+ HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_HOST_FW_SWAP,
+ !!(params.flags & HAL_SRNG_FLAGS_RING_PTR_SWAP));
+ if (htt_ring_type == HTT_SW_TO_HW_RING)
+ cmd->info1 |= HTT_SRNG_SETUP_CMD_INFO1_RING_LOOP_CNT_DIS;
+
+ cmd->ring_head_off32_remote_addr_lo = hp_addr & HAL_ADDR_LSB_REG_MASK;
+ cmd->ring_head_off32_remote_addr_hi = (u64)hp_addr >>
+ HAL_ADDR_MSB_REG_SHIFT;
+
+ cmd->ring_tail_off32_remote_addr_lo = tp_addr & HAL_ADDR_LSB_REG_MASK;
+ cmd->ring_tail_off32_remote_addr_hi = (u64)tp_addr >>
+ HAL_ADDR_MSB_REG_SHIFT;
+
+ cmd->ring_msi_addr_lo = lower_32_bits(params.msi_addr);
+ cmd->ring_msi_addr_hi = upper_32_bits(params.msi_addr);
+ cmd->msi_data = params.msi_data;
+
+ cmd->intr_info = FIELD_PREP(
+ HTT_SRNG_SETUP_CMD_INTR_INFO_BATCH_COUNTER_THRESH,
+ params.intr_batch_cntr_thres_entries * ring_entry_sz);
+ cmd->intr_info |= FIELD_PREP(
+ HTT_SRNG_SETUP_CMD_INTR_INFO_INTR_TIMER_THRESH,
+ params.intr_timer_thres_us >> 3);
+
+ cmd->info2 = 0;
+ if (params.flags & HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN) {
+ cmd->info2 = FIELD_PREP(
+ HTT_SRNG_SETUP_CMD_INFO2_INTR_LOW_THRESH,
+ params.low_threshold);
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_DP_TX,
+ "htt srng setup msi_addr_lo 0x%x msi_addr_hi 0x%x msi_data 0x%x ring_id %d ring_type %d intr_info 0x%x flags 0x%x\n",
+ cmd->ring_msi_addr_lo, cmd->ring_msi_addr_hi,
+ cmd->msi_data, ring_id, ring_type, cmd->intr_info, cmd->info2);
+
+ ret = ath11k_htc_send(&ab->htc, ab->dp.eid, skb);
+ if (ret)
+ goto err_free;
+
+ return 0;
+
+err_free:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
+#define HTT_TARGET_VERSION_TIMEOUT_HZ (3 * HZ)
+
+int ath11k_dp_tx_htt_h2t_ver_req_msg(struct ath11k_base *ab)
+{
+ struct ath11k_dp *dp = &ab->dp;
+ struct sk_buff *skb;
+ struct htt_ver_req_cmd *cmd;
+ int len = sizeof(*cmd);
+ int ret;
+
+ init_completion(&dp->htt_tgt_version_received);
+
+ skb = ath11k_htc_alloc_skb(ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put(skb, len);
+ cmd = (struct htt_ver_req_cmd *)skb->data;
+ cmd->ver_reg_info = FIELD_PREP(HTT_VER_REQ_INFO_MSG_ID,
+ HTT_H2T_MSG_TYPE_VERSION_REQ);
+
+ ret = ath11k_htc_send(&ab->htc, dp->eid, skb);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+
+ ret = wait_for_completion_timeout(&dp->htt_tgt_version_received,
+ HTT_TARGET_VERSION_TIMEOUT_HZ);
+ if (ret == 0) {
+ ath11k_warn(ab, "htt target version request timed out\n");
+ return -ETIMEDOUT;
+ }
+
+ if (dp->htt_tgt_ver_major != HTT_TARGET_VERSION_MAJOR) {
+ ath11k_err(ab, "unsupported htt major version %d supported version is %d\n",
+ dp->htt_tgt_ver_major, HTT_TARGET_VERSION_MAJOR);
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+int ath11k_dp_tx_htt_h2t_ppdu_stats_req(struct ath11k *ar, u32 mask)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_dp *dp = &ab->dp;
+ struct sk_buff *skb;
+ struct htt_ppdu_stats_cfg_cmd *cmd;
+ int len = sizeof(*cmd);
+ u8 pdev_mask;
+ int ret;
+ int i;
+
+ for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
+ skb = ath11k_htc_alloc_skb(ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put(skb, len);
+ cmd = (struct htt_ppdu_stats_cfg_cmd *)skb->data;
+ cmd->msg = FIELD_PREP(HTT_PPDU_STATS_CFG_MSG_TYPE,
+ HTT_H2T_MSG_TYPE_PPDU_STATS_CFG);
+
+ pdev_mask = 1 << (ar->pdev_idx + i);
+ cmd->msg |= FIELD_PREP(HTT_PPDU_STATS_CFG_PDEV_ID, pdev_mask);
+ cmd->msg |= FIELD_PREP(HTT_PPDU_STATS_CFG_TLV_TYPE_BITMASK, mask);
+
+ ret = ath11k_htc_send(&ab->htc, dp->eid, skb);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+int ath11k_dp_tx_htt_rx_filter_setup(struct ath11k_base *ab, u32 ring_id,
+ int mac_id, enum hal_ring_type ring_type,
+ int rx_buf_size,
+ struct htt_rx_ring_tlv_filter *tlv_filter)
+{
+ struct htt_rx_ring_selection_cfg_cmd *cmd;
+ struct hal_srng *srng = &ab->hal.srng_list[ring_id];
+ struct hal_srng_params params;
+ struct sk_buff *skb;
+ int len = sizeof(*cmd);
+ enum htt_srng_ring_type htt_ring_type;
+ enum htt_srng_ring_id htt_ring_id;
+ int ret;
+
+ skb = ath11k_htc_alloc_skb(ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ memset(&params, 0, sizeof(params));
+ ath11k_hal_srng_get_params(ab, srng, &params);
+
+ ret = ath11k_dp_tx_get_ring_id_type(ab, mac_id, ring_id,
+ ring_type, &htt_ring_type,
+ &htt_ring_id);
+ if (ret)
+ goto err_free;
+
+ skb_put(skb, len);
+ cmd = (struct htt_rx_ring_selection_cfg_cmd *)skb->data;
+ cmd->info0 = FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_MSG_TYPE,
+ HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG);
+ if (htt_ring_type == HTT_SW_TO_HW_RING ||
+ htt_ring_type == HTT_HW_TO_SW_RING)
+ cmd->info0 |=
+ FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID,
+ DP_SW2HW_MACID(mac_id));
+ else
+ cmd->info0 |=
+ FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID,
+ mac_id);
+ cmd->info0 |= FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_RING_ID,
+ htt_ring_id);
+ cmd->info0 |= FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_SS,
+ !!(params.flags & HAL_SRNG_FLAGS_MSI_SWAP));
+ cmd->info0 |= FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PS,
+ !!(params.flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP));
+
+ cmd->info1 = FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO1_BUF_SIZE,
+ rx_buf_size);
+ cmd->pkt_type_en_flags0 = tlv_filter->pkt_filter_flags0;
+ cmd->pkt_type_en_flags1 = tlv_filter->pkt_filter_flags1;
+ cmd->pkt_type_en_flags2 = tlv_filter->pkt_filter_flags2;
+ cmd->pkt_type_en_flags3 = tlv_filter->pkt_filter_flags3;
+ cmd->rx_filter_tlv = tlv_filter->rx_filter;
+
+ ret = ath11k_htc_send(&ab->htc, ab->dp.eid, skb);
+ if (ret)
+ goto err_free;
+
+ return 0;
+
+err_free:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
+int
+ath11k_dp_tx_htt_h2t_ext_stats_req(struct ath11k *ar, u8 type,
+ struct htt_ext_stats_cfg_params *cfg_params,
+ u64 cookie)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_dp *dp = &ab->dp;
+ struct sk_buff *skb;
+ struct htt_ext_stats_cfg_cmd *cmd;
+ u32 pdev_id;
+ int len = sizeof(*cmd);
+ int ret;
+
+ skb = ath11k_htc_alloc_skb(ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put(skb, len);
+
+ cmd = (struct htt_ext_stats_cfg_cmd *)skb->data;
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_EXT_STATS_CFG;
+
+ if (ab->hw_params.single_pdev_only)
+ pdev_id = ath11k_mac_get_target_pdev_id(ar);
+ else
+ pdev_id = ar->pdev->pdev_id;
+
+ cmd->hdr.pdev_mask = 1 << pdev_id;
+
+ cmd->hdr.stats_type = type;
+ cmd->cfg_param0 = cfg_params->cfg0;
+ cmd->cfg_param1 = cfg_params->cfg1;
+ cmd->cfg_param2 = cfg_params->cfg2;
+ cmd->cfg_param3 = cfg_params->cfg3;
+ cmd->cookie_lsb = lower_32_bits(cookie);
+ cmd->cookie_msb = upper_32_bits(cookie);
+
+ ret = ath11k_htc_send(&ab->htc, dp->eid, skb);
+ if (ret) {
+ ath11k_warn(ab, "failed to send htt type stats request: %d",
+ ret);
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+
+ return 0;
+}
+
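+/* Configure (or reset) the rx monitor rings: program the monitor buffer
+ * ring filters, then the status ring filters for each rxdma, falling
+ * back to the mac buf rings and the reap timer when rxdma1 is absent.
+ */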
+int ath11k_dp_tx_htt_monitor_mode_ring_config(struct ath11k *ar, bool reset)
+{
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ struct ath11k_base *ab = ar->ab;
+ struct htt_rx_ring_tlv_filter tlv_filter = {};
+ int ret = 0, ring_id = 0, i;
+
+ if (ab->hw_params.full_monitor_mode) {
+ ret = ath11k_dp_tx_htt_rx_full_mon_setup(ab,
+ dp->mac_id, !reset);
+ if (ret < 0) {
+ ath11k_err(ab, "failed to setup full monitor %d\n", ret);
+ return ret;
+ }
+ }
+
+ ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id;
+
+ if (!reset) {
+ tlv_filter.rx_filter = HTT_RX_MON_FILTER_TLV_FLAGS_MON_BUF_RING;
+ tlv_filter.pkt_filter_flags0 =
+ HTT_RX_MON_FP_MGMT_FILTER_FLAGS0 |
+ HTT_RX_MON_MO_MGMT_FILTER_FLAGS0;
+ tlv_filter.pkt_filter_flags1 =
+ HTT_RX_MON_FP_MGMT_FILTER_FLAGS1 |
+ HTT_RX_MON_MO_MGMT_FILTER_FLAGS1;
+ tlv_filter.pkt_filter_flags2 =
+ HTT_RX_MON_FP_CTRL_FILTER_FLASG2 |
+ HTT_RX_MON_MO_CTRL_FILTER_FLASG2;
+ tlv_filter.pkt_filter_flags3 =
+ HTT_RX_MON_FP_CTRL_FILTER_FLASG3 |
+ HTT_RX_MON_MO_CTRL_FILTER_FLASG3 |
+ HTT_RX_MON_FP_DATA_FILTER_FLASG3 |
+ HTT_RX_MON_MO_DATA_FILTER_FLASG3;
+ }
+
+ if (ab->hw_params.rxdma1_enable) {
+ ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, dp->mac_id,
+ HAL_RXDMA_MONITOR_BUF,
+ DP_RXDMA_REFILL_RING_SIZE,
+ &tlv_filter);
+ } else if (!reset) {
+ /* set in monitor mode only */
+ for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
+ ring_id = dp->rx_mac_buf_ring[i].ring_id;
+ ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id,
+ dp->mac_id + i,
+ HAL_RXDMA_BUF,
+ 1024,
+ &tlv_filter);
+ }
+ }
+
+ if (ret)
+ return ret;
+
+ for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
+ ring_id = dp->rx_mon_status_refill_ring[i].refill_buf_ring.ring_id;
+ if (!reset) {
+ tlv_filter.rx_filter =
+ HTT_RX_MON_FILTER_TLV_FLAGS_MON_STATUS_RING;
+ } else {
+ tlv_filter = ath11k_mac_mon_status_filter_default;
+
+ if (ath11k_debugfs_is_extd_rx_stats_enabled(ar))
+ tlv_filter.rx_filter = ath11k_debugfs_rx_filter(ar);
+ }
+
+ ret = ath11k_dp_tx_htt_rx_filter_setup(ab, ring_id,
+ dp->mac_id + i,
+ HAL_RXDMA_MONITOR_STATUS,
+ DP_RXDMA_REFILL_RING_SIZE,
+ &tlv_filter);
+ }
+
+ if (!ar->ab->hw_params.rxdma1_enable)
+ mod_timer(&ar->ab->mon_reap_timer, jiffies +
+ msecs_to_jiffies(ATH11K_MON_TIMER_INTERVAL));
+
+ return ret;
+}
+
+int ath11k_dp_tx_htt_rx_full_mon_setup(struct ath11k_base *ab, int mac_id,
+ bool config)
+{
+ struct htt_rx_full_monitor_mode_cfg_cmd *cmd;
+ struct sk_buff *skb;
+ int ret, len = sizeof(*cmd);
+
+ skb = ath11k_htc_alloc_skb(ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put(skb, len);
+ cmd = (struct htt_rx_full_monitor_mode_cfg_cmd *)skb->data;
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->info0 = FIELD_PREP(HTT_RX_FULL_MON_MODE_CFG_CMD_INFO0_MSG_TYPE,
+ HTT_H2T_MSG_TYPE_RX_FULL_MONITOR_MODE);
+
+ cmd->info0 |= FIELD_PREP(HTT_RX_FULL_MON_MODE_CFG_CMD_INFO0_PDEV_ID, mac_id);
+
+ cmd->cfg = HTT_RX_FULL_MON_MODE_CFG_CMD_CFG_ENABLE |
+ FIELD_PREP(HTT_RX_FULL_MON_MODE_CFG_CMD_CFG_RELEASE_RING,
+ HTT_RX_MON_RING_SW);
+ if (config) {
+ cmd->cfg |= HTT_RX_FULL_MON_MODE_CFG_CMD_CFG_ZERO_MPDUS_END |
+ HTT_RX_FULL_MON_MODE_CFG_CMD_CFG_NON_ZERO_MPDUS_END;
+ }
+
+ ret = ath11k_htc_send(&ab->htc, ab->dp.eid, skb);
+ if (ret)
+ goto err_free;
+
+ return 0;
+
+err_free:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
diff --git a/drivers/net/wireless/ath/ath11k/dp_tx.h b/drivers/net/wireless/ath/ath11k/dp_tx.h
new file mode 100644
index 000000000000..795fe3b8fa0d
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/dp_tx.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021, 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef ATH11K_DP_TX_H
+#define ATH11K_DP_TX_H
+
+#include "core.h"
+#include "hal_tx.h"
+
+struct ath11k_dp_htt_wbm_tx_status {
+ u32 msdu_id;
+ bool acked;
+ s8 ack_rssi;
+ u16 peer_id;
+};
+
+void ath11k_dp_tx_update_txcompl(struct ath11k *ar, struct hal_tx_status *ts);
+int ath11k_dp_tx_htt_h2t_ver_req_msg(struct ath11k_base *ab);
+int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif,
+ struct ath11k_sta *arsta, struct sk_buff *skb);
+void ath11k_dp_tx_completion_handler(struct ath11k_base *ab, int ring_id);
+int ath11k_dp_tx_send_reo_cmd(struct ath11k_base *ab, struct dp_rx_tid *rx_tid,
+ enum hal_reo_cmd_type type,
+ struct ath11k_hal_reo_cmd *cmd,
+ void (*func)(struct ath11k_dp *, void *,
+ enum hal_reo_cmd_status));
+
+int ath11k_dp_tx_htt_h2t_ppdu_stats_req(struct ath11k *ar, u32 mask);
+int
+ath11k_dp_tx_htt_h2t_ext_stats_req(struct ath11k *ar, u8 type,
+ struct htt_ext_stats_cfg_params *cfg_params,
+ u64 cookie);
+int ath11k_dp_tx_htt_monitor_mode_ring_config(struct ath11k *ar, bool reset);
+
+int ath11k_dp_tx_htt_rx_filter_setup(struct ath11k_base *ab, u32 ring_id,
+ int mac_id, enum hal_ring_type ring_type,
+ int rx_buf_size,
+ struct htt_rx_ring_tlv_filter *tlv_filter);
+
+int ath11k_dp_tx_htt_rx_full_mon_setup(struct ath11k_base *ab, int mac_id,
+ bool config);
+#endif
diff --git a/drivers/net/wireless/ath/ath11k/fw.c b/drivers/net/wireless/ath/ath11k/fw.c
new file mode 100644
index 000000000000..07d775a7b528
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/fw.c
@@ -0,0 +1,171 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#include <linux/export.h>
+#include "core.h"
+
+#include "debug.h"
+
+static int ath11k_fw_request_firmware_api_n(struct ath11k_base *ab,
+ const char *name)
+{
+ size_t magic_len, len, ie_len;
+ int ie_id, i, index, bit, ret;
+ struct ath11k_fw_ie *hdr;
+ const u8 *data;
+ __le32 *timestamp;
+
+ ab->fw.fw = ath11k_core_firmware_request(ab, name);
+ if (IS_ERR(ab->fw.fw)) {
+ ret = PTR_ERR(ab->fw.fw);
+ ath11k_dbg(ab, ATH11K_DBG_BOOT, "failed to load %s: %d\n", name, ret);
+ ab->fw.fw = NULL;
+ return ret;
+ }
+
+ data = ab->fw.fw->data;
+ len = ab->fw.fw->size;
+
+ /* magic also includes the null byte, check that as well */
+ magic_len = strlen(ATH11K_FIRMWARE_MAGIC) + 1;
+
+ if (len < magic_len) {
+ ath11k_err(ab, "firmware image too small to contain magic: %zu\n",
+ len);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ if (memcmp(data, ATH11K_FIRMWARE_MAGIC, magic_len) != 0) {
+ ath11k_err(ab, "Invalid firmware magic\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /* jump over the padding */
+ magic_len = ALIGN(magic_len, 4);
+
+ /* make sure there's space for padding */
+ if (magic_len > len) {
+ ath11k_err(ab, "No space for padding after magic\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ len -= magic_len;
+ data += magic_len;
+
+ /* loop elements */
+ while (len > sizeof(struct ath11k_fw_ie)) {
+ hdr = (struct ath11k_fw_ie *)data;
+
+ ie_id = le32_to_cpu(hdr->id);
+ ie_len = le32_to_cpu(hdr->len);
+
+ len -= sizeof(*hdr);
+ data += sizeof(*hdr);
+
+ if (len < ie_len) {
+ ath11k_err(ab, "Invalid length for FW IE %d (%zu < %zu)\n",
+ ie_id, len, ie_len);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ switch (ie_id) {
+ case ATH11K_FW_IE_TIMESTAMP:
+ if (ie_len != sizeof(u32))
+ break;
+
+ timestamp = (__le32 *)data;
+
+ ath11k_dbg(ab, ATH11K_DBG_BOOT, "found fw timestamp %d\n",
+ le32_to_cpup(timestamp));
+ break;
+ case ATH11K_FW_IE_FEATURES:
+ ath11k_dbg(ab, ATH11K_DBG_BOOT,
+ "found firmware features ie (%zd B)\n",
+ ie_len);
+
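+			/* the feature bitmap is packed LSB first,
+			 * eight flags per byte
+			 */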
+ for (i = 0; i < ATH11K_FW_FEATURE_COUNT; i++) {
+ index = i / 8;
+ bit = i % 8;
+
+ if (index == ie_len)
+ break;
+
+ if (data[index] & (1 << bit))
+ __set_bit(i, ab->fw.fw_features);
+ }
+
+ ath11k_dbg_dump(ab, ATH11K_DBG_BOOT, "features", "",
+ ab->fw.fw_features,
+ sizeof(ab->fw.fw_features));
+ break;
+ case ATH11K_FW_IE_AMSS_IMAGE:
+ ath11k_dbg(ab, ATH11K_DBG_BOOT,
+ "found fw image ie (%zd B)\n",
+ ie_len);
+
+ ab->fw.amss_data = data;
+ ab->fw.amss_len = ie_len;
+ break;
+ case ATH11K_FW_IE_M3_IMAGE:
+ ath11k_dbg(ab, ATH11K_DBG_BOOT,
+ "found m3 image ie (%zd B)\n",
+ ie_len);
+
+ ab->fw.m3_data = data;
+ ab->fw.m3_len = ie_len;
+ break;
+ default:
+ ath11k_warn(ab, "Unknown FW IE: %u\n", ie_id);
+ break;
+ }
+
+ /* jump over the padding */
+ ie_len = ALIGN(ie_len, 4);
+
+ /* make sure there's space for padding */
+ if (ie_len > len)
+ break;
+
+ len -= ie_len;
+ data += ie_len;
+ }
+
+ return 0;
+
+err:
+ release_firmware(ab->fw.fw);
+ ab->fw.fw = NULL;
+ return ret;
+}
+
+int ath11k_fw_pre_init(struct ath11k_base *ab)
+{
+ int ret;
+
+ ret = ath11k_fw_request_firmware_api_n(ab, ATH11K_FW_API2_FILE);
+ if (ret == 0) {
+ ab->fw.api_version = 2;
+ goto out;
+ }
+
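+	/* firmware-2.bin is optional; fall back to API 1, where the
+	 * firmware images are expected to be loaded as separate files.
+	 */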
+ ab->fw.api_version = 1;
+
+out:
+ ath11k_dbg(ab, ATH11K_DBG_BOOT, "using fw api %d\n",
+ ab->fw.api_version);
+
+ return 0;
+}
+
+void ath11k_fw_destroy(struct ath11k_base *ab)
+{
+ release_firmware(ab->fw.fw);
+}
+EXPORT_SYMBOL(ath11k_fw_destroy);
diff --git a/drivers/net/wireless/ath/ath11k/fw.h b/drivers/net/wireless/ath/ath11k/fw.h
new file mode 100644
index 000000000000..d9893ceb2c3d
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/fw.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef ATH11K_FW_H
+#define ATH11K_FW_H
+
+#define ATH11K_FW_API2_FILE "firmware-2.bin"
+#define ATH11K_FIRMWARE_MAGIC "QCOM-ATH11K-FW"
+
+enum ath11k_fw_ie_type {
+ ATH11K_FW_IE_TIMESTAMP = 0,
+ ATH11K_FW_IE_FEATURES = 1,
+ ATH11K_FW_IE_AMSS_IMAGE = 2,
+ ATH11K_FW_IE_M3_IMAGE = 3,
+};
+
+enum ath11k_fw_features {
+ /* keep last */
+ ATH11K_FW_FEATURE_COUNT,
+};
+
+int ath11k_fw_pre_init(struct ath11k_base *ab);
+void ath11k_fw_destroy(struct ath11k_base *ab);
+
+#endif /* ATH11K_FW_H */
diff --git a/drivers/net/wireless/ath/ath11k/hal.c b/drivers/net/wireless/ath/ath11k/hal.c
new file mode 100644
index 000000000000..0c797b8d0a27
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/hal.c
@@ -0,0 +1,1455 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+#include <linux/dma-mapping.h>
+#include <linux/export.h>
+#include "hal_tx.h"
+#include "debug.h"
+#include "hal_desc.h"
+#include "hif.h"
+
+static const struct hal_srng_config hw_srng_config_template[] = {
+	/* TODO: max_rings can be populated by querying HW capabilities */
+ { /* REO_DST */
+ .start_ring_id = HAL_SRNG_RING_ID_REO2SW1,
+ .max_rings = 4,
+ .entry_size = sizeof(struct hal_reo_dest_ring) >> 2,
+ .lmac_ring = false,
+ .ring_dir = HAL_SRNG_DIR_DST,
+ .max_size = HAL_REO_REO2SW1_RING_BASE_MSB_RING_SIZE,
+ },
+ { /* REO_EXCEPTION */
+	/* Designating the REO2TCL ring as the exception ring. This ring is
+	 * similar to the other REO2SW rings even though it is named REO2TCL.
+	 * Any of the REO2SW rings can be used as the exception ring.
+	 */
+ .start_ring_id = HAL_SRNG_RING_ID_REO2TCL,
+ .max_rings = 1,
+ .entry_size = sizeof(struct hal_reo_dest_ring) >> 2,
+ .lmac_ring = false,
+ .ring_dir = HAL_SRNG_DIR_DST,
+ .max_size = HAL_REO_REO2TCL_RING_BASE_MSB_RING_SIZE,
+ },
+ { /* REO_REINJECT */
+ .start_ring_id = HAL_SRNG_RING_ID_SW2REO,
+ .max_rings = 1,
+ .entry_size = sizeof(struct hal_reo_entrance_ring) >> 2,
+ .lmac_ring = false,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .max_size = HAL_REO_SW2REO_RING_BASE_MSB_RING_SIZE,
+ },
+ { /* REO_CMD */
+ .start_ring_id = HAL_SRNG_RING_ID_REO_CMD,
+ .max_rings = 1,
+ .entry_size = (sizeof(struct hal_tlv_hdr) +
+ sizeof(struct hal_reo_get_queue_stats)) >> 2,
+ .lmac_ring = false,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .max_size = HAL_REO_CMD_RING_BASE_MSB_RING_SIZE,
+ },
+ { /* REO_STATUS */
+ .start_ring_id = HAL_SRNG_RING_ID_REO_STATUS,
+ .max_rings = 1,
+ .entry_size = (sizeof(struct hal_tlv_hdr) +
+ sizeof(struct hal_reo_get_queue_stats_status)) >> 2,
+ .lmac_ring = false,
+ .ring_dir = HAL_SRNG_DIR_DST,
+ .max_size = HAL_REO_STATUS_RING_BASE_MSB_RING_SIZE,
+ },
+ { /* TCL_DATA */
+ .start_ring_id = HAL_SRNG_RING_ID_SW2TCL1,
+ .max_rings = 3,
+ .entry_size = (sizeof(struct hal_tlv_hdr) +
+ sizeof(struct hal_tcl_data_cmd)) >> 2,
+ .lmac_ring = false,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .max_size = HAL_SW2TCL1_RING_BASE_MSB_RING_SIZE,
+ },
+ { /* TCL_CMD */
+ .start_ring_id = HAL_SRNG_RING_ID_SW2TCL_CMD,
+ .max_rings = 1,
+ .entry_size = (sizeof(struct hal_tlv_hdr) +
+ sizeof(struct hal_tcl_gse_cmd)) >> 2,
+ .lmac_ring = false,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .max_size = HAL_SW2TCL1_CMD_RING_BASE_MSB_RING_SIZE,
+ },
+ { /* TCL_STATUS */
+ .start_ring_id = HAL_SRNG_RING_ID_TCL_STATUS,
+ .max_rings = 1,
+ .entry_size = (sizeof(struct hal_tlv_hdr) +
+ sizeof(struct hal_tcl_status_ring)) >> 2,
+ .lmac_ring = false,
+ .ring_dir = HAL_SRNG_DIR_DST,
+ .max_size = HAL_TCL_STATUS_RING_BASE_MSB_RING_SIZE,
+ },
+ { /* CE_SRC */
+ .start_ring_id = HAL_SRNG_RING_ID_CE0_SRC,
+ .max_rings = 12,
+ .entry_size = sizeof(struct hal_ce_srng_src_desc) >> 2,
+ .lmac_ring = false,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .max_size = HAL_CE_SRC_RING_BASE_MSB_RING_SIZE,
+ },
+ { /* CE_DST */
+ .start_ring_id = HAL_SRNG_RING_ID_CE0_DST,
+ .max_rings = 12,
+ .entry_size = sizeof(struct hal_ce_srng_dest_desc) >> 2,
+ .lmac_ring = false,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .max_size = HAL_CE_DST_RING_BASE_MSB_RING_SIZE,
+ },
+ { /* CE_DST_STATUS */
+ .start_ring_id = HAL_SRNG_RING_ID_CE0_DST_STATUS,
+ .max_rings = 12,
+ .entry_size = sizeof(struct hal_ce_srng_dst_status_desc) >> 2,
+ .lmac_ring = false,
+ .ring_dir = HAL_SRNG_DIR_DST,
+ .max_size = HAL_CE_DST_STATUS_RING_BASE_MSB_RING_SIZE,
+ },
+ { /* WBM_IDLE_LINK */
+ .start_ring_id = HAL_SRNG_RING_ID_WBM_IDLE_LINK,
+ .max_rings = 1,
+ .entry_size = sizeof(struct hal_wbm_link_desc) >> 2,
+ .lmac_ring = false,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .max_size = HAL_WBM_IDLE_LINK_RING_BASE_MSB_RING_SIZE,
+ },
+ { /* SW2WBM_RELEASE */
+ .start_ring_id = HAL_SRNG_RING_ID_WBM_SW_RELEASE,
+ .max_rings = 1,
+ .entry_size = sizeof(struct hal_wbm_release_ring) >> 2,
+ .lmac_ring = false,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .max_size = HAL_SW2WBM_RELEASE_RING_BASE_MSB_RING_SIZE,
+ },
+ { /* WBM2SW_RELEASE */
+ .start_ring_id = HAL_SRNG_RING_ID_WBM2SW0_RELEASE,
+ .max_rings = 5,
+ .entry_size = sizeof(struct hal_wbm_release_ring) >> 2,
+ .lmac_ring = false,
+ .ring_dir = HAL_SRNG_DIR_DST,
+ .max_size = HAL_WBM2SW_RELEASE_RING_BASE_MSB_RING_SIZE,
+ },
+ { /* RXDMA_BUF */
+ .start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXDMA0_BUF,
+ .max_rings = 2,
+ .entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2,
+ .lmac_ring = true,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .max_size = HAL_RXDMA_RING_MAX_SIZE,
+ },
+ { /* RXDMA_DST */
+ .start_ring_id = HAL_SRNG_RING_ID_WMAC1_RXDMA2SW0,
+ .max_rings = 1,
+ .entry_size = sizeof(struct hal_reo_entrance_ring) >> 2,
+ .lmac_ring = true,
+ .ring_dir = HAL_SRNG_DIR_DST,
+ .max_size = HAL_RXDMA_RING_MAX_SIZE,
+ },
+ { /* RXDMA_MONITOR_BUF */
+ .start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXDMA2_BUF,
+ .max_rings = 1,
+ .entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2,
+ .lmac_ring = true,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .max_size = HAL_RXDMA_RING_MAX_SIZE,
+ },
+ { /* RXDMA_MONITOR_STATUS */
+ .start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_STATBUF,
+ .max_rings = 1,
+ .entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2,
+ .lmac_ring = true,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .max_size = HAL_RXDMA_RING_MAX_SIZE,
+ },
+ { /* RXDMA_MONITOR_DST */
+ .start_ring_id = HAL_SRNG_RING_ID_WMAC1_RXDMA2SW1,
+ .max_rings = 1,
+ .entry_size = sizeof(struct hal_reo_entrance_ring) >> 2,
+ .lmac_ring = true,
+ .ring_dir = HAL_SRNG_DIR_DST,
+ .max_size = HAL_RXDMA_RING_MAX_SIZE,
+ },
+ { /* RXDMA_MONITOR_DESC */
+ .start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_DESC,
+ .max_rings = 1,
+ .entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2,
+ .lmac_ring = true,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .max_size = HAL_RXDMA_RING_MAX_SIZE,
+ },
+ { /* RXDMA DIR BUF */
+ .start_ring_id = HAL_SRNG_RING_ID_RXDMA_DIR_BUF,
+ .max_rings = 1,
+ .entry_size = 8 >> 2, /* TODO: Define the struct */
+ .lmac_ring = true,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .max_size = HAL_RXDMA_RING_MAX_SIZE,
+ },
+};
+
+static int ath11k_hal_alloc_cont_rdp(struct ath11k_base *ab)
+{
+ struct ath11k_hal *hal = &ab->hal;
+ size_t size;
+
+ size = sizeof(u32) * HAL_SRNG_RING_ID_MAX;
+ hal->rdp.vaddr = dma_alloc_coherent(ab->dev, size, &hal->rdp.paddr,
+ GFP_KERNEL);
+ if (!hal->rdp.vaddr)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void ath11k_hal_free_cont_rdp(struct ath11k_base *ab)
+{
+ struct ath11k_hal *hal = &ab->hal;
+ size_t size;
+
+ if (!hal->rdp.vaddr)
+ return;
+
+ size = sizeof(u32) * HAL_SRNG_RING_ID_MAX;
+ dma_free_coherent(ab->dev, size,
+ hal->rdp.vaddr, hal->rdp.paddr);
+ hal->rdp.vaddr = NULL;
+}
+
+static int ath11k_hal_alloc_cont_wrp(struct ath11k_base *ab)
+{
+ struct ath11k_hal *hal = &ab->hal;
+ size_t size;
+
+ size = sizeof(u32) * HAL_SRNG_NUM_LMAC_RINGS;
+ hal->wrp.vaddr = dma_alloc_coherent(ab->dev, size, &hal->wrp.paddr,
+ GFP_KERNEL);
+ if (!hal->wrp.vaddr)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void ath11k_hal_free_cont_wrp(struct ath11k_base *ab)
+{
+ struct ath11k_hal *hal = &ab->hal;
+ size_t size;
+
+ if (!hal->wrp.vaddr)
+ return;
+
+ size = sizeof(u32) * HAL_SRNG_NUM_LMAC_RINGS;
+ dma_free_coherent(ab->dev, size,
+ hal->wrp.vaddr, hal->wrp.paddr);
+ hal->wrp.vaddr = NULL;
+}
+
+static void ath11k_hal_ce_dst_setup(struct ath11k_base *ab,
+ struct hal_srng *srng, int ring_num)
+{
+ struct hal_srng_config *srng_config = &ab->hal.srng_config[HAL_CE_DST];
+ u32 addr;
+ u32 val;
+
+ addr = HAL_CE_DST_RING_CTRL +
+ srng_config->reg_start[HAL_SRNG_REG_GRP_R0] +
+ ring_num * srng_config->reg_size[HAL_SRNG_REG_GRP_R0];
+
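+	/* read-modify-write so that only the max buffer length field
+	 * of the destination ring control register changes
+	 */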
+ val = ath11k_hif_read32(ab, addr);
+ val &= ~HAL_CE_DST_R0_DEST_CTRL_MAX_LEN;
+ val |= FIELD_PREP(HAL_CE_DST_R0_DEST_CTRL_MAX_LEN,
+ srng->u.dst_ring.max_buffer_length);
+ ath11k_hif_write32(ab, addr, val);
+}
+
+static void ath11k_hal_srng_dst_hw_init(struct ath11k_base *ab,
+ struct hal_srng *srng)
+{
+ struct ath11k_hal *hal = &ab->hal;
+ u32 val;
+ u64 hp_addr;
+ u32 reg_base;
+
+ reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];
+
+ if (srng->flags & HAL_SRNG_FLAGS_MSI_INTR) {
+ ath11k_hif_write32(ab, reg_base +
+ HAL_REO1_RING_MSI1_BASE_LSB_OFFSET(ab),
+ srng->msi_addr);
+
+ val = FIELD_PREP(HAL_REO1_RING_MSI1_BASE_MSB_ADDR,
+ ((u64)srng->msi_addr >>
+ HAL_ADDR_MSB_REG_SHIFT)) |
+ HAL_REO1_RING_MSI1_BASE_MSB_MSI1_ENABLE;
+ ath11k_hif_write32(ab, reg_base +
+ HAL_REO1_RING_MSI1_BASE_MSB_OFFSET(ab), val);
+
+ ath11k_hif_write32(ab,
+ reg_base + HAL_REO1_RING_MSI1_DATA_OFFSET(ab),
+ srng->msi_data);
+ }
+
+ ath11k_hif_write32(ab, reg_base, srng->ring_base_paddr);
+
+ val = FIELD_PREP(HAL_REO1_RING_BASE_MSB_RING_BASE_ADDR_MSB,
+ ((u64)srng->ring_base_paddr >>
+ HAL_ADDR_MSB_REG_SHIFT)) |
+ FIELD_PREP(HAL_REO1_RING_BASE_MSB_RING_SIZE,
+ (srng->entry_size * srng->num_entries));
+ ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_BASE_MSB_OFFSET(ab), val);
+
+ val = FIELD_PREP(HAL_REO1_RING_ID_RING_ID, srng->ring_id) |
+ FIELD_PREP(HAL_REO1_RING_ID_ENTRY_SIZE, srng->entry_size);
+ ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_ID_OFFSET(ab), val);
+
+ /* interrupt setup */
+ val = FIELD_PREP(HAL_REO1_RING_PRDR_INT_SETUP_INTR_TMR_THOLD,
+ (srng->intr_timer_thres_us >> 3));
+
+ val |= FIELD_PREP(HAL_REO1_RING_PRDR_INT_SETUP_BATCH_COUNTER_THOLD,
+ (srng->intr_batch_cntr_thres_entries *
+ srng->entry_size));
+
+ ath11k_hif_write32(ab,
+ reg_base + HAL_REO1_RING_PRODUCER_INT_SETUP_OFFSET(ab),
+ val);
+
+ hp_addr = hal->rdp.paddr +
+ ((unsigned long)srng->u.dst_ring.hp_addr -
+ (unsigned long)hal->rdp.vaddr);
+ ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_HP_ADDR_LSB_OFFSET(ab),
+ hp_addr & HAL_ADDR_LSB_REG_MASK);
+ ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_HP_ADDR_MSB_OFFSET(ab),
+ hp_addr >> HAL_ADDR_MSB_REG_SHIFT);
+
+ /* Initialize head and tail pointers to indicate ring is empty */
+ reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R2];
+ ath11k_hif_write32(ab, reg_base, 0);
+ ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_TP_OFFSET(ab), 0);
+ *srng->u.dst_ring.hp_addr = 0;
+
+ reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];
+ val = 0;
+ if (srng->flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP)
+ val |= HAL_REO1_RING_MISC_DATA_TLV_SWAP;
+ if (srng->flags & HAL_SRNG_FLAGS_RING_PTR_SWAP)
+ val |= HAL_REO1_RING_MISC_HOST_FW_SWAP;
+ if (srng->flags & HAL_SRNG_FLAGS_MSI_SWAP)
+ val |= HAL_REO1_RING_MISC_MSI_SWAP;
+ val |= HAL_REO1_RING_MISC_SRNG_ENABLE;
+
+ ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_MISC_OFFSET(ab), val);
+}
+
+static void ath11k_hal_srng_src_hw_init(struct ath11k_base *ab,
+ struct hal_srng *srng)
+{
+ struct ath11k_hal *hal = &ab->hal;
+ u32 val;
+ u64 tp_addr;
+ u32 reg_base;
+
+ reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];
+
+ if (srng->flags & HAL_SRNG_FLAGS_MSI_INTR) {
+ ath11k_hif_write32(ab, reg_base +
+ HAL_TCL1_RING_MSI1_BASE_LSB_OFFSET(ab),
+ srng->msi_addr);
+
+ val = FIELD_PREP(HAL_TCL1_RING_MSI1_BASE_MSB_ADDR,
+ ((u64)srng->msi_addr >>
+ HAL_ADDR_MSB_REG_SHIFT)) |
+ HAL_TCL1_RING_MSI1_BASE_MSB_MSI1_ENABLE;
+ ath11k_hif_write32(ab, reg_base +
+ HAL_TCL1_RING_MSI1_BASE_MSB_OFFSET(ab),
+ val);
+
+ ath11k_hif_write32(ab, reg_base +
+ HAL_TCL1_RING_MSI1_DATA_OFFSET(ab),
+ srng->msi_data);
+ }
+
+ ath11k_hif_write32(ab, reg_base, srng->ring_base_paddr);
+
+ val = FIELD_PREP(HAL_TCL1_RING_BASE_MSB_RING_BASE_ADDR_MSB,
+ ((u64)srng->ring_base_paddr >>
+ HAL_ADDR_MSB_REG_SHIFT)) |
+ FIELD_PREP(HAL_TCL1_RING_BASE_MSB_RING_SIZE,
+ (srng->entry_size * srng->num_entries));
+ ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_BASE_MSB_OFFSET(ab), val);
+
+ val = FIELD_PREP(HAL_REO1_RING_ID_ENTRY_SIZE, srng->entry_size);
+ ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_ID_OFFSET(ab), val);
+
+ if (srng->ring_id == HAL_SRNG_RING_ID_WBM_IDLE_LINK) {
+ ath11k_hif_write32(ab, reg_base, (u32)srng->ring_base_paddr);
+ val = FIELD_PREP(HAL_TCL1_RING_BASE_MSB_RING_BASE_ADDR_MSB,
+ ((u64)srng->ring_base_paddr >>
+ HAL_ADDR_MSB_REG_SHIFT)) |
+ FIELD_PREP(HAL_TCL1_RING_BASE_MSB_RING_SIZE,
+ (srng->entry_size * srng->num_entries));
+ ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_BASE_MSB_OFFSET(ab), val);
+ }
+
+ /* interrupt setup */
+	/* NOTE: IPQ8074 v2 requires the interrupt timer threshold in units
+	 * of 8 usecs instead of 1 usec (as required by v1).
+	 */
+ val = FIELD_PREP(HAL_TCL1_RING_CONSR_INT_SETUP_IX0_INTR_TMR_THOLD,
+ srng->intr_timer_thres_us);
+
+ val |= FIELD_PREP(HAL_TCL1_RING_CONSR_INT_SETUP_IX0_BATCH_COUNTER_THOLD,
+ (srng->intr_batch_cntr_thres_entries *
+ srng->entry_size));
+
+ ath11k_hif_write32(ab,
+ reg_base + HAL_TCL1_RING_CONSR_INT_SETUP_IX0_OFFSET(ab),
+ val);
+
+ val = 0;
+ if (srng->flags & HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN) {
+ val |= FIELD_PREP(HAL_TCL1_RING_CONSR_INT_SETUP_IX1_LOW_THOLD,
+ srng->u.src_ring.low_threshold);
+ }
+ ath11k_hif_write32(ab,
+ reg_base + HAL_TCL1_RING_CONSR_INT_SETUP_IX1_OFFSET(ab),
+ val);
+
+ if (srng->ring_id != HAL_SRNG_RING_ID_WBM_IDLE_LINK) {
+ tp_addr = hal->rdp.paddr +
+ ((unsigned long)srng->u.src_ring.tp_addr -
+ (unsigned long)hal->rdp.vaddr);
+ ath11k_hif_write32(ab,
+ reg_base + HAL_TCL1_RING_TP_ADDR_LSB_OFFSET(ab),
+ tp_addr & HAL_ADDR_LSB_REG_MASK);
+ ath11k_hif_write32(ab,
+ reg_base + HAL_TCL1_RING_TP_ADDR_MSB_OFFSET(ab),
+ tp_addr >> HAL_ADDR_MSB_REG_SHIFT);
+ }
+
+ /* Initialize head and tail pointers to indicate ring is empty */
+ reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R2];
+ ath11k_hif_write32(ab, reg_base, 0);
+ ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_TP_OFFSET, 0);
+ *srng->u.src_ring.tp_addr = 0;
+
+ reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];
+ val = 0;
+ if (srng->flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP)
+ val |= HAL_TCL1_RING_MISC_DATA_TLV_SWAP;
+ if (srng->flags & HAL_SRNG_FLAGS_RING_PTR_SWAP)
+ val |= HAL_TCL1_RING_MISC_HOST_FW_SWAP;
+ if (srng->flags & HAL_SRNG_FLAGS_MSI_SWAP)
+ val |= HAL_TCL1_RING_MISC_MSI_SWAP;
+
+ /* Loop count is not used for SRC rings */
+ val |= HAL_TCL1_RING_MISC_MSI_LOOPCNT_DISABLE;
+
+ val |= HAL_TCL1_RING_MISC_SRNG_ENABLE;
+
+ ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_MISC_OFFSET(ab), val);
+}
+
+static void ath11k_hal_srng_hw_init(struct ath11k_base *ab,
+ struct hal_srng *srng)
+{
+ if (srng->ring_dir == HAL_SRNG_DIR_SRC)
+ ath11k_hal_srng_src_hw_init(ab, srng);
+ else
+ ath11k_hal_srng_dst_hw_init(ab, srng);
+}
+
+static int ath11k_hal_srng_get_ring_id(struct ath11k_base *ab,
+ enum hal_ring_type type,
+ int ring_num, int mac_id)
+{
+ struct hal_srng_config *srng_config = &ab->hal.srng_config[type];
+ int ring_id;
+
+ if (ring_num >= srng_config->max_rings) {
+ ath11k_warn(ab, "invalid ring number :%d\n", ring_num);
+ return -EINVAL;
+ }
+
+ ring_id = srng_config->start_ring_id + ring_num;
+ if (srng_config->lmac_ring)
+ ring_id += mac_id * HAL_SRNG_RINGS_PER_LMAC;
+
+ if (WARN_ON(ring_id >= HAL_SRNG_RING_ID_MAX))
+ return -EINVAL;
+
+ return ring_id;
+}
+
+int ath11k_hal_srng_get_entrysize(struct ath11k_base *ab, u32 ring_type)
+{
+ struct hal_srng_config *srng_config;
+
+ if (WARN_ON(ring_type >= HAL_MAX_RING_TYPES))
+ return -EINVAL;
+
+ srng_config = &ab->hal.srng_config[ring_type];
+
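+	/* entry_size is stored in units of 32-bit words; convert to bytes */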
+ return (srng_config->entry_size << 2);
+}
+
+int ath11k_hal_srng_get_max_entries(struct ath11k_base *ab, u32 ring_type)
+{
+ struct hal_srng_config *srng_config;
+
+ if (WARN_ON(ring_type >= HAL_MAX_RING_TYPES))
+ return -EINVAL;
+
+ srng_config = &ab->hal.srng_config[ring_type];
+
+ return (srng_config->max_size / srng_config->entry_size);
+}
+
+void ath11k_hal_srng_get_params(struct ath11k_base *ab, struct hal_srng *srng,
+ struct hal_srng_params *params)
+{
+ params->ring_base_paddr = srng->ring_base_paddr;
+ params->ring_base_vaddr = srng->ring_base_vaddr;
+ params->num_entries = srng->num_entries;
+ params->intr_timer_thres_us = srng->intr_timer_thres_us;
+ params->intr_batch_cntr_thres_entries =
+ srng->intr_batch_cntr_thres_entries;
+ params->low_threshold = srng->u.src_ring.low_threshold;
+ params->msi_addr = srng->msi_addr;
+ params->msi_data = srng->msi_data;
+ params->flags = srng->flags;
+}
+
+dma_addr_t ath11k_hal_srng_get_hp_addr(struct ath11k_base *ab,
+ struct hal_srng *srng)
+{
+ if (!(srng->flags & HAL_SRNG_FLAGS_LMAC_RING))
+ return 0;
+
+ if (srng->ring_dir == HAL_SRNG_DIR_SRC)
+ return ab->hal.wrp.paddr +
+ ((unsigned long)srng->u.src_ring.hp_addr -
+ (unsigned long)ab->hal.wrp.vaddr);
+ else
+ return ab->hal.rdp.paddr +
+ ((unsigned long)srng->u.dst_ring.hp_addr -
+ (unsigned long)ab->hal.rdp.vaddr);
+}
+
+dma_addr_t ath11k_hal_srng_get_tp_addr(struct ath11k_base *ab,
+ struct hal_srng *srng)
+{
+ if (!(srng->flags & HAL_SRNG_FLAGS_LMAC_RING))
+ return 0;
+
+ if (srng->ring_dir == HAL_SRNG_DIR_SRC)
+ return ab->hal.rdp.paddr +
+ ((unsigned long)srng->u.src_ring.tp_addr -
+ (unsigned long)ab->hal.rdp.vaddr);
+ else
+ return ab->hal.wrp.paddr +
+ ((unsigned long)srng->u.dst_ring.tp_addr -
+ (unsigned long)ab->hal.wrp.vaddr);
+}
+
+u32 ath11k_hal_ce_get_desc_size(enum hal_ce_desc type)
+{
+ switch (type) {
+ case HAL_CE_DESC_SRC:
+ return sizeof(struct hal_ce_srng_src_desc);
+ case HAL_CE_DESC_DST:
+ return sizeof(struct hal_ce_srng_dest_desc);
+ case HAL_CE_DESC_DST_STATUS:
+ return sizeof(struct hal_ce_srng_dst_status_desc);
+ }
+
+ return 0;
+}
+
+void ath11k_hal_ce_src_set_desc(void *buf, dma_addr_t paddr, u32 len, u32 id,
+ u8 byte_swap_data)
+{
+ struct hal_ce_srng_src_desc *desc = buf;
+
+ desc->buffer_addr_low = paddr & HAL_ADDR_LSB_REG_MASK;
+ desc->buffer_addr_info =
+ FIELD_PREP(HAL_CE_SRC_DESC_ADDR_INFO_ADDR_HI,
+ ((u64)paddr >> HAL_ADDR_MSB_REG_SHIFT)) |
+ FIELD_PREP(HAL_CE_SRC_DESC_ADDR_INFO_BYTE_SWAP,
+ byte_swap_data) |
+ FIELD_PREP(HAL_CE_SRC_DESC_ADDR_INFO_GATHER, 0) |
+ FIELD_PREP(HAL_CE_SRC_DESC_ADDR_INFO_LEN, len);
+ desc->meta_info = FIELD_PREP(HAL_CE_SRC_DESC_META_INFO_DATA, id);
+}
+
+void ath11k_hal_ce_dst_set_desc(void *buf, dma_addr_t paddr)
+{
+ struct hal_ce_srng_dest_desc *desc = buf;
+
+ desc->buffer_addr_low = paddr & HAL_ADDR_LSB_REG_MASK;
+ desc->buffer_addr_info =
+ FIELD_PREP(HAL_CE_DEST_DESC_ADDR_INFO_ADDR_HI,
+ ((u64)paddr >> HAL_ADDR_MSB_REG_SHIFT));
+}
+
+u32 ath11k_hal_ce_dst_status_get_length(void *buf)
+{
+ struct hal_ce_srng_dst_status_desc *desc = buf;
+ u32 len;
+
+ len = FIELD_GET(HAL_CE_DST_STATUS_DESC_FLAGS_LEN, desc->flags);
+ desc->flags &= ~HAL_CE_DST_STATUS_DESC_FLAGS_LEN;
+
+ return len;
+}
+
+void ath11k_hal_set_link_desc_addr(struct hal_wbm_link_desc *desc, u32 cookie,
+ dma_addr_t paddr)
+{
+ desc->buf_addr_info.info0 = FIELD_PREP(BUFFER_ADDR_INFO0_ADDR,
+ (paddr & HAL_ADDR_LSB_REG_MASK));
+ desc->buf_addr_info.info1 = FIELD_PREP(BUFFER_ADDR_INFO1_ADDR,
+ ((u64)paddr >> HAL_ADDR_MSB_REG_SHIFT)) |
+ FIELD_PREP(BUFFER_ADDR_INFO1_RET_BUF_MGR, 1) |
+ FIELD_PREP(BUFFER_ADDR_INFO1_SW_COOKIE, cookie);
+}
+
+u32 *ath11k_hal_srng_dst_peek(struct ath11k_base *ab, struct hal_srng *srng)
+{
+ lockdep_assert_held(&srng->lock);
+
+ if (srng->u.dst_ring.tp != srng->u.dst_ring.cached_hp)
+ return (srng->ring_base_vaddr + srng->u.dst_ring.tp);
+
+ return NULL;
+}
+
+static u32 *ath11k_hal_srng_dst_peek_with_dma(struct ath11k_base *ab,
+ struct hal_srng *srng, dma_addr_t *paddr)
+{
+ lockdep_assert_held(&srng->lock);
+
+ if (srng->u.dst_ring.tp != srng->u.dst_ring.cached_hp) {
+ *paddr = srng->ring_base_paddr +
+ sizeof(*srng->ring_base_vaddr) * srng->u.dst_ring.tp;
+ return srng->ring_base_vaddr + srng->u.dst_ring.tp;
+ }
+
+ return NULL;
+}
+
+static void ath11k_hal_srng_prefetch_desc(struct ath11k_base *ab,
+ struct hal_srng *srng)
+{
+ dma_addr_t desc_paddr;
+ u32 *desc;
+
+ /* prefetch only if desc is available */
+ desc = ath11k_hal_srng_dst_peek_with_dma(ab, srng, &desc_paddr);
+ if (likely(desc)) {
+ dma_sync_single_for_cpu(ab->dev, desc_paddr,
+ (srng->entry_size * sizeof(u32)),
+ DMA_FROM_DEVICE);
+ prefetch(desc);
+ }
+}
+
+u32 *ath11k_hal_srng_dst_get_next_entry(struct ath11k_base *ab,
+ struct hal_srng *srng)
+{
+ u32 *desc;
+
+ lockdep_assert_held(&srng->lock);
+
+ if (srng->u.dst_ring.tp == srng->u.dst_ring.cached_hp)
+ return NULL;
+
+ desc = srng->ring_base_vaddr + srng->u.dst_ring.tp;
+
+ srng->u.dst_ring.tp += srng->entry_size;
+
+	/* wrap around to start of ring */
+ if (srng->u.dst_ring.tp == srng->ring_size)
+ srng->u.dst_ring.tp = 0;
+
+ /* Try to prefetch the next descriptor in the ring */
+ if (srng->flags & HAL_SRNG_FLAGS_CACHED)
+ ath11k_hal_srng_prefetch_desc(ab, srng);
+
+ return desc;
+}
+
+int ath11k_hal_srng_dst_num_free(struct ath11k_base *ab, struct hal_srng *srng,
+ bool sync_hw_ptr)
+{
+ u32 tp, hp;
+
+ lockdep_assert_held(&srng->lock);
+
+ tp = srng->u.dst_ring.tp;
+
+ if (sync_hw_ptr) {
+ hp = *srng->u.dst_ring.hp_addr;
+ srng->u.dst_ring.cached_hp = hp;
+ } else {
+ hp = srng->u.dst_ring.cached_hp;
+ }
+
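+	/* hp and tp are offsets in 32-bit words; the else branch handles
+	 * the case where the head pointer has wrapped around the ring
+	 */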
+ if (hp >= tp)
+ return (hp - tp) / srng->entry_size;
+ else
+ return (srng->ring_size - tp + hp) / srng->entry_size;
+}
+
+/* Returns number of available entries in src ring */
+int ath11k_hal_srng_src_num_free(struct ath11k_base *ab, struct hal_srng *srng,
+ bool sync_hw_ptr)
+{
+ u32 tp, hp;
+
+ lockdep_assert_held(&srng->lock);
+
+ hp = srng->u.src_ring.hp;
+
+ if (sync_hw_ptr) {
+ tp = *srng->u.src_ring.tp_addr;
+ srng->u.src_ring.cached_tp = tp;
+ } else {
+ tp = srng->u.src_ring.cached_tp;
+ }
+
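+	/* one entry is always kept unused so that a full ring can be
+	 * distinguished from an empty one, hence the trailing -1
+	 */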
+ if (tp > hp)
+ return ((tp - hp) / srng->entry_size) - 1;
+ else
+ return ((srng->ring_size - hp + tp) / srng->entry_size) - 1;
+}
+
+u32 *ath11k_hal_srng_src_get_next_entry(struct ath11k_base *ab,
+ struct hal_srng *srng)
+{
+ u32 *desc;
+ u32 next_hp;
+
+ lockdep_assert_held(&srng->lock);
+
+	/* TODO: Using % is expensive, but we have to do this since the size
+	 * of some SRNG rings is not a power of 2 (due to descriptor sizes).
+	 * See whether a separate function can be defined for rings with a
+	 * power-of-2 ring size (TCL2SW, REO2SW, SW2RXDMA and CE rings) so
+	 * that the overhead of % can be avoided by masking (with &).
+	 */
+ next_hp = (srng->u.src_ring.hp + srng->entry_size) % srng->ring_size;
+
+ if (next_hp == srng->u.src_ring.cached_tp)
+ return NULL;
+
+ desc = srng->ring_base_vaddr + srng->u.src_ring.hp;
+ srng->u.src_ring.hp = next_hp;
+
+	/* TODO: Reap functionality is not used by all rings. If a particular
+	 * ring does not use the reap functionality, we need not update
+	 * reap_hp with the next_hp pointer. Make sure a separate function is
+	 * used before optimizing away the reap_hp update below.
+	 */
+ srng->u.src_ring.reap_hp = next_hp;
+
+ return desc;
+}
+
+u32 *ath11k_hal_srng_src_reap_next(struct ath11k_base *ab,
+ struct hal_srng *srng)
+{
+ u32 *desc;
+ u32 next_reap_hp;
+
+ lockdep_assert_held(&srng->lock);
+
+ next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) %
+ srng->ring_size;
+
+ if (next_reap_hp == srng->u.src_ring.cached_tp)
+ return NULL;
+
+ desc = srng->ring_base_vaddr + next_reap_hp;
+ srng->u.src_ring.reap_hp = next_reap_hp;
+
+ return desc;
+}
+
+u32 *ath11k_hal_srng_src_get_next_reaped(struct ath11k_base *ab,
+ struct hal_srng *srng)
+{
+ u32 *desc;
+
+ lockdep_assert_held(&srng->lock);
+
+ if (srng->u.src_ring.hp == srng->u.src_ring.reap_hp)
+ return NULL;
+
+ desc = srng->ring_base_vaddr + srng->u.src_ring.hp;
+ srng->u.src_ring.hp = (srng->u.src_ring.hp + srng->entry_size) %
+ srng->ring_size;
+
+ return desc;
+}
+
+u32 *ath11k_hal_srng_src_next_peek(struct ath11k_base *ab, struct hal_srng *srng)
+{
+ u32 next_hp;
+
+ lockdep_assert_held(&srng->lock);
+
+ next_hp = (srng->u.src_ring.hp + srng->entry_size) % srng->ring_size;
+
+ if (next_hp != srng->u.src_ring.cached_tp)
+ return srng->ring_base_vaddr + next_hp;
+
+ return NULL;
+}
+
+u32 *ath11k_hal_srng_src_peek(struct ath11k_base *ab, struct hal_srng *srng)
+{
+ lockdep_assert_held(&srng->lock);
+
+ if (((srng->u.src_ring.hp + srng->entry_size) % srng->ring_size) ==
+ srng->u.src_ring.cached_tp)
+ return NULL;
+
+ return srng->ring_base_vaddr + srng->u.src_ring.hp;
+}
+
+void ath11k_hal_srng_access_begin(struct ath11k_base *ab, struct hal_srng *srng)
+{
+ u32 hp;
+
+ lockdep_assert_held(&srng->lock);
+
+ if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
+ srng->u.src_ring.cached_tp =
+ *(volatile u32 *)srng->u.src_ring.tp_addr;
+ } else {
+ hp = READ_ONCE(*srng->u.dst_ring.hp_addr);
+
+ if (hp != srng->u.dst_ring.cached_hp) {
+ srng->u.dst_ring.cached_hp = hp;
+ /* Make sure descriptor is read after the head
+ * pointer.
+ */
+ dma_rmb();
+ }
+
+ /* Try to prefetch the next descriptor in the ring */
+ if (srng->flags & HAL_SRNG_FLAGS_CACHED)
+ ath11k_hal_srng_prefetch_desc(ab, srng);
+ }
+}
+
+/* Update cached ring head/tail pointers to HW. ath11k_hal_srng_access_begin()
+ * should have been called before this.
+ */
+void ath11k_hal_srng_access_end(struct ath11k_base *ab, struct hal_srng *srng)
+{
+ lockdep_assert_held(&srng->lock);
+
+ if (srng->flags & HAL_SRNG_FLAGS_LMAC_RING) {
+		/* For LMAC rings, ring pointer updates are done through FW and
+		 * hence written to a shared memory location that is read by FW.
+		 */
+ if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
+ srng->u.src_ring.last_tp =
+ *(volatile u32 *)srng->u.src_ring.tp_addr;
+ /* Make sure descriptor is written before updating the
+ * head pointer.
+ */
+ dma_wmb();
+ WRITE_ONCE(*srng->u.src_ring.hp_addr, srng->u.src_ring.hp);
+ } else {
+ srng->u.dst_ring.last_hp = *srng->u.dst_ring.hp_addr;
+ /* Make sure descriptor is read before updating the
+ * tail pointer.
+ */
+ dma_mb();
+ WRITE_ONCE(*srng->u.dst_ring.tp_addr, srng->u.dst_ring.tp);
+ }
+ } else {
+ if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
+ srng->u.src_ring.last_tp =
+ *(volatile u32 *)srng->u.src_ring.tp_addr;
+			/* Assume the implementation uses an MMIO write accessor
+			 * which has the required wmb() so that the descriptor
+			 * is written before updating the head pointer.
+			 */
+ ath11k_hif_write32(ab,
+ (unsigned long)srng->u.src_ring.hp_addr -
+ (unsigned long)ab->mem,
+ srng->u.src_ring.hp);
+ } else {
+ srng->u.dst_ring.last_hp = *srng->u.dst_ring.hp_addr;
+ /* Make sure descriptor is read before updating the
+ * tail pointer.
+ */
+ mb();
+ ath11k_hif_write32(ab,
+ (unsigned long)srng->u.dst_ring.tp_addr -
+ (unsigned long)ab->mem,
+ srng->u.dst_ring.tp);
+ }
+ }
+
+ srng->timestamp = jiffies;
+}
+
+void ath11k_hal_setup_link_idle_list(struct ath11k_base *ab,
+ struct hal_wbm_idle_scatter_list *sbuf,
+ u32 nsbufs, u32 tot_link_desc,
+ u32 end_offset)
+{
+ struct ath11k_buffer_addr *link_addr;
+ int i;
+ u32 reg_scatter_buf_sz = HAL_WBM_IDLE_SCATTER_BUF_SIZE / 64;
+
+ link_addr = (void *)sbuf[0].vaddr + HAL_WBM_IDLE_SCATTER_BUF_SIZE;
+
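+	/* Chain the scatter buffers: the 8-byte next-pointer slot at the
+	 * end of each buffer holds the DMA address of the following buffer.
+	 */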
+ for (i = 1; i < nsbufs; i++) {
+ link_addr->info0 = sbuf[i].paddr & HAL_ADDR_LSB_REG_MASK;
+ link_addr->info1 = FIELD_PREP(
+ HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32,
+ (u64)sbuf[i].paddr >> HAL_ADDR_MSB_REG_SHIFT) |
+ FIELD_PREP(
+ HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_MATCH_TAG,
+ BASE_ADDR_MATCH_TAG_VAL);
+
+ link_addr = (void *)sbuf[i].vaddr +
+ HAL_WBM_IDLE_SCATTER_BUF_SIZE;
+ }
+
+ ath11k_hif_write32(ab,
+ HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_R0_IDLE_LIST_CONTROL_ADDR,
+ FIELD_PREP(HAL_WBM_SCATTER_BUFFER_SIZE, reg_scatter_buf_sz) |
+ FIELD_PREP(HAL_WBM_LINK_DESC_IDLE_LIST_MODE, 0x1));
+ ath11k_hif_write32(ab,
+ HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_R0_IDLE_LIST_SIZE_ADDR,
+ FIELD_PREP(HAL_WBM_SCATTER_RING_SIZE_OF_IDLE_LINK_DESC_LIST,
+ reg_scatter_buf_sz * nsbufs));
+ ath11k_hif_write32(ab,
+ HAL_SEQ_WCSS_UMAC_WBM_REG +
+ HAL_WBM_SCATTERED_RING_BASE_LSB,
+ FIELD_PREP(BUFFER_ADDR_INFO0_ADDR,
+ sbuf[0].paddr & HAL_ADDR_LSB_REG_MASK));
+ ath11k_hif_write32(ab,
+ HAL_SEQ_WCSS_UMAC_WBM_REG +
+ HAL_WBM_SCATTERED_RING_BASE_MSB,
+ FIELD_PREP(
+ HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32,
+ (u64)sbuf[0].paddr >> HAL_ADDR_MSB_REG_SHIFT) |
+ FIELD_PREP(
+ HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_MATCH_TAG,
+ BASE_ADDR_MATCH_TAG_VAL));
+
+ /* Setup head and tail pointers for the idle list */
+ ath11k_hif_write32(ab,
+ HAL_SEQ_WCSS_UMAC_WBM_REG +
+ HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX0,
+ FIELD_PREP(BUFFER_ADDR_INFO0_ADDR,
+ sbuf[nsbufs - 1].paddr));
+ ath11k_hif_write32(ab,
+ HAL_SEQ_WCSS_UMAC_WBM_REG +
+ HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX1,
+ FIELD_PREP(
+ HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32,
+ ((u64)sbuf[nsbufs - 1].paddr >>
+ HAL_ADDR_MSB_REG_SHIFT)) |
+ FIELD_PREP(HAL_WBM_SCATTERED_DESC_HEAD_P_OFFSET_IX1,
+ (end_offset >> 2)));
+ ath11k_hif_write32(ab,
+ HAL_SEQ_WCSS_UMAC_WBM_REG +
+ HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX0,
+ FIELD_PREP(BUFFER_ADDR_INFO0_ADDR,
+ sbuf[0].paddr));
+
+ ath11k_hif_write32(ab,
+ HAL_SEQ_WCSS_UMAC_WBM_REG +
+ HAL_WBM_SCATTERED_DESC_PTR_TAIL_INFO_IX0,
+ FIELD_PREP(BUFFER_ADDR_INFO0_ADDR,
+ sbuf[0].paddr));
+ ath11k_hif_write32(ab,
+ HAL_SEQ_WCSS_UMAC_WBM_REG +
+ HAL_WBM_SCATTERED_DESC_PTR_TAIL_INFO_IX1,
+ FIELD_PREP(
+ HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32,
+ ((u64)sbuf[0].paddr >> HAL_ADDR_MSB_REG_SHIFT)) |
+ FIELD_PREP(HAL_WBM_SCATTERED_DESC_TAIL_P_OFFSET_IX1,
+ 0));
+ ath11k_hif_write32(ab,
+ HAL_SEQ_WCSS_UMAC_WBM_REG +
+ HAL_WBM_SCATTERED_DESC_PTR_HP_ADDR,
+ 2 * tot_link_desc);
+
+ /* Enable the SRNG */
+ ath11k_hif_write32(ab,
+ HAL_SEQ_WCSS_UMAC_WBM_REG +
+ HAL_WBM_IDLE_LINK_RING_MISC_ADDR(ab), 0x40);
+}
+
+int ath11k_hal_srng_setup(struct ath11k_base *ab, enum hal_ring_type type,
+ int ring_num, int mac_id,
+ struct hal_srng_params *params)
+{
+ struct ath11k_hal *hal = &ab->hal;
+ struct hal_srng_config *srng_config = &ab->hal.srng_config[type];
+ struct hal_srng *srng;
+ int ring_id;
+ u32 lmac_idx;
+ int i;
+ u32 reg_base;
+
+ ring_id = ath11k_hal_srng_get_ring_id(ab, type, ring_num, mac_id);
+ if (ring_id < 0)
+ return ring_id;
+
+ srng = &hal->srng_list[ring_id];
+
+ srng->ring_id = ring_id;
+ srng->ring_dir = srng_config->ring_dir;
+ srng->ring_base_paddr = params->ring_base_paddr;
+ srng->ring_base_vaddr = params->ring_base_vaddr;
+ srng->entry_size = srng_config->entry_size;
+ srng->num_entries = params->num_entries;
+ srng->ring_size = srng->entry_size * srng->num_entries;
+ srng->intr_batch_cntr_thres_entries =
+ params->intr_batch_cntr_thres_entries;
+ srng->intr_timer_thres_us = params->intr_timer_thres_us;
+ srng->flags = params->flags;
+ srng->msi_addr = params->msi_addr;
+ srng->msi_data = params->msi_data;
+ srng->initialized = 1;
+ spin_lock_init(&srng->lock);
+ lockdep_set_class(&srng->lock, hal->srng_key + ring_id);
+
+ for (i = 0; i < HAL_SRNG_NUM_REG_GRP; i++) {
+ srng->hwreg_base[i] = srng_config->reg_start[i] +
+ (ring_num * srng_config->reg_size[i]);
+ }
+
+ memset(srng->ring_base_vaddr, 0,
+ (srng->entry_size * srng->num_entries) << 2);
+
+ /* TODO: Add comments on these swap configurations */
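+	/* Presumably these flags make HW byte-swap the TLV data, MSI value
+	 * and ring pointers so that a big-endian host reads them natively.
+	 */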
+ if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
+ srng->flags |= HAL_SRNG_FLAGS_MSI_SWAP | HAL_SRNG_FLAGS_DATA_TLV_SWAP |
+ HAL_SRNG_FLAGS_RING_PTR_SWAP;
+
+ reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R2];
+
+ if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
+ srng->u.src_ring.hp = 0;
+ srng->u.src_ring.cached_tp = 0;
+ srng->u.src_ring.reap_hp = srng->ring_size - srng->entry_size;
+ srng->u.src_ring.tp_addr = (void *)(hal->rdp.vaddr + ring_id);
+ srng->u.src_ring.low_threshold = params->low_threshold *
+ srng->entry_size;
+ if (srng_config->lmac_ring) {
+ lmac_idx = ring_id - HAL_SRNG_RING_ID_LMAC1_ID_START;
+ srng->u.src_ring.hp_addr = (void *)(hal->wrp.vaddr +
+ lmac_idx);
+ srng->flags |= HAL_SRNG_FLAGS_LMAC_RING;
+ } else {
+ if (!ab->hw_params.supports_shadow_regs)
+ srng->u.src_ring.hp_addr =
+ (u32 *)((unsigned long)ab->mem + reg_base);
+ else
+ ath11k_dbg(ab, ATH11K_DBG_HAL,
+ "type %d ring_num %d reg_base 0x%x shadow 0x%lx\n",
+ type, ring_num,
+ reg_base,
+ (unsigned long)srng->u.src_ring.hp_addr -
+ (unsigned long)ab->mem);
+ }
+ } else {
+		/* During initialization the loop count in all the descriptors
+		 * is set to zero. HW sets it to 1 on completing the descriptor
+		 * update in the first loop and increments it by 1 on each
+		 * subsequent loop (the loop count wraps around after reaching
+		 * 0xffff). The 'loop_cnt' in the SW ring state is the expected
+		 * loop count in descriptors updated by HW (to be processed
+		 * by SW).
+		 */
+ srng->u.dst_ring.loop_cnt = 1;
+ srng->u.dst_ring.tp = 0;
+ srng->u.dst_ring.cached_hp = 0;
+ srng->u.dst_ring.hp_addr = (void *)(hal->rdp.vaddr + ring_id);
+ if (srng_config->lmac_ring) {
+ /* For LMAC rings, tail pointer updates will be done
+ * through FW by writing to a shared memory location
+ */
+ lmac_idx = ring_id - HAL_SRNG_RING_ID_LMAC1_ID_START;
+ srng->u.dst_ring.tp_addr = (void *)(hal->wrp.vaddr +
+ lmac_idx);
+ srng->flags |= HAL_SRNG_FLAGS_LMAC_RING;
+ } else {
+ if (!ab->hw_params.supports_shadow_regs)
+ srng->u.dst_ring.tp_addr =
+ (u32 *)((unsigned long)ab->mem + reg_base +
+ (HAL_REO1_RING_TP(ab) - HAL_REO1_RING_HP(ab)));
+ else
+ ath11k_dbg(ab, ATH11K_DBG_HAL,
+ "type %d ring_num %d target_reg 0x%x shadow 0x%lx\n",
+ type, ring_num,
+ reg_base + (HAL_REO1_RING_TP(ab) -
+ HAL_REO1_RING_HP(ab)),
+ (unsigned long)srng->u.dst_ring.tp_addr -
+ (unsigned long)ab->mem);
+ }
+ }
+
+ if (srng_config->lmac_ring)
+ return ring_id;
+
+ ath11k_hal_srng_hw_init(ab, srng);
+
+ if (type == HAL_CE_DST) {
+ srng->u.dst_ring.max_buffer_length = params->max_buffer_len;
+ ath11k_hal_ce_dst_setup(ab, srng, ring_num);
+ }
+
+ return ring_id;
+}
+
+static void ath11k_hal_srng_update_hp_tp_addr(struct ath11k_base *ab,
+ int shadow_cfg_idx,
+ enum hal_ring_type ring_type,
+ int ring_num)
+{
+ struct hal_srng *srng;
+ struct ath11k_hal *hal = &ab->hal;
+ int ring_id;
+ struct hal_srng_config *srng_config = &hal->srng_config[ring_type];
+
+ ring_id = ath11k_hal_srng_get_ring_id(ab, ring_type, ring_num, 0);
+ if (ring_id < 0)
+ return;
+
+ srng = &hal->srng_list[ring_id];
+
+ if (srng_config->ring_dir == HAL_SRNG_DIR_DST)
+ srng->u.dst_ring.tp_addr = (u32 *)(HAL_SHADOW_REG(ab, shadow_cfg_idx) +
+ (unsigned long)ab->mem);
+ else
+ srng->u.src_ring.hp_addr = (u32 *)(HAL_SHADOW_REG(ab, shadow_cfg_idx) +
+ (unsigned long)ab->mem);
+}
+
+int ath11k_hal_srng_update_shadow_config(struct ath11k_base *ab,
+ enum hal_ring_type ring_type,
+ int ring_num)
+{
+ struct ath11k_hal *hal = &ab->hal;
+ struct hal_srng_config *srng_config = &hal->srng_config[ring_type];
+ int shadow_cfg_idx = hal->num_shadow_reg_configured;
+ u32 target_reg;
+
+ if (shadow_cfg_idx >= HAL_SHADOW_NUM_REGS)
+ return -EINVAL;
+
+ hal->num_shadow_reg_configured++;
+
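+	/* map this ring's HP register (TP for destination rings, see
+	 * below) to the next free shadow register slot
+	 */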
+ target_reg = srng_config->reg_start[HAL_HP_OFFSET_IN_REG_START];
+ target_reg += srng_config->reg_size[HAL_HP_OFFSET_IN_REG_START] *
+ ring_num;
+
+ /* For destination ring, shadow the TP */
+ if (srng_config->ring_dir == HAL_SRNG_DIR_DST)
+ target_reg += HAL_OFFSET_FROM_HP_TO_TP;
+
+ hal->shadow_reg_addr[shadow_cfg_idx] = target_reg;
+
+	/* update the hp/tp addr in the hal structure */
+ ath11k_hal_srng_update_hp_tp_addr(ab, shadow_cfg_idx, ring_type,
+ ring_num);
+
+ ath11k_dbg(ab, ATH11K_DBG_HAL,
+ "update shadow config target_reg %x shadow reg 0x%x shadow_idx 0x%x ring_type %d ring num %d",
+ target_reg,
+ HAL_SHADOW_REG(ab, shadow_cfg_idx),
+ shadow_cfg_idx,
+ ring_type, ring_num);
+
+ return 0;
+}
+
+void ath11k_hal_srng_shadow_config(struct ath11k_base *ab)
+{
+ struct ath11k_hal *hal = &ab->hal;
+ int ring_type, ring_num;
+
+ /* update all the non-CE srngs. */
+ for (ring_type = 0; ring_type < HAL_MAX_RING_TYPES; ring_type++) {
+ struct hal_srng_config *srng_config = &hal->srng_config[ring_type];
+
+ if (ring_type == HAL_CE_SRC ||
+ ring_type == HAL_CE_DST ||
+ ring_type == HAL_CE_DST_STATUS)
+ continue;
+
+ if (srng_config->lmac_ring)
+ continue;
+
+ for (ring_num = 0; ring_num < srng_config->max_rings; ring_num++)
+ ath11k_hal_srng_update_shadow_config(ab, ring_type, ring_num);
+ }
+}
+
+void ath11k_hal_srng_get_shadow_config(struct ath11k_base *ab,
+ u32 **cfg, u32 *len)
+{
+ struct ath11k_hal *hal = &ab->hal;
+
+ *len = hal->num_shadow_reg_configured;
+ *cfg = hal->shadow_reg_addr;
+}
+
+void ath11k_hal_srng_shadow_update_hp_tp(struct ath11k_base *ab,
+ struct hal_srng *srng)
+{
+ lockdep_assert_held(&srng->lock);
+
+	/* Check whether the ring is empty. Update the shadow
+	 * HP only when the ring isn't empty.
+	 */
+ if (srng->ring_dir == HAL_SRNG_DIR_SRC &&
+ *srng->u.src_ring.tp_addr != srng->u.src_ring.hp)
+ ath11k_hal_srng_access_end(ab, srng);
+}
+
+static int ath11k_hal_srng_create_config(struct ath11k_base *ab)
+{
+ struct ath11k_hal *hal = &ab->hal;
+ struct hal_srng_config *s;
+
+ hal->srng_config = kmemdup(hw_srng_config_template,
+ sizeof(hw_srng_config_template),
+ GFP_KERNEL);
+ if (!hal->srng_config)
+ return -ENOMEM;
+
+ s = &hal->srng_config[HAL_REO_DST];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_RING_BASE_LSB(ab);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_RING_HP(ab);
+ s->reg_size[0] = HAL_REO2_RING_BASE_LSB(ab) - HAL_REO1_RING_BASE_LSB(ab);
+ s->reg_size[1] = HAL_REO2_RING_HP(ab) - HAL_REO1_RING_HP(ab);
+
+ s = &hal->srng_config[HAL_REO_EXCEPTION];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_TCL_RING_BASE_LSB(ab);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_TCL_RING_HP(ab);
+
+ s = &hal->srng_config[HAL_REO_REINJECT];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_SW2REO_RING_BASE_LSB(ab);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_SW2REO_RING_HP(ab);
+
+ s = &hal->srng_config[HAL_REO_CMD];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_CMD_RING_BASE_LSB(ab);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_CMD_HP(ab);
+
+ s = &hal->srng_config[HAL_REO_STATUS];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_STATUS_RING_BASE_LSB(ab);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_STATUS_HP(ab);
+
+ s = &hal->srng_config[HAL_TCL_DATA];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_BASE_LSB(ab);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_HP;
+ s->reg_size[0] = HAL_TCL2_RING_BASE_LSB(ab) - HAL_TCL1_RING_BASE_LSB(ab);
+ s->reg_size[1] = HAL_TCL2_RING_HP - HAL_TCL1_RING_HP;
+
+ s = &hal->srng_config[HAL_TCL_CMD];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_RING_BASE_LSB(ab);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_RING_HP;
+
+ s = &hal->srng_config[HAL_TCL_STATUS];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_STATUS_RING_BASE_LSB(ab);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_STATUS_RING_HP;
+
+ s = &hal->srng_config[HAL_CE_SRC];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab) + HAL_CE_DST_RING_BASE_LSB +
+ ATH11K_CE_OFFSET(ab);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab) + HAL_CE_DST_RING_HP +
+ ATH11K_CE_OFFSET(ab);
+ s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG(ab) -
+ HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab);
+ s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG(ab) -
+ HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab);
+
+ s = &hal->srng_config[HAL_CE_DST];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_RING_BASE_LSB +
+ ATH11K_CE_OFFSET(ab);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_RING_HP +
+ ATH11K_CE_OFFSET(ab);
+ s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) -
+ HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab);
+ s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) -
+ HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab);
+
+ s = &hal->srng_config[HAL_CE_DST_STATUS];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) +
+ HAL_CE_DST_STATUS_RING_BASE_LSB + ATH11K_CE_OFFSET(ab);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_STATUS_RING_HP +
+ ATH11K_CE_OFFSET(ab);
+ s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) -
+ HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab);
+ s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) -
+ HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab);
+
+ s = &hal->srng_config[HAL_WBM_IDLE_LINK];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_IDLE_LINK_RING_BASE_LSB(ab);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_IDLE_LINK_RING_HP;
+
+ s = &hal->srng_config[HAL_SW2WBM_RELEASE];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_RELEASE_RING_BASE_LSB(ab);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_RELEASE_RING_HP;
+
+ s = &hal->srng_config[HAL_WBM2SW_RELEASE];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM0_RELEASE_RING_BASE_LSB(ab);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM0_RELEASE_RING_HP;
+ s->reg_size[0] = HAL_WBM1_RELEASE_RING_BASE_LSB(ab) -
+ HAL_WBM0_RELEASE_RING_BASE_LSB(ab);
+ s->reg_size[1] = HAL_WBM1_RELEASE_RING_HP - HAL_WBM0_RELEASE_RING_HP;
+
+ return 0;
+}
+
+static void ath11k_hal_register_srng_key(struct ath11k_base *ab)
+{
+ struct ath11k_hal *hal = &ab->hal;
+ u32 ring_id;
+
+ for (ring_id = 0; ring_id < HAL_SRNG_RING_ID_MAX; ring_id++)
+ lockdep_register_key(hal->srng_key + ring_id);
+}
+
+static void ath11k_hal_unregister_srng_key(struct ath11k_base *ab)
+{
+ struct ath11k_hal *hal = &ab->hal;
+ u32 ring_id;
+
+ for (ring_id = 0; ring_id < HAL_SRNG_RING_ID_MAX; ring_id++)
+ lockdep_unregister_key(hal->srng_key + ring_id);
+}
+
+int ath11k_hal_srng_init(struct ath11k_base *ab)
+{
+ struct ath11k_hal *hal = &ab->hal;
+ int ret;
+
+ memset(hal, 0, sizeof(*hal));
+
+ ret = ath11k_hal_srng_create_config(ab);
+ if (ret)
+ goto err_hal;
+
+ ret = ath11k_hal_alloc_cont_rdp(ab);
+ if (ret)
+ goto err_hal;
+
+ ret = ath11k_hal_alloc_cont_wrp(ab);
+ if (ret)
+ goto err_free_cont_rdp;
+
+ ath11k_hal_register_srng_key(ab);
+
+ return 0;
+
+err_free_cont_rdp:
+ ath11k_hal_free_cont_rdp(ab);
+
+err_hal:
+ return ret;
+}
+EXPORT_SYMBOL(ath11k_hal_srng_init);
+
+void ath11k_hal_srng_deinit(struct ath11k_base *ab)
+{
+ struct ath11k_hal *hal = &ab->hal;
+ int i;
+
+ for (i = 0; i < HAL_SRNG_RING_ID_MAX; i++)
+ ab->hal.srng_list[i].initialized = 0;
+
+ ath11k_hal_unregister_srng_key(ab);
+ ath11k_hal_free_cont_rdp(ab);
+ ath11k_hal_free_cont_wrp(ab);
+ kfree(hal->srng_config);
+ hal->srng_config = NULL;
+}
+EXPORT_SYMBOL(ath11k_hal_srng_deinit);
+
+void ath11k_hal_srng_clear(struct ath11k_base *ab)
+{
+ /* No need to memset rdp and wrp memory since each individual
+ * segment would get cleared in ath11k_hal_srng_src_hw_init()
+ * and ath11k_hal_srng_dst_hw_init().
+ */
+ memset(ab->hal.srng_list, 0,
+ sizeof(ab->hal.srng_list));
+ memset(ab->hal.shadow_reg_addr, 0,
+ sizeof(ab->hal.shadow_reg_addr));
+ ab->hal.avail_blk_resource = 0;
+ ab->hal.current_blk_index = 0;
+ ab->hal.num_shadow_reg_configured = 0;
+}
+EXPORT_SYMBOL(ath11k_hal_srng_clear);
+
+void ath11k_hal_dump_srng_stats(struct ath11k_base *ab)
+{
+ struct hal_srng *srng;
+ struct ath11k_ext_irq_grp *irq_grp;
+ struct ath11k_ce_pipe *ce_pipe;
+ int i;
+
+ ath11k_err(ab, "Last interrupt received for each CE:\n");
+ for (i = 0; i < ab->hw_params.ce_count; i++) {
+ ce_pipe = &ab->ce.ce_pipe[i];
+
+ if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
+ continue;
+
+ ath11k_err(ab, "CE_id %d pipe_num %d %ums before\n",
+ i, ce_pipe->pipe_num,
+ jiffies_to_msecs(jiffies - ce_pipe->timestamp));
+ }
+
+ ath11k_err(ab, "\nLast interrupt received for each group:\n");
+ for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
+ irq_grp = &ab->ext_irq_grp[i];
+ ath11k_err(ab, "group_id %d %ums before\n",
+ irq_grp->grp_id,
+ jiffies_to_msecs(jiffies - irq_grp->timestamp));
+ }
+
+ for (i = 0; i < HAL_SRNG_RING_ID_MAX; i++) {
+ srng = &ab->hal.srng_list[i];
+
+ if (!srng->initialized)
+ continue;
+
+ if (srng->ring_dir == HAL_SRNG_DIR_SRC)
+ ath11k_err(ab,
+ "src srng id %u hp %u, reap_hp %u, cur tp %u, cached tp %u last tp %u napi processed before %ums\n",
+ srng->ring_id, srng->u.src_ring.hp,
+ srng->u.src_ring.reap_hp,
+ *srng->u.src_ring.tp_addr, srng->u.src_ring.cached_tp,
+ srng->u.src_ring.last_tp,
+ jiffies_to_msecs(jiffies - srng->timestamp));
+ else if (srng->ring_dir == HAL_SRNG_DIR_DST)
+ ath11k_err(ab,
+ "dst srng id %u tp %u, cur hp %u, cached hp %u last hp %u napi processed before %ums\n",
+ srng->ring_id, srng->u.dst_ring.tp,
+ *srng->u.dst_ring.hp_addr,
+ srng->u.dst_ring.cached_hp,
+ srng->u.dst_ring.last_hp,
+ jiffies_to_msecs(jiffies - srng->timestamp));
+ }
+}
diff --git a/drivers/net/wireless/ath/ath11k/hal.h b/drivers/net/wireless/ath/ath11k/hal.h
new file mode 100644
index 000000000000..82603a389bb9
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/hal.h
@@ -0,0 +1,978 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#ifndef ATH11K_HAL_H
+#define ATH11K_HAL_H
+
+#include "hal_desc.h"
+#include "rx_desc.h"
+
+struct ath11k_base;
+
+#define HAL_LINK_DESC_SIZE (32 << 2)
+#define HAL_LINK_DESC_ALIGN 128
+#define HAL_NUM_MPDUS_PER_LINK_DESC 6
+#define HAL_NUM_TX_MSDUS_PER_LINK_DESC 7
+#define HAL_NUM_RX_MSDUS_PER_LINK_DESC 6
+#define HAL_NUM_MPDU_LINKS_PER_QUEUE_DESC 12
+#define HAL_MAX_AVAIL_BLK_RES 3
+
+#define HAL_RING_BASE_ALIGN 8
+
+#define HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX 32704
+/* TODO: Check with hw team on the supported scatter buf size */
+#define HAL_WBM_IDLE_SCATTER_NEXT_PTR_SIZE 8
+#define HAL_WBM_IDLE_SCATTER_BUF_SIZE (HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX - \
+ HAL_WBM_IDLE_SCATTER_NEXT_PTR_SIZE)
+
+#define HAL_DSCP_TID_MAP_TBL_NUM_ENTRIES_MAX 48
+#define HAL_DSCP_TID_TBL_SIZE 24
+
+/* calculate the address of shadow register x relative to BAR0 */
+#define HAL_SHADOW_BASE_ADDR(ab) ab->hw_params.regs->hal_shadow_base_addr
+#define HAL_SHADOW_NUM_REGS 36
+#define HAL_HP_OFFSET_IN_REG_START 1
+#define HAL_OFFSET_FROM_HP_TO_TP 4
+
+#define HAL_SHADOW_REG(ab, x) (HAL_SHADOW_BASE_ADDR(ab) + (4 * (x)))
+
+/* WCSS Relative address */
+#define HAL_SEQ_WCSS_UMAC_OFFSET 0x00a00000
+#define HAL_SEQ_WCSS_UMAC_REO_REG 0x00a38000
+#define HAL_SEQ_WCSS_UMAC_TCL_REG 0x00a44000
+#define HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab) \
+ ((ab)->hw_params.regs->hal_seq_wcss_umac_ce0_src_reg)
+#define HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) \
+ ((ab)->hw_params.regs->hal_seq_wcss_umac_ce0_dst_reg)
+#define HAL_SEQ_WCSS_UMAC_CE1_SRC_REG(ab) \
+ ((ab)->hw_params.regs->hal_seq_wcss_umac_ce1_src_reg)
+#define HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) \
+ ((ab)->hw_params.regs->hal_seq_wcss_umac_ce1_dst_reg)
+#define HAL_SEQ_WCSS_UMAC_WBM_REG 0x00a34000
+
+#define HAL_CE_WFSS_CE_REG_BASE 0x01b80000
+#define HAL_WLAON_REG_BASE 0x01f80000
+
+/* SW2TCL(x) R0 ring configuration address */
+#define HAL_TCL1_RING_CMN_CTRL_REG 0x00000014
+#define HAL_TCL1_RING_DSCP_TID_MAP 0x0000002c
+#define HAL_TCL1_RING_BASE_LSB(ab) ab->hw_params.regs->hal_tcl1_ring_base_lsb
+#define HAL_TCL1_RING_BASE_MSB(ab) ab->hw_params.regs->hal_tcl1_ring_base_msb
+#define HAL_TCL1_RING_ID(ab) ab->hw_params.regs->hal_tcl1_ring_id
+#define HAL_TCL1_RING_MISC(ab) ab->hw_params.regs->hal_tcl1_ring_misc
+#define HAL_TCL1_RING_TP_ADDR_LSB(ab) \
+ ab->hw_params.regs->hal_tcl1_ring_tp_addr_lsb
+#define HAL_TCL1_RING_TP_ADDR_MSB(ab) \
+ ab->hw_params.regs->hal_tcl1_ring_tp_addr_msb
+#define HAL_TCL1_RING_CONSUMER_INT_SETUP_IX0(ab) \
+ ab->hw_params.regs->hal_tcl1_ring_consumer_int_setup_ix0
+#define HAL_TCL1_RING_CONSUMER_INT_SETUP_IX1(ab) \
+ ab->hw_params.regs->hal_tcl1_ring_consumer_int_setup_ix1
+#define HAL_TCL1_RING_MSI1_BASE_LSB(ab) \
+ ab->hw_params.regs->hal_tcl1_ring_msi1_base_lsb
+#define HAL_TCL1_RING_MSI1_BASE_MSB(ab) \
+ ab->hw_params.regs->hal_tcl1_ring_msi1_base_msb
+#define HAL_TCL1_RING_MSI1_DATA(ab) \
+ ab->hw_params.regs->hal_tcl1_ring_msi1_data
+#define HAL_TCL2_RING_BASE_LSB(ab) ab->hw_params.regs->hal_tcl2_ring_base_lsb
+#define HAL_TCL_RING_BASE_LSB(ab) ab->hw_params.regs->hal_tcl_ring_base_lsb
+
+#define HAL_TCL1_RING_MSI1_BASE_LSB_OFFSET(ab) \
+ (HAL_TCL1_RING_MSI1_BASE_LSB(ab) - HAL_TCL1_RING_BASE_LSB(ab))
+#define HAL_TCL1_RING_MSI1_BASE_MSB_OFFSET(ab) \
+ (HAL_TCL1_RING_MSI1_BASE_MSB(ab) - HAL_TCL1_RING_BASE_LSB(ab))
+#define HAL_TCL1_RING_MSI1_DATA_OFFSET(ab) \
+ (HAL_TCL1_RING_MSI1_DATA(ab) - HAL_TCL1_RING_BASE_LSB(ab))
+#define HAL_TCL1_RING_BASE_MSB_OFFSET(ab) \
+ (HAL_TCL1_RING_BASE_MSB(ab) - HAL_TCL1_RING_BASE_LSB(ab))
+#define HAL_TCL1_RING_ID_OFFSET(ab) \
+ (HAL_TCL1_RING_ID(ab) - HAL_TCL1_RING_BASE_LSB(ab))
+#define HAL_TCL1_RING_CONSR_INT_SETUP_IX0_OFFSET(ab) \
+ (HAL_TCL1_RING_CONSUMER_INT_SETUP_IX0(ab) - HAL_TCL1_RING_BASE_LSB(ab))
+#define HAL_TCL1_RING_CONSR_INT_SETUP_IX1_OFFSET(ab) \
+ (HAL_TCL1_RING_CONSUMER_INT_SETUP_IX1(ab) - HAL_TCL1_RING_BASE_LSB(ab))
+#define HAL_TCL1_RING_TP_ADDR_LSB_OFFSET(ab) \
+ (HAL_TCL1_RING_TP_ADDR_LSB(ab) - HAL_TCL1_RING_BASE_LSB(ab))
+#define HAL_TCL1_RING_TP_ADDR_MSB_OFFSET(ab) \
+ (HAL_TCL1_RING_TP_ADDR_MSB(ab) - HAL_TCL1_RING_BASE_LSB(ab))
+#define HAL_TCL1_RING_MISC_OFFSET(ab) \
+ (HAL_TCL1_RING_MISC(ab) - HAL_TCL1_RING_BASE_LSB(ab))
+
+/* SW2TCL(x) R2 ring pointers (head/tail) address */
+#define HAL_TCL1_RING_HP 0x00002000
+#define HAL_TCL1_RING_TP 0x00002004
+#define HAL_TCL2_RING_HP 0x00002008
+#define HAL_TCL_RING_HP 0x00002018
+
+#define HAL_TCL1_RING_TP_OFFSET \
+ (HAL_TCL1_RING_TP - HAL_TCL1_RING_HP)
+
+/* TCL STATUS ring address */
+#define HAL_TCL_STATUS_RING_BASE_LSB(ab) \
+ ab->hw_params.regs->hal_tcl_status_ring_base_lsb
+#define HAL_TCL_STATUS_RING_HP 0x00002030
+
+/* REO2SW(x) R0 ring configuration address */
+#define HAL_REO1_GEN_ENABLE 0x00000000
+#define HAL_REO1_DEST_RING_CTRL_IX_0 0x00000004
+#define HAL_REO1_DEST_RING_CTRL_IX_1 0x00000008
+#define HAL_REO1_DEST_RING_CTRL_IX_2 0x0000000c
+#define HAL_REO1_DEST_RING_CTRL_IX_3 0x00000010
+#define HAL_REO1_MISC_CTL(ab) ab->hw_params.regs->hal_reo1_misc_ctl
+#define HAL_REO1_RING_BASE_LSB(ab) ab->hw_params.regs->hal_reo1_ring_base_lsb
+#define HAL_REO1_RING_BASE_MSB(ab) ab->hw_params.regs->hal_reo1_ring_base_msb
+#define HAL_REO1_RING_ID(ab) ab->hw_params.regs->hal_reo1_ring_id
+#define HAL_REO1_RING_MISC(ab) ab->hw_params.regs->hal_reo1_ring_misc
+#define HAL_REO1_RING_HP_ADDR_LSB(ab) \
+ ab->hw_params.regs->hal_reo1_ring_hp_addr_lsb
+#define HAL_REO1_RING_HP_ADDR_MSB(ab) \
+ ab->hw_params.regs->hal_reo1_ring_hp_addr_msb
+#define HAL_REO1_RING_PRODUCER_INT_SETUP(ab) \
+ ab->hw_params.regs->hal_reo1_ring_producer_int_setup
+#define HAL_REO1_RING_MSI1_BASE_LSB(ab) \
+ ab->hw_params.regs->hal_reo1_ring_msi1_base_lsb
+#define HAL_REO1_RING_MSI1_BASE_MSB(ab) \
+ ab->hw_params.regs->hal_reo1_ring_msi1_base_msb
+#define HAL_REO1_RING_MSI1_DATA(ab) \
+ ab->hw_params.regs->hal_reo1_ring_msi1_data
+#define HAL_REO2_RING_BASE_LSB(ab) ab->hw_params.regs->hal_reo2_ring_base_lsb
+#define HAL_REO1_AGING_THRESH_IX_0(ab) \
+ ab->hw_params.regs->hal_reo1_aging_thresh_ix_0
+#define HAL_REO1_AGING_THRESH_IX_1(ab) \
+ ab->hw_params.regs->hal_reo1_aging_thresh_ix_1
+#define HAL_REO1_AGING_THRESH_IX_2(ab) \
+ ab->hw_params.regs->hal_reo1_aging_thresh_ix_2
+#define HAL_REO1_AGING_THRESH_IX_3(ab) \
+ ab->hw_params.regs->hal_reo1_aging_thresh_ix_3
+
+#define HAL_REO1_RING_MSI1_BASE_LSB_OFFSET(ab) \
+ (HAL_REO1_RING_MSI1_BASE_LSB(ab) - HAL_REO1_RING_BASE_LSB(ab))
+#define HAL_REO1_RING_MSI1_BASE_MSB_OFFSET(ab) \
+ (HAL_REO1_RING_MSI1_BASE_MSB(ab) - HAL_REO1_RING_BASE_LSB(ab))
+#define HAL_REO1_RING_MSI1_DATA_OFFSET(ab) \
+ (HAL_REO1_RING_MSI1_DATA(ab) - HAL_REO1_RING_BASE_LSB(ab))
+#define HAL_REO1_RING_BASE_MSB_OFFSET(ab) \
+ (HAL_REO1_RING_BASE_MSB(ab) - HAL_REO1_RING_BASE_LSB(ab))
+#define HAL_REO1_RING_ID_OFFSET(ab) (HAL_REO1_RING_ID(ab) - HAL_REO1_RING_BASE_LSB(ab))
+#define HAL_REO1_RING_PRODUCER_INT_SETUP_OFFSET(ab) \
+ (HAL_REO1_RING_PRODUCER_INT_SETUP(ab) - HAL_REO1_RING_BASE_LSB(ab))
+#define HAL_REO1_RING_HP_ADDR_LSB_OFFSET(ab) \
+ (HAL_REO1_RING_HP_ADDR_LSB(ab) - HAL_REO1_RING_BASE_LSB(ab))
+#define HAL_REO1_RING_HP_ADDR_MSB_OFFSET(ab) \
+ (HAL_REO1_RING_HP_ADDR_MSB(ab) - HAL_REO1_RING_BASE_LSB(ab))
+#define HAL_REO1_RING_MISC_OFFSET(ab) \
+ (HAL_REO1_RING_MISC(ab) - HAL_REO1_RING_BASE_LSB(ab))
+
+/* REO2SW(x) R2 ring pointers (head/tail) address */
+#define HAL_REO1_RING_HP(ab) ab->hw_params.regs->hal_reo1_ring_hp
+#define HAL_REO1_RING_TP(ab) ab->hw_params.regs->hal_reo1_ring_tp
+#define HAL_REO2_RING_HP(ab) ab->hw_params.regs->hal_reo2_ring_hp
+
+#define HAL_REO1_RING_TP_OFFSET(ab) (HAL_REO1_RING_TP(ab) - HAL_REO1_RING_HP(ab))
+
+/* REO2TCL R0 ring configuration address */
+#define HAL_REO_TCL_RING_BASE_LSB(ab) \
+ ab->hw_params.regs->hal_reo_tcl_ring_base_lsb
+
+/* REO2TCL R2 ring pointer (head/tail) address */
+#define HAL_REO_TCL_RING_HP(ab) ab->hw_params.regs->hal_reo_tcl_ring_hp
+
+/* REO CMD R0 address */
+#define HAL_REO_CMD_RING_BASE_LSB(ab) \
+ ab->hw_params.regs->hal_reo_cmd_ring_base_lsb
+
+/* REO CMD R2 address */
+#define HAL_REO_CMD_HP(ab) ab->hw_params.regs->hal_reo_cmd_ring_hp
+
+/* SW2REO R0 address */
+#define HAL_SW2REO_RING_BASE_LSB(ab) \
+ ab->hw_params.regs->hal_sw2reo_ring_base_lsb
+
+/* SW2REO R2 address */
+#define HAL_SW2REO_RING_HP(ab) ab->hw_params.regs->hal_sw2reo_ring_hp
+
+/* CE ring R0 address */
+#define HAL_CE_DST_RING_BASE_LSB 0x00000000
+#define HAL_CE_DST_STATUS_RING_BASE_LSB 0x00000058
+#define HAL_CE_DST_RING_CTRL 0x000000b0
+
+/* CE ring R2 address */
+#define HAL_CE_DST_RING_HP 0x00000400
+#define HAL_CE_DST_STATUS_RING_HP 0x00000408
+
+/* REO status address */
+#define HAL_REO_STATUS_RING_BASE_LSB(ab) \
+ ab->hw_params.regs->hal_reo_status_ring_base_lsb
+#define HAL_REO_STATUS_HP(ab) ab->hw_params.regs->hal_reo_status_hp
+
+/* WBM Idle R0 address */
+#define HAL_WBM_IDLE_LINK_RING_BASE_LSB(ab) \
+ ((ab)->hw_params.regs->hal_wbm_idle_link_ring_base_lsb)
+#define HAL_WBM_IDLE_LINK_RING_MISC_ADDR(ab) \
+ ((ab)->hw_params.regs->hal_wbm_idle_link_ring_misc)
+#define HAL_WBM_R0_IDLE_LIST_CONTROL_ADDR 0x00000048
+#define HAL_WBM_R0_IDLE_LIST_SIZE_ADDR 0x0000004c
+#define HAL_WBM_SCATTERED_RING_BASE_LSB 0x00000058
+#define HAL_WBM_SCATTERED_RING_BASE_MSB 0x0000005c
+#define HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX0 0x00000068
+#define HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX1 0x0000006c
+#define HAL_WBM_SCATTERED_DESC_PTR_TAIL_INFO_IX0 0x00000078
+#define HAL_WBM_SCATTERED_DESC_PTR_TAIL_INFO_IX1 0x0000007c
+#define HAL_WBM_SCATTERED_DESC_PTR_HP_ADDR 0x00000084
+
+/* WBM Idle R2 address */
+#define HAL_WBM_IDLE_LINK_RING_HP 0x000030b0
+
+/* SW2WBM R0 release address */
+#define HAL_WBM_RELEASE_RING_BASE_LSB(ab) \
+ ((ab)->hw_params.regs->hal_wbm_release_ring_base_lsb)
+
+/* SW2WBM R2 release address */
+#define HAL_WBM_RELEASE_RING_HP 0x00003018
+
+/* WBM2SW R0 release address */
+#define HAL_WBM0_RELEASE_RING_BASE_LSB(ab) \
+ ((ab)->hw_params.regs->hal_wbm0_release_ring_base_lsb)
+#define HAL_WBM1_RELEASE_RING_BASE_LSB(ab) \
+ ((ab)->hw_params.regs->hal_wbm1_release_ring_base_lsb)
+
+/* WBM2SW R2 release address */
+#define HAL_WBM0_RELEASE_RING_HP 0x000030c0
+#define HAL_WBM1_RELEASE_RING_HP 0x000030c8
+
+/* TCL ring field mask and offset */
+#define HAL_TCL1_RING_BASE_MSB_RING_SIZE GENMASK(27, 8)
+#define HAL_TCL1_RING_BASE_MSB_RING_BASE_ADDR_MSB GENMASK(7, 0)
+#define HAL_TCL1_RING_ID_ENTRY_SIZE GENMASK(7, 0)
+#define HAL_TCL1_RING_MISC_MSI_LOOPCNT_DISABLE BIT(1)
+#define HAL_TCL1_RING_MISC_MSI_SWAP BIT(3)
+#define HAL_TCL1_RING_MISC_HOST_FW_SWAP BIT(4)
+#define HAL_TCL1_RING_MISC_DATA_TLV_SWAP BIT(5)
+#define HAL_TCL1_RING_MISC_SRNG_ENABLE BIT(6)
+#define HAL_TCL1_RING_CONSR_INT_SETUP_IX0_INTR_TMR_THOLD GENMASK(31, 16)
+#define HAL_TCL1_RING_CONSR_INT_SETUP_IX0_BATCH_COUNTER_THOLD GENMASK(14, 0)
+#define HAL_TCL1_RING_CONSR_INT_SETUP_IX1_LOW_THOLD GENMASK(15, 0)
+#define HAL_TCL1_RING_MSI1_BASE_MSB_MSI1_ENABLE BIT(8)
+#define HAL_TCL1_RING_MSI1_BASE_MSB_ADDR GENMASK(7, 0)
+#define HAL_TCL1_RING_CMN_CTRL_DSCP_TID_MAP_PROG_EN BIT(17)
+#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP GENMASK(31, 0)
+#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP0 GENMASK(2, 0)
+#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP1 GENMASK(5, 3)
+#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP2 GENMASK(8, 6)
+#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP3 GENMASK(11, 9)
+#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP4 GENMASK(14, 12)
+#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP5 GENMASK(17, 15)
+#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP6 GENMASK(20, 18)
+#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP7 GENMASK(23, 21)
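+
+/* Illustrative sketch, not part of the driver: the DSCP_TID_MAP fields
+ * above pack eight 3-bit TID values into one 32-bit map word, which can
+ * be assembled with FIELD_PREP from <linux/bitfield.h>, e.g.:
+ *
+ *	val = FIELD_PREP(HAL_TCL1_RING_FIELD_DSCP_TID_MAP0, tid0) |
+ *	      FIELD_PREP(HAL_TCL1_RING_FIELD_DSCP_TID_MAP1, tid1);
+ */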
+
+/* REO ring field mask and offset */
+#define HAL_REO1_RING_BASE_MSB_RING_SIZE GENMASK(27, 8)
+#define HAL_REO1_RING_BASE_MSB_RING_BASE_ADDR_MSB GENMASK(7, 0)
+#define HAL_REO1_RING_ID_RING_ID GENMASK(15, 8)
+#define HAL_REO1_RING_ID_ENTRY_SIZE GENMASK(7, 0)
+#define HAL_REO1_RING_MISC_MSI_SWAP BIT(3)
+#define HAL_REO1_RING_MISC_HOST_FW_SWAP BIT(4)
+#define HAL_REO1_RING_MISC_DATA_TLV_SWAP BIT(5)
+#define HAL_REO1_RING_MISC_SRNG_ENABLE BIT(6)
+#define HAL_REO1_RING_PRDR_INT_SETUP_INTR_TMR_THOLD GENMASK(31, 16)
+#define HAL_REO1_RING_PRDR_INT_SETUP_BATCH_COUNTER_THOLD GENMASK(14, 0)
+#define HAL_REO1_RING_MSI1_BASE_MSB_MSI1_ENABLE BIT(8)
+#define HAL_REO1_RING_MSI1_BASE_MSB_ADDR GENMASK(7, 0)
+#define HAL_REO1_GEN_ENABLE_FRAG_DST_RING GENMASK(25, 23)
+#define HAL_REO1_GEN_ENABLE_AGING_LIST_ENABLE BIT(2)
+#define HAL_REO1_GEN_ENABLE_AGING_FLUSH_ENABLE BIT(3)
+#define HAL_REO1_MISC_CTL_FRAGMENT_DST_RING GENMASK(20, 17)
+
+/* CE ring bit field mask and shift */
+#define HAL_CE_DST_R0_DEST_CTRL_MAX_LEN GENMASK(15, 0)
+
+#define HAL_ADDR_LSB_REG_MASK 0xffffffff
+
+#define HAL_ADDR_MSB_REG_SHIFT 32
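+
+/* Illustrative sketch, not part of the driver: a 40-bit DMA address is
+ * split across LSB/MSB registers using the mask and shift above, e.g.:
+ *
+ *	lsb = paddr & HAL_ADDR_LSB_REG_MASK;
+ *	msb = (u64)paddr >> HAL_ADDR_MSB_REG_SHIFT;
+ */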
+
+/* WBM ring bit field mask and shift */
+#define HAL_WBM_LINK_DESC_IDLE_LIST_MODE BIT(1)
+#define HAL_WBM_SCATTER_BUFFER_SIZE GENMASK(10, 2)
+#define HAL_WBM_SCATTER_RING_SIZE_OF_IDLE_LINK_DESC_LIST GENMASK(31, 16)
+#define HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32 GENMASK(7, 0)
+#define HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_MATCH_TAG GENMASK(31, 8)
+
+#define HAL_WBM_SCATTERED_DESC_HEAD_P_OFFSET_IX1 GENMASK(20, 8)
+#define HAL_WBM_SCATTERED_DESC_TAIL_P_OFFSET_IX1 GENMASK(20, 8)
+
+#define BASE_ADDR_MATCH_TAG_VAL 0x5
+
+#define HAL_REO_REO2SW1_RING_BASE_MSB_RING_SIZE 0x000fffff
+#define HAL_REO_REO2TCL_RING_BASE_MSB_RING_SIZE 0x000fffff
+#define HAL_REO_SW2REO_RING_BASE_MSB_RING_SIZE 0x0000ffff
+#define HAL_REO_CMD_RING_BASE_MSB_RING_SIZE 0x0000ffff
+#define HAL_REO_STATUS_RING_BASE_MSB_RING_SIZE 0x0000ffff
+#define HAL_SW2TCL1_RING_BASE_MSB_RING_SIZE 0x000fffff
+#define HAL_SW2TCL1_CMD_RING_BASE_MSB_RING_SIZE 0x000fffff
+#define HAL_TCL_STATUS_RING_BASE_MSB_RING_SIZE 0x0000ffff
+#define HAL_CE_SRC_RING_BASE_MSB_RING_SIZE 0x0000ffff
+#define HAL_CE_DST_RING_BASE_MSB_RING_SIZE 0x0000ffff
+#define HAL_CE_DST_STATUS_RING_BASE_MSB_RING_SIZE 0x0000ffff
+#define HAL_WBM_IDLE_LINK_RING_BASE_MSB_RING_SIZE 0x0000ffff
+#define HAL_SW2WBM_RELEASE_RING_BASE_MSB_RING_SIZE 0x0000ffff
+#define HAL_WBM2SW_RELEASE_RING_BASE_MSB_RING_SIZE 0x000fffff
+#define HAL_RXDMA_RING_MAX_SIZE 0x0000ffff
+
+/* IPQ5018 ce registers */
+#define HAL_IPQ5018_CE_WFSS_REG_BASE 0x08400000
+#define HAL_IPQ5018_CE_SIZE 0x200000
+
+/* Add any other errors here and return them in
+ * ath11k_hal_rx_desc_get_err().
+ */
+
+enum hal_srng_ring_id {
+ HAL_SRNG_RING_ID_REO2SW1 = 0,
+ HAL_SRNG_RING_ID_REO2SW2,
+ HAL_SRNG_RING_ID_REO2SW3,
+ HAL_SRNG_RING_ID_REO2SW4,
+ HAL_SRNG_RING_ID_REO2TCL,
+ HAL_SRNG_RING_ID_SW2REO,
+
+ HAL_SRNG_RING_ID_REO_CMD = 8,
+ HAL_SRNG_RING_ID_REO_STATUS,
+
+ HAL_SRNG_RING_ID_SW2TCL1 = 16,
+ HAL_SRNG_RING_ID_SW2TCL2,
+ HAL_SRNG_RING_ID_SW2TCL3,
+ HAL_SRNG_RING_ID_SW2TCL4,
+
+ HAL_SRNG_RING_ID_SW2TCL_CMD = 24,
+ HAL_SRNG_RING_ID_TCL_STATUS,
+
+ HAL_SRNG_RING_ID_CE0_SRC = 32,
+ HAL_SRNG_RING_ID_CE1_SRC,
+ HAL_SRNG_RING_ID_CE2_SRC,
+ HAL_SRNG_RING_ID_CE3_SRC,
+ HAL_SRNG_RING_ID_CE4_SRC,
+ HAL_SRNG_RING_ID_CE5_SRC,
+ HAL_SRNG_RING_ID_CE6_SRC,
+ HAL_SRNG_RING_ID_CE7_SRC,
+ HAL_SRNG_RING_ID_CE8_SRC,
+ HAL_SRNG_RING_ID_CE9_SRC,
+ HAL_SRNG_RING_ID_CE10_SRC,
+ HAL_SRNG_RING_ID_CE11_SRC,
+
+ HAL_SRNG_RING_ID_CE0_DST = 56,
+ HAL_SRNG_RING_ID_CE1_DST,
+ HAL_SRNG_RING_ID_CE2_DST,
+ HAL_SRNG_RING_ID_CE3_DST,
+ HAL_SRNG_RING_ID_CE4_DST,
+ HAL_SRNG_RING_ID_CE5_DST,
+ HAL_SRNG_RING_ID_CE6_DST,
+ HAL_SRNG_RING_ID_CE7_DST,
+ HAL_SRNG_RING_ID_CE8_DST,
+ HAL_SRNG_RING_ID_CE9_DST,
+ HAL_SRNG_RING_ID_CE10_DST,
+ HAL_SRNG_RING_ID_CE11_DST,
+
+ HAL_SRNG_RING_ID_CE0_DST_STATUS = 80,
+ HAL_SRNG_RING_ID_CE1_DST_STATUS,
+ HAL_SRNG_RING_ID_CE2_DST_STATUS,
+ HAL_SRNG_RING_ID_CE3_DST_STATUS,
+ HAL_SRNG_RING_ID_CE4_DST_STATUS,
+ HAL_SRNG_RING_ID_CE5_DST_STATUS,
+ HAL_SRNG_RING_ID_CE6_DST_STATUS,
+ HAL_SRNG_RING_ID_CE7_DST_STATUS,
+ HAL_SRNG_RING_ID_CE8_DST_STATUS,
+ HAL_SRNG_RING_ID_CE9_DST_STATUS,
+ HAL_SRNG_RING_ID_CE10_DST_STATUS,
+ HAL_SRNG_RING_ID_CE11_DST_STATUS,
+
+ HAL_SRNG_RING_ID_WBM_IDLE_LINK = 104,
+ HAL_SRNG_RING_ID_WBM_SW_RELEASE,
+ HAL_SRNG_RING_ID_WBM2SW0_RELEASE,
+ HAL_SRNG_RING_ID_WBM2SW1_RELEASE,
+ HAL_SRNG_RING_ID_WBM2SW2_RELEASE,
+ HAL_SRNG_RING_ID_WBM2SW3_RELEASE,
+ HAL_SRNG_RING_ID_WBM2SW4_RELEASE,
+
+ HAL_SRNG_RING_ID_UMAC_ID_END = 127,
+ HAL_SRNG_RING_ID_LMAC1_ID_START,
+
+ HAL_SRNG_RING_ID_WMAC1_SW2RXDMA0_BUF = HAL_SRNG_RING_ID_LMAC1_ID_START,
+ HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_BUF,
+ HAL_SRNG_RING_ID_WMAC1_SW2RXDMA2_BUF,
+ HAL_SRNG_RING_ID_WMAC1_SW2RXDMA0_STATBUF,
+ HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_STATBUF,
+ HAL_SRNG_RING_ID_WMAC1_RXDMA2SW0,
+ HAL_SRNG_RING_ID_WMAC1_RXDMA2SW1,
+ HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_DESC,
+ HAL_SRNG_RING_ID_RXDMA_DIR_BUF,
+
+ HAL_SRNG_RING_ID_LMAC1_ID_END = 143
+};
+
+/* SRNG registers are split into two groups R0 and R2 */
+#define HAL_SRNG_REG_GRP_R0 0
+#define HAL_SRNG_REG_GRP_R2 1
+#define HAL_SRNG_NUM_REG_GRP 2
+
+#define HAL_SRNG_NUM_LMACS 3
+#define HAL_SRNG_REO_EXCEPTION HAL_SRNG_RING_ID_REO2SW1
+#define HAL_SRNG_RINGS_PER_LMAC (HAL_SRNG_RING_ID_LMAC1_ID_END - \
+ HAL_SRNG_RING_ID_LMAC1_ID_START)
+#define HAL_SRNG_NUM_LMAC_RINGS (HAL_SRNG_NUM_LMACS * HAL_SRNG_RINGS_PER_LMAC)
+#define HAL_SRNG_RING_ID_MAX (HAL_SRNG_RING_ID_UMAC_ID_END + \
+ HAL_SRNG_NUM_LMAC_RINGS)
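+
+/* With the ID assignments above this works out to 15 rings per LMAC
+ * (143 - 128), 45 LMAC rings across the 3 LMACs, and a total ring ID
+ * space of 127 + 45 = 172.
+ */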
+
+enum hal_ring_type {
+ HAL_REO_DST,
+ HAL_REO_EXCEPTION,
+ HAL_REO_REINJECT,
+ HAL_REO_CMD,
+ HAL_REO_STATUS,
+ HAL_TCL_DATA,
+ HAL_TCL_CMD,
+ HAL_TCL_STATUS,
+ HAL_CE_SRC,
+ HAL_CE_DST,
+ HAL_CE_DST_STATUS,
+ HAL_WBM_IDLE_LINK,
+ HAL_SW2WBM_RELEASE,
+ HAL_WBM2SW_RELEASE,
+ HAL_RXDMA_BUF,
+ HAL_RXDMA_DST,
+ HAL_RXDMA_MONITOR_BUF,
+ HAL_RXDMA_MONITOR_STATUS,
+ HAL_RXDMA_MONITOR_DST,
+ HAL_RXDMA_MONITOR_DESC,
+ HAL_RXDMA_DIR_BUF,
+ HAL_MAX_RING_TYPES,
+};
+
+#define HAL_RX_MAX_BA_WINDOW 256
+
+#define HAL_DEFAULT_REO_TIMEOUT_USEC (40 * 1000)
+
+/**
+ * enum hal_reo_cmd_type: Enum for REO command type
+ * @HAL_REO_CMD_GET_QUEUE_STATS: Get REO queue status/stats
+ * @HAL_REO_CMD_FLUSH_QUEUE: Flush all frames in REO queue
+ * @HAL_REO_CMD_FLUSH_CACHE: Flush descriptor entries in the cache
+ * @HAL_REO_CMD_UNBLOCK_CACHE: Unblock a descriptor's address that was blocked
+ * earlier with a 'REO_FLUSH_CACHE' command
+ * @HAL_REO_CMD_FLUSH_TIMEOUT_LIST: Flush buffers/descriptors from timeout list
+ * @HAL_REO_CMD_UPDATE_RX_QUEUE: Update REO queue settings
+ */
+enum hal_reo_cmd_type {
+ HAL_REO_CMD_GET_QUEUE_STATS = 0,
+ HAL_REO_CMD_FLUSH_QUEUE = 1,
+ HAL_REO_CMD_FLUSH_CACHE = 2,
+ HAL_REO_CMD_UNBLOCK_CACHE = 3,
+ HAL_REO_CMD_FLUSH_TIMEOUT_LIST = 4,
+ HAL_REO_CMD_UPDATE_RX_QUEUE = 5,
+};
+
+/**
+ * enum hal_reo_cmd_status: Enum for execution status of REO command
+ * @HAL_REO_CMD_SUCCESS: Command has successfully executed
+ * @HAL_REO_CMD_BLOCKED: Command could not be executed as the queue
+ * or cache was blocked
+ * @HAL_REO_CMD_FAILED: Command execution failed, could be due to
+ * invalid queue desc
+ * @HAL_REO_CMD_RESOURCE_BLOCKED: Command could not be executed because
+ * one or more descriptors were blocked as a resource
+ * @HAL_REO_CMD_DRAIN: Command not executed because the ring is being
+ * drained
+ */
+enum hal_reo_cmd_status {
+ HAL_REO_CMD_SUCCESS = 0,
+ HAL_REO_CMD_BLOCKED = 1,
+ HAL_REO_CMD_FAILED = 2,
+ HAL_REO_CMD_RESOURCE_BLOCKED = 3,
+ HAL_REO_CMD_DRAIN = 0xff,
+};
+
+struct hal_wbm_idle_scatter_list {
+ dma_addr_t paddr;
+ struct hal_wbm_link_desc *vaddr;
+};
+
+struct hal_srng_params {
+ dma_addr_t ring_base_paddr;
+ u32 *ring_base_vaddr;
+ int num_entries;
+ u32 intr_batch_cntr_thres_entries;
+ u32 intr_timer_thres_us;
+ u32 flags;
+ u32 max_buffer_len;
+ u32 low_threshold;
+ dma_addr_t msi_addr;
+ u32 msi_data;
+
+ /* Add more params as needed */
+};
+
+enum hal_srng_dir {
+ HAL_SRNG_DIR_SRC,
+ HAL_SRNG_DIR_DST
+};
+
+/* srng flags */
+#define HAL_SRNG_FLAGS_MSI_SWAP 0x00000008
+#define HAL_SRNG_FLAGS_RING_PTR_SWAP 0x00000010
+#define HAL_SRNG_FLAGS_DATA_TLV_SWAP 0x00000020
+#define HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN 0x00010000
+#define HAL_SRNG_FLAGS_MSI_INTR 0x00020000
+#define HAL_SRNG_FLAGS_CACHED 0x20000000
+#define HAL_SRNG_FLAGS_LMAC_RING 0x80000000
+#define HAL_SRNG_FLAGS_REMAP_CE_RING 0x10000000
+
+#define HAL_SRNG_TLV_HDR_TAG GENMASK(9, 1)
+#define HAL_SRNG_TLV_HDR_LEN GENMASK(25, 10)
+
+/* Common SRNG ring structure for source and destination rings */
+struct hal_srng {
+ /* Unique SRNG ring ID */
+ u8 ring_id;
+
+ /* Ring initialization done */
+ u8 initialized;
+
+ /* Interrupt/MSI value assigned to this ring */
+ int irq;
+
+ /* Physical base address of the ring */
+ dma_addr_t ring_base_paddr;
+
+ /* Virtual base address of the ring */
+ u32 *ring_base_vaddr;
+
+ /* Number of entries in ring */
+ u32 num_entries;
+
+ /* Ring size */
+ u32 ring_size;
+
+ /* Ring size mask */
+ u32 ring_size_mask;
+
+ /* Size of ring entry */
+ u32 entry_size;
+
+ /* Interrupt timer threshold - in micro seconds */
+ u32 intr_timer_thres_us;
+
+ /* Interrupt batch counter threshold - in number of ring entries */
+ u32 intr_batch_cntr_thres_entries;
+
+ /* MSI Address */
+ dma_addr_t msi_addr;
+
+ /* MSI data */
+ u32 msi_data;
+
+ /* Misc flags */
+ u32 flags;
+
+ /* Lock for serializing ring index updates */
+ spinlock_t lock;
+
+ /* Start offset of SRNG register groups for this ring
+ * TBD: See if this is required - register address can be derived
+ * from ring ID
+ */
+ u32 hwreg_base[HAL_SRNG_NUM_REG_GRP];
+
+ u64 timestamp;
+
+ /* Source or Destination ring */
+ enum hal_srng_dir ring_dir;
+
+ union {
+ struct {
+ /* SW tail pointer */
+ u32 tp;
+
+ /* Shadow head pointer location to be updated by HW */
+ volatile u32 *hp_addr;
+
+ /* Cached head pointer */
+ u32 cached_hp;
+
+ /* Tail pointer location to be updated by SW - This
+ * will be a register address and need not be
+ * accessed through SW structure
+ */
+ u32 *tp_addr;
+
+ /* Current SW loop cnt */
+ u32 loop_cnt;
+
+ /* max transfer size */
+ u16 max_buffer_length;
+
+ /* head pointer at access end */
+ u32 last_hp;
+ } dst_ring;
+
+ struct {
+ /* SW head pointer */
+ u32 hp;
+
+ /* SW reap head pointer */
+ u32 reap_hp;
+
+ /* Shadow tail pointer location to be updated by HW */
+ u32 *tp_addr;
+
+ /* Cached tail pointer */
+ u32 cached_tp;
+
+ /* Head pointer location to be updated by SW - This
+ * will be a register address and need not be accessed
+ * through SW structure
+ */
+ u32 *hp_addr;
+
+ /* Low threshold - in number of ring entries */
+ u32 low_threshold;
+
+ /* tail pointer at access end */
+ u32 last_tp;
+ } src_ring;
+ } u;
+};
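+
+/* Illustrative sketch, not part of the driver: for a destination ring,
+ * SW advances u.dst_ring.tp towards the HW-updated head pointer. Using
+ * the accessors declared at the end of this header, a consume loop
+ * might look like this (process() is a hypothetical per-entry handler):
+ *
+ *	spin_lock_bh(&srng->lock);
+ *	ath11k_hal_srng_access_begin(ab, srng);
+ *	while ((desc = ath11k_hal_srng_dst_get_next_entry(ab, srng)))
+ *		process(desc);
+ *	ath11k_hal_srng_access_end(ab, srng);
+ *	spin_unlock_bh(&srng->lock);
+ */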
+
+/* Interrupt mitigation - Batch threshold in terms of number of frames */
+#define HAL_SRNG_INT_BATCH_THRESHOLD_TX 256
+#define HAL_SRNG_INT_BATCH_THRESHOLD_RX 128
+#define HAL_SRNG_INT_BATCH_THRESHOLD_OTHER 1
+
+/* Interrupt mitigation - timer threshold in us */
+#define HAL_SRNG_INT_TIMER_THRESHOLD_TX 1000
+#define HAL_SRNG_INT_TIMER_THRESHOLD_RX 500
+#define HAL_SRNG_INT_TIMER_THRESHOLD_OTHER 256
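+
+/* Illustrative sketch, not part of the driver: ring setup code would
+ * typically feed these thresholds into struct hal_srng_params before
+ * calling ath11k_hal_srng_setup(), e.g. for an RX ring:
+ *
+ *	params.intr_batch_cntr_thres_entries = HAL_SRNG_INT_BATCH_THRESHOLD_RX;
+ *	params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
+ */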
+
+/* HW SRNG configuration table */
+struct hal_srng_config {
+ int start_ring_id;
+ u16 max_rings;
+ u16 entry_size;
+ u32 reg_start[HAL_SRNG_NUM_REG_GRP];
+ u16 reg_size[HAL_SRNG_NUM_REG_GRP];
+ u8 lmac_ring;
+ enum hal_srng_dir ring_dir;
+ u32 max_size;
+};
+
+/**
+ * enum hal_rx_buf_return_buf_manager - manager for returned rx buffers
+ *
+ * @HAL_RX_BUF_RBM_WBM_IDLE_BUF_LIST: Buffer returned to WBM idle buffer list
+ * @HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST: Descriptor returned to WBM idle
+ * descriptor list.
+ * @HAL_RX_BUF_RBM_FW_BM: Buffer returned to FW
+ * @HAL_RX_BUF_RBM_SW0_BM: For Tx completion -- returned to host
+ * @HAL_RX_BUF_RBM_SW1_BM: For Tx completion -- returned to host
+ * @HAL_RX_BUF_RBM_SW2_BM: For Tx completion -- returned to host
+ * @HAL_RX_BUF_RBM_SW3_BM: For Rx release -- returned to host
+ * @HAL_RX_BUF_RBM_SW4_BM: For Tx completion -- returned to host
+ */
+enum hal_rx_buf_return_buf_manager {
+ HAL_RX_BUF_RBM_WBM_IDLE_BUF_LIST,
+ HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST,
+ HAL_RX_BUF_RBM_FW_BM,
+ HAL_RX_BUF_RBM_SW0_BM,
+ HAL_RX_BUF_RBM_SW1_BM,
+ HAL_RX_BUF_RBM_SW2_BM,
+ HAL_RX_BUF_RBM_SW3_BM,
+ HAL_RX_BUF_RBM_SW4_BM,
+};
+
+#define HAL_SRNG_DESC_LOOP_CNT 0xf0000000
+
+#define HAL_REO_CMD_FLG_NEED_STATUS BIT(0)
+#define HAL_REO_CMD_FLG_STATS_CLEAR BIT(1)
+#define HAL_REO_CMD_FLG_FLUSH_BLOCK_LATER BIT(2)
+#define HAL_REO_CMD_FLG_FLUSH_RELEASE_BLOCKING BIT(3)
+#define HAL_REO_CMD_FLG_FLUSH_NO_INVAL BIT(4)
+#define HAL_REO_CMD_FLG_FLUSH_FWD_ALL_MPDUS BIT(5)
+#define HAL_REO_CMD_FLG_FLUSH_ALL BIT(6)
+#define HAL_REO_CMD_FLG_UNBLK_RESOURCE BIT(7)
+#define HAL_REO_CMD_FLG_UNBLK_CACHE BIT(8)
+
+/* Should be matching with HAL_REO_UPD_RX_QUEUE_INFO0_UPD_* fields */
+#define HAL_REO_CMD_UPD0_RX_QUEUE_NUM BIT(8)
+#define HAL_REO_CMD_UPD0_VLD BIT(9)
+#define HAL_REO_CMD_UPD0_ALDC BIT(10)
+#define HAL_REO_CMD_UPD0_DIS_DUP_DETECTION BIT(11)
+#define HAL_REO_CMD_UPD0_SOFT_REORDER_EN BIT(12)
+#define HAL_REO_CMD_UPD0_AC BIT(13)
+#define HAL_REO_CMD_UPD0_BAR BIT(14)
+#define HAL_REO_CMD_UPD0_RETRY BIT(15)
+#define HAL_REO_CMD_UPD0_CHECK_2K_MODE BIT(16)
+#define HAL_REO_CMD_UPD0_OOR_MODE BIT(17)
+#define HAL_REO_CMD_UPD0_BA_WINDOW_SIZE BIT(18)
+#define HAL_REO_CMD_UPD0_PN_CHECK BIT(19)
+#define HAL_REO_CMD_UPD0_EVEN_PN BIT(20)
+#define HAL_REO_CMD_UPD0_UNEVEN_PN BIT(21)
+#define HAL_REO_CMD_UPD0_PN_HANDLE_ENABLE BIT(22)
+#define HAL_REO_CMD_UPD0_PN_SIZE BIT(23)
+#define HAL_REO_CMD_UPD0_IGNORE_AMPDU_FLG BIT(24)
+#define HAL_REO_CMD_UPD0_SVLD BIT(25)
+#define HAL_REO_CMD_UPD0_SSN BIT(26)
+#define HAL_REO_CMD_UPD0_SEQ_2K_ERR BIT(27)
+#define HAL_REO_CMD_UPD0_PN_ERR BIT(28)
+#define HAL_REO_CMD_UPD0_PN_VALID BIT(29)
+#define HAL_REO_CMD_UPD0_PN BIT(30)
+
+/* Should be matching with HAL_REO_UPD_RX_QUEUE_INFO1_* fields */
+#define HAL_REO_CMD_UPD1_VLD BIT(16)
+#define HAL_REO_CMD_UPD1_ALDC GENMASK(18, 17)
+#define HAL_REO_CMD_UPD1_DIS_DUP_DETECTION BIT(19)
+#define HAL_REO_CMD_UPD1_SOFT_REORDER_EN BIT(20)
+#define HAL_REO_CMD_UPD1_AC GENMASK(22, 21)
+#define HAL_REO_CMD_UPD1_BAR BIT(23)
+#define HAL_REO_CMD_UPD1_RETRY BIT(24)
+#define HAL_REO_CMD_UPD1_CHECK_2K_MODE BIT(25)
+#define HAL_REO_CMD_UPD1_OOR_MODE BIT(26)
+#define HAL_REO_CMD_UPD1_PN_CHECK BIT(27)
+#define HAL_REO_CMD_UPD1_EVEN_PN BIT(28)
+#define HAL_REO_CMD_UPD1_UNEVEN_PN BIT(29)
+#define HAL_REO_CMD_UPD1_PN_HANDLE_ENABLE BIT(30)
+#define HAL_REO_CMD_UPD1_IGNORE_AMPDU_FLG BIT(31)
+
+/* Should be matching with HAL_REO_UPD_RX_QUEUE_INFO2_* fields */
+#define HAL_REO_CMD_UPD2_SVLD BIT(10)
+#define HAL_REO_CMD_UPD2_SSN GENMASK(22, 11)
+#define HAL_REO_CMD_UPD2_SEQ_2K_ERR BIT(23)
+#define HAL_REO_CMD_UPD2_PN_ERR BIT(24)
+
+#define HAL_REO_DEST_RING_CTRL_HASH_RING_MAP GENMASK(31, 8)
+
+struct ath11k_hal_reo_cmd {
+ u32 addr_lo;
+ u32 flag;
+ u32 upd0;
+ u32 upd1;
+ u32 upd2;
+ u32 pn[4];
+ u16 rx_queue_num;
+ u16 min_rel;
+ u16 min_fwd;
+ u8 addr_hi;
+ u8 ac_list;
+ u8 blocking_idx;
+ u16 ba_window_size;
+ u8 pn_size;
+};
+
+enum hal_pn_type {
+ HAL_PN_TYPE_NONE,
+ HAL_PN_TYPE_WPA,
+ HAL_PN_TYPE_WAPI_EVEN,
+ HAL_PN_TYPE_WAPI_UNEVEN,
+};
+
+enum hal_ce_desc {
+ HAL_CE_DESC_SRC,
+ HAL_CE_DESC_DST,
+ HAL_CE_DESC_DST_STATUS,
+};
+
+#define HAL_HASH_ROUTING_RING_TCL 0
+#define HAL_HASH_ROUTING_RING_SW1 1
+#define HAL_HASH_ROUTING_RING_SW2 2
+#define HAL_HASH_ROUTING_RING_SW3 3
+#define HAL_HASH_ROUTING_RING_SW4 4
+#define HAL_HASH_ROUTING_RING_REL 5
+#define HAL_HASH_ROUTING_RING_FW 6
+
+struct hal_reo_status_header {
+ u16 cmd_num;
+ enum hal_reo_cmd_status cmd_status;
+ u16 cmd_exe_time;
+ u32 timestamp;
+};
+
+struct hal_reo_status_queue_stats {
+ u16 ssn;
+ u16 curr_idx;
+ u32 pn[4];
+ u32 last_rx_queue_ts;
+ u32 last_rx_dequeue_ts;
+ u32 rx_bitmap[8]; /* Bitmap from 0-255 */
+ u32 curr_mpdu_cnt;
+ u32 curr_msdu_cnt;
+ u16 fwd_due_to_bar_cnt;
+ u16 dup_cnt;
+ u32 frames_in_order_cnt;
+ u32 num_mpdu_processed_cnt;
+ u32 num_msdu_processed_cnt;
+ u32 total_num_processed_byte_cnt;
+ u32 late_rx_mpdu_cnt;
+ u32 reorder_hole_cnt;
+ u8 timeout_cnt;
+ u8 bar_rx_cnt;
+ u8 num_window_2k_jump_cnt;
+};
+
+struct hal_reo_status_flush_queue {
+ bool err_detected;
+};
+
+enum hal_reo_status_flush_cache_err_code {
+ HAL_REO_STATUS_FLUSH_CACHE_ERR_CODE_SUCCESS,
+ HAL_REO_STATUS_FLUSH_CACHE_ERR_CODE_IN_USE,
+ HAL_REO_STATUS_FLUSH_CACHE_ERR_CODE_NOT_FOUND,
+};
+
+struct hal_reo_status_flush_cache {
+ bool err_detected;
+ enum hal_reo_status_flush_cache_err_code err_code;
+ bool cache_controller_flush_status_hit;
+ u8 cache_controller_flush_status_desc_type;
+ u8 cache_controller_flush_status_client_id;
+ u8 cache_controller_flush_status_err;
+ u8 cache_controller_flush_status_cnt;
+};
+
+enum hal_reo_status_unblock_cache_type {
+ HAL_REO_STATUS_UNBLOCK_BLOCKING_RESOURCE,
+ HAL_REO_STATUS_UNBLOCK_ENTIRE_CACHE_USAGE,
+};
+
+struct hal_reo_status_unblock_cache {
+ bool err_detected;
+ enum hal_reo_status_unblock_cache_type unblock_type;
+};
+
+struct hal_reo_status_flush_timeout_list {
+ bool err_detected;
+ bool list_empty;
+ u16 release_desc_cnt;
+ u16 fwd_buf_cnt;
+};
+
+enum hal_reo_threshold_idx {
+ HAL_REO_THRESHOLD_IDX_DESC_COUNTER0,
+ HAL_REO_THRESHOLD_IDX_DESC_COUNTER1,
+ HAL_REO_THRESHOLD_IDX_DESC_COUNTER2,
+ HAL_REO_THRESHOLD_IDX_DESC_COUNTER_SUM,
+};
+
+struct hal_reo_status_desc_thresh_reached {
+ enum hal_reo_threshold_idx threshold_idx;
+ u32 link_desc_counter0;
+ u32 link_desc_counter1;
+ u32 link_desc_counter2;
+ u32 link_desc_counter_sum;
+};
+
+struct hal_reo_status {
+ struct hal_reo_status_header uniform_hdr;
+ u8 loop_cnt;
+ union {
+ struct hal_reo_status_queue_stats queue_stats;
+ struct hal_reo_status_flush_queue flush_queue;
+ struct hal_reo_status_flush_cache flush_cache;
+ struct hal_reo_status_unblock_cache unblock_cache;
+ struct hal_reo_status_flush_timeout_list timeout_list;
+ struct hal_reo_status_desc_thresh_reached desc_thresh_reached;
+ } u;
+};
+
+/* HAL context to be used to access SRNG APIs (currently used by data path
+ * and transport (CE) modules)
+ */
+struct ath11k_hal {
+ /* HAL internal state for all SRNG rings.
+ */
+ struct hal_srng srng_list[HAL_SRNG_RING_ID_MAX];
+
+ /* SRNG configuration table */
+ struct hal_srng_config *srng_config;
+
+ /* Remote pointer memory for HW/FW updates */
+ struct {
+ u32 *vaddr;
+ dma_addr_t paddr;
+ } rdp;
+
+ /* Shared memory for ring pointer updates from host to FW */
+ struct {
+ u32 *vaddr;
+ dma_addr_t paddr;
+ } wrp;
+
+ /* Available REO blocking resources bitmap */
+ u8 avail_blk_resource;
+
+ u8 current_blk_index;
+
+ /* shadow register configuration */
+ u32 shadow_reg_addr[HAL_SHADOW_NUM_REGS];
+ int num_shadow_reg_configured;
+
+ struct lock_class_key srng_key[HAL_SRNG_RING_ID_MAX];
+};
+
+u32 ath11k_hal_reo_qdesc_size(u32 ba_window_size, u8 tid);
+void ath11k_hal_reo_qdesc_setup(void *vaddr, int tid, u32 ba_window_size,
+ u32 start_seq, enum hal_pn_type type);
+void ath11k_hal_reo_init_cmd_ring(struct ath11k_base *ab,
+ struct hal_srng *srng);
+void ath11k_hal_setup_link_idle_list(struct ath11k_base *ab,
+ struct hal_wbm_idle_scatter_list *sbuf,
+ u32 nsbufs, u32 tot_link_desc,
+ u32 end_offset);
+
+dma_addr_t ath11k_hal_srng_get_tp_addr(struct ath11k_base *ab,
+ struct hal_srng *srng);
+dma_addr_t ath11k_hal_srng_get_hp_addr(struct ath11k_base *ab,
+ struct hal_srng *srng);
+void ath11k_hal_set_link_desc_addr(struct hal_wbm_link_desc *desc, u32 cookie,
+ dma_addr_t paddr);
+u32 ath11k_hal_ce_get_desc_size(enum hal_ce_desc type);
+void ath11k_hal_ce_src_set_desc(void *buf, dma_addr_t paddr, u32 len, u32 id,
+ u8 byte_swap_data);
+void ath11k_hal_ce_dst_set_desc(void *buf, dma_addr_t paddr);
+u32 ath11k_hal_ce_dst_status_get_length(void *buf);
+int ath11k_hal_srng_get_entrysize(struct ath11k_base *ab, u32 ring_type);
+int ath11k_hal_srng_get_max_entries(struct ath11k_base *ab, u32 ring_type);
+void ath11k_hal_srng_get_params(struct ath11k_base *ab, struct hal_srng *srng,
+ struct hal_srng_params *params);
+u32 *ath11k_hal_srng_dst_get_next_entry(struct ath11k_base *ab,
+ struct hal_srng *srng);
+u32 *ath11k_hal_srng_dst_peek(struct ath11k_base *ab, struct hal_srng *srng);
+int ath11k_hal_srng_dst_num_free(struct ath11k_base *ab, struct hal_srng *srng,
+ bool sync_hw_ptr);
+u32 *ath11k_hal_srng_src_peek(struct ath11k_base *ab, struct hal_srng *srng);
+u32 *ath11k_hal_srng_src_next_peek(struct ath11k_base *ab,
+ struct hal_srng *srng);
+u32 *ath11k_hal_srng_src_get_next_reaped(struct ath11k_base *ab,
+ struct hal_srng *srng);
+u32 *ath11k_hal_srng_src_reap_next(struct ath11k_base *ab,
+ struct hal_srng *srng);
+u32 *ath11k_hal_srng_src_get_next_entry(struct ath11k_base *ab,
+ struct hal_srng *srng);
+int ath11k_hal_srng_src_num_free(struct ath11k_base *ab, struct hal_srng *srng,
+ bool sync_hw_ptr);
+void ath11k_hal_srng_access_begin(struct ath11k_base *ab,
+ struct hal_srng *srng);
+void ath11k_hal_srng_access_end(struct ath11k_base *ab, struct hal_srng *srng);
+int ath11k_hal_srng_setup(struct ath11k_base *ab, enum hal_ring_type type,
+ int ring_num, int mac_id,
+ struct hal_srng_params *params);
+int ath11k_hal_srng_init(struct ath11k_base *ath11k);
+void ath11k_hal_srng_deinit(struct ath11k_base *ath11k);
+void ath11k_hal_srng_clear(struct ath11k_base *ab);
+void ath11k_hal_dump_srng_stats(struct ath11k_base *ab);
+void ath11k_hal_srng_get_shadow_config(struct ath11k_base *ab,
+ u32 **cfg, u32 *len);
+int ath11k_hal_srng_update_shadow_config(struct ath11k_base *ab,
+ enum hal_ring_type ring_type,
+ int ring_num);
+void ath11k_hal_srng_shadow_config(struct ath11k_base *ab);
+void ath11k_hal_srng_shadow_update_hp_tp(struct ath11k_base *ab,
+ struct hal_srng *srng);
+#endif
diff --git a/drivers/net/wireless/ath/ath11k/hal_desc.h b/drivers/net/wireless/ath/ath11k/hal_desc.h
new file mode 100644
index 000000000000..b2fd180bd28e
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/hal_desc.h
@@ -0,0 +1,2494 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+#include "core.h"
+
+#ifndef ATH11K_HAL_DESC_H
+#define ATH11K_HAL_DESC_H
+
+#define BUFFER_ADDR_INFO0_ADDR GENMASK(31, 0)
+
+#define BUFFER_ADDR_INFO1_ADDR GENMASK(7, 0)
+#define BUFFER_ADDR_INFO1_RET_BUF_MGR GENMASK(10, 8)
+#define BUFFER_ADDR_INFO1_SW_COOKIE GENMASK(31, 11)
+
+struct ath11k_buffer_addr {
+ u32 info0;
+ u32 info1;
+} __packed;
+
+/* ath11k_buffer_addr
+ *
+ * info0
+ * Address (lower 32 bits) of the msdu buffer or msdu extension
+ * descriptor or Link descriptor
+ *
+ * addr
+ * Address (upper 8 bits) of the msdu buffer or msdu extension
+ * descriptor or Link descriptor
+ *
+ * return_buffer_manager (RBM)
+ * Consumer: WBM
+ * Producer: SW/FW
+ * Indicates to which buffer manager the buffer, MSDU_EXTENSION
+ * descriptor or link descriptor being pointed to shall be
+ * returned after the frame has been processed. It is used by WBM
+ * for routing purposes.
+ *
+ * Values are defined in enum %HAL_RX_BUF_RBM_
+ *
+ * sw_buffer_cookie
+ * Cookie field exclusively used by SW. HW ignores the contents,
+ * except that it passes the programmed value on to other
+ * descriptors together with the physical address.
+ *
+ * Field can be used by SW, for example, to associate the buffer's
+ * physical address with its virtual address.
+ */
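+
+/* Illustrative sketch, not part of the driver: a 40-bit buffer address,
+ * return buffer manager and SW cookie are packed into the two words
+ * with FIELD_PREP, e.g.:
+ *
+ *	binfo->info0 = FIELD_PREP(BUFFER_ADDR_INFO0_ADDR,
+ *				  paddr & HAL_ADDR_LSB_REG_MASK);
+ *	binfo->info1 = FIELD_PREP(BUFFER_ADDR_INFO1_ADDR,
+ *				  (u64)paddr >> HAL_ADDR_MSB_REG_SHIFT) |
+ *		       FIELD_PREP(BUFFER_ADDR_INFO1_RET_BUF_MGR, rbm) |
+ *		       FIELD_PREP(BUFFER_ADDR_INFO1_SW_COOKIE, cookie);
+ */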
+
+enum hal_tlv_tag {
+ HAL_MACTX_CBF_START = 0 /* 0x0 */,
+ HAL_PHYRX_DATA = 1 /* 0x1 */,
+ HAL_PHYRX_CBF_DATA_RESP = 2 /* 0x2 */,
+ HAL_PHYRX_ABORT_REQUEST = 3 /* 0x3 */,
+ HAL_PHYRX_USER_ABORT_NOTIFICATION = 4 /* 0x4 */,
+ HAL_MACTX_DATA_RESP = 5 /* 0x5 */,
+ HAL_MACTX_CBF_DATA = 6 /* 0x6 */,
+ HAL_MACTX_CBF_DONE = 7 /* 0x7 */,
+ HAL_MACRX_CBF_READ_REQUEST = 8 /* 0x8 */,
+ HAL_MACRX_CBF_DATA_REQUEST = 9 /* 0x9 */,
+ HAL_MACRX_EXPECT_NDP_RECEPTION = 10 /* 0xa */,
+ HAL_MACRX_FREEZE_CAPTURE_CHANNEL = 11 /* 0xb */,
+ HAL_MACRX_NDP_TIMEOUT = 12 /* 0xc */,
+ HAL_MACRX_ABORT_ACK = 13 /* 0xd */,
+ HAL_MACRX_REQ_IMPLICIT_FB = 14 /* 0xe */,
+ HAL_MACRX_CHAIN_MASK = 15 /* 0xf */,
+ HAL_MACRX_NAP_USER = 16 /* 0x10 */,
+ HAL_MACRX_ABORT_REQUEST = 17 /* 0x11 */,
+ HAL_PHYTX_OTHER_TRANSMIT_INFO16 = 18 /* 0x12 */,
+ HAL_PHYTX_ABORT_ACK = 19 /* 0x13 */,
+ HAL_PHYTX_ABORT_REQUEST = 20 /* 0x14 */,
+ HAL_PHYTX_PKT_END = 21 /* 0x15 */,
+ HAL_PHYTX_PPDU_HEADER_INFO_REQUEST = 22 /* 0x16 */,
+ HAL_PHYTX_REQUEST_CTRL_INFO = 23 /* 0x17 */,
+ HAL_PHYTX_DATA_REQUEST = 24 /* 0x18 */,
+ HAL_PHYTX_BF_CV_LOADING_DONE = 25 /* 0x19 */,
+ HAL_PHYTX_NAP_ACK = 26 /* 0x1a */,
+ HAL_PHYTX_NAP_DONE = 27 /* 0x1b */,
+ HAL_PHYTX_OFF_ACK = 28 /* 0x1c */,
+ HAL_PHYTX_ON_ACK = 29 /* 0x1d */,
+ HAL_PHYTX_SYNTH_OFF_ACK = 30 /* 0x1e */,
+ HAL_PHYTX_DEBUG16 = 31 /* 0x1f */,
+ HAL_MACTX_ABORT_REQUEST = 32 /* 0x20 */,
+ HAL_MACTX_ABORT_ACK = 33 /* 0x21 */,
+ HAL_MACTX_PKT_END = 34 /* 0x22 */,
+ HAL_MACTX_PRE_PHY_DESC = 35 /* 0x23 */,
+ HAL_MACTX_BF_PARAMS_COMMON = 36 /* 0x24 */,
+ HAL_MACTX_BF_PARAMS_PER_USER = 37 /* 0x25 */,
+ HAL_MACTX_PREFETCH_CV = 38 /* 0x26 */,
+ HAL_MACTX_USER_DESC_COMMON = 39 /* 0x27 */,
+ HAL_MACTX_USER_DESC_PER_USER = 40 /* 0x28 */,
+ HAL_EXAMPLE_USER_TLV_16 = 41 /* 0x29 */,
+ HAL_EXAMPLE_TLV_16 = 42 /* 0x2a */,
+ HAL_MACTX_PHY_OFF = 43 /* 0x2b */,
+ HAL_MACTX_PHY_ON = 44 /* 0x2c */,
+ HAL_MACTX_SYNTH_OFF = 45 /* 0x2d */,
+ HAL_MACTX_EXPECT_CBF_COMMON = 46 /* 0x2e */,
+ HAL_MACTX_EXPECT_CBF_PER_USER = 47 /* 0x2f */,
+ HAL_MACTX_PHY_DESC = 48 /* 0x30 */,
+ HAL_MACTX_L_SIG_A = 49 /* 0x31 */,
+ HAL_MACTX_L_SIG_B = 50 /* 0x32 */,
+ HAL_MACTX_HT_SIG = 51 /* 0x33 */,
+ HAL_MACTX_VHT_SIG_A = 52 /* 0x34 */,
+ HAL_MACTX_VHT_SIG_B_SU20 = 53 /* 0x35 */,
+ HAL_MACTX_VHT_SIG_B_SU40 = 54 /* 0x36 */,
+ HAL_MACTX_VHT_SIG_B_SU80 = 55 /* 0x37 */,
+ HAL_MACTX_VHT_SIG_B_SU160 = 56 /* 0x38 */,
+ HAL_MACTX_VHT_SIG_B_MU20 = 57 /* 0x39 */,
+ HAL_MACTX_VHT_SIG_B_MU40 = 58 /* 0x3a */,
+ HAL_MACTX_VHT_SIG_B_MU80 = 59 /* 0x3b */,
+ HAL_MACTX_VHT_SIG_B_MU160 = 60 /* 0x3c */,
+ HAL_MACTX_SERVICE = 61 /* 0x3d */,
+ HAL_MACTX_HE_SIG_A_SU = 62 /* 0x3e */,
+ HAL_MACTX_HE_SIG_A_MU_DL = 63 /* 0x3f */,
+ HAL_MACTX_HE_SIG_A_MU_UL = 64 /* 0x40 */,
+ HAL_MACTX_HE_SIG_B1_MU = 65 /* 0x41 */,
+ HAL_MACTX_HE_SIG_B2_MU = 66 /* 0x42 */,
+ HAL_MACTX_HE_SIG_B2_OFDMA = 67 /* 0x43 */,
+ HAL_MACTX_DELETE_CV = 68 /* 0x44 */,
+ HAL_MACTX_MU_UPLINK_COMMON = 69 /* 0x45 */,
+ HAL_MACTX_MU_UPLINK_USER_SETUP = 70 /* 0x46 */,
+ HAL_MACTX_OTHER_TRANSMIT_INFO = 71 /* 0x47 */,
+ HAL_MACTX_PHY_NAP = 72 /* 0x48 */,
+ HAL_MACTX_DEBUG = 73 /* 0x49 */,
+ HAL_PHYRX_ABORT_ACK = 74 /* 0x4a */,
+ HAL_PHYRX_GENERATED_CBF_DETAILS = 75 /* 0x4b */,
+ HAL_PHYRX_RSSI_LEGACY = 76 /* 0x4c */,
+ HAL_PHYRX_RSSI_HT = 77 /* 0x4d */,
+ HAL_PHYRX_USER_INFO = 78 /* 0x4e */,
+ HAL_PHYRX_PKT_END = 79 /* 0x4f */,
+ HAL_PHYRX_DEBUG = 80 /* 0x50 */,
+ HAL_PHYRX_CBF_TRANSFER_DONE = 81 /* 0x51 */,
+ HAL_PHYRX_CBF_TRANSFER_ABORT = 82 /* 0x52 */,
+ HAL_PHYRX_L_SIG_A = 83 /* 0x53 */,
+ HAL_PHYRX_L_SIG_B = 84 /* 0x54 */,
+ HAL_PHYRX_HT_SIG = 85 /* 0x55 */,
+ HAL_PHYRX_VHT_SIG_A = 86 /* 0x56 */,
+ HAL_PHYRX_VHT_SIG_B_SU20 = 87 /* 0x57 */,
+ HAL_PHYRX_VHT_SIG_B_SU40 = 88 /* 0x58 */,
+ HAL_PHYRX_VHT_SIG_B_SU80 = 89 /* 0x59 */,
+ HAL_PHYRX_VHT_SIG_B_SU160 = 90 /* 0x5a */,
+ HAL_PHYRX_VHT_SIG_B_MU20 = 91 /* 0x5b */,
+ HAL_PHYRX_VHT_SIG_B_MU40 = 92 /* 0x5c */,
+ HAL_PHYRX_VHT_SIG_B_MU80 = 93 /* 0x5d */,
+ HAL_PHYRX_VHT_SIG_B_MU160 = 94 /* 0x5e */,
+ HAL_PHYRX_HE_SIG_A_SU = 95 /* 0x5f */,
+ HAL_PHYRX_HE_SIG_A_MU_DL = 96 /* 0x60 */,
+ HAL_PHYRX_HE_SIG_A_MU_UL = 97 /* 0x61 */,
+ HAL_PHYRX_HE_SIG_B1_MU = 98 /* 0x62 */,
+ HAL_PHYRX_HE_SIG_B2_MU = 99 /* 0x63 */,
+ HAL_PHYRX_HE_SIG_B2_OFDMA = 100 /* 0x64 */,
+ HAL_PHYRX_OTHER_RECEIVE_INFO = 101 /* 0x65 */,
+ HAL_PHYRX_COMMON_USER_INFO = 102 /* 0x66 */,
+ HAL_PHYRX_DATA_DONE = 103 /* 0x67 */,
+ HAL_RECEIVE_RSSI_INFO = 104 /* 0x68 */,
+ HAL_RECEIVE_USER_INFO = 105 /* 0x69 */,
+ HAL_MIMO_CONTROL_INFO = 106 /* 0x6a */,
+ HAL_RX_LOCATION_INFO = 107 /* 0x6b */,
+ HAL_COEX_TX_REQ = 108 /* 0x6c */,
+ HAL_DUMMY = 109 /* 0x6d */,
+ HAL_RX_TIMING_OFFSET_INFO = 110 /* 0x6e */,
+ HAL_EXAMPLE_TLV_32_NAME = 111 /* 0x6f */,
+ HAL_MPDU_LIMIT = 112 /* 0x70 */,
+ HAL_NA_LENGTH_END = 113 /* 0x71 */,
+ HAL_OLE_BUF_STATUS = 114 /* 0x72 */,
+ HAL_PCU_PPDU_SETUP_DONE = 115 /* 0x73 */,
+ HAL_PCU_PPDU_SETUP_END = 116 /* 0x74 */,
+ HAL_PCU_PPDU_SETUP_INIT = 117 /* 0x75 */,
+ HAL_PCU_PPDU_SETUP_START = 118 /* 0x76 */,
+ HAL_PDG_FES_SETUP = 119 /* 0x77 */,
+ HAL_PDG_RESPONSE = 120 /* 0x78 */,
+ HAL_PDG_TX_REQ = 121 /* 0x79 */,
+ HAL_SCH_WAIT_INSTR = 122 /* 0x7a */,
+ HAL_SCHEDULER_TLV = 123 /* 0x7b */,
+ HAL_TQM_FLOW_EMPTY_STATUS = 124 /* 0x7c */,
+ HAL_TQM_FLOW_NOT_EMPTY_STATUS = 125 /* 0x7d */,
+ HAL_TQM_GEN_MPDU_LENGTH_LIST = 126 /* 0x7e */,
+ HAL_TQM_GEN_MPDU_LENGTH_LIST_STATUS = 127 /* 0x7f */,
+ HAL_TQM_GEN_MPDUS = 128 /* 0x80 */,
+ HAL_TQM_GEN_MPDUS_STATUS = 129 /* 0x81 */,
+ HAL_TQM_REMOVE_MPDU = 130 /* 0x82 */,
+ HAL_TQM_REMOVE_MPDU_STATUS = 131 /* 0x83 */,
+ HAL_TQM_REMOVE_MSDU = 132 /* 0x84 */,
+ HAL_TQM_REMOVE_MSDU_STATUS = 133 /* 0x85 */,
+ HAL_TQM_UPDATE_TX_MPDU_COUNT = 134 /* 0x86 */,
+ HAL_TQM_WRITE_CMD = 135 /* 0x87 */,
+ HAL_OFDMA_TRIGGER_DETAILS = 136 /* 0x88 */,
+ HAL_TX_DATA = 137 /* 0x89 */,
+ HAL_TX_FES_SETUP = 138 /* 0x8a */,
+ HAL_RX_PACKET = 139 /* 0x8b */,
+ HAL_EXPECTED_RESPONSE = 140 /* 0x8c */,
+ HAL_TX_MPDU_END = 141 /* 0x8d */,
+ HAL_TX_MPDU_START = 142 /* 0x8e */,
+ HAL_TX_MSDU_END = 143 /* 0x8f */,
+ HAL_TX_MSDU_START = 144 /* 0x90 */,
+ HAL_TX_SW_MODE_SETUP = 145 /* 0x91 */,
+ HAL_TXPCU_BUFFER_STATUS = 146 /* 0x92 */,
+ HAL_TXPCU_USER_BUFFER_STATUS = 147 /* 0x93 */,
+ HAL_DATA_TO_TIME_CONFIG = 148 /* 0x94 */,
+ HAL_EXAMPLE_USER_TLV_32 = 149 /* 0x95 */,
+ HAL_MPDU_INFO = 150 /* 0x96 */,
+ HAL_PDG_USER_SETUP = 151 /* 0x97 */,
+ HAL_TX_11AH_SETUP = 152 /* 0x98 */,
+ HAL_REO_UPDATE_RX_REO_QUEUE_STATUS = 153 /* 0x99 */,
+ HAL_TX_PEER_ENTRY = 154 /* 0x9a */,
+ HAL_TX_RAW_OR_NATIVE_FRAME_SETUP = 155 /* 0x9b */,
+ HAL_EXAMPLE_STRUCT_NAME = 156 /* 0x9c */,
+ HAL_PCU_PPDU_SETUP_END_INFO = 157 /* 0x9d */,
+ HAL_PPDU_RATE_SETTING = 158 /* 0x9e */,
+ HAL_PROT_RATE_SETTING = 159 /* 0x9f */,
+ HAL_RX_MPDU_DETAILS = 160 /* 0xa0 */,
+ HAL_EXAMPLE_USER_TLV_42 = 161 /* 0xa1 */,
+ HAL_RX_MSDU_LINK = 162 /* 0xa2 */,
+ HAL_RX_REO_QUEUE = 163 /* 0xa3 */,
+ HAL_ADDR_SEARCH_ENTRY = 164 /* 0xa4 */,
+ HAL_SCHEDULER_CMD = 165 /* 0xa5 */,
+ HAL_TX_FLUSH = 166 /* 0xa6 */,
+ HAL_TQM_ENTRANCE_RING = 167 /* 0xa7 */,
+ HAL_TX_DATA_WORD = 168 /* 0xa8 */,
+ HAL_TX_MPDU_DETAILS = 169 /* 0xa9 */,
+ HAL_TX_MPDU_LINK = 170 /* 0xaa */,
+ HAL_TX_MPDU_LINK_PTR = 171 /* 0xab */,
+ HAL_TX_MPDU_QUEUE_HEAD = 172 /* 0xac */,
+ HAL_TX_MPDU_QUEUE_EXT = 173 /* 0xad */,
+ HAL_TX_MPDU_QUEUE_EXT_PTR = 174 /* 0xae */,
+ HAL_TX_MSDU_DETAILS = 175 /* 0xaf */,
+ HAL_TX_MSDU_EXTENSION = 176 /* 0xb0 */,
+ HAL_TX_MSDU_FLOW = 177 /* 0xb1 */,
+ HAL_TX_MSDU_LINK = 178 /* 0xb2 */,
+ HAL_TX_MSDU_LINK_ENTRY_PTR = 179 /* 0xb3 */,
+ HAL_RESPONSE_RATE_SETTING = 180 /* 0xb4 */,
+ HAL_TXPCU_BUFFER_BASICS = 181 /* 0xb5 */,
+ HAL_UNIFORM_DESCRIPTOR_HEADER = 182 /* 0xb6 */,
+ HAL_UNIFORM_TQM_CMD_HEADER = 183 /* 0xb7 */,
+ HAL_UNIFORM_TQM_STATUS_HEADER = 184 /* 0xb8 */,
+ HAL_USER_RATE_SETTING = 185 /* 0xb9 */,
+ HAL_WBM_BUFFER_RING = 186 /* 0xba */,
+ HAL_WBM_LINK_DESCRIPTOR_RING = 187 /* 0xbb */,
+ HAL_WBM_RELEASE_RING = 188 /* 0xbc */,
+ HAL_TX_FLUSH_REQ = 189 /* 0xbd */,
+ HAL_RX_MSDU_DETAILS = 190 /* 0xbe */,
+ HAL_TQM_WRITE_CMD_STATUS = 191 /* 0xbf */,
+ HAL_TQM_GET_MPDU_QUEUE_STATS = 192 /* 0xc0 */,
+ HAL_TQM_GET_MSDU_FLOW_STATS = 193 /* 0xc1 */,
+ HAL_EXAMPLE_USER_CTLV_32 = 194 /* 0xc2 */,
+ HAL_TX_FES_STATUS_START = 195 /* 0xc3 */,
+ HAL_TX_FES_STATUS_USER_PPDU = 196 /* 0xc4 */,
+ HAL_TX_FES_STATUS_USER_RESPONSE = 197 /* 0xc5 */,
+ HAL_TX_FES_STATUS_END = 198 /* 0xc6 */,
+ HAL_RX_TRIG_INFO = 199 /* 0xc7 */,
+ HAL_RXPCU_TX_SETUP_CLEAR = 200 /* 0xc8 */,
+ HAL_RX_FRAME_BITMAP_REQ = 201 /* 0xc9 */,
+ HAL_RX_FRAME_BITMAP_ACK = 202 /* 0xca */,
+ HAL_COEX_RX_STATUS = 203 /* 0xcb */,
+ HAL_RX_START_PARAM = 204 /* 0xcc */,
+ HAL_RX_PPDU_START = 205 /* 0xcd */,
+ HAL_RX_PPDU_END = 206 /* 0xce */,
+ HAL_RX_MPDU_START = 207 /* 0xcf */,
+ HAL_RX_MPDU_END = 208 /* 0xd0 */,
+ HAL_RX_MSDU_START = 209 /* 0xd1 */,
+ HAL_RX_MSDU_END = 210 /* 0xd2 */,
+ HAL_RX_ATTENTION = 211 /* 0xd3 */,
+ HAL_RECEIVED_RESPONSE_INFO = 212 /* 0xd4 */,
+ HAL_RX_PHY_SLEEP = 213 /* 0xd5 */,
+ HAL_RX_HEADER = 214 /* 0xd6 */,
+ HAL_RX_PEER_ENTRY = 215 /* 0xd7 */,
+ HAL_RX_FLUSH = 216 /* 0xd8 */,
+ HAL_RX_RESPONSE_REQUIRED_INFO = 217 /* 0xd9 */,
+ HAL_RX_FRAMELESS_BAR_DETAILS = 218 /* 0xda */,
+ HAL_TQM_GET_MPDU_QUEUE_STATS_STATUS = 219 /* 0xdb */,
+ HAL_TQM_GET_MSDU_FLOW_STATS_STATUS = 220 /* 0xdc */,
+ HAL_TX_CBF_INFO = 221 /* 0xdd */,
+ HAL_PCU_PPDU_SETUP_USER = 222 /* 0xde */,
+ HAL_RX_MPDU_PCU_START = 223 /* 0xdf */,
+ HAL_RX_PM_INFO = 224 /* 0xe0 */,
+ HAL_RX_USER_PPDU_END = 225 /* 0xe1 */,
+ HAL_RX_PRE_PPDU_START = 226 /* 0xe2 */,
+ HAL_RX_PREAMBLE = 227 /* 0xe3 */,
+ HAL_TX_FES_SETUP_COMPLETE = 228 /* 0xe4 */,
+ HAL_TX_LAST_MPDU_FETCHED = 229 /* 0xe5 */,
+ HAL_TXDMA_STOP_REQUEST = 230 /* 0xe6 */,
+ HAL_RXPCU_SETUP = 231 /* 0xe7 */,
+ HAL_RXPCU_USER_SETUP = 232 /* 0xe8 */,
+ HAL_TX_FES_STATUS_ACK_OR_BA = 233 /* 0xe9 */,
+ HAL_TQM_ACKED_MPDU = 234 /* 0xea */,
+ HAL_COEX_TX_RESP = 235 /* 0xeb */,
+ HAL_COEX_TX_STATUS = 236 /* 0xec */,
+ HAL_MACTX_COEX_PHY_CTRL = 237 /* 0xed */,
+ HAL_COEX_STATUS_BROADCAST = 238 /* 0xee */,
+ HAL_RESPONSE_START_STATUS = 239 /* 0xef */,
+ HAL_RESPONSE_END_STATUS = 240 /* 0xf0 */,
+ HAL_CRYPTO_STATUS = 241 /* 0xf1 */,
+ HAL_RECEIVED_TRIGGER_INFO = 242 /* 0xf2 */,
+ HAL_REO_ENTRANCE_RING = 243 /* 0xf3 */,
+ HAL_RX_MPDU_LINK = 244 /* 0xf4 */,
+ HAL_COEX_TX_STOP_CTRL = 245 /* 0xf5 */,
+ HAL_RX_PPDU_ACK_REPORT = 246 /* 0xf6 */,
+ HAL_RX_PPDU_NO_ACK_REPORT = 247 /* 0xf7 */,
+ HAL_SCH_COEX_STATUS = 248 /* 0xf8 */,
+ HAL_SCHEDULER_COMMAND_STATUS = 249 /* 0xf9 */,
+ HAL_SCHEDULER_RX_PPDU_NO_RESPONSE_STATUS = 250 /* 0xfa */,
+ HAL_TX_FES_STATUS_PROT = 251 /* 0xfb */,
+ HAL_TX_FES_STATUS_START_PPDU = 252 /* 0xfc */,
+ HAL_TX_FES_STATUS_START_PROT = 253 /* 0xfd */,
+ HAL_TXPCU_PHYTX_DEBUG32 = 254 /* 0xfe */,
+ HAL_TXPCU_PHYTX_OTHER_TRANSMIT_INFO32 = 255 /* 0xff */,
+ HAL_TX_MPDU_COUNT_TRANSFER_END = 256 /* 0x100 */,
+ HAL_WHO_ANCHOR_OFFSET = 257 /* 0x101 */,
+ HAL_WHO_ANCHOR_VALUE = 258 /* 0x102 */,
+ HAL_WHO_CCE_INFO = 259 /* 0x103 */,
+ HAL_WHO_COMMIT = 260 /* 0x104 */,
+ HAL_WHO_COMMIT_DONE = 261 /* 0x105 */,
+ HAL_WHO_FLUSH = 262 /* 0x106 */,
+ HAL_WHO_L2_LLC = 263 /* 0x107 */,
+ HAL_WHO_L2_PAYLOAD = 264 /* 0x108 */,
+ HAL_WHO_L3_CHECKSUM = 265 /* 0x109 */,
+ HAL_WHO_L3_INFO = 266 /* 0x10a */,
+ HAL_WHO_L4_CHECKSUM = 267 /* 0x10b */,
+ HAL_WHO_L4_INFO = 268 /* 0x10c */,
+ HAL_WHO_MSDU = 269 /* 0x10d */,
+ HAL_WHO_MSDU_MISC = 270 /* 0x10e */,
+ HAL_WHO_PACKET_DATA = 271 /* 0x10f */,
+ HAL_WHO_PACKET_HDR = 272 /* 0x110 */,
+ HAL_WHO_PPDU_END = 273 /* 0x111 */,
+ HAL_WHO_PPDU_START = 274 /* 0x112 */,
+ HAL_WHO_TSO = 275 /* 0x113 */,
+ HAL_WHO_WMAC_HEADER_PV0 = 276 /* 0x114 */,
+ HAL_WHO_WMAC_HEADER_PV1 = 277 /* 0x115 */,
+ HAL_WHO_WMAC_IV = 278 /* 0x116 */,
+ HAL_MPDU_INFO_END = 279 /* 0x117 */,
+ HAL_MPDU_INFO_BITMAP = 280 /* 0x118 */,
+ HAL_TX_QUEUE_EXTENSION = 281 /* 0x119 */,
+ HAL_RX_PEER_ENTRY_DETAILS = 282 /* 0x11a */,
+ HAL_RX_REO_QUEUE_REFERENCE = 283 /* 0x11b */,
+ HAL_RX_REO_QUEUE_EXT = 284 /* 0x11c */,
+ HAL_SCHEDULER_SELFGEN_RESPONSE_STATUS = 285 /* 0x11d */,
+ HAL_TQM_UPDATE_TX_MPDU_COUNT_STATUS = 286 /* 0x11e */,
+ HAL_TQM_ACKED_MPDU_STATUS = 287 /* 0x11f */,
+ HAL_TQM_ADD_MSDU_STATUS = 288 /* 0x120 */,
+ HAL_RX_MPDU_LINK_PTR = 289 /* 0x121 */,
+ HAL_REO_DESTINATION_RING = 290 /* 0x122 */,
+ HAL_TQM_LIST_GEN_DONE = 291 /* 0x123 */,
+ HAL_WHO_TERMINATE = 292 /* 0x124 */,
+ HAL_TX_LAST_MPDU_END = 293 /* 0x125 */,
+ HAL_TX_CV_DATA = 294 /* 0x126 */,
+ HAL_TCL_ENTRANCE_FROM_PPE_RING = 295 /* 0x127 */,
+ HAL_PPDU_TX_END = 296 /* 0x128 */,
+ HAL_PROT_TX_END = 297 /* 0x129 */,
+ HAL_PDG_RESPONSE_RATE_SETTING = 298 /* 0x12a */,
+ HAL_MPDU_INFO_GLOBAL_END = 299 /* 0x12b */,
+ HAL_TQM_SCH_INSTR_GLOBAL_END = 300 /* 0x12c */,
+ HAL_RX_PPDU_END_USER_STATS = 301 /* 0x12d */,
+ HAL_RX_PPDU_END_USER_STATS_EXT = 302 /* 0x12e */,
+ HAL_NO_ACK_REPORT = 303 /* 0x12f */,
+ HAL_ACK_REPORT = 304 /* 0x130 */,
+ HAL_UNIFORM_REO_CMD_HEADER = 305 /* 0x131 */,
+ HAL_REO_GET_QUEUE_STATS = 306 /* 0x132 */,
+ HAL_REO_FLUSH_QUEUE = 307 /* 0x133 */,
+ HAL_REO_FLUSH_CACHE = 308 /* 0x134 */,
+ HAL_REO_UNBLOCK_CACHE = 309 /* 0x135 */,
+ HAL_UNIFORM_REO_STATUS_HEADER = 310 /* 0x136 */,
+ HAL_REO_GET_QUEUE_STATS_STATUS = 311 /* 0x137 */,
+ HAL_REO_FLUSH_QUEUE_STATUS = 312 /* 0x138 */,
+ HAL_REO_FLUSH_CACHE_STATUS = 313 /* 0x139 */,
+ HAL_REO_UNBLOCK_CACHE_STATUS = 314 /* 0x13a */,
+ HAL_TQM_FLUSH_CACHE = 315 /* 0x13b */,
+ HAL_TQM_UNBLOCK_CACHE = 316 /* 0x13c */,
+ HAL_TQM_FLUSH_CACHE_STATUS = 317 /* 0x13d */,
+ HAL_TQM_UNBLOCK_CACHE_STATUS = 318 /* 0x13e */,
+ HAL_RX_PPDU_END_STATUS_DONE = 319 /* 0x13f */,
+ HAL_RX_STATUS_BUFFER_DONE = 320 /* 0x140 */,
+ HAL_BUFFER_ADDR_INFO = 321 /* 0x141 */,
+ HAL_RX_MSDU_DESC_INFO = 322 /* 0x142 */,
+ HAL_RX_MPDU_DESC_INFO = 323 /* 0x143 */,
+ HAL_TCL_DATA_CMD = 324 /* 0x144 */,
+ HAL_TCL_GSE_CMD = 325 /* 0x145 */,
+ HAL_TCL_EXIT_BASE = 326 /* 0x146 */,
+ HAL_TCL_COMPACT_EXIT_RING = 327 /* 0x147 */,
+ HAL_TCL_REGULAR_EXIT_RING = 328 /* 0x148 */,
+ HAL_TCL_EXTENDED_EXIT_RING = 329 /* 0x149 */,
+ HAL_UPLINK_COMMON_INFO = 330 /* 0x14a */,
+ HAL_UPLINK_USER_SETUP_INFO = 331 /* 0x14b */,
+ HAL_TX_DATA_SYNC = 332 /* 0x14c */,
+ HAL_PHYRX_CBF_READ_REQUEST_ACK = 333 /* 0x14d */,
+ HAL_TCL_STATUS_RING = 334 /* 0x14e */,
+ HAL_TQM_GET_MPDU_HEAD_INFO = 335 /* 0x14f */,
+ HAL_TQM_SYNC_CMD = 336 /* 0x150 */,
+ HAL_TQM_GET_MPDU_HEAD_INFO_STATUS = 337 /* 0x151 */,
+ HAL_TQM_SYNC_CMD_STATUS = 338 /* 0x152 */,
+ HAL_TQM_THRESHOLD_DROP_NOTIFICATION_STATUS = 339 /* 0x153 */,
+ HAL_TQM_DESCRIPTOR_THRESHOLD_REACHED_STATUS = 340 /* 0x154 */,
+ HAL_REO_FLUSH_TIMEOUT_LIST = 341 /* 0x155 */,
+ HAL_REO_FLUSH_TIMEOUT_LIST_STATUS = 342 /* 0x156 */,
+ HAL_REO_TO_PPE_RING = 343 /* 0x157 */,
+ HAL_RX_MPDU_INFO = 344 /* 0x158 */,
+ HAL_REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS = 345 /* 0x159 */,
+ HAL_SCHEDULER_RX_SIFS_RESPONSE_TRIGGER_STATUS = 346 /* 0x15a */,
+ HAL_EXAMPLE_USER_TLV_32_NAME = 347 /* 0x15b */,
+ HAL_RX_PPDU_START_USER_INFO = 348 /* 0x15c */,
+ HAL_RX_RXPCU_CLASSIFICATION_OVERVIEW = 349 /* 0x15d */,
+ HAL_RX_RING_MASK = 350 /* 0x15e */,
+ HAL_WHO_CLASSIFY_INFO = 351 /* 0x15f */,
+ HAL_TXPT_CLASSIFY_INFO = 352 /* 0x160 */,
+ HAL_RXPT_CLASSIFY_INFO = 353 /* 0x161 */,
+ HAL_TX_FLOW_SEARCH_ENTRY = 354 /* 0x162 */,
+ HAL_RX_FLOW_SEARCH_ENTRY = 355 /* 0x163 */,
+ HAL_RECEIVED_TRIGGER_INFO_DETAILS = 356 /* 0x164 */,
+ HAL_COEX_MAC_NAP = 357 /* 0x165 */,
+ HAL_MACRX_ABORT_REQUEST_INFO = 358 /* 0x166 */,
+ HAL_MACTX_ABORT_REQUEST_INFO = 359 /* 0x167 */,
+ HAL_PHYRX_ABORT_REQUEST_INFO = 360 /* 0x168 */,
+ HAL_PHYTX_ABORT_REQUEST_INFO = 361 /* 0x169 */,
+ HAL_RXPCU_PPDU_END_INFO = 362 /* 0x16a */,
+ HAL_WHO_MESH_CONTROL = 363 /* 0x16b */,
+ HAL_L_SIG_A_INFO = 364 /* 0x16c */,
+ HAL_L_SIG_B_INFO = 365 /* 0x16d */,
+ HAL_HT_SIG_INFO = 366 /* 0x16e */,
+ HAL_VHT_SIG_A_INFO = 367 /* 0x16f */,
+ HAL_VHT_SIG_B_SU20_INFO = 368 /* 0x170 */,
+ HAL_VHT_SIG_B_SU40_INFO = 369 /* 0x171 */,
+ HAL_VHT_SIG_B_SU80_INFO = 370 /* 0x172 */,
+ HAL_VHT_SIG_B_SU160_INFO = 371 /* 0x173 */,
+ HAL_VHT_SIG_B_MU20_INFO = 372 /* 0x174 */,
+ HAL_VHT_SIG_B_MU40_INFO = 373 /* 0x175 */,
+ HAL_VHT_SIG_B_MU80_INFO = 374 /* 0x176 */,
+ HAL_VHT_SIG_B_MU160_INFO = 375 /* 0x177 */,
+ HAL_SERVICE_INFO = 376 /* 0x178 */,
+ HAL_HE_SIG_A_SU_INFO = 377 /* 0x179 */,
+ HAL_HE_SIG_A_MU_DL_INFO = 378 /* 0x17a */,
+ HAL_HE_SIG_A_MU_UL_INFO = 379 /* 0x17b */,
+ HAL_HE_SIG_B1_MU_INFO = 380 /* 0x17c */,
+ HAL_HE_SIG_B2_MU_INFO = 381 /* 0x17d */,
+ HAL_HE_SIG_B2_OFDMA_INFO = 382 /* 0x17e */,
+ HAL_PDG_SW_MODE_BW_START = 383 /* 0x17f */,
+ HAL_PDG_SW_MODE_BW_END = 384 /* 0x180 */,
+ HAL_PDG_WAIT_FOR_MAC_REQUEST = 385 /* 0x181 */,
+ HAL_PDG_WAIT_FOR_PHY_REQUEST = 386 /* 0x182 */,
+ HAL_SCHEDULER_END = 387 /* 0x183 */,
+ HAL_PEER_TABLE_ENTRY = 388 /* 0x184 */,
+ HAL_SW_PEER_INFO = 389 /* 0x185 */,
+ HAL_RXOLE_CCE_CLASSIFY_INFO = 390 /* 0x186 */,
+ HAL_TCL_CCE_CLASSIFY_INFO = 391 /* 0x187 */,
+ HAL_RXOLE_CCE_INFO = 392 /* 0x188 */,
+ HAL_TCL_CCE_INFO = 393 /* 0x189 */,
+ HAL_TCL_CCE_SUPERRULE = 394 /* 0x18a */,
+ HAL_CCE_RULE = 395 /* 0x18b */,
+ HAL_RX_PPDU_START_DROPPED = 396 /* 0x18c */,
+ HAL_RX_PPDU_END_DROPPED = 397 /* 0x18d */,
+ HAL_RX_PPDU_END_STATUS_DONE_DROPPED = 398 /* 0x18e */,
+ HAL_RX_MPDU_START_DROPPED = 399 /* 0x18f */,
+ HAL_RX_MSDU_START_DROPPED = 400 /* 0x190 */,
+ HAL_RX_MSDU_END_DROPPED = 401 /* 0x191 */,
+ HAL_RX_MPDU_END_DROPPED = 402 /* 0x192 */,
+ HAL_RX_ATTENTION_DROPPED = 403 /* 0x193 */,
+ HAL_TXPCU_USER_SETUP = 404 /* 0x194 */,
+ HAL_RXPCU_USER_SETUP_EXT = 405 /* 0x195 */,
+ HAL_CE_SRC_DESC = 406 /* 0x196 */,
+ HAL_CE_STAT_DESC = 407 /* 0x197 */,
+ HAL_RXOLE_CCE_SUPERRULE = 408 /* 0x198 */,
+ HAL_TX_RATE_STATS_INFO = 409 /* 0x199 */,
+ HAL_CMD_PART_0_END = 410 /* 0x19a */,
+ HAL_MACTX_SYNTH_ON = 411 /* 0x19b */,
+ HAL_SCH_CRITICAL_TLV_REFERENCE = 412 /* 0x19c */,
+ HAL_TQM_MPDU_GLOBAL_START = 413 /* 0x19d */,
+ HAL_EXAMPLE_TLV_32 = 414 /* 0x19e */,
+ HAL_TQM_UPDATE_TX_MSDU_FLOW = 415 /* 0x19f */,
+ HAL_TQM_UPDATE_TX_MPDU_QUEUE_HEAD = 416 /* 0x1a0 */,
+ HAL_TQM_UPDATE_TX_MSDU_FLOW_STATUS = 417 /* 0x1a1 */,
+ HAL_TQM_UPDATE_TX_MPDU_QUEUE_HEAD_STATUS = 418 /* 0x1a2 */,
+ HAL_REO_UPDATE_RX_REO_QUEUE = 419 /* 0x1a3 */,
+ HAL_CE_DST_DESC = 420 /* 0x1a4 */,
+ HAL_TLV_BASE = 511 /* 0x1ff */,
+};
+
+#define HAL_TLV_HDR_TAG GENMASK(9, 1)
+#define HAL_TLV_HDR_LEN GENMASK(25, 10)
+#define HAL_TLV_USR_ID GENMASK(31, 26)
+
+#define HAL_TLV_ALIGN 4
+
+struct hal_tlv_hdr {
+ u32 tl;
+ u8 value[];
+} __packed;
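+
+/* Illustrative sketch, not part of the driver: a TLV stream is walked by
+ * pulling tag and length out of 'tl' and stepping over the 4-byte
+ * aligned payload, e.g.:
+ *
+ *	tag = FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl);
+ *	len = FIELD_GET(HAL_TLV_HDR_LEN, tlv->tl);
+ *	ptr += sizeof(*tlv) + ALIGN(len, HAL_TLV_ALIGN);
+ */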
+
+#define RX_MPDU_DESC_INFO0_MSDU_COUNT GENMASK(7, 0)
+#define RX_MPDU_DESC_INFO0_SEQ_NUM GENMASK(19, 8)
+#define RX_MPDU_DESC_INFO0_FRAG_FLAG BIT(20)
+#define RX_MPDU_DESC_INFO0_MPDU_RETRY BIT(21)
+#define RX_MPDU_DESC_INFO0_AMPDU_FLAG BIT(22)
+#define RX_MPDU_DESC_INFO0_BAR_FRAME BIT(23)
+#define RX_MPDU_DESC_INFO0_VALID_PN BIT(24)
+#define RX_MPDU_DESC_INFO0_VALID_SA BIT(25)
+#define RX_MPDU_DESC_INFO0_SA_IDX_TIMEOUT BIT(26)
+#define RX_MPDU_DESC_INFO0_VALID_DA BIT(27)
+#define RX_MPDU_DESC_INFO0_DA_MCBC BIT(28)
+#define RX_MPDU_DESC_INFO0_DA_IDX_TIMEOUT BIT(29)
+#define RX_MPDU_DESC_INFO0_RAW_MPDU BIT(30)
+
+#define RX_MPDU_DESC_META_DATA_PEER_ID GENMASK(15, 0)
+
+struct rx_mpdu_desc {
+ u32 info0; /* %RX_MPDU_DESC_INFO */
+ u32 meta_data;
+} __packed;
+
+/* rx_mpdu_desc
+ * Producer: RXDMA
+ * Consumer: REO/SW/FW
+ *
+ * msdu_count
+ * The number of MSDUs within the MPDU
+ *
+ * mpdu_sequence_number
+ * The field can have two different meanings based on the setting
+ * of field 'bar_frame'. If 'bar_frame' is set, it means the MPDU
+ * start sequence number from the BAR frame; otherwise it means
+ * the MPDU sequence number of the received frame.
+ *
+ * fragment_flag
+ * When set, this MPDU is a fragment and REO should forward this
+ * fragment MPDU to the REO destination ring without any reorder
+ * checks, pn checks or bitmap update. This implies that REO is
+ * forwarding the pointer to the MSDU link descriptor.
+ *
+ * mpdu_retry_bit
+ * The retry bit setting from the MPDU header of the received frame
+ *
+ * ampdu_flag
+ * Indicates the MPDU was received as part of an A-MPDU.
+ *
+ * bar_frame
+ * Indicates the received frame is a BAR frame. After processing,
+ * this frame shall be pushed to SW or deleted.
+ *
+ * valid_pn
+ * When not set, REO will not perform a PN sequence number check.
+ *
+ * valid_sa
+ * Indicates OLE found a valid SA entry for all MSDUs in this MPDU.
+ *
+ * sa_idx_timeout
+ * Indicates that at least one MSDU within the MPDU had an
+ * unsuccessful MAC source address search due to the expiration of
+ * the search timer.
+ *
+ * valid_da
+ * When set, OLE found a valid DA entry for all MSDUs in this MPDU.
+ *
+ * da_mcbc
+ * Field only valid if valid_da is set. Indicates at least one of
+ * the DA addresses is a Multicast or Broadcast address.
+ *
+ * da_idx_timeout
+ * Indicates that at least one MSDU within the MPDU had an
+ * unsuccessful MAC destination address search due to the expiration
+ * of the search timer.
+ *
+ * raw_mpdu
+ * Field only valid when first_msdu_in_mpdu_flag is set. Indicates
+ * that the MSDU buffer contains a 'RAW' MPDU.
+ */
+
+enum hal_rx_msdu_desc_reo_dest_ind {
+ HAL_RX_MSDU_DESC_REO_DEST_IND_TCL,
+ HAL_RX_MSDU_DESC_REO_DEST_IND_SW1,
+ HAL_RX_MSDU_DESC_REO_DEST_IND_SW2,
+ HAL_RX_MSDU_DESC_REO_DEST_IND_SW3,
+ HAL_RX_MSDU_DESC_REO_DEST_IND_SW4,
+ HAL_RX_MSDU_DESC_REO_DEST_IND_RELEASE,
+ HAL_RX_MSDU_DESC_REO_DEST_IND_FW,
+};
+
+#define RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU BIT(0)
+#define RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU BIT(1)
+#define RX_MSDU_DESC_INFO0_MSDU_CONTINUATION BIT(2)
+#define RX_MSDU_DESC_INFO0_MSDU_LENGTH GENMASK(16, 3)
+#define RX_MSDU_DESC_INFO0_REO_DEST_IND GENMASK(21, 17)
+#define RX_MSDU_DESC_INFO0_MSDU_DROP BIT(22)
+#define RX_MSDU_DESC_INFO0_VALID_SA BIT(23)
+#define RX_MSDU_DESC_INFO0_SA_IDX_TIMEOUT BIT(24)
+#define RX_MSDU_DESC_INFO0_VALID_DA BIT(25)
+#define RX_MSDU_DESC_INFO0_DA_MCBC BIT(26)
+#define RX_MSDU_DESC_INFO0_DA_IDX_TIMEOUT BIT(27)
+
+#define HAL_RX_MSDU_PKT_LENGTH_GET(val) \
+ (FIELD_GET(RX_MSDU_DESC_INFO0_MSDU_LENGTH, (val)))
+
+struct rx_msdu_desc {
+ u32 info0;
+ u32 rsvd0;
+} __packed;
+
+/* rx_msdu_desc
+ *
+ * first_msdu_in_mpdu
+ * Indicates first msdu in mpdu.
+ *
+ * last_msdu_in_mpdu
+ * Indicates last msdu in mpdu. This flag can be true only when
+ * 'msdu_continuation' is set to 0. This implies that when an msdu
+ * is spread out over multiple buffers, 'last_msdu_in_mpdu' can be
+ * set only for the very last buffer of that msdu.
+ *
+ * When both first_msdu_in_mpdu and last_msdu_in_mpdu are set,
+ * the MPDU that this MSDU belongs to only contains a single MSDU.
+ *
+ * msdu_continuation
+ * When set, this MSDU buffer was not able to hold the entire MSDU.
+ * The next buffer will therefore contain additional information
+ * related to this MSDU.
+ *
+ * msdu_length
+ * Field is only valid in combination with the 'first_msdu_in_mpdu'
+ * being set. Full MSDU length in bytes after decapsulation. This
+ * field is still valid for MPDU frames without A-MSDU. It still
+ * represents the MSDU length after decapsulation. In case of RAW
+ * MPDUs, it indicates the length of the entire MPDU (without the
+ * FCS field).
+ *
+ * reo_destination_indication
+ * The id of the reo exit ring to which the msdu frame shall be pushed
+ * after (MPDU level) reordering has finished. Values are defined
+ * in enum %HAL_RX_MSDU_DESC_REO_DEST_IND_.
+ *
+ * msdu_drop
+ * Indicates that REO shall drop this MSDU and not forward it to
+ * any other ring.
+ *
+ * valid_sa
+ * Indicates OLE found a valid SA entry for this MSDU.
+ *
+ * sa_idx_timeout
+ * Indicates an unsuccessful MAC source address search due to
+ * the expiration of the search timer for this MSDU.
+ *
+ * valid_da
+ * When set, OLE found a valid DA entry for this MSDU.
+ *
+ * da_mcbc
+ * Field only valid if valid_da is set. Indicates the DA address
+ * is a Multicast or Broadcast address for this MSDU.
+ *
+ * da_idx_timeout
+ * Indicates an unsuccessful MAC destination address search due
+ * to the expiration of the search timer for this MSDU.
+ */
+
+enum hal_reo_dest_ring_buffer_type {
+ HAL_REO_DEST_RING_BUFFER_TYPE_MSDU,
+ HAL_REO_DEST_RING_BUFFER_TYPE_LINK_DESC,
+};
+
+enum hal_reo_dest_ring_push_reason {
+ HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED,
+ HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION,
+};
+
+enum hal_reo_dest_ring_error_code {
+ HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO,
+ HAL_REO_DEST_RING_ERROR_CODE_DESC_INVALID,
+ HAL_REO_DEST_RING_ERROR_CODE_AMPDU_IN_NON_BA,
+ HAL_REO_DEST_RING_ERROR_CODE_NON_BA_DUPLICATE,
+ HAL_REO_DEST_RING_ERROR_CODE_BA_DUPLICATE,
+ HAL_REO_DEST_RING_ERROR_CODE_FRAME_2K_JUMP,
+ HAL_REO_DEST_RING_ERROR_CODE_BAR_2K_JUMP,
+ HAL_REO_DEST_RING_ERROR_CODE_FRAME_OOR,
+ HAL_REO_DEST_RING_ERROR_CODE_BAR_OOR,
+ HAL_REO_DEST_RING_ERROR_CODE_NO_BA_SESSION,
+ HAL_REO_DEST_RING_ERROR_CODE_FRAME_SN_EQUALS_SSN,
+ HAL_REO_DEST_RING_ERROR_CODE_PN_CHECK_FAILED,
+ HAL_REO_DEST_RING_ERROR_CODE_2K_ERR_FLAG_SET,
+ HAL_REO_DEST_RING_ERROR_CODE_PN_ERR_FLAG_SET,
+ HAL_REO_DEST_RING_ERROR_CODE_DESC_BLOCKED,
+ HAL_REO_DEST_RING_ERROR_CODE_MAX,
+};
+
+#define HAL_REO_DEST_RING_INFO0_QUEUE_ADDR_HI GENMASK(7, 0)
+#define HAL_REO_DEST_RING_INFO0_BUFFER_TYPE BIT(8)
+#define HAL_REO_DEST_RING_INFO0_PUSH_REASON GENMASK(10, 9)
+#define HAL_REO_DEST_RING_INFO0_ERROR_CODE GENMASK(15, 11)
+#define HAL_REO_DEST_RING_INFO0_RX_QUEUE_NUM GENMASK(31, 16)
+
+#define HAL_REO_DEST_RING_INFO1_REORDER_INFO_VALID BIT(0)
+#define HAL_REO_DEST_RING_INFO1_REORDER_OPCODE GENMASK(4, 1)
+#define HAL_REO_DEST_RING_INFO1_REORDER_SLOT_IDX GENMASK(12, 5)
+
+#define HAL_REO_DEST_RING_INFO2_RING_ID GENMASK(27, 20)
+#define HAL_REO_DEST_RING_INFO2_LOOPING_COUNT GENMASK(31, 28)
+
+struct hal_reo_dest_ring {
+ struct ath11k_buffer_addr buf_addr_info;
+ struct rx_mpdu_desc rx_mpdu_info;
+ struct rx_msdu_desc rx_msdu_info;
+ u32 queue_addr_lo;
+ u32 info0; /* %HAL_REO_DEST_RING_INFO0_ */
+ u32 info1; /* %HAL_REO_DEST_RING_INFO1_ */
+ u32 rsvd0;
+ u32 rsvd1;
+ u32 rsvd2;
+ u32 rsvd3;
+ u32 rsvd4;
+ u32 rsvd5;
+ u32 info2; /* %HAL_REO_DEST_RING_INFO2_ */
+} __packed;
+
+/* hal_reo_dest_ring
+ *
+ * Producer: RXDMA
+ * Consumer: REO/SW/FW
+ *
+ * buf_addr_info
+ * Details of the physical address of a buffer or MSDU
+ * link descriptor.
+ *
+ * rx_mpdu_info
+ * General information related to the MPDU that is passed
+ * on from REO entrance ring to the REO destination ring.
+ *
+ * rx_msdu_info
+ * General information related to the MSDU that is passed
+ * on from RXDMA all the way to the REO destination ring.
+ *
+ * queue_addr_lo
+ * Address (lower 32 bits) of the REO queue descriptor.
+ *
+ * queue_addr_hi
+ * Address (upper 8 bits) of the REO queue descriptor.
+ *
+ * buffer_type
+ * Indicates the type of address provided in the buf_addr_info.
+ * Values are defined in enum %HAL_REO_DEST_RING_BUFFER_TYPE_.
+ *
+ * push_reason
+ * Reason for pushing this frame to this exit ring. Values are
+ * defined in enum %HAL_REO_DEST_RING_PUSH_REASON_.
+ *
+ * error_code
+ * Valid only when 'push_reason' is set to 'error_detected'. All error codes are
+ * defined in enum %HAL_REO_DEST_RING_ERROR_CODE_.
+ *
+ * rx_queue_num
+ * Indicates the REO MPDU reorder queue id from which this frame
+ * originated.
+ *
+ * reorder_info_valid
+ * When set, REO has been instructed to not perform the actual
+ * re-ordering of frames for this queue, but just to insert
+ * the reorder opcodes.
+ *
+ * reorder_opcode
+ * Field is valid when 'reorder_info_valid' is set. This field is
+ * always valid for debug purposes as well.
+ *
+ * reorder_slot_idx
+ * Valid only when 'reorder_info_valid' is set.
+ *
+ * ring_id
+ * The buffer pointer ring id.
+ * 0 - Idle ring
+ * 1 - N refers to other rings.
+ *
+ * looping_count
+ * Indicates the number of times the producer of entries into
+ * this ring has looped around the ring.
+ */
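+
+/* Example: a minimal sketch of how a consumer could use the INFO0 masks
+ * above to tell error pushes apart from normal routed frames. Assumes
+ * FIELD_GET() from <linux/bitfield.h>; the example_*() name is purely
+ * illustrative and not part of the driver.
+ */
+static inline bool example_reo_dest_ring_is_err(struct hal_reo_dest_ring *desc)
+{
+ return FIELD_GET(HAL_REO_DEST_RING_INFO0_PUSH_REASON, desc->info0) ==
+        HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED;
+}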
+
+enum hal_reo_entr_rxdma_ecode {
+ HAL_REO_ENTR_RING_RXDMA_ECODE_OVERFLOW_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_MPDU_LEN_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_FCS_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_DECRYPT_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_TKIP_MIC_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_UNECRYPTED_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_MSDU_LEN_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_MSDU_LIMIT_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_WIFI_PARSE_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_AMSDU_PARSE_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_SA_TIMEOUT_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_DA_TIMEOUT_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_FLOW_TIMEOUT_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_FLUSH_REQUEST_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_MAX,
+};
+
+#define HAL_REO_ENTR_RING_INFO0_QUEUE_ADDR_HI GENMASK(7, 0)
+#define HAL_REO_ENTR_RING_INFO0_MPDU_BYTE_COUNT GENMASK(21, 8)
+#define HAL_REO_ENTR_RING_INFO0_DEST_IND GENMASK(26, 22)
+#define HAL_REO_ENTR_RING_INFO0_FRAMELESS_BAR BIT(27)
+
+#define HAL_REO_ENTR_RING_INFO1_RXDMA_PUSH_REASON GENMASK(1, 0)
+#define HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE GENMASK(6, 2)
+
+struct hal_reo_entrance_ring {
+ struct ath11k_buffer_addr buf_addr_info;
+ struct rx_mpdu_desc rx_mpdu_info;
+ u32 queue_addr_lo;
+ u32 info0; /* %HAL_REO_ENTR_RING_INFO0_ */
+ u32 info1; /* %HAL_REO_ENTR_RING_INFO1_ */
+ u32 info2; /* %HAL_REO_DEST_RING_INFO2_ */
+
+} __packed;
+
+/* hal_reo_entrance_ring
+ *
+ * Producer: RXDMA
+ * Consumer: REO
+ *
+ * buf_addr_info
+ * Details of the physical address of a buffer or MSDU
+ * link descriptor.
+ *
+ * rx_mpdu_info
+ * General information related to the MPDU that is passed
+ * on from REO entrance ring to the REO destination ring.
+ *
+ * queue_addr_lo
+ * Address (lower 32 bits) of the REO queue descriptor.
+ *
+ * queue_addr_hi
+ * Address (upper 8 bits) of the REO queue descriptor.
+ *
+ * mpdu_byte_count
+ * An approximation of the number of bytes received in this MPDU.
+ * Used to keep stats on the amount of data flowing
+ * through a queue.
+ *
+ * reo_destination_indication
+ * The id of the reo exit ring to which the msdu frame shall be pushed
+ * after (MPDU level) reordering has finished. Values are defined
+ * in enum %HAL_RX_MSDU_DESC_REO_DEST_IND_.
+ *
+ * frameless_bar
+ * Indicates that this REO entrance ring struct contains BAR info
+ * from a multi TID BAR frame. The original multi TID BAR frame
+ * itself contained all the REO info for the first TID, but all
+ * the subsequent TID info and their linkage to the REO descriptors
+ * is passed down as 'frameless' BAR info.
+ *
+ * The only fields valid in this descriptor when this bit is set
+ * are queue_addr_lo, queue_addr_hi, mpdu_sequence_number,
+ * bar_frame and peer_meta_data.
+ *
+ * rxdma_push_reason
+ * Reason for pushing this frame to this exit ring. Values are
+ * defined in enum %HAL_REO_DEST_RING_PUSH_REASON_.
+ *
+ * rxdma_error_code
+ * Valid only when 'rxdma_push_reason' is set to 'error_detected'. All error codes are
+ * defined in enum %HAL_REO_ENTR_RING_RXDMA_ECODE_.
+ *
+ * ring_id
+ * The buffer pointer ring id.
+ * 0 - Idle ring
+ * 1 - N refers to other rings.
+ *
+ * looping_count
+ * Indicates the number of times the producer of entries into
+ * this ring has looped around the ring.
+ */
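+
+/* Example: a minimal sketch of extracting the RXDMA error code from an
+ * entrance ring entry with the INFO1 masks above; only meaningful when
+ * rxdma_push_reason signals a detected error. The helper name is
+ * illustrative only.
+ */
+static inline enum hal_reo_entr_rxdma_ecode
+example_reo_entr_ring_rxdma_ecode(struct hal_reo_entrance_ring *desc)
+{
+ return FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE, desc->info1);
+}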
+
+#define HAL_SW_MON_RING_INFO0_RXDMA_PUSH_REASON GENMASK(1, 0)
+#define HAL_SW_MON_RING_INFO0_RXDMA_ERROR_CODE GENMASK(6, 2)
+#define HAL_SW_MON_RING_INFO0_MPDU_FRAG_NUMBER GENMASK(10, 7)
+#define HAL_SW_MON_RING_INFO0_FRAMELESS_BAR BIT(11)
+#define HAL_SW_MON_RING_INFO0_STATUS_BUF_CNT GENMASK(15, 12)
+#define HAL_SW_MON_RING_INFO0_END_OF_PPDU BIT(16)
+
+#define HAL_SW_MON_RING_INFO1_PHY_PPDU_ID GENMASK(15, 0)
+#define HAL_SW_MON_RING_INFO1_RING_ID GENMASK(27, 20)
+#define HAL_SW_MON_RING_INFO1_LOOPING_COUNT GENMASK(31, 28)
+
+struct hal_sw_monitor_ring {
+ struct ath11k_buffer_addr buf_addr_info;
+ struct rx_mpdu_desc rx_mpdu_info;
+ struct ath11k_buffer_addr status_buf_addr_info;
+ u32 info0;
+ u32 info1;
+} __packed;
+
+#define HAL_REO_CMD_HDR_INFO0_CMD_NUMBER GENMASK(15, 0)
+#define HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED BIT(16)
+
+struct hal_reo_cmd_hdr {
+ u32 info0;
+} __packed;
+
+#define HAL_REO_GET_QUEUE_STATS_INFO0_QUEUE_ADDR_HI GENMASK(7, 0)
+#define HAL_REO_GET_QUEUE_STATS_INFO0_CLEAR_STATS BIT(8)
+
+struct hal_reo_get_queue_stats {
+ struct hal_reo_cmd_hdr cmd;
+ u32 queue_addr_lo;
+ u32 info0;
+ u32 rsvd0[6];
+} __packed;
+
+/* hal_reo_get_queue_stats
+ * Producer: SW
+ * Consumer: REO
+ *
+ * cmd
+ * Details for command execution tracking purposes.
+ *
+ * queue_addr_lo
+ * Address (lower 32 bits) of the REO queue descriptor.
+ *
+ * queue_addr_hi
+ * Address (upper 8 bits) of the REO queue descriptor.
+ *
+ * clear_stats
+ * Clear stats settings. When set, clear the stats after
+ * generating the status.
+ *
+ * Following stats will be cleared.
+ * Timeout_count
+ * Forward_due_to_bar_count
+ * Duplicate_count
+ * Frames_in_order_count
+ * BAR_received_count
+ * MPDU_Frames_processed_count
+ * MSDU_Frames_processed_count
+ * Total_processed_byte_count
+ * Late_receive_MPDU_count
+ * window_jump_2k
+ * Hole_count
+ */
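+
+/* Example: a minimal sketch of filling the command body from a 40-bit REO
+ * queue descriptor address. lower_32_bits()/upper_32_bits() are the usual
+ * kernel helpers; the example_*() name is illustrative only.
+ */
+static inline void
+example_fill_reo_get_queue_stats(struct hal_reo_get_queue_stats *cmd,
+                                 u64 paddr, bool clear)
+{
+ cmd->queue_addr_lo = lower_32_bits(paddr);
+ cmd->info0 = FIELD_PREP(HAL_REO_GET_QUEUE_STATS_INFO0_QUEUE_ADDR_HI,
+                         upper_32_bits(paddr) & 0xff);
+ if (clear)
+  cmd->info0 |= HAL_REO_GET_QUEUE_STATS_INFO0_CLEAR_STATS;
+}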
+
+#define HAL_REO_FLUSH_QUEUE_INFO0_DESC_ADDR_HI GENMASK(7, 0)
+#define HAL_REO_FLUSH_QUEUE_INFO0_BLOCK_DESC_ADDR BIT(8)
+#define HAL_REO_FLUSH_QUEUE_INFO0_BLOCK_RESRC_IDX GENMASK(10, 9)
+
+struct hal_reo_flush_queue {
+ struct hal_reo_cmd_hdr cmd;
+ u32 desc_addr_lo;
+ u32 info0;
+ u32 rsvd0[6];
+} __packed;
+
+#define HAL_REO_FLUSH_CACHE_INFO0_CACHE_ADDR_HI GENMASK(7, 0)
+#define HAL_REO_FLUSH_CACHE_INFO0_FWD_ALL_MPDUS BIT(8)
+#define HAL_REO_FLUSH_CACHE_INFO0_RELEASE_BLOCK_IDX BIT(9)
+#define HAL_REO_FLUSH_CACHE_INFO0_BLOCK_RESRC_IDX GENMASK(11, 10)
+#define HAL_REO_FLUSH_CACHE_INFO0_FLUSH_WO_INVALIDATE BIT(12)
+#define HAL_REO_FLUSH_CACHE_INFO0_BLOCK_CACHE_USAGE BIT(13)
+#define HAL_REO_FLUSH_CACHE_INFO0_FLUSH_ALL BIT(14)
+
+struct hal_reo_flush_cache {
+ struct hal_reo_cmd_hdr cmd;
+ u32 cache_addr_lo;
+ u32 info0;
+ u32 rsvd0[6];
+} __packed;
+
+#define HAL_TCL_DATA_CMD_INFO0_DESC_TYPE BIT(0)
+#define HAL_TCL_DATA_CMD_INFO0_EPD BIT(1)
+#define HAL_TCL_DATA_CMD_INFO0_ENCAP_TYPE GENMASK(3, 2)
+#define HAL_TCL_DATA_CMD_INFO0_ENCRYPT_TYPE GENMASK(7, 4)
+#define HAL_TCL_DATA_CMD_INFO0_SRC_BUF_SWAP BIT(8)
+#define HAL_TCL_DATA_CMD_INFO0_LNK_META_SWAP BIT(9)
+#define HAL_TCL_DATA_CMD_INFO0_SEARCH_TYPE GENMASK(13, 12)
+#define HAL_TCL_DATA_CMD_INFO0_ADDR_EN GENMASK(15, 14)
+#define HAL_TCL_DATA_CMD_INFO0_CMD_NUM GENMASK(31, 16)
+
+#define HAL_TCL_DATA_CMD_INFO1_DATA_LEN GENMASK(15, 0)
+#define HAL_TCL_DATA_CMD_INFO1_IP4_CKSUM_EN BIT(16)
+#define HAL_TCL_DATA_CMD_INFO1_UDP4_CKSUM_EN BIT(17)
+#define HAL_TCL_DATA_CMD_INFO1_UDP6_CKSUM_EN BIT(18)
+#define HAL_TCL_DATA_CMD_INFO1_TCP4_CKSUM_EN BIT(19)
+#define HAL_TCL_DATA_CMD_INFO1_TCP6_CKSUM_EN BIT(20)
+#define HAL_TCL_DATA_CMD_INFO1_TO_FW BIT(21)
+#define HAL_TCL_DATA_CMD_INFO1_PKT_OFFSET GENMASK(31, 23)
+
+#define HAL_TCL_DATA_CMD_INFO2_BUF_TIMESTAMP GENMASK(18, 0)
+#define HAL_TCL_DATA_CMD_INFO2_BUF_T_VALID BIT(19)
+#define HAL_IPQ8074_TCL_DATA_CMD_INFO2_MESH_ENABLE BIT(20)
+#define HAL_TCL_DATA_CMD_INFO2_TID_OVERWRITE BIT(21)
+#define HAL_TCL_DATA_CMD_INFO2_TID GENMASK(25, 22)
+#define HAL_TCL_DATA_CMD_INFO2_LMAC_ID GENMASK(27, 26)
+
+#define HAL_TCL_DATA_CMD_INFO3_DSCP_TID_TABLE_IDX GENMASK(5, 0)
+#define HAL_TCL_DATA_CMD_INFO3_SEARCH_INDEX GENMASK(25, 6)
+#define HAL_TCL_DATA_CMD_INFO3_CACHE_SET_NUM GENMASK(29, 26)
+#define HAL_QCN9074_TCL_DATA_CMD_INFO3_MESH_ENABLE GENMASK(31, 30)
+
+#define HAL_TCL_DATA_CMD_INFO4_RING_ID GENMASK(27, 20)
+#define HAL_TCL_DATA_CMD_INFO4_LOOPING_COUNT GENMASK(31, 28)
+
+enum hal_encrypt_type {
+ HAL_ENCRYPT_TYPE_WEP_40,
+ HAL_ENCRYPT_TYPE_WEP_104,
+ HAL_ENCRYPT_TYPE_TKIP_NO_MIC,
+ HAL_ENCRYPT_TYPE_WEP_128,
+ HAL_ENCRYPT_TYPE_TKIP_MIC,
+ HAL_ENCRYPT_TYPE_WAPI,
+ HAL_ENCRYPT_TYPE_CCMP_128,
+ HAL_ENCRYPT_TYPE_OPEN,
+ HAL_ENCRYPT_TYPE_CCMP_256,
+ HAL_ENCRYPT_TYPE_GCMP_128,
+ HAL_ENCRYPT_TYPE_AES_GCMP_256,
+ HAL_ENCRYPT_TYPE_WAPI_GCM_SM4,
+};
+
+enum hal_tcl_encap_type {
+ HAL_TCL_ENCAP_TYPE_RAW,
+ HAL_TCL_ENCAP_TYPE_NATIVE_WIFI,
+ HAL_TCL_ENCAP_TYPE_ETHERNET,
+ HAL_TCL_ENCAP_TYPE_802_3 = 3,
+};
+
+enum hal_tcl_desc_type {
+ HAL_TCL_DESC_TYPE_BUFFER,
+ HAL_TCL_DESC_TYPE_EXT_DESC,
+};
+
+enum hal_wbm_htt_tx_comp_status {
+ HAL_WBM_REL_HTT_TX_COMP_STATUS_OK,
+ HAL_WBM_REL_HTT_TX_COMP_STATUS_DROP,
+ HAL_WBM_REL_HTT_TX_COMP_STATUS_TTL,
+ HAL_WBM_REL_HTT_TX_COMP_STATUS_REINJ,
+ HAL_WBM_REL_HTT_TX_COMP_STATUS_INSPECT,
+ HAL_WBM_REL_HTT_TX_COMP_STATUS_MEC_NOTIFY,
+};
+
+struct hal_tcl_data_cmd {
+ struct ath11k_buffer_addr buf_addr_info;
+ u32 info0;
+ u32 info1;
+ u32 info2;
+ u32 info3;
+ u32 info4;
+} __packed;
+
+/* hal_tcl_data_cmd
+ *
+ * buf_addr_info
+ * Details of the physical address of a buffer or MSDU
+ * link descriptor.
+ *
+ * desc_type
+ * Indicates the type of address provided in the buf_addr_info.
+ * Values are defined in enum %HAL_TCL_DESC_TYPE_.
+ *
+ * epd
+ * When this bit is set, the input packet is an EPD type.
+ *
+ * encap_type
+ * Indicates the encapsulation that HW will perform. Values are
+ * defined in enum %HAL_TCL_ENCAP_TYPE_.
+ *
+ * encrypt_type
+ * Field only valid for encap_type: RAW
+ * Values are defined in enum %HAL_ENCRYPT_TYPE_.
+ *
+ * src_buffer_swap
+ * Treats source memory (packet buffer) organization as big-endian.
+ * 1'b0: Source memory is little endian
+ * 1'b1: Source memory is big endian
+ *
+ * link_meta_swap
+ * Treats link descriptor and Metadata as big-endian.
+ * 1'b0: memory is little endian
+ * 1'b1: memory is big endian
+ *
+ * search_type
+ * Search type select
+ * 0 - Normal search, 1 - Index based address search,
+ * 2 - Index based flow search
+ *
+ * addrx_en
+ * addry_en
+ * Address X/Y search enable in ASE, respectively.
+ * 1'b0: Search disable
+ * 1'b1: Search Enable
+ *
+ * cmd_num
+ * This number can be used to match against status.
+ *
+ * data_length
+ * MSDU length in case of direct descriptor. Length of link
+ * extension descriptor in case of Link extension descriptor.
+ *
+ * *_checksum_en
+ * Enable checksum replacement for ipv4, udp_over_ipv4, ipv6,
+ * udp_over_ipv6, tcp_over_ipv4 and tcp_over_ipv6.
+ *
+ * to_fw
+ * Forward packet to FW along with classification result. The
+ * packet will not be forwarded to TQM when this bit is set.
+ * 1'b0: Use classification result to forward the packet.
+ * 1'b1: Override classification result & forward packet only to fw
+ *
+ * packet_offset
+ * Packet offset from Metadata in case of direct buffer descriptor.
+ *
+ * buffer_timestamp
+ * buffer_timestamp_valid
+ * Frame system entrance timestamp. It shall be filled in by the
+ * first module (SW, TCL or TQM) that sees the frame.
+ *
+ * mesh_enable
+ * For raw WiFi frames, this indicates transmission to a mesh STA,
+ * enabling the interpretation of the 'Mesh Control Present' bit
+ * (bit 8) of QoS Control.
+ * For native WiFi frames, this indicates that a 'Mesh Control'
+ * field is present between the header and the LLC.
+ *
+ * hlos_tid_overwrite
+ * When set, TCL shall ignore the IP DSCP and VLAN PCP
+ * fields and use HLOS_TID as the final TID. Otherwise TCL
+ * shall consider the DSCP and PCP fields as well as HLOS_TID
+ * and choose a final TID based on the configured priority.
+ *
+ * hlos_tid
+ * HLOS MSDU priority
+ * Field is used when HLOS_TID_overwrite is set.
+ *
+ * lmac_id
+ * TCL uses this LMAC_ID in address search, i.e., while
+ * finding matching entry for the packet in AST corresponding
+ * to given LMAC_ID
+ *
+ * If LMAC ID is all 1s (=> value 3), it indicates wildcard
+ * match for any MAC
+ *
+ * dscp_tid_table_num
+ * DSCP to TID mapping table number that needs to be used
+ * for the MSDU.
+ *
+ * search_index
+ * The index that will be used for index based address or
+ * flow search. The field is valid when 'search_type' is 1 or 2.
+ *
+ * cache_set_num
+ * Cache set number that should be used to cache the index
+ * based search results, for address and flow search. This
+ * value should be equal to the LSB four bits of the hash value
+ * of the match data, in case the search index points to an
+ * entry which may also be used in content based search. The
+ * value can be anything when the entry pointed to by the search
+ * index will not be used for content based search.
+ *
+ * ring_id
+ * The buffer pointer ring ID.
+ * 0 refers to the IDLE ring
+ * 1 - N refers to other rings
+ *
+ * looping_count
+ * A count value that indicates the number of times the
+ * producer of entries into the Ring has looped around the
+ * ring.
+ *
+ * At initialization time, this value is set to 0. On the
+ * first loop, this value is set to 1. After the maximum value
+ * allowed by the number of bits for this field is reached, the
+ * count value continues with 0 again.
+ *
+ * In case SW is the consumer of the ring entries, it can
+ * use this field to figure out up to where the producer of
+ * entries has created new entries. This eliminates the need to
+ * check where the 'head pointer' of the ring is located once
+ * the SW starts processing an interrupt indicating that new
+ * entries have been put into this ring.
+ *
+ * Also note that SW, if it wants, only needs to look at the
+ * LSB of this count value.
+ */
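+
+/* Example: a minimal sketch of programming a TCL data command for an
+ * Ethernet-encapsulated MSDU with IPv4 TCP/UDP checksum offload enabled.
+ * The helper and its arguments are illustrative, not the actual TX path.
+ */
+static inline void example_fill_tcl_data_cmd(struct hal_tcl_data_cmd *cmd,
+                                             u16 data_len)
+{
+ cmd->info0 = FIELD_PREP(HAL_TCL_DATA_CMD_INFO0_DESC_TYPE,
+                         HAL_TCL_DESC_TYPE_BUFFER) |
+              FIELD_PREP(HAL_TCL_DATA_CMD_INFO0_ENCAP_TYPE,
+                         HAL_TCL_ENCAP_TYPE_ETHERNET);
+ cmd->info1 = FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_DATA_LEN, data_len) |
+              HAL_TCL_DATA_CMD_INFO1_IP4_CKSUM_EN |
+              HAL_TCL_DATA_CMD_INFO1_UDP4_CKSUM_EN |
+              HAL_TCL_DATA_CMD_INFO1_TCP4_CKSUM_EN;
+}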
+
+#define HAL_TCL_DESC_LEN sizeof(struct hal_tcl_data_cmd)
+
+enum hal_tcl_gse_ctrl {
+ HAL_TCL_GSE_CTRL_RD_STAT,
+ HAL_TCL_GSE_CTRL_SRCH_DIS,
+ HAL_TCL_GSE_CTRL_WR_BK_SINGLE,
+ HAL_TCL_GSE_CTRL_WR_BK_ALL,
+ HAL_TCL_GSE_CTRL_INVAL_SINGLE,
+ HAL_TCL_GSE_CTRL_INVAL_ALL,
+ HAL_TCL_GSE_CTRL_WR_BK_INVAL_SINGLE,
+ HAL_TCL_GSE_CTRL_WR_BK_INVAL_ALL,
+ HAL_TCL_GSE_CTRL_CLR_STAT_SINGLE,
+};
+
+/* hal_tcl_gse_ctrl
+ *
+ * rd_stat
+ * Report or Read statistics
+ * srch_dis
+ * Search disable. Report only Hash.
+ * wr_bk_single
+ * Write Back single entry
+ * wr_bk_all
+ * Write Back entire cache entry
+ * inval_single
+ * Invalidate single cache entry
+ * inval_all
+ * Invalidate entire cache
+ * wr_bk_inval_single
+ * Write back and invalidate single entry in cache
+ * wr_bk_inval_all
+ * Write back and invalidate entire cache
+ * clr_stat_single
+ * Clear statistics for single entry
+ */
+
+#define HAL_TCL_GSE_CMD_INFO0_CTRL_BUF_ADDR_HI GENMASK(7, 0)
+#define HAL_TCL_GSE_CMD_INFO0_GSE_CTRL GENMASK(11, 8)
+#define HAL_TCL_GSE_CMD_INFO0_GSE_SEL BIT(12)
+#define HAL_TCL_GSE_CMD_INFO0_STATUS_DEST_RING_ID BIT(13)
+#define HAL_TCL_GSE_CMD_INFO0_SWAP BIT(14)
+
+#define HAL_TCL_GSE_CMD_INFO1_RING_ID GENMASK(27, 20)
+#define HAL_TCL_GSE_CMD_INFO1_LOOPING_COUNT GENMASK(31, 28)
+
+struct hal_tcl_gse_cmd {
+ u32 ctrl_buf_addr_lo;
+ u32 info0;
+ u32 meta_data[2];
+ u32 rsvd0[2];
+ u32 info1;
+} __packed;
+
+/* hal_tcl_gse_cmd
+ *
+ * ctrl_buf_addr_lo, ctrl_buf_addr_hi
+ * Address of a control buffer containing additional info needed
+ * for this command execution.
+ *
+ * gse_ctrl
+ * GSE control operations. This includes cache operations and table
+ * entry statistics read/clear operation. Values are defined in
+ * enum %HAL_TCL_GSE_CTRL.
+ *
+ * gse_sel
+ * To select the ASE/FSE to do the operation mentioned by GSE_ctrl.
+ * 0: FSE select 1: ASE select
+ *
+ * status_destination_ring_id
+ * TCL status ring to which the GSE status needs to be sent.
+ *
+ * swap
+ * Bit to enable byte swapping of contents of buffer.
+ *
+ * meta_data
+ * Meta data to be returned in the status descriptor
+ */
+
+enum hal_tcl_cache_op_res {
+ HAL_TCL_CACHE_OP_RES_DONE,
+ HAL_TCL_CACHE_OP_RES_NOT_FOUND,
+ HAL_TCL_CACHE_OP_RES_TIMEOUT,
+};
+
+#define HAL_TCL_STATUS_RING_INFO0_GSE_CTRL GENMASK(3, 0)
+#define HAL_TCL_STATUS_RING_INFO0_GSE_SEL BIT(4)
+#define HAL_TCL_STATUS_RING_INFO0_CACHE_OP_RES GENMASK(6, 5)
+#define HAL_TCL_STATUS_RING_INFO0_MSDU_CNT GENMASK(31, 8)
+
+#define HAL_TCL_STATUS_RING_INFO1_HASH_IDX GENMASK(19, 0)
+
+#define HAL_TCL_STATUS_RING_INFO2_RING_ID GENMASK(27, 20)
+#define HAL_TCL_STATUS_RING_INFO2_LOOPING_COUNT GENMASK(31, 28)
+
+struct hal_tcl_status_ring {
+ u32 info0;
+ u32 msdu_byte_count;
+ u32 msdu_timestamp;
+ u32 meta_data[2];
+ u32 info1;
+ u32 rsvd0;
+ u32 info2;
+} __packed;
+
+/* hal_tcl_status_ring
+ *
+ * gse_ctrl
+ * GSE control operations. This includes cache operations and table
+ * entry statistics read/clear operation. Values are defined in
+ * enum %HAL_TCL_GSE_CTRL.
+ *
+ * gse_sel
+ * To select the ASE/FSE to do the operation mentioned by GSE_ctrl.
+ * 0: FSE select 1: ASE select
+ *
+ * cache_op_res
+ * Cache operation result. Values are defined in enum
+ * %HAL_TCL_CACHE_OP_RES_.
+ *
+ * msdu_cnt
+ * msdu_byte_count
+ * MSDU count of the entry and MSDU byte count for entry 1.
+ *
+ * hash_idx
+ * Hash value of the entry in case the search failed or was
+ * disabled.
+ */
+
+#define HAL_CE_SRC_DESC_ADDR_INFO_ADDR_HI GENMASK(7, 0)
+#define HAL_CE_SRC_DESC_ADDR_INFO_HASH_EN BIT(8)
+#define HAL_CE_SRC_DESC_ADDR_INFO_BYTE_SWAP BIT(9)
+#define HAL_CE_SRC_DESC_ADDR_INFO_DEST_SWAP BIT(10)
+#define HAL_CE_SRC_DESC_ADDR_INFO_GATHER BIT(11)
+#define HAL_CE_SRC_DESC_ADDR_INFO_LEN GENMASK(31, 16)
+
+#define HAL_CE_SRC_DESC_META_INFO_DATA GENMASK(15, 0)
+
+#define HAL_CE_SRC_DESC_FLAGS_RING_ID GENMASK(27, 20)
+#define HAL_CE_SRC_DESC_FLAGS_LOOP_CNT HAL_SRNG_DESC_LOOP_CNT
+
+struct hal_ce_srng_src_desc {
+ u32 buffer_addr_low;
+ u32 buffer_addr_info; /* %HAL_CE_SRC_DESC_ADDR_INFO_ */
+ u32 meta_info; /* %HAL_CE_SRC_DESC_META_INFO_ */
+ u32 flags; /* %HAL_CE_SRC_DESC_FLAGS_ */
+} __packed;
+
+/*
+ * hal_ce_srng_src_desc
+ *
+ * buffer_addr_lo
+ * LSB 32 bits of the 40 Bit Pointer to the source buffer
+ *
+ * buffer_addr_hi
+ * MSB 8 bits of the 40 Bit Pointer to the source buffer
+ *
+ * toeplitz_en
+ * Enable generation of the 32-bit Toeplitz-LFSR hash for
+ * the data transfer. In case of gather, the value in the first
+ * source ring entry of the gather copy cycle is taken into account.
+ *
+ * src_swap
+ * Treats source memory organization as big-endian. For
+ * each dword read (4 bytes), the byte 0 is swapped with byte 3
+ * and byte 1 is swapped with byte 2.
+ * In case of gather, the value in the first source ring entry
+ * of the gather copy cycle is taken into account.
+ *
+ * dest_swap
+ * Treats destination memory organization as big-endian.
+ * For each dword write (4 bytes), the byte 0 is swapped with
+ * byte 3 and byte 1 is swapped with byte 2.
+ * In case of gather, the value in the first source ring entry
+ * of the gather copy cycle is taken into account.
+ *
+ * gather
+ * Enables gather of multiple copy engine source
+ * descriptors to one destination.
+ *
+ * ce_res_0
+ * Reserved
+ *
+ * length
+ * Length of the buffer in units of octets of the current
+ * descriptor
+ *
+ * fw_metadata
+ * Meta data used by FW.
+ * In case of gather, the value in the first source ring entry
+ * of the gather copy cycle is taken into account.
+ *
+ * ce_res_1
+ * Reserved
+ *
+ * ce_res_2
+ * Reserved
+ *
+ * ring_id
+ * The buffer pointer ring ID.
+ * 0 refers to the IDLE ring
+ * 1 - N refers to other rings
+ * Helps with debugging when dumping ring contents.
+ *
+ * looping_count
+ * A count value that indicates the number of times the
+ * producer of entries into the Ring has looped around the
+ * ring.
+ *
+ * At initialization time, this value is set to 0. On the
+ * first loop, this value is set to 1. After the maximum value
+ * allowed by the number of bits for this field is reached, the
+ * count value continues with 0 again.
+ *
+ * In case SW is the consumer of the ring entries, it can
+ * use this field to figure out up to where the producer of
+ * entries has created new entries. This eliminates the need to
+ * check where the 'head pointer' of the ring is located once
+ * the SW starts processing an interrupt indicating that new
+ * entries have been put into this ring.
+ *
+ * Also note that SW, if it wants, only needs to look at the
+ * LSB of this count value.
+ */
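+
+/* Example: a minimal sketch of filling a CE source descriptor for a single,
+ * non-gather transfer; 'paddr' and the helper name are illustrative.
+ */
+static inline void example_fill_ce_src_desc(struct hal_ce_srng_src_desc *desc,
+                                            u64 paddr, u16 len, u16 fw_metadata)
+{
+ desc->buffer_addr_low = lower_32_bits(paddr);
+ desc->buffer_addr_info =
+  FIELD_PREP(HAL_CE_SRC_DESC_ADDR_INFO_ADDR_HI,
+             upper_32_bits(paddr) & 0xff) |
+  FIELD_PREP(HAL_CE_SRC_DESC_ADDR_INFO_LEN, len);
+ desc->meta_info = FIELD_PREP(HAL_CE_SRC_DESC_META_INFO_DATA, fw_metadata);
+ desc->flags = 0; /* ring_id/loop_cnt are filled in by the SRNG module */
+}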
+
+#define HAL_CE_DEST_DESC_ADDR_INFO_ADDR_HI GENMASK(7, 0)
+#define HAL_CE_DEST_DESC_ADDR_INFO_RING_ID GENMASK(27, 20)
+#define HAL_CE_DEST_DESC_ADDR_INFO_LOOP_CNT HAL_SRNG_DESC_LOOP_CNT
+
+struct hal_ce_srng_dest_desc {
+ u32 buffer_addr_low;
+ u32 buffer_addr_info; /* %HAL_CE_DEST_DESC_ADDR_INFO_ */
+} __packed;
+
+/* hal_ce_srng_dest_desc
+ *
+ * dst_buffer_low
+ * LSB 32 bits of the 40 Bit Pointer to the Destination
+ * buffer
+ *
+ * dst_buffer_high
+ * MSB 8 bits of the 40 Bit Pointer to the Destination
+ * buffer
+ *
+ * ce_res_4
+ * Reserved
+ *
+ * ring_id
+ * The buffer pointer ring ID.
+ * 0 refers to the IDLE ring
+ * 1 - N refers to other rings
+ * Helps with debugging when dumping ring contents.
+ *
+ * looping_count
+ * A count value that indicates the number of times the
+ * producer of entries into the Ring has looped around the
+ * ring.
+ *
+ * At initialization time, this value is set to 0. On the
+ * first loop, this value is set to 1. After the maximum value
+ * allowed by the number of bits for this field is reached, the
+ * count value continues with 0 again.
+ *
+ * In case SW is the consumer of the ring entries, it can
+ * use this field to figure out up to where the producer of
+ * entries has created new entries. This eliminates the need to
+ * check where the 'head pointer' of the ring is located once
+ * the SW starts processing an interrupt indicating that new
+ * entries have been put into this ring.
+ *
+ * Also note that SW, if it wants, only needs to look at the
+ * LSB of this count value.
+ */
+
+#define HAL_CE_DST_STATUS_DESC_FLAGS_HASH_EN BIT(8)
+#define HAL_CE_DST_STATUS_DESC_FLAGS_BYTE_SWAP BIT(9)
+#define HAL_CE_DST_STATUS_DESC_FLAGS_DEST_SWAP BIT(10)
+#define HAL_CE_DST_STATUS_DESC_FLAGS_GATHER BIT(11)
+#define HAL_CE_DST_STATUS_DESC_FLAGS_LEN GENMASK(31, 16)
+
+#define HAL_CE_DST_STATUS_DESC_META_INFO_DATA GENMASK(15, 0)
+#define HAL_CE_DST_STATUS_DESC_META_INFO_RING_ID GENMASK(27, 20)
+#define HAL_CE_DST_STATUS_DESC_META_INFO_LOOP_CNT HAL_SRNG_DESC_LOOP_CNT
+
+struct hal_ce_srng_dst_status_desc {
+ u32 flags; /* %HAL_CE_DST_STATUS_DESC_FLAGS_ */
+ u32 toeplitz_hash0;
+ u32 toeplitz_hash1;
+ u32 meta_info; /* HAL_CE_DST_STATUS_DESC_META_INFO_ */
+} __packed;
+
+/* hal_ce_srng_dst_status_desc
+ *
+ * ce_res_5
+ * Reserved
+ *
+ * toeplitz_en
+ * Toeplitz hash generation was enabled for this transfer.
+ *
+ * src_swap
+ * Source memory buffer swapped
+ *
+ * dest_swap
+ * Destination memory buffer swapped
+ *
+ * gather
+ * Gather of multiple copy engine source descriptors to one
+ * destination enabled
+ *
+ * ce_res_6
+ * Reserved
+ *
+ * length
+ * Sum of all the lengths of the source descriptors in the
+ * gather chain
+ *
+ * toeplitz_hash_0
+ * 32 LS bits of 64 bit Toeplitz LFSR hash result
+ *
+ * toeplitz_hash_1
+ * 32 MS bits of 64 bit Toeplitz LFSR hash result
+ *
+ * fw_metadata
+ * Meta data used by FW
+ * In case of gather, the value in the first source ring entry
+ * of the gather copy cycle is taken into account.
+ *
+ * ce_res_7
+ * Reserved
+ *
+ * ring_id
+ * The buffer pointer ring ID.
+ * 0 refers to the IDLE ring
+ * 1 - N refers to other rings
+ * Helps with debugging when dumping ring contents.
+ *
+ * looping_count
+ * A count value that indicates the number of times the
+ * producer of entries into the Ring has looped around the
+ * ring.
+ *
+ * At initialization time, this value is set to 0. On the
+ * first loop, this value is set to 1. After the maximum value
+ * allowed by the number of bits for this field is reached, the
+ * count value continues with 0 again.
+ *
+ * In case SW is the consumer of the ring entries, it can
+ * use this field to figure out up to where the producer of
+ * entries has created new entries. This eliminates the need to
+ * check where the 'head pointer' of the ring is located once
+ * the SW starts processing an interrupt indicating that new
+ * entries have been put into this ring.
+ *
+ * Also note that SW, if it wants, only needs to look at the
+ * LSB of this count value.
+ */
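+
+/* Example: a minimal sketch of reading the completed transfer length back
+ * from a destination status descriptor; for gather transfers this is the
+ * sum of all source lengths. The helper name is illustrative only.
+ */
+static inline u16
+example_ce_dst_status_len(struct hal_ce_srng_dst_status_desc *desc)
+{
+ return FIELD_GET(HAL_CE_DST_STATUS_DESC_FLAGS_LEN, desc->flags);
+}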
+
+#define HAL_TX_RATE_STATS_INFO0_VALID BIT(0)
+#define HAL_TX_RATE_STATS_INFO0_BW GENMASK(2, 1)
+#define HAL_TX_RATE_STATS_INFO0_PKT_TYPE GENMASK(6, 3)
+#define HAL_TX_RATE_STATS_INFO0_STBC BIT(7)
+#define HAL_TX_RATE_STATS_INFO0_LDPC BIT(8)
+#define HAL_TX_RATE_STATS_INFO0_SGI GENMASK(10, 9)
+#define HAL_TX_RATE_STATS_INFO0_MCS GENMASK(14, 11)
+#define HAL_TX_RATE_STATS_INFO0_OFDMA_TX BIT(15)
+#define HAL_TX_RATE_STATS_INFO0_TONES_IN_RU GENMASK(27, 16)
+
+enum hal_tx_rate_stats_bw {
+ HAL_TX_RATE_STATS_BW_20,
+ HAL_TX_RATE_STATS_BW_40,
+ HAL_TX_RATE_STATS_BW_80,
+ HAL_TX_RATE_STATS_BW_160,
+};
+
+enum hal_tx_rate_stats_pkt_type {
+ HAL_TX_RATE_STATS_PKT_TYPE_11A,
+ HAL_TX_RATE_STATS_PKT_TYPE_11B,
+ HAL_TX_RATE_STATS_PKT_TYPE_11N,
+ HAL_TX_RATE_STATS_PKT_TYPE_11AC,
+ HAL_TX_RATE_STATS_PKT_TYPE_11AX,
+};
+
+enum hal_tx_rate_stats_sgi {
+ HAL_TX_RATE_STATS_SGI_08US,
+ HAL_TX_RATE_STATS_SGI_04US,
+ HAL_TX_RATE_STATS_SGI_16US,
+ HAL_TX_RATE_STATS_SGI_32US,
+};
+
+struct hal_tx_rate_stats {
+ u32 info0;
+ u32 tsf;
+} __packed;
+
+struct hal_wbm_link_desc {
+ struct ath11k_buffer_addr buf_addr_info;
+} __packed;
+
+/* hal_wbm_link_desc
+ *
+ * Producer: WBM
+ * Consumer: WBM
+ *
+ * buf_addr_info
+ * Details of the physical address of a buffer or MSDU
+ * link descriptor.
+ */
+
+enum hal_wbm_rel_src_module {
+ HAL_WBM_REL_SRC_MODULE_TQM,
+ HAL_WBM_REL_SRC_MODULE_RXDMA,
+ HAL_WBM_REL_SRC_MODULE_REO,
+ HAL_WBM_REL_SRC_MODULE_FW,
+ HAL_WBM_REL_SRC_MODULE_SW,
+};
+
+enum hal_wbm_rel_desc_type {
+ HAL_WBM_REL_DESC_TYPE_REL_MSDU,
+ HAL_WBM_REL_DESC_TYPE_MSDU_LINK,
+ HAL_WBM_REL_DESC_TYPE_MPDU_LINK,
+ HAL_WBM_REL_DESC_TYPE_MSDU_EXT,
+ HAL_WBM_REL_DESC_TYPE_QUEUE_EXT,
+};
+
+/* hal_wbm_rel_desc_type
+ *
+ * msdu_buffer
+ * The address points to an MSDU buffer
+ *
+ * msdu_link_descriptor
+ * The address points to a Tx MSDU link descriptor
+ *
+ * mpdu_link_descriptor
+ * The address points to an MPDU link descriptor
+ *
+ * msdu_ext_descriptor
+ * The address points to an MSDU extension descriptor
+ *
+ * queue_ext_descriptor
+ * The address points to a TQM queue extension descriptor. WBM should
+ * treat this in the same way as a link descriptor.
+ */
+
+enum hal_wbm_rel_bm_act {
+ HAL_WBM_REL_BM_ACT_PUT_IN_IDLE,
+ HAL_WBM_REL_BM_ACT_REL_MSDU,
+};
+
+/* hal_wbm_rel_bm_act
+ *
+ * put_in_idle_list
+ * Put the buffer or descriptor back in the idle list. In case of MSDU or
+ * MPDU link descriptor, BM does not need to check to release any
+ * individual MSDU buffers.
+ *
+ * release_msdu_list
+ * This BM action can only be used in combination with desc_type being
+ * msdu_link_descriptor. Field first_msdu_index points out which MSDU
+ * pointer in the MSDU link descriptor is the first of an MPDU that is
+ * released. BM shall release all the MSDU buffers linked to this first
+ * MSDU buffer pointer. All related MSDU buffer pointer entries shall be
+ * set to value 0, which represents the 'NULL' pointer. When all MSDU
+ * buffer pointers in the MSDU link descriptor are 'NULL', the MSDU link
+ * descriptor itself shall also be released.
+ */
+
+#define HAL_WBM_RELEASE_INFO0_REL_SRC_MODULE GENMASK(2, 0)
+#define HAL_WBM_RELEASE_INFO0_BM_ACTION GENMASK(5, 3)
+#define HAL_WBM_RELEASE_INFO0_DESC_TYPE GENMASK(8, 6)
+#define HAL_WBM_RELEASE_INFO0_FIRST_MSDU_IDX GENMASK(12, 9)
+#define HAL_WBM_RELEASE_INFO0_TQM_RELEASE_REASON GENMASK(16, 13)
+#define HAL_WBM_RELEASE_INFO0_RXDMA_PUSH_REASON GENMASK(18, 17)
+#define HAL_WBM_RELEASE_INFO0_RXDMA_ERROR_CODE GENMASK(23, 19)
+#define HAL_WBM_RELEASE_INFO0_REO_PUSH_REASON GENMASK(25, 24)
+#define HAL_WBM_RELEASE_INFO0_REO_ERROR_CODE GENMASK(30, 26)
+#define HAL_WBM_RELEASE_INFO0_WBM_INTERNAL_ERROR BIT(31)
+
+#define HAL_WBM_RELEASE_INFO1_TQM_STATUS_NUMBER GENMASK(23, 0)
+#define HAL_WBM_RELEASE_INFO1_TRANSMIT_COUNT GENMASK(30, 24)
+
+#define HAL_WBM_RELEASE_INFO2_ACK_FRAME_RSSI GENMASK(7, 0)
+#define HAL_WBM_RELEASE_INFO2_SW_REL_DETAILS_VALID BIT(8)
+#define HAL_WBM_RELEASE_INFO2_FIRST_MSDU BIT(9)
+#define HAL_WBM_RELEASE_INFO2_LAST_MSDU BIT(10)
+#define HAL_WBM_RELEASE_INFO2_MSDU_IN_AMSDU BIT(11)
+#define HAL_WBM_RELEASE_INFO2_FW_TX_NOTIF_FRAME BIT(12)
+#define HAL_WBM_RELEASE_INFO2_BUFFER_TIMESTAMP GENMASK(31, 13)
+
+#define HAL_WBM_RELEASE_INFO3_PEER_ID GENMASK(15, 0)
+#define HAL_WBM_RELEASE_INFO3_TID GENMASK(19, 16)
+#define HAL_WBM_RELEASE_INFO3_RING_ID GENMASK(27, 20)
+#define HAL_WBM_RELEASE_INFO3_LOOPING_COUNT GENMASK(31, 28)
+
+#define HAL_WBM_REL_HTT_TX_COMP_INFO0_STATUS GENMASK(12, 9)
+#define HAL_WBM_REL_HTT_TX_COMP_INFO0_REINJ_REASON GENMASK(16, 13)
+#define HAL_WBM_REL_HTT_TX_COMP_INFO0_EXP_FRAME BIT(17)
+
+struct hal_wbm_release_ring {
+ struct ath11k_buffer_addr buf_addr_info;
+ u32 info0;
+ u32 info1;
+ u32 info2;
+ struct hal_tx_rate_stats rate_stats;
+ u32 info3;
+} __packed;
+
+/* hal_wbm_release_ring
+ *
+ * Producer: SW/TQM/RXDMA/REO/SWITCH
+ * Consumer: WBM/SW/FW
+ *
+ * HTT tx status is overlaid on wbm_release ring on 4-byte words 2, 3, 4 and 5
+ * for software based completions.
+ *
+ * buf_addr_info
+ * Details of the physical address of the buffer or link descriptor.
+ *
+ * release_source_module
+ * Indicates which module initiated the release of this buffer/descriptor.
+ * Values are defined in enum %HAL_WBM_REL_SRC_MODULE_.
+ *
+ * bm_action
+ * Field only valid when the field return_buffer_manager in
+ * Released_buff_or_desc_addr_info indicates:
+ * WBM_IDLE_BUF_LIST / WBM_IDLE_DESC_LIST
+ * Values are defined in enum %HAL_WBM_REL_BM_ACT_.
+ *
+ * buffer_or_desc_type
+ * Field only valid when WBM is marked as the return_buffer_manager in
+ * the Released_Buffer_address_info. Indicates the type of buffer or
+ * descriptor being released. Values are in enum %HAL_WBM_REL_DESC_TYPE_.
+ *
+ * first_msdu_index
+ * Field only valid for the bm_action release_msdu_list. The index of the
+ * first MSDU in an MSDU link descriptor, all of which belong to the
+ * same MPDU.
+ *
+ * tqm_release_reason
+ * Field only valid when Release_source_module is set to release_source_TQM
+ * Release reasons are defined in enum %HAL_WBM_TQM_REL_REASON_.
+ *
+ * rxdma_push_reason
+ * reo_push_reason
+ * Indicates why rxdma/reo pushed the frame to this ring and values are
+ * defined in enum %HAL_REO_DEST_RING_PUSH_REASON_.
+ *
+ * rxdma_error_code
+ * Field only valid when 'rxdma_push_reason' is set to 'error_detected'.
+ * Values are defined in enum %HAL_REO_ENTR_RING_RXDMA_ECODE_.
+ *
+ * reo_error_code
+ * Field only valid when 'reo_push_reason' is set to 'error_detected'. Values
+ * are defined in enum %HAL_REO_DEST_RING_ERROR_CODE_.
+ *
+ * wbm_internal_error
+ * Is set when WBM got a buffer pointer but the action was to push it to
+ * the idle link descriptor ring or do link related activity OR
+ * Is set when WBM got a link buffer pointer but the action was to push it
+ * to the buffer descriptor ring.
+ *
+ * tqm_status_number
+ * The value in this field is equal to tqm_cmd_number in TQM command. It is
+ * used to correlate the status with TQM commands. Only valid when
+ * release_source_module is TQM.
+ *
+ * transmit_count
+ * The number of times the frame has been transmitted, valid only when
+ * the release source is TQM.
+ *
+ * ack_frame_rssi
+ * This field is only valid when the source is TQM. If this frame is
+ * removed as the result of the reception of an ACK or BA, this field
+ * indicates the RSSI of the received ACK or BA frame.
+ *
+ * sw_release_details_valid
+ * This is set when WBM got a 'release_msdu_list' command from TQM and
+ * the return buffer manager is not WBM. WBM will then de-aggregate all MSDUs
+ * and pass them one at a time on to the 'buffer owner'.
+ *
+ * first_msdu
+ * Field only valid when SW_release_details_valid is set.
+ * When set, this MSDU is the first MSDU pointed to in the
+ * 'release_msdu_list' command.
+ *
+ * last_msdu
+ * Field only valid when SW_release_details_valid is set.
+ * When set, this MSDU is the last MSDU pointed to in the
+ * 'release_msdu_list' command.
+ *
+ * msdu_part_of_amsdu
+ * Field only valid when SW_release_details_valid is set.
+ * When set, this MSDU was part of an A-MSDU in MPDU
+ *
+ * fw_tx_notify_frame
+ * Field only valid when SW_release_details_valid is set.
+ *
+ * buffer_timestamp
+ * Field only valid when SW_release_details_valid is set.
+ * This is the Buffer_timestamp field, a timestamp in units
+ * of 1024 us.
+ *
+ * struct hal_tx_rate_stats rate_stats
+ * Details for command execution tracking purposes.
+ *
+ * sw_peer_id
+ * tid
+ * Field only valid when Release_source_module is set to
+ * release_source_TQM
+ *
+ * 1) Release of msdu buffer due to drop_frame = 1. Flow is
+ * not fetched and hence sw_peer_id and tid = 0
+ *
+ * buffer_or_desc_type = e_num 0 (MSDU_rel_buffer)
+ * tqm_release_reason = e_num 1 (tqm_rr_rem_cmd_rem)
+ *
+ * 2) Release of msdu buffer due to Flow is not fetched and
+ * hence sw_peer_id and tid = 0
+ *
+ * buffer_or_desc_type = e_num 0 (MSDU_rel_buffer)
+ * tqm_release_reason = e_num 1 (tqm_rr_rem_cmd_rem)
+ *
+ * 3) Release of msdu link due to remove_mpdu or acked_mpdu
+ * command.
+ *
+ * buffer_or_desc_type = e_num 1 (msdu_link_descriptor)
+ * tqm_release_reason can be: e_num 1 (tqm_rr_rem_cmd_rem),
+ * e_num 2 (tqm_rr_rem_cmd_tx), e_num 3 (tqm_rr_rem_cmd_notx),
+ * e_num 4 (tqm_rr_rem_cmd_aged)
+ *
+ * This field represents the TID from the TX_MSDU_FLOW
+ * descriptor or TX_MPDU_QUEUE descriptor.
+ *
+ * ring_id
+ * For debugging.
+ * This field is filled in by the SRNG module.
+ * It helps to identify the ring that is being looked at.
+ *
+ * looping_count
+ * A count value that indicates the number of times the
+ * producer of entries into the Buffer Manager Ring has looped
+ * around the ring.
+ *
+ * At initialization time, this value is set to 0. On the
+ * first loop, this value is set to 1. After the maximum value
+ * allowed by the number of bits for this field is reached, the
+ * count value continues with 0 again.
+ *
+ * In case SW is the consumer of the ring entries, it can
+ * use this field to figure out up to where the producer of
+ * entries has created new entries. This eliminates the need to
+ * check where the 'head pointer' of the ring is located once
+ * the SW starts processing an interrupt indicating that new
+ * entries have been put into this ring.
+ *
+ * Also note that SW, if it wants, only needs to look at the
+ * LSB of this count value.
+ */
+
+/**
+ * enum hal_wbm_tqm_rel_reason - TQM release reason code
+ * @HAL_WBM_TQM_REL_REASON_FRAME_ACKED: ACK or BACK received for the frame
+ * @HAL_WBM_TQM_REL_REASON_CMD_REMOVE_MPDU: Command remove_mpdus initiated by SW
+ * @HAL_WBM_TQM_REL_REASON_CMD_REMOVE_TX: Command remove transmitted_mpdus
+ * initiated by sw.
+ * @HAL_WBM_TQM_REL_REASON_CMD_REMOVE_NOTX: Command remove untransmitted_mpdus
+ * initiated by sw.
+ * @HAL_WBM_TQM_REL_REASON_CMD_REMOVE_AGED_FRAMES: Command remove aged msdus or
+ * mpdus.
+ * @HAL_WBM_TQM_REL_REASON_CMD_REMOVE_RESEAON1: Remove command initiated by
+ * fw with fw_reason1.
+ * @HAL_WBM_TQM_REL_REASON_CMD_REMOVE_RESEAON2: Remove command initiated by
+ * fw with fw_reason2.
+ * @HAL_WBM_TQM_REL_REASON_CMD_REMOVE_RESEAON3: Remove command initiated by
+ * fw with fw_reason3.
+ */
+enum hal_wbm_tqm_rel_reason {
+ HAL_WBM_TQM_REL_REASON_FRAME_ACKED,
+ HAL_WBM_TQM_REL_REASON_CMD_REMOVE_MPDU,
+ HAL_WBM_TQM_REL_REASON_CMD_REMOVE_TX,
+ HAL_WBM_TQM_REL_REASON_CMD_REMOVE_NOTX,
+ HAL_WBM_TQM_REL_REASON_CMD_REMOVE_AGED_FRAMES,
+ HAL_WBM_TQM_REL_REASON_CMD_REMOVE_RESEAON1,
+ HAL_WBM_TQM_REL_REASON_CMD_REMOVE_RESEAON2,
+ HAL_WBM_TQM_REL_REASON_CMD_REMOVE_RESEAON3,
+};
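+
+/* Example: a minimal sketch of classifying a WBM release entry as a TQM tx
+ * completion and pulling out its release reason; the helper name and the
+ * out-parameter style are illustrative only.
+ */
+static inline bool example_wbm_rel_is_tqm(struct hal_wbm_release_ring *desc,
+                                          enum hal_wbm_tqm_rel_reason *reason)
+{
+ if (FIELD_GET(HAL_WBM_RELEASE_INFO0_REL_SRC_MODULE, desc->info0) !=
+     HAL_WBM_REL_SRC_MODULE_TQM)
+  return false;
+
+ *reason = FIELD_GET(HAL_WBM_RELEASE_INFO0_TQM_RELEASE_REASON, desc->info0);
+ return true;
+}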
+
+struct hal_wbm_buffer_ring {
+ struct ath11k_buffer_addr buf_addr_info;
+};
+
+enum hal_desc_owner {
+ HAL_DESC_OWNER_WBM,
+ HAL_DESC_OWNER_SW,
+ HAL_DESC_OWNER_TQM,
+ HAL_DESC_OWNER_RXDMA,
+ HAL_DESC_OWNER_REO,
+ HAL_DESC_OWNER_SWITCH,
+};
+
+enum hal_desc_buf_type {
+ HAL_DESC_BUF_TYPE_TX_MSDU_LINK,
+ HAL_DESC_BUF_TYPE_TX_MPDU_LINK,
+ HAL_DESC_BUF_TYPE_TX_MPDU_QUEUE_HEAD,
+ HAL_DESC_BUF_TYPE_TX_MPDU_QUEUE_EXT,
+ HAL_DESC_BUF_TYPE_TX_FLOW,
+ HAL_DESC_BUF_TYPE_TX_BUFFER,
+ HAL_DESC_BUF_TYPE_RX_MSDU_LINK,
+ HAL_DESC_BUF_TYPE_RX_MPDU_LINK,
+ HAL_DESC_BUF_TYPE_RX_REO_QUEUE,
+ HAL_DESC_BUF_TYPE_RX_REO_QUEUE_EXT,
+ HAL_DESC_BUF_TYPE_RX_BUFFER,
+ HAL_DESC_BUF_TYPE_IDLE_LINK,
+};
+
+#define HAL_DESC_REO_OWNED 4
+#define HAL_DESC_REO_QUEUE_DESC 8
+#define HAL_DESC_REO_QUEUE_EXT_DESC 9
+#define HAL_DESC_REO_NON_QOS_TID 16
+
+#define HAL_DESC_HDR_INFO0_OWNER GENMASK(3, 0)
+#define HAL_DESC_HDR_INFO0_BUF_TYPE GENMASK(7, 4)
+#define HAL_DESC_HDR_INFO0_DBG_RESERVED GENMASK(31, 8)
+
+struct hal_desc_header {
+ u32 info0;
+} __packed;
+
+struct hal_rx_mpdu_link_ptr {
+ struct ath11k_buffer_addr addr_info;
+} __packed;
+
+struct hal_rx_msdu_details {
+ struct ath11k_buffer_addr buf_addr_info;
+ struct rx_msdu_desc rx_msdu_info;
+} __packed;
+
+#define HAL_RX_MSDU_LNK_INFO0_RX_QUEUE_NUMBER GENMASK(15, 0)
+#define HAL_RX_MSDU_LNK_INFO0_FIRST_MSDU_LNK BIT(16)
+
+struct hal_rx_msdu_link {
+ struct hal_desc_header desc_hdr;
+ struct ath11k_buffer_addr buf_addr_info;
+ u32 info0;
+ u32 pn[4];
+ struct hal_rx_msdu_details msdu_link[6];
+} __packed;
+
+struct hal_rx_reo_queue_ext {
+ struct hal_desc_header desc_hdr;
+ u32 rsvd;
+ struct hal_rx_mpdu_link_ptr mpdu_link[15];
+} __packed;
+
+/* hal_rx_reo_queue_ext
+ * Consumer: REO
+ * Producer: REO
+ *
+ * descriptor_header
+ * Details about which module owns this struct.
+ *
+ * mpdu_link
+ * Pointer to the next MPDU_link descriptor in the MPDU queue.
+ */
+
+enum hal_rx_reo_queue_pn_size {
+ HAL_RX_REO_QUEUE_PN_SIZE_24,
+ HAL_RX_REO_QUEUE_PN_SIZE_48,
+ HAL_RX_REO_QUEUE_PN_SIZE_128,
+};
+
+#define HAL_RX_REO_QUEUE_RX_QUEUE_NUMBER GENMASK(15, 0)
+
+#define HAL_RX_REO_QUEUE_INFO0_VLD BIT(0)
+#define HAL_RX_REO_QUEUE_INFO0_ASSOC_LNK_DESC_COUNTER GENMASK(2, 1)
+#define HAL_RX_REO_QUEUE_INFO0_DIS_DUP_DETECTION BIT(3)
+#define HAL_RX_REO_QUEUE_INFO0_SOFT_REORDER_EN BIT(4)
+#define HAL_RX_REO_QUEUE_INFO0_AC GENMASK(6, 5)
+#define HAL_RX_REO_QUEUE_INFO0_BAR BIT(7)
+#define HAL_RX_REO_QUEUE_INFO0_RETRY BIT(8)
+#define HAL_RX_REO_QUEUE_INFO0_CHECK_2K_MODE BIT(9)
+#define HAL_RX_REO_QUEUE_INFO0_OOR_MODE BIT(10)
+#define HAL_RX_REO_QUEUE_INFO0_BA_WINDOW_SIZE GENMASK(18, 11)
+#define HAL_RX_REO_QUEUE_INFO0_PN_CHECK BIT(19)
+#define HAL_RX_REO_QUEUE_INFO0_EVEN_PN BIT(20)
+#define HAL_RX_REO_QUEUE_INFO0_UNEVEN_PN BIT(21)
+#define HAL_RX_REO_QUEUE_INFO0_PN_HANDLE_ENABLE BIT(22)
+#define HAL_RX_REO_QUEUE_INFO0_PN_SIZE GENMASK(24, 23)
+#define HAL_RX_REO_QUEUE_INFO0_IGNORE_AMPDU_FLG BIT(25)
+
+#define HAL_RX_REO_QUEUE_INFO1_SVLD BIT(0)
+#define HAL_RX_REO_QUEUE_INFO1_SSN GENMASK(12, 1)
+#define HAL_RX_REO_QUEUE_INFO1_CURRENT_IDX GENMASK(20, 13)
+#define HAL_RX_REO_QUEUE_INFO1_SEQ_2K_ERR BIT(21)
+#define HAL_RX_REO_QUEUE_INFO1_PN_ERR BIT(22)
+#define HAL_RX_REO_QUEUE_INFO1_PN_VALID BIT(31)
+
+#define HAL_RX_REO_QUEUE_INFO2_MPDU_COUNT GENMASK(6, 0)
+#define HAL_RX_REO_QUEUE_INFO2_MSDU_COUNT GENMASK(31, 7)
+
+#define HAL_RX_REO_QUEUE_INFO3_TIMEOUT_COUNT GENMASK(9, 4)
+#define HAL_RX_REO_QUEUE_INFO3_FWD_DUE_TO_BAR_CNT GENMASK(15, 10)
+#define HAL_RX_REO_QUEUE_INFO3_DUPLICATE_COUNT GENMASK(31, 16)
+
+#define HAL_RX_REO_QUEUE_INFO4_FRAME_IN_ORD_COUNT GENMASK(23, 0)
+#define HAL_RX_REO_QUEUE_INFO4_BAR_RECVD_COUNT GENMASK(31, 24)
+
+#define HAL_RX_REO_QUEUE_INFO5_LATE_RX_MPDU_COUNT GENMASK(11, 0)
+#define HAL_RX_REO_QUEUE_INFO5_WINDOW_JUMP_2K GENMASK(15, 12)
+#define HAL_RX_REO_QUEUE_INFO5_HOLE_COUNT GENMASK(31, 16)
+
+struct hal_rx_reo_queue {
+ struct hal_desc_header desc_hdr;
+ u32 rx_queue_num;
+ u32 info0;
+ u32 info1;
+ u32 pn[4];
+ u32 last_rx_enqueue_timestamp;
+ u32 last_rx_dequeue_timestamp;
+ u32 next_aging_queue[2];
+ u32 prev_aging_queue[2];
+ u32 rx_bitmap[8];
+ u32 info2;
+ u32 info3;
+ u32 info4;
+ u32 processed_mpdus;
+ u32 processed_msdus;
+ u32 processed_total_bytes;
+ u32 info5;
+ u32 rsvd[3];
+ struct hal_rx_reo_queue_ext ext_desc[];
+} __packed;
+
+/* hal_rx_reo_queue
+ *
+ * descriptor_header
+ * Details about which module owns this struct. Note that sub field
+ * Buffer_type shall be set to receive_reo_queue_descriptor.
+ *
+ * receive_queue_number
+ * Indicates the MPDU queue ID to which this MPDU link descriptor belongs.
+ *
+ * vld
+ * Valid bit indicating a session is established and the queue descriptor
+ * is valid.
+ * associated_link_descriptor_counter
+ * Indicates which of the 3 link descriptor counters shall be incremented
+ * or decremented when link descriptors are added or removed from this
+ * flow queue.
+ * disable_duplicate_detection
+ * When set, do not perform any duplicate detection.
+ * soft_reorder_enable
+ * When set, REO has been instructed to not perform the actual re-ordering
+ * of frames for this queue, but just to insert the reorder opcodes.
+ * ac
+ * Indicates the access category of the queue descriptor.
+ * bar
+ * Indicates if BAR has been received.
+ * retry
+ * Retry bit is checked if this bit is set.
+ * chk_2k_mode
+ * Indicates what type of operation is expected from Reo when the received
+ * frame SN falls within the 2K window.
+ * oor_mode
+ * Indicates what type of operation is expected when the received frame
+ * falls within the OOR window.
+ * ba_window_size
+ * Indicates the negotiated (window size + 1). Max of 256 bits.
+ *
+ * A value of 255 means a 256-bit bitmap, 63 means a 64-bit bitmap, and 0
+ * means a non-BA session with a window size of 0. These 3 values are the
+ * main values validated, but other values should work as well.
+ *
+ * A BA window size of 0 (=> a one-frame-entry bitmap) means that there is
+ * no additional rx_reo_queue_ext descriptor following rx_reo_queue in memory.
+ * A BA window size of 1 - 105 means that there is 1 rx_reo_queue_ext.
+ * A BA window size of 106 - 210 means that there are 2 rx_reo_queue_ext.
+ * A BA window size of 211 - 256 means that there are 3 rx_reo_queue_ext.
+ * pn_check_needed, pn_shall_be_even, pn_shall_be_uneven, pn_handling_enable,
+ * pn_size
+ * REO shall perform the PN increment check, even number check, uneven
+ * number check, PN error check and size of the PN field check.
+ * ignore_ampdu_flag
+ * REO shall ignore the ampdu_flag on entrance descriptor for this queue.
+ *
+ * svld
+ * Sequence number in the next field is a valid one.
+ * ssn
+ * Starting Sequence number of the session.
+ * current_index
+ * Points to last forwarded packet
+ * seq_2k_error_detected_flag
+ * REO has detected a 2k error jump in the sequence number and from that
+ * moment forward, all new frames are forwarded directly to FW, without
+ * duplicate detect, reordering, etc.
+ * pn_error_detected_flag
+ * REO has detected a PN error.
+ */
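+
+/* Example: a minimal sketch of sizing a REO queue descriptor from the BA
+ * window, following the rx_reo_queue_ext ranges described above (0 -> none,
+ * 1-105 -> 1, 106-210 -> 2, 211-256 -> 3). The helper name is illustrative.
+ */
+static inline u32 example_reo_queue_size(u16 ba_window_size)
+{
+ u32 num_ext = ba_window_size ? DIV_ROUND_UP(ba_window_size, 105) : 0;
+
+ return sizeof(struct hal_rx_reo_queue) +
+        num_ext * sizeof(struct hal_rx_reo_queue_ext);
+}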
+
+#define HAL_REO_UPD_RX_QUEUE_INFO0_QUEUE_ADDR_HI GENMASK(7, 0)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_RX_QUEUE_NUM BIT(8)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_VLD BIT(9)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_ASSOC_LNK_DESC_CNT BIT(10)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_DIS_DUP_DETECTION BIT(11)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SOFT_REORDER_EN BIT(12)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_AC BIT(13)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_BAR BIT(14)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_RETRY BIT(15)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_CHECK_2K_MODE BIT(16)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_OOR_MODE BIT(17)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_BA_WINDOW_SIZE BIT(18)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_CHECK BIT(19)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_EVEN_PN BIT(20)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_UNEVEN_PN BIT(21)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_HANDLE_ENABLE BIT(22)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_SIZE BIT(23)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_IGNORE_AMPDU_FLG BIT(24)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SVLD BIT(25)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SSN BIT(26)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SEQ_2K_ERR BIT(27)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_ERR BIT(28)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_VALID BIT(29)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN BIT(30)
+
+#define HAL_REO_UPD_RX_QUEUE_INFO1_RX_QUEUE_NUMBER GENMASK(15, 0)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_VLD BIT(16)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_ASSOC_LNK_DESC_COUNTER GENMASK(18, 17)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_DIS_DUP_DETECTION BIT(19)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_SOFT_REORDER_EN BIT(20)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_AC GENMASK(22, 21)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_BAR BIT(23)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_RETRY BIT(24)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_CHECK_2K_MODE BIT(25)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_OOR_MODE BIT(26)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_PN_CHECK BIT(27)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_EVEN_PN BIT(28)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_UNEVEN_PN BIT(29)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_PN_HANDLE_ENABLE BIT(30)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_IGNORE_AMPDU_FLG BIT(31)
+
+#define HAL_REO_UPD_RX_QUEUE_INFO2_BA_WINDOW_SIZE GENMASK(7, 0)
+#define HAL_REO_UPD_RX_QUEUE_INFO2_PN_SIZE GENMASK(9, 8)
+#define HAL_REO_UPD_RX_QUEUE_INFO2_SVLD BIT(10)
+#define HAL_REO_UPD_RX_QUEUE_INFO2_SSN GENMASK(22, 11)
+#define HAL_REO_UPD_RX_QUEUE_INFO2_SEQ_2K_ERR BIT(23)
+#define HAL_REO_UPD_RX_QUEUE_INFO2_PN_ERR BIT(24)
+#define HAL_REO_UPD_RX_QUEUE_INFO2_PN_VALID BIT(25)
+
+struct hal_reo_update_rx_queue {
+ struct hal_reo_cmd_hdr cmd;
+ u32 queue_addr_lo;
+ u32 info0;
+ u32 info1;
+ u32 info2;
+ u32 pn[4];
+} __packed;
+
+#define HAL_REO_UNBLOCK_CACHE_INFO0_UNBLK_CACHE BIT(0)
+#define HAL_REO_UNBLOCK_CACHE_INFO0_RESOURCE_IDX GENMASK(2, 1)
+
+struct hal_reo_unblock_cache {
+ struct hal_reo_cmd_hdr cmd;
+ u32 info0;
+ u32 rsvd[7];
+} __packed;
+
+enum hal_reo_exec_status {
+ HAL_REO_EXEC_STATUS_SUCCESS,
+ HAL_REO_EXEC_STATUS_BLOCKED,
+ HAL_REO_EXEC_STATUS_FAILED,
+ HAL_REO_EXEC_STATUS_RESOURCE_BLOCKED,
+};
+
+#define HAL_REO_STATUS_HDR_INFO0_STATUS_NUM GENMASK(15, 0)
+#define HAL_REO_STATUS_HDR_INFO0_EXEC_TIME GENMASK(25, 16)
+#define HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS GENMASK(27, 26)
+
+struct hal_reo_status_hdr {
+ u32 info0;
+ u32 timestamp;
+} __packed;
+
+/* hal_reo_status_hdr
+ * Producer: REO
+ * Consumer: SW
+ *
+ * status_num
+ * The value in this field is equal to the value of the REO command
+ * number. This field helps to correlate the statuses with the REO
+ * commands.
+ *
+ * execution_time (in us)
+ * The amount of time REO took to execute the command. Note that
+ * this time does not include the duration of the command waiting
+ * in the command ring, before the execution started.
+ *
+ * execution_status
+ * Execution status of the command. Values are defined in
+ * enum %HAL_REO_EXEC_STATUS_.
+ */
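+
+/* Example: a minimal sketch of checking whether a status descriptor answers
+ * a previously issued command and whether REO executed it successfully;
+ * the helper name is illustrative only.
+ */
+static inline bool example_reo_status_ok(struct hal_reo_status_hdr *hdr,
+                                         u16 cmd_num)
+{
+ return FIELD_GET(HAL_REO_STATUS_HDR_INFO0_STATUS_NUM, hdr->info0) ==
+        cmd_num &&
+        FIELD_GET(HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS, hdr->info0) ==
+        HAL_REO_EXEC_STATUS_SUCCESS;
+}
+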
+#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO0_SSN GENMASK(11, 0)
+#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO0_CUR_IDX GENMASK(19, 12)
+
+#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO1_MPDU_COUNT GENMASK(6, 0)
+#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO1_MSDU_COUNT GENMASK(31, 7)
+
+#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO2_TIMEOUT_COUNT GENMASK(9, 4)
+#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO2_FDTB_COUNT GENMASK(15, 10)
+#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO2_DUPLICATE_COUNT GENMASK(31, 16)
+
+#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO3_FIO_COUNT GENMASK(23, 0)
+#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO3_BAR_RCVD_CNT GENMASK(31, 24)
+
+#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO4_LATE_RX_MPDU GENMASK(11, 0)
+#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO4_WINDOW_JMP2K GENMASK(15, 12)
+#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO4_HOLE_COUNT GENMASK(31, 16)
+
+#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO5_LOOPING_CNT GENMASK(31, 28)
+
+struct hal_reo_get_queue_stats_status {
+ struct hal_reo_status_hdr hdr;
+ u32 info0;
+ u32 pn[4];
+ u32 last_rx_enqueue_timestamp;
+ u32 last_rx_dequeue_timestamp;
+ u32 rx_bitmap[8];
+ u32 info1;
+ u32 info2;
+ u32 info3;
+ u32 num_mpdu_frames;
+ u32 num_msdu_frames;
+ u32 total_bytes;
+ u32 info4;
+ u32 info5;
+} __packed;
+
+/* hal_reo_get_queue_stats_status
+ * Producer: REO
+ * Consumer: SW
+ *
+ * status_hdr
+ * Details that can link this status with the original command. It
+ * also contains info on how long REO took to execute this command.
+ *
+ * ssn
+ * Starting Sequence number of the session; this changes whenever
+ * the window moves (can be filled by SW, then maintained by REO).
+ *
+ * current_index
+ * Points to last forwarded packet.
+ *
+ * pn
+ * Bits of the PN number.
+ *
+ * last_rx_enqueue_timestamp
+ * last_rx_dequeue_timestamp
+ * Timestamp of arrival of the last MPDU for this queue and
+ * Timestamp of forwarding an MPDU accordingly.
+ *
+ * rx_bitmap
+ * When a bit is set, the corresponding frame is currently held
+ * in the re-order queue. The bitmap is fully managed by HW.
+ *
+ * current_mpdu_count
+ * current_msdu_count
+ * The number of MPDUs and MSDUs in the queue.
+ *
+ * timeout_count
+ * The number of times REO started forwarding frames even though
+ * there is a hole in the bitmap. Forwarding reason is timeout.
+ *
+ * forward_due_to_bar_count
+ * The number of times REO started forwarding frames even though
+ * there is a hole in the bitmap. Fwd reason is reception of BAR.
+ *
+ * duplicate_count
+ * The number of duplicate frames that have been detected.
+ *
+ * frames_in_order_count
+ * The number of frames that have been received in order (without
+ * a hole that prevented them from being forwarded immediately).
+ *
+ * bar_received_count
+ * The number of times a BAR frame is received.
+ *
+ * mpdu_frames_processed_count
+ * msdu_frames_processed_count
+ * The total number of MPDU/MSDU frames that have been processed.
+ *
+ * total_bytes
+ * An approximation of the number of bytes received for this queue.
+ *
+ * late_receive_mpdu_count
+ * The number of MPDUs received after the window had already moved
+ * on. The 'late' sequence window is defined as
+ * (Window SSN - 256) - (Window SSN - 1).
+ *
+ * window_jump_2k
+ * The number of times the window moved by more than 2K.
+ *
+ * hole_count
+ * The number of times a hole was created in the receive bitmap.
+ *
+ * looping_count
+ * A count value that indicates the number of times the producer of
+ * entries into this Ring has looped around the ring.
+ */
+
+#define HAL_REO_STATUS_LOOP_CNT GENMASK(31, 28)
+
+#define HAL_REO_FLUSH_QUEUE_INFO0_ERR_DETECTED BIT(0)
+#define HAL_REO_FLUSH_QUEUE_INFO0_RSVD GENMASK(31, 1)
+#define HAL_REO_FLUSH_QUEUE_INFO1_RSVD GENMASK(27, 0)
+
+struct hal_reo_flush_queue_status {
+ struct hal_reo_status_hdr hdr;
+ u32 info0;
+ u32 rsvd0[21];
+ u32 info1;
+} __packed;
+
+/* hal_reo_flush_queue_status
+ * Producer: REO
+ * Consumer: SW
+ *
+ * status_hdr
+ * Details that can link this status with the original command. It
+ * also contains info on how long REO took to execute this command.
+ *
+ * error_detected
+ * Status of blocking resource
+ *
+ * 0 - No error has been detected while executing this command
+ * 1 - Error detected. The resource to be used for blocking was
+ * already in use.
+ *
+ * looping_count
+ * A count value that indicates the number of times the producer of
+ * entries into this Ring has looped around the ring.
+ */
+
+#define HAL_REO_FLUSH_CACHE_STATUS_INFO0_IS_ERR BIT(0)
+#define HAL_REO_FLUSH_CACHE_STATUS_INFO0_BLOCK_ERR_CODE GENMASK(2, 1)
+#define HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_STATUS_HIT BIT(8)
+#define HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_DESC_TYPE GENMASK(11, 9)
+#define HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_CLIENT_ID GENMASK(15, 12)
+#define HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_ERR GENMASK(17, 16)
+#define HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_COUNT GENMASK(25, 18)
+
+struct hal_reo_flush_cache_status {
+ struct hal_reo_status_hdr hdr;
+ u32 info0;
+ u32 rsvd0[21];
+ u32 info1;
+} __packed;
+
+/* hal_reo_flush_cache_status
+ * Producer: REO
+ * Consumer: SW
+ *
+ * status_hdr
+ * Details that can link this status with the original command. It
+ * also contains info on how long REO took to execute this command.
+ *
+ * error_detected
+ * Status for blocking resource handling
+ *
+ * 0 - No error has been detected while executing this command
+ * 1 - An error in the blocking resource management was detected
+ *
+ * block_error_details
+ * only valid when error_detected is set
+ *
+ * 0 - No blocking related errors found
+ * 1 - Blocking resource is already in use
+ * 2 - Resource requested to be unblocked, was not blocked
+ *
+ * cache_controller_flush_status_hit
+ * The status that the cache controller returned on executing the
+ * flush command.
+ *
+ * 0 - miss; 1 - hit
+ *
+ * cache_controller_flush_status_desc_type
+ * Flush descriptor type
+ *
+ * cache_controller_flush_status_client_id
+ * Module who made the flush request
+ *
+ * In REO, this is always 0
+ *
+ * cache_controller_flush_status_error
+ * Error condition
+ *
+ * 0 - No error found
+ * 1 - HW interface is still busy
+ * 2 - Line currently locked. Used for one line flush command
+ * 3 - At least one line is still locked.
+ * Used for cache flush command.
+ *
+ * cache_controller_flush_count
+ * The number of lines that were actually flushed out
+ *
+ * looping_count
+ * A count value that indicates the number of times the producer of
+ * entries into this Ring has looped around the ring.
+ */
+
+#define HAL_REO_UNBLOCK_CACHE_STATUS_INFO0_IS_ERR BIT(0)
+#define HAL_REO_UNBLOCK_CACHE_STATUS_INFO0_TYPE BIT(1)
+
+struct hal_reo_unblock_cache_status {
+ struct hal_reo_status_hdr hdr;
+ u32 info0;
+ u32 rsvd0[21];
+ u32 info1;
+} __packed;
+
+/* hal_reo_unblock_cache_status
+ * Producer: REO
+ * Consumer: SW
+ *
+ * status_hdr
+ * Details that can link this status with the original command. It
+ * also contains info on how long REO took to execute this command.
+ *
+ * error_detected
+ * 0 - No error has been detected while executing this command
+ * 1 - The blocking resource was not in use, and therefore it could
+ * not be unblocked.
+ *
+ * unblock_type
+ * Reference to the type of unblock command
+ * 0 - Unblock a blocking resource
+ * 1 - The entire cache usage is unblocked
+ *
+ * looping_count
+ * A count value that indicates the number of times the producer of
+ * entries into this Ring has looped around the ring.
+ */
+
+#define HAL_REO_FLUSH_TIMEOUT_STATUS_INFO0_IS_ERR BIT(0)
+#define HAL_REO_FLUSH_TIMEOUT_STATUS_INFO0_LIST_EMPTY BIT(1)
+
+#define HAL_REO_FLUSH_TIMEOUT_STATUS_INFO1_REL_DESC_COUNT GENMASK(15, 0)
+#define HAL_REO_FLUSH_TIMEOUT_STATUS_INFO1_FWD_BUF_COUNT GENMASK(31, 16)
+
+struct hal_reo_flush_timeout_list_status {
+ struct hal_reo_status_hdr hdr;
+ u32 info0;
+ u32 info1;
+ u32 rsvd0[20];
+ u32 info2;
+} __packed;
+
+/* hal_reo_flush_timeout_list_status
+ * Producer: REO
+ * Consumer: SW
+ *
+ * status_hdr
+ * Details that can link this status with the original command. It
+ * also contains info on how long REO took to execute this command.
+ *
+ * error_detected
+ * 0 - No error has been detected while executing this command
+ * 1 - Command not properly executed and returned with error
+ *
+ * timeout_list_empty
+ * When set, REO has depleted the timeout list and all entries are
+ * gone.
+ *
+ * release_desc_count
+ * Producer: SW; Consumer: REO
+ * The number of link descriptors released
+ *
+ * forward_buf_count
+ * Producer: SW; Consumer: REO
+ * The number of buffers forwarded to the REO destination rings
+ *
+ * looping_count
+ * A count value that indicates the number of times the producer of
+ * entries into this Ring has looped around the ring.
+ */
+
+#define HAL_REO_DESC_THRESH_STATUS_INFO0_THRESH_INDEX GENMASK(1, 0)
+#define HAL_REO_DESC_THRESH_STATUS_INFO1_LINK_DESC_COUNTER0 GENMASK(23, 0)
+#define HAL_REO_DESC_THRESH_STATUS_INFO2_LINK_DESC_COUNTER1 GENMASK(23, 0)
+#define HAL_REO_DESC_THRESH_STATUS_INFO3_LINK_DESC_COUNTER2 GENMASK(23, 0)
+#define HAL_REO_DESC_THRESH_STATUS_INFO4_LINK_DESC_COUNTER_SUM GENMASK(25, 0)
+
+struct hal_reo_desc_thresh_reached_status {
+ struct hal_reo_status_hdr hdr;
+ u32 info0;
+ u32 info1;
+ u32 info2;
+ u32 info3;
+ u32 info4;
+ u32 rsvd0[17];
+ u32 info5;
+} __packed;
+
+/* hal_reo_desc_thresh_reached_status
+ * Producer: REO
+ * Consumer: SW
+ *
+ * status_hdr
+ * Details that can link this status with the original command. It
+ * also contains info on how long REO took to execute this command.
+ *
+ * threshold_index
+ * The index of the threshold register whose configured value was reached
+ *
+ * link_descriptor_counter0
+ * link_descriptor_counter1
+ * link_descriptor_counter2
+ * link_descriptor_counter_sum
+ * Value of the respective counters at generation of this message
+ *
+ * looping_count
+ * A count value that indicates the number of times the producer of
+ * entries into this Ring has looped around the ring.
+ */
+
+#endif /* ATH11K_HAL_DESC_H */
diff --git a/drivers/net/wireless/ath/ath11k/hal_rx.c b/drivers/net/wireless/ath/ath11k/hal_rx.c
new file mode 100644
index 000000000000..753bd93f0212
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/hal_rx.c
@@ -0,0 +1,1610 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include "debug.h"
+#include "hal.h"
+#include "hal_tx.h"
+#include "hal_rx.h"
+#include "hal_desc.h"
+#include "hif.h"
+
+static void ath11k_hal_reo_set_desc_hdr(struct hal_desc_header *hdr,
+ u8 owner, u8 buffer_type, u32 magic)
+{
+ hdr->info0 = FIELD_PREP(HAL_DESC_HDR_INFO0_OWNER, owner) |
+ FIELD_PREP(HAL_DESC_HDR_INFO0_BUF_TYPE, buffer_type);
+
+ /* Magic pattern in reserved bits for debugging */
+ hdr->info0 |= FIELD_PREP(HAL_DESC_HDR_INFO0_DBG_RESERVED, magic);
+}
+
+static int ath11k_hal_reo_cmd_queue_stats(struct hal_tlv_hdr *tlv,
+ struct ath11k_hal_reo_cmd *cmd)
+{
+ struct hal_reo_get_queue_stats *desc;
+
+ tlv->tl = FIELD_PREP(HAL_TLV_HDR_TAG, HAL_REO_GET_QUEUE_STATS) |
+ FIELD_PREP(HAL_TLV_HDR_LEN, sizeof(*desc));
+
+ desc = (struct hal_reo_get_queue_stats *)tlv->value;
+ memset_startat(desc, 0, queue_addr_lo);
+
+ desc->cmd.info0 &= ~HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED;
+ if (cmd->flag & HAL_REO_CMD_FLG_NEED_STATUS)
+ desc->cmd.info0 |= HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED;
+
+ desc->queue_addr_lo = cmd->addr_lo;
+ desc->info0 = FIELD_PREP(HAL_REO_GET_QUEUE_STATS_INFO0_QUEUE_ADDR_HI,
+ cmd->addr_hi);
+ if (cmd->flag & HAL_REO_CMD_FLG_STATS_CLEAR)
+ desc->info0 |= HAL_REO_GET_QUEUE_STATS_INFO0_CLEAR_STATS;
+
+ return FIELD_GET(HAL_REO_CMD_HDR_INFO0_CMD_NUMBER, desc->cmd.info0);
+}
+
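+/* Build a REO FLUSH_CACHE command. If the caller set
+ * HAL_REO_CMD_FLG_FLUSH_BLOCK_LATER, the first free blocking resource
+ * slot is reserved here and remembered in hal->current_blk_index; the
+ * unblock status handler returns it to the pool later.
+ */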
+static int ath11k_hal_reo_cmd_flush_cache(struct ath11k_hal *hal, struct hal_tlv_hdr *tlv,
+ struct ath11k_hal_reo_cmd *cmd)
+{
+ struct hal_reo_flush_cache *desc;
+ u8 avail_slot = ffz(hal->avail_blk_resource);
+
+ if (cmd->flag & HAL_REO_CMD_FLG_FLUSH_BLOCK_LATER) {
+ if (avail_slot >= HAL_MAX_AVAIL_BLK_RES)
+ return -ENOSPC;
+
+ hal->current_blk_index = avail_slot;
+ }
+
+ tlv->tl = FIELD_PREP(HAL_TLV_HDR_TAG, HAL_REO_FLUSH_CACHE) |
+ FIELD_PREP(HAL_TLV_HDR_LEN, sizeof(*desc));
+
+ desc = (struct hal_reo_flush_cache *)tlv->value;
+ memset_startat(desc, 0, cache_addr_lo);
+
+ desc->cmd.info0 &= ~HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED;
+ if (cmd->flag & HAL_REO_CMD_FLG_NEED_STATUS)
+ desc->cmd.info0 |= HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED;
+
+ desc->cache_addr_lo = cmd->addr_lo;
+ desc->info0 = FIELD_PREP(HAL_REO_FLUSH_CACHE_INFO0_CACHE_ADDR_HI,
+ cmd->addr_hi);
+
+ if (cmd->flag & HAL_REO_CMD_FLG_FLUSH_FWD_ALL_MPDUS)
+ desc->info0 |= HAL_REO_FLUSH_CACHE_INFO0_FWD_ALL_MPDUS;
+
+ if (cmd->flag & HAL_REO_CMD_FLG_FLUSH_BLOCK_LATER) {
+ desc->info0 |= HAL_REO_FLUSH_CACHE_INFO0_BLOCK_CACHE_USAGE;
+ desc->info0 |=
+ FIELD_PREP(HAL_REO_FLUSH_CACHE_INFO0_BLOCK_RESRC_IDX,
+ avail_slot);
+ }
+
+ if (cmd->flag & HAL_REO_CMD_FLG_FLUSH_NO_INVAL)
+ desc->info0 |= HAL_REO_FLUSH_CACHE_INFO0_FLUSH_WO_INVALIDATE;
+
+ if (cmd->flag & HAL_REO_CMD_FLG_FLUSH_ALL)
+ desc->info0 |= HAL_REO_FLUSH_CACHE_INFO0_FLUSH_ALL;
+
+ return FIELD_GET(HAL_REO_CMD_HDR_INFO0_CMD_NUMBER, desc->cmd.info0);
+}
+
+static int ath11k_hal_reo_cmd_update_rx_queue(struct hal_tlv_hdr *tlv,
+ struct ath11k_hal_reo_cmd *cmd)
+{
+ struct hal_reo_update_rx_queue *desc;
+
+ tlv->tl = FIELD_PREP(HAL_TLV_HDR_TAG, HAL_REO_UPDATE_RX_REO_QUEUE) |
+ FIELD_PREP(HAL_TLV_HDR_LEN, sizeof(*desc));
+
+ desc = (struct hal_reo_update_rx_queue *)tlv->value;
+ memset_startat(desc, 0, queue_addr_lo);
+
+ desc->cmd.info0 &= ~HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED;
+ if (cmd->flag & HAL_REO_CMD_FLG_NEED_STATUS)
+ desc->cmd.info0 |= HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED;
+
+ desc->queue_addr_lo = cmd->addr_lo;
+ desc->info0 =
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_QUEUE_ADDR_HI,
+ cmd->addr_hi) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_RX_QUEUE_NUM,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_RX_QUEUE_NUM)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_VLD,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_VLD)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_ASSOC_LNK_DESC_CNT,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_ALDC)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_DIS_DUP_DETECTION,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_DIS_DUP_DETECTION)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SOFT_REORDER_EN,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_SOFT_REORDER_EN)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_AC,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_AC)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_BAR,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_BAR)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_RETRY,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_RETRY)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_CHECK_2K_MODE,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_CHECK_2K_MODE)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_OOR_MODE,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_OOR_MODE)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_BA_WINDOW_SIZE,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_BA_WINDOW_SIZE)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_CHECK,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_PN_CHECK)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_EVEN_PN,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_EVEN_PN)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_UNEVEN_PN,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_UNEVEN_PN)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_HANDLE_ENABLE,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_PN_HANDLE_ENABLE)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_SIZE,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_PN_SIZE)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_IGNORE_AMPDU_FLG,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_IGNORE_AMPDU_FLG)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SVLD,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_SVLD)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SSN,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_SSN)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SEQ_2K_ERR,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_SEQ_2K_ERR)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_VALID,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_PN_VALID)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_PN));
+
+ desc->info1 =
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_RX_QUEUE_NUMBER,
+ cmd->rx_queue_num) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_VLD,
+ !!(cmd->upd1 & HAL_REO_CMD_UPD1_VLD)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_ASSOC_LNK_DESC_COUNTER,
+ FIELD_GET(HAL_REO_CMD_UPD1_ALDC, cmd->upd1)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_DIS_DUP_DETECTION,
+ !!(cmd->upd1 & HAL_REO_CMD_UPD1_DIS_DUP_DETECTION)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_SOFT_REORDER_EN,
+ !!(cmd->upd1 & HAL_REO_CMD_UPD1_SOFT_REORDER_EN)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_AC,
+ FIELD_GET(HAL_REO_CMD_UPD1_AC, cmd->upd1)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_BAR,
+ !!(cmd->upd1 & HAL_REO_CMD_UPD1_BAR)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_CHECK_2K_MODE,
+ !!(cmd->upd1 & HAL_REO_CMD_UPD1_CHECK_2K_MODE)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_RETRY,
+ !!(cmd->upd1 & HAL_REO_CMD_UPD1_RETRY)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_OOR_MODE,
+ !!(cmd->upd1 & HAL_REO_CMD_UPD1_OOR_MODE)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_PN_CHECK,
+ !!(cmd->upd1 & HAL_REO_CMD_UPD1_PN_CHECK)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_EVEN_PN,
+ !!(cmd->upd1 & HAL_REO_CMD_UPD1_EVEN_PN)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_UNEVEN_PN,
+ !!(cmd->upd1 & HAL_REO_CMD_UPD1_UNEVEN_PN)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_PN_HANDLE_ENABLE,
+ !!(cmd->upd1 & HAL_REO_CMD_UPD1_PN_HANDLE_ENABLE)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_IGNORE_AMPDU_FLG,
+ !!(cmd->upd1 & HAL_REO_CMD_UPD1_IGNORE_AMPDU_FLG));
+
+ if (cmd->pn_size == 24)
+ cmd->pn_size = HAL_RX_REO_QUEUE_PN_SIZE_24;
+ else if (cmd->pn_size == 48)
+ cmd->pn_size = HAL_RX_REO_QUEUE_PN_SIZE_48;
+ else if (cmd->pn_size == 128)
+ cmd->pn_size = HAL_RX_REO_QUEUE_PN_SIZE_128;
+
+ if (cmd->ba_window_size < 1)
+ cmd->ba_window_size = 1;
+
+ if (cmd->ba_window_size == 1)
+ cmd->ba_window_size++;
+
+ desc->info2 =
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO2_BA_WINDOW_SIZE,
+ cmd->ba_window_size - 1) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO2_PN_SIZE, cmd->pn_size) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO2_SVLD,
+ !!(cmd->upd2 & HAL_REO_CMD_UPD2_SVLD)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO2_SSN,
+ FIELD_GET(HAL_REO_CMD_UPD2_SSN, cmd->upd2)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO2_SEQ_2K_ERR,
+ !!(cmd->upd2 & HAL_REO_CMD_UPD2_SEQ_2K_ERR)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO2_PN_ERR,
+ !!(cmd->upd2 & HAL_REO_CMD_UPD2_PN_ERR));
+
+ return FIELD_GET(HAL_REO_CMD_HDR_INFO0_CMD_NUMBER, desc->cmd.info0);
+}
+
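+/* Fill the next free entry of the REO command ring with the given
+ * command. Returns the command number assigned to the entry, so that
+ * the caller can match the resulting status event, or a negative errno
+ * if the ring is full or the command type is not supported.
+ */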
+int ath11k_hal_reo_cmd_send(struct ath11k_base *ab, struct hal_srng *srng,
+ enum hal_reo_cmd_type type,
+ struct ath11k_hal_reo_cmd *cmd)
+{
+ struct hal_tlv_hdr *reo_desc;
+ int ret;
+
+ spin_lock_bh(&srng->lock);
+
+ ath11k_hal_srng_access_begin(ab, srng);
+ reo_desc = (struct hal_tlv_hdr *)ath11k_hal_srng_src_get_next_entry(ab, srng);
+ if (!reo_desc) {
+ ret = -ENOBUFS;
+ goto out;
+ }
+
+ switch (type) {
+ case HAL_REO_CMD_GET_QUEUE_STATS:
+ ret = ath11k_hal_reo_cmd_queue_stats(reo_desc, cmd);
+ break;
+ case HAL_REO_CMD_FLUSH_CACHE:
+ ret = ath11k_hal_reo_cmd_flush_cache(&ab->hal, reo_desc, cmd);
+ break;
+ case HAL_REO_CMD_UPDATE_RX_QUEUE:
+ ret = ath11k_hal_reo_cmd_update_rx_queue(reo_desc, cmd);
+ break;
+ case HAL_REO_CMD_FLUSH_QUEUE:
+ case HAL_REO_CMD_UNBLOCK_CACHE:
+ case HAL_REO_CMD_FLUSH_TIMEOUT_LIST:
+ ath11k_warn(ab, "Unsupported reo command %d\n", type);
+ ret = -EOPNOTSUPP;
+ break;
+ default:
+ ath11k_warn(ab, "Unknown reo command %d\n", type);
+ ret = -EINVAL;
+ break;
+ }
+
+ ath11k_dp_shadow_start_timer(ab, srng, &ab->dp.reo_cmd_timer);
+
+out:
+ ath11k_hal_srng_access_end(ab, srng);
+ spin_unlock_bh(&srng->lock);
+
+ return ret;
+}
+
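+/* Pack a DMA address, SW cookie and return buffer manager into the
+ * two-word buffer address info layout shared by the rx rings.
+ */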
+void ath11k_hal_rx_buf_addr_info_set(void *desc, dma_addr_t paddr,
+ u32 cookie, u8 manager)
+{
+ struct ath11k_buffer_addr *binfo = desc;
+ u32 paddr_lo, paddr_hi;
+
+ paddr_lo = lower_32_bits(paddr);
+ paddr_hi = upper_32_bits(paddr);
+ binfo->info0 = FIELD_PREP(BUFFER_ADDR_INFO0_ADDR, paddr_lo);
+ binfo->info1 = FIELD_PREP(BUFFER_ADDR_INFO1_ADDR, paddr_hi) |
+ FIELD_PREP(BUFFER_ADDR_INFO1_SW_COOKIE, cookie) |
+ FIELD_PREP(BUFFER_ADDR_INFO1_RET_BUF_MGR, manager);
+}
+
+void ath11k_hal_rx_buf_addr_info_get(void *desc, dma_addr_t *paddr,
+ u32 *cookie, u8 *rbm)
+{
+ struct ath11k_buffer_addr *binfo = desc;
+
+ *paddr =
+ (((u64)FIELD_GET(BUFFER_ADDR_INFO1_ADDR, binfo->info1)) << 32) |
+ FIELD_GET(BUFFER_ADDR_INFO0_ADDR, binfo->info0);
+ *cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE, binfo->info1);
+ *rbm = FIELD_GET(BUFFER_ADDR_INFO1_RET_BUF_MGR, binfo->info1);
+}
+
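+/* Collect the SW cookies of the MSDUs referenced by one link
+ * descriptor. A zero buffer address marks the first unused slot and
+ * ends the walk early, with *num_msdus trimmed to match.
+ */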
+void ath11k_hal_rx_msdu_link_info_get(void *link_desc, u32 *num_msdus,
+ u32 *msdu_cookies,
+ enum hal_rx_buf_return_buf_manager *rbm)
+{
+ struct hal_rx_msdu_link *link = link_desc;
+ struct hal_rx_msdu_details *msdu;
+ int i;
+
+ *num_msdus = HAL_NUM_RX_MSDUS_PER_LINK_DESC;
+
+ msdu = &link->msdu_link[0];
+ *rbm = FIELD_GET(BUFFER_ADDR_INFO1_RET_BUF_MGR,
+ msdu->buf_addr_info.info1);
+
+ for (i = 0; i < *num_msdus; i++) {
+ msdu = &link->msdu_link[i];
+
+ if (!FIELD_GET(BUFFER_ADDR_INFO0_ADDR,
+ msdu->buf_addr_info.info0)) {
+ *num_msdus = i;
+ break;
+ }
+ *msdu_cookies = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
+ msdu->buf_addr_info.info1);
+ msdu_cookies++;
+ }
+}
+
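+/* Validate an error entry from the REO destination ring and extract
+ * the physical address and SW cookie (descriptor bank) of the link
+ * descriptor holding the affected MSDUs.
+ */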
+int ath11k_hal_desc_reo_parse_err(struct ath11k_base *ab, u32 *rx_desc,
+ dma_addr_t *paddr, u32 *desc_bank)
+{
+ struct hal_reo_dest_ring *desc = (struct hal_reo_dest_ring *)rx_desc;
+ enum hal_reo_dest_ring_push_reason push_reason;
+ enum hal_reo_dest_ring_error_code err_code;
+
+ push_reason = FIELD_GET(HAL_REO_DEST_RING_INFO0_PUSH_REASON,
+ desc->info0);
+ err_code = FIELD_GET(HAL_REO_DEST_RING_INFO0_ERROR_CODE,
+ desc->info0);
+ ab->soc_stats.reo_error[err_code]++;
+
+ if (push_reason != HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED &&
+ push_reason != HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION) {
+ ath11k_warn(ab, "expected error push reason code, received %d\n",
+ push_reason);
+ return -EINVAL;
+ }
+
+ if (FIELD_GET(HAL_REO_DEST_RING_INFO0_BUFFER_TYPE, desc->info0) !=
+ HAL_REO_DEST_RING_BUFFER_TYPE_LINK_DESC) {
+ ath11k_warn(ab, "expected buffer type link_desc");
+ return -EINVAL;
+ }
+
+ ath11k_hal_rx_reo_ent_paddr_get(ab, rx_desc, paddr, desc_bank);
+
+ return 0;
+}
+
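+/* Parse a WBM release ring entry into hal_rx_wbm_rel_info. Entries
+ * that are not MSDU releases from RXDMA or REO, or that carry an
+ * unexpected return buffer manager, are rejected.
+ */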
+int ath11k_hal_wbm_desc_parse_err(struct ath11k_base *ab, void *desc,
+ struct hal_rx_wbm_rel_info *rel_info)
+{
+ struct hal_wbm_release_ring *wbm_desc = desc;
+ enum hal_wbm_rel_desc_type type;
+ enum hal_wbm_rel_src_module rel_src;
+ enum hal_rx_buf_return_buf_manager ret_buf_mgr;
+
+ type = FIELD_GET(HAL_WBM_RELEASE_INFO0_DESC_TYPE,
+ wbm_desc->info0);
+ /* We expect only the MSDU release descriptor type */
+ if (type != HAL_WBM_REL_DESC_TYPE_REL_MSDU) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ rel_src = FIELD_GET(HAL_WBM_RELEASE_INFO0_REL_SRC_MODULE,
+ wbm_desc->info0);
+ if (rel_src != HAL_WBM_REL_SRC_MODULE_RXDMA &&
+ rel_src != HAL_WBM_REL_SRC_MODULE_REO)
+ return -EINVAL;
+
+ ret_buf_mgr = FIELD_GET(BUFFER_ADDR_INFO1_RET_BUF_MGR,
+ wbm_desc->buf_addr_info.info1);
+ if (ret_buf_mgr != HAL_RX_BUF_RBM_SW1_BM &&
+ ret_buf_mgr != HAL_RX_BUF_RBM_SW3_BM) {
+ ab->soc_stats.invalid_rbm++;
+ return -EINVAL;
+ }
+
+ rel_info->cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
+ wbm_desc->buf_addr_info.info1);
+ rel_info->err_rel_src = rel_src;
+ if (rel_src == HAL_WBM_REL_SRC_MODULE_REO) {
+ rel_info->push_reason =
+ FIELD_GET(HAL_WBM_RELEASE_INFO0_REO_PUSH_REASON,
+ wbm_desc->info0);
+ rel_info->err_code =
+ FIELD_GET(HAL_WBM_RELEASE_INFO0_REO_ERROR_CODE,
+ wbm_desc->info0);
+ } else {
+ rel_info->push_reason =
+ FIELD_GET(HAL_WBM_RELEASE_INFO0_RXDMA_PUSH_REASON,
+ wbm_desc->info0);
+ rel_info->err_code =
+ FIELD_GET(HAL_WBM_RELEASE_INFO0_RXDMA_ERROR_CODE,
+ wbm_desc->info0);
+ }
+
+ rel_info->first_msdu = FIELD_GET(HAL_WBM_RELEASE_INFO2_FIRST_MSDU,
+ wbm_desc->info2);
+ rel_info->last_msdu = FIELD_GET(HAL_WBM_RELEASE_INFO2_LAST_MSDU,
+ wbm_desc->info2);
+ return 0;
+}
+
+void ath11k_hal_rx_reo_ent_paddr_get(struct ath11k_base *ab, void *desc,
+ dma_addr_t *paddr, u32 *desc_bank)
+{
+ struct ath11k_buffer_addr *buff_addr = desc;
+
+ *paddr = ((u64)(FIELD_GET(BUFFER_ADDR_INFO1_ADDR, buff_addr->info1)) << 32) |
+ FIELD_GET(BUFFER_ADDR_INFO0_ADDR, buff_addr->info0);
+
+ *desc_bank = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE, buff_addr->info1);
+}
+
+void ath11k_hal_rx_msdu_link_desc_set(struct ath11k_base *ab, void *desc,
+ void *link_desc,
+ enum hal_wbm_rel_bm_act action)
+{
+ struct hal_wbm_release_ring *dst_desc = desc;
+ struct hal_wbm_release_ring *src_desc = link_desc;
+
+ dst_desc->buf_addr_info = src_desc->buf_addr_info;
+ dst_desc->info0 |= FIELD_PREP(HAL_WBM_RELEASE_INFO0_REL_SRC_MODULE,
+ HAL_WBM_REL_SRC_MODULE_SW) |
+ FIELD_PREP(HAL_WBM_RELEASE_INFO0_BM_ACTION, action) |
+ FIELD_PREP(HAL_WBM_RELEASE_INFO0_DESC_TYPE,
+ HAL_WBM_REL_DESC_TYPE_MSDU_LINK);
+}
+
+void ath11k_hal_reo_status_queue_stats(struct ath11k_base *ab, u32 *reo_desc,
+ struct hal_reo_status *status)
+{
+ struct hal_tlv_hdr *tlv = (struct hal_tlv_hdr *)reo_desc;
+ struct hal_reo_get_queue_stats_status *desc =
+ (struct hal_reo_get_queue_stats_status *)tlv->value;
+
+ status->uniform_hdr.cmd_num =
+ FIELD_GET(HAL_REO_STATUS_HDR_INFO0_STATUS_NUM,
+ desc->hdr.info0);
+ status->uniform_hdr.cmd_status =
+ FIELD_GET(HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS,
+ desc->hdr.info0);
+
+ ath11k_dbg(ab, ATH11K_DBG_HAL, "Queue stats status:\n");
+ ath11k_dbg(ab, ATH11K_DBG_HAL, "header: cmd_num %d status %d\n",
+ status->uniform_hdr.cmd_num,
+ status->uniform_hdr.cmd_status);
+ ath11k_dbg(ab, ATH11K_DBG_HAL, "ssn %ld cur_idx %ld\n",
+ FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO0_SSN,
+ desc->info0),
+ FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO0_CUR_IDX,
+ desc->info0));
+ ath11k_dbg(ab, ATH11K_DBG_HAL, "pn = [%08x, %08x, %08x, %08x]\n",
+ desc->pn[0], desc->pn[1], desc->pn[2], desc->pn[3]);
+ ath11k_dbg(ab, ATH11K_DBG_HAL,
+ "last_rx: enqueue_tstamp %08x dequeue_tstamp %08x\n",
+ desc->last_rx_enqueue_timestamp,
+ desc->last_rx_dequeue_timestamp);
+ ath11k_dbg(ab, ATH11K_DBG_HAL,
+ "rx_bitmap [%08x %08x %08x %08x %08x %08x %08x %08x]\n",
+ desc->rx_bitmap[0], desc->rx_bitmap[1], desc->rx_bitmap[2],
+ desc->rx_bitmap[3], desc->rx_bitmap[4], desc->rx_bitmap[5],
+ desc->rx_bitmap[6], desc->rx_bitmap[7]);
+ ath11k_dbg(ab, ATH11K_DBG_HAL, "count: cur_mpdu %ld cur_msdu %ld\n",
+ FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO1_MPDU_COUNT,
+ desc->info1),
+ FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO1_MSDU_COUNT,
+ desc->info1));
+ ath11k_dbg(ab, ATH11K_DBG_HAL, "fwd_timeout %ld fwd_bar %ld dup_count %ld\n",
+ FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO2_TIMEOUT_COUNT,
+ desc->info2),
+ FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO2_FDTB_COUNT,
+ desc->info2),
+ FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO2_DUPLICATE_COUNT,
+ desc->info2));
+ ath11k_dbg(ab, ATH11K_DBG_HAL, "frames_in_order %ld bar_rcvd %ld\n",
+ FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO3_FIO_COUNT,
+ desc->info3),
+ FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO3_BAR_RCVD_CNT,
+ desc->info3));
+ ath11k_dbg(ab, ATH11K_DBG_HAL, "num_mpdus %d num_msdus %d total_bytes %d\n",
+ desc->num_mpdu_frames, desc->num_msdu_frames,
+ desc->total_bytes);
+ ath11k_dbg(ab, ATH11K_DBG_HAL, "late_rcvd %ld win_jump_2k %ld hole_cnt %ld\n",
+ FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO4_LATE_RX_MPDU,
+ desc->info4),
+ FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO4_WINDOW_JMP2K,
+ desc->info4),
+ FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO4_HOLE_COUNT,
+ desc->info4));
+ ath11k_dbg(ab, ATH11K_DBG_HAL, "looping count %ld\n",
+ FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO5_LOOPING_CNT,
+ desc->info5));
+}
+
+int ath11k_hal_reo_process_status(u8 *reo_desc, u8 *status)
+{
+ struct hal_tlv_hdr *tlv = (struct hal_tlv_hdr *)reo_desc;
+ struct hal_reo_status_hdr *hdr;
+
+ hdr = (struct hal_reo_status_hdr *)tlv->value;
+ *status = FIELD_GET(HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS, hdr->info0);
+
+ return FIELD_GET(HAL_REO_STATUS_HDR_INFO0_STATUS_NUM, hdr->info0);
+}
+
+void ath11k_hal_reo_flush_queue_status(struct ath11k_base *ab, u32 *reo_desc,
+ struct hal_reo_status *status)
+{
+ struct hal_tlv_hdr *tlv = (struct hal_tlv_hdr *)reo_desc;
+ struct hal_reo_flush_queue_status *desc =
+ (struct hal_reo_flush_queue_status *)tlv->value;
+
+ status->uniform_hdr.cmd_num =
+ FIELD_GET(HAL_REO_STATUS_HDR_INFO0_STATUS_NUM,
+ desc->hdr.info0);
+ status->uniform_hdr.cmd_status =
+ FIELD_GET(HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS,
+ desc->hdr.info0);
+ status->u.flush_queue.err_detected =
+ FIELD_GET(HAL_REO_FLUSH_QUEUE_INFO0_ERR_DETECTED,
+ desc->info0);
+}
+
+void ath11k_hal_reo_flush_cache_status(struct ath11k_base *ab, u32 *reo_desc,
+ struct hal_reo_status *status)
+{
+ struct ath11k_hal *hal = &ab->hal;
+ struct hal_tlv_hdr *tlv = (struct hal_tlv_hdr *)reo_desc;
+ struct hal_reo_flush_cache_status *desc =
+ (struct hal_reo_flush_cache_status *)tlv->value;
+
+ status->uniform_hdr.cmd_num =
+ FIELD_GET(HAL_REO_STATUS_HDR_INFO0_STATUS_NUM,
+ desc->hdr.info0);
+ status->uniform_hdr.cmd_status =
+ FIELD_GET(HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS,
+ desc->hdr.info0);
+
+ status->u.flush_cache.err_detected =
+ FIELD_GET(HAL_REO_FLUSH_CACHE_STATUS_INFO0_IS_ERR,
+ desc->info0);
+ status->u.flush_cache.err_code =
+ FIELD_GET(HAL_REO_FLUSH_CACHE_STATUS_INFO0_BLOCK_ERR_CODE,
+ desc->info0);
+ if (!status->u.flush_cache.err_code)
+ hal->avail_blk_resource |= BIT(hal->current_blk_index);
+
+ status->u.flush_cache.cache_controller_flush_status_hit =
+ FIELD_GET(HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_STATUS_HIT,
+ desc->info0);
+
+ status->u.flush_cache.cache_controller_flush_status_desc_type =
+ FIELD_GET(HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_DESC_TYPE,
+ desc->info0);
+ status->u.flush_cache.cache_controller_flush_status_client_id =
+ FIELD_GET(HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_CLIENT_ID,
+ desc->info0);
+ status->u.flush_cache.cache_controller_flush_status_err =
+ FIELD_GET(HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_ERR,
+ desc->info0);
+ status->u.flush_cache.cache_controller_flush_status_cnt =
+ FIELD_GET(HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_COUNT,
+ desc->info0);
+}
+
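+/* Parse an UNBLOCK_CACHE status event. On a successful unblock of a
+ * blocking resource, the slot reserved by the flush cache path is
+ * returned to the free pool.
+ */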
+void ath11k_hal_reo_unblk_cache_status(struct ath11k_base *ab, u32 *reo_desc,
+ struct hal_reo_status *status)
+{
+ struct ath11k_hal *hal = &ab->hal;
+ struct hal_tlv_hdr *tlv = (struct hal_tlv_hdr *)reo_desc;
+ struct hal_reo_unblock_cache_status *desc =
+ (struct hal_reo_unblock_cache_status *)tlv->value;
+
+ status->uniform_hdr.cmd_num =
+ FIELD_GET(HAL_REO_STATUS_HDR_INFO0_STATUS_NUM,
+ desc->hdr.info0);
+ status->uniform_hdr.cmd_status =
+ FIELD_GET(HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS,
+ desc->hdr.info0);
+
+ status->u.unblock_cache.err_detected =
+ FIELD_GET(HAL_REO_UNBLOCK_CACHE_STATUS_INFO0_IS_ERR,
+ desc->info0);
+ status->u.unblock_cache.unblock_type =
+ FIELD_GET(HAL_REO_UNBLOCK_CACHE_STATUS_INFO0_TYPE,
+ desc->info0);
+
+ if (!status->u.unblock_cache.err_detected &&
+ status->u.unblock_cache.unblock_type ==
+ HAL_REO_STATUS_UNBLOCK_BLOCKING_RESOURCE)
+ hal->avail_blk_resource &= ~BIT(hal->current_blk_index);
+}
+
+void ath11k_hal_reo_flush_timeout_list_status(struct ath11k_base *ab,
+ u32 *reo_desc,
+ struct hal_reo_status *status)
+{
+ struct hal_tlv_hdr *tlv = (struct hal_tlv_hdr *)reo_desc;
+ struct hal_reo_flush_timeout_list_status *desc =
+ (struct hal_reo_flush_timeout_list_status *)tlv->value;
+
+ status->uniform_hdr.cmd_num =
+ FIELD_GET(HAL_REO_STATUS_HDR_INFO0_STATUS_NUM,
+ desc->hdr.info0);
+ status->uniform_hdr.cmd_status =
+ FIELD_GET(HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS,
+ desc->hdr.info0);
+
+ status->u.timeout_list.err_detected =
+ FIELD_GET(HAL_REO_FLUSH_TIMEOUT_STATUS_INFO0_IS_ERR,
+ desc->info0);
+ status->u.timeout_list.list_empty =
+ FIELD_GET(HAL_REO_FLUSH_TIMEOUT_STATUS_INFO0_LIST_EMPTY,
+ desc->info0);
+
+ status->u.timeout_list.release_desc_cnt =
+ FIELD_GET(HAL_REO_FLUSH_TIMEOUT_STATUS_INFO1_REL_DESC_COUNT,
+ desc->info1);
+ status->u.timeout_list.fwd_buf_cnt =
+ FIELD_GET(HAL_REO_FLUSH_TIMEOUT_STATUS_INFO1_FWD_BUF_COUNT,
+ desc->info1);
+}
+
+void ath11k_hal_reo_desc_thresh_reached_status(struct ath11k_base *ab,
+ u32 *reo_desc,
+ struct hal_reo_status *status)
+{
+ struct hal_tlv_hdr *tlv = (struct hal_tlv_hdr *)reo_desc;
+ struct hal_reo_desc_thresh_reached_status *desc =
+ (struct hal_reo_desc_thresh_reached_status *)tlv->value;
+
+ status->uniform_hdr.cmd_num =
+ FIELD_GET(HAL_REO_STATUS_HDR_INFO0_STATUS_NUM,
+ desc->hdr.info0);
+ status->uniform_hdr.cmd_status =
+ FIELD_GET(HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS,
+ desc->hdr.info0);
+
+ status->u.desc_thresh_reached.threshold_idx =
+ FIELD_GET(HAL_REO_DESC_THRESH_STATUS_INFO0_THRESH_INDEX,
+ desc->info0);
+
+ status->u.desc_thresh_reached.link_desc_counter0 =
+ FIELD_GET(HAL_REO_DESC_THRESH_STATUS_INFO1_LINK_DESC_COUNTER0,
+ desc->info1);
+
+ status->u.desc_thresh_reached.link_desc_counter1 =
+ FIELD_GET(HAL_REO_DESC_THRESH_STATUS_INFO2_LINK_DESC_COUNTER1,
+ desc->info2);
+
+ status->u.desc_thresh_reached.link_desc_counter2 =
+ FIELD_GET(HAL_REO_DESC_THRESH_STATUS_INFO3_LINK_DESC_COUNTER2,
+ desc->info3);
+
+ status->u.desc_thresh_reached.link_desc_counter_sum =
+ FIELD_GET(HAL_REO_DESC_THRESH_STATUS_INFO4_LINK_DESC_COUNTER_SUM,
+ desc->info4);
+}
+
+void ath11k_hal_reo_update_rx_reo_queue_status(struct ath11k_base *ab,
+ u32 *reo_desc,
+ struct hal_reo_status *status)
+{
+ struct hal_tlv_hdr *tlv = (struct hal_tlv_hdr *)reo_desc;
+ struct hal_reo_status_hdr *desc =
+ (struct hal_reo_status_hdr *)tlv->value;
+
+ status->uniform_hdr.cmd_num =
+ FIELD_GET(HAL_REO_STATUS_HDR_INFO0_STATUS_NUM,
+ desc->info0);
+ status->uniform_hdr.cmd_status =
+ FIELD_GET(HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS,
+ desc->info0);
+}
+
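+/* Size of a REO queue descriptor for the given BA window size: windows
+ * of up to 105 MPDUs need one hal_rx_reo_queue_ext, up to 210 two and
+ * anything larger three. Only a non-QoS TID with a window of one needs
+ * none at all.
+ */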
+u32 ath11k_hal_reo_qdesc_size(u32 ba_window_size, u8 tid)
+{
+ u32 num_ext_desc;
+
+ if (ba_window_size <= 1) {
+ if (tid != HAL_DESC_REO_NON_QOS_TID)
+ num_ext_desc = 1;
+ else
+ num_ext_desc = 0;
+ } else if (ba_window_size <= 105) {
+ num_ext_desc = 1;
+ } else if (ba_window_size <= 210) {
+ num_ext_desc = 2;
+ } else {
+ num_ext_desc = 3;
+ }
+
+ return sizeof(struct hal_rx_reo_queue) +
+ (num_ext_desc * sizeof(struct hal_rx_reo_queue_ext));
+}
+
+void ath11k_hal_reo_qdesc_setup(void *vaddr, int tid, u32 ba_window_size,
+ u32 start_seq, enum hal_pn_type type)
+{
+ struct hal_rx_reo_queue *qdesc = vaddr;
+ struct hal_rx_reo_queue_ext *ext_desc;
+
+ memset(qdesc, 0, sizeof(*qdesc));
+
+ ath11k_hal_reo_set_desc_hdr(&qdesc->desc_hdr, HAL_DESC_REO_OWNED,
+ HAL_DESC_REO_QUEUE_DESC,
+ REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_0);
+
+ qdesc->rx_queue_num = FIELD_PREP(HAL_RX_REO_QUEUE_RX_QUEUE_NUMBER, tid);
+
+ qdesc->info0 =
+ FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_VLD, 1) |
+ FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_ASSOC_LNK_DESC_COUNTER, 1) |
+ FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_AC, ath11k_tid_to_ac(tid));
+
+ if (ba_window_size < 1)
+ ba_window_size = 1;
+
+ if (ba_window_size == 1 && tid != HAL_DESC_REO_NON_QOS_TID)
+ ba_window_size++;
+
+ if (ba_window_size == 1)
+ qdesc->info0 |= FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_RETRY, 1);
+
+ qdesc->info0 |= FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_BA_WINDOW_SIZE,
+ ba_window_size - 1);
+ switch (type) {
+ case HAL_PN_TYPE_NONE:
+ case HAL_PN_TYPE_WAPI_EVEN:
+ case HAL_PN_TYPE_WAPI_UNEVEN:
+ break;
+ case HAL_PN_TYPE_WPA:
+ qdesc->info0 |=
+ FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_PN_CHECK, 1) |
+ FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_PN_SIZE,
+ HAL_RX_REO_QUEUE_PN_SIZE_48);
+ break;
+ }
+
+ /* TODO: Set Ignore ampdu flags based on BA window size and/or
+ * AMPDU capabilities
+ */
+ qdesc->info0 |= FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_IGNORE_AMPDU_FLG, 1);
+
+ qdesc->info1 |= FIELD_PREP(HAL_RX_REO_QUEUE_INFO1_SVLD, 0);
+
+ if (start_seq <= 0xfff)
+ qdesc->info1 = FIELD_PREP(HAL_RX_REO_QUEUE_INFO1_SSN,
+ start_seq);
+
+ if (tid == HAL_DESC_REO_NON_QOS_TID)
+ return;
+
+ ext_desc = qdesc->ext_desc;
+
+ /* TODO: HW queue descriptors are currently allocated for max BA
+ * window size for all QOS TIDs so that same descriptor can be used
+ * later when ADDBA request is received. This should be changed to
+ * allocate HW queue descriptors based on BA window size being
+ * negotiated (0 for non BA cases), and reallocate when BA window
+ * size changes and also send WMI message to FW to change the REO
+ * queue descriptor in Rx peer entry as part of dp_rx_tid_update.
+ */
+ memset(ext_desc, 0, sizeof(*ext_desc));
+ ath11k_hal_reo_set_desc_hdr(&ext_desc->desc_hdr, HAL_DESC_REO_OWNED,
+ HAL_DESC_REO_QUEUE_EXT_DESC,
+ REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_1);
+ ext_desc++;
+ memset(ext_desc, 0, sizeof(*ext_desc));
+ ath11k_hal_reo_set_desc_hdr(&ext_desc->desc_hdr, HAL_DESC_REO_OWNED,
+ HAL_DESC_REO_QUEUE_EXT_DESC,
+ REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_2);
+ ext_desc++;
+ memset(ext_desc, 0, sizeof(*ext_desc));
+ ath11k_hal_reo_set_desc_hdr(&ext_desc->desc_hdr, HAL_DESC_REO_OWNED,
+ HAL_DESC_REO_QUEUE_EXT_DESC,
+ REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_3);
+}
+
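+/* Pre-assign an incrementing command number to every entry of the REO
+ * command ring so that status events can later be matched back to the
+ * command that produced them.
+ */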
+void ath11k_hal_reo_init_cmd_ring(struct ath11k_base *ab,
+ struct hal_srng *srng)
+{
+ struct hal_srng_params params;
+ struct hal_tlv_hdr *tlv;
+ struct hal_reo_get_queue_stats *desc;
+ int i, cmd_num = 1;
+ int entry_size;
+ u8 *entry;
+
+ memset(&params, 0, sizeof(params));
+
+ entry_size = ath11k_hal_srng_get_entrysize(ab, HAL_REO_CMD);
+ ath11k_hal_srng_get_params(ab, srng, &params);
+ entry = (u8 *)params.ring_base_vaddr;
+
+ for (i = 0; i < params.num_entries; i++) {
+ tlv = (struct hal_tlv_hdr *)entry;
+ desc = (struct hal_reo_get_queue_stats *)tlv->value;
+ desc->cmd.info0 =
+ FIELD_PREP(HAL_REO_CMD_HDR_INFO0_CMD_NUMBER, cmd_num++);
+ entry += entry_size;
+ }
+}
+
+#define HAL_MAX_UL_MU_USERS 37
+static inline void
+ath11k_hal_rx_handle_ofdma_info(void *rx_tlv,
+ struct hal_rx_user_status *rx_user_status)
+{
+ struct hal_rx_ppdu_end_user_stats *ppdu_end_user = rx_tlv;
+
+ rx_user_status->ul_ofdma_user_v0_word0 = __le32_to_cpu(ppdu_end_user->info6);
+
+ rx_user_status->ul_ofdma_user_v0_word1 = __le32_to_cpu(ppdu_end_user->info10);
+}
+
+static inline void
+ath11k_hal_rx_populate_byte_count(void *rx_tlv, void *ppduinfo,
+ struct hal_rx_user_status *rx_user_status)
+{
+ struct hal_rx_ppdu_end_user_stats *ppdu_end_user = rx_tlv;
+
+ rx_user_status->mpdu_ok_byte_count =
+ FIELD_GET(HAL_RX_PPDU_END_USER_STATS_INFO8_MPDU_OK_BYTE_COUNT,
+ __le32_to_cpu(ppdu_end_user->info8));
+ rx_user_status->mpdu_err_byte_count =
+ FIELD_GET(HAL_RX_PPDU_END_USER_STATS_INFO9_MPDU_ERR_BYTE_COUNT,
+ __le32_to_cpu(ppdu_end_user->info9));
+}
+
+static inline void
+ath11k_hal_rx_populate_mu_user_info(void *rx_tlv, struct hal_rx_mon_ppdu_info *ppdu_info,
+ struct hal_rx_user_status *rx_user_status)
+{
+ rx_user_status->ast_index = ppdu_info->ast_index;
+ rx_user_status->tid = ppdu_info->tid;
+ rx_user_status->tcp_msdu_count =
+ ppdu_info->tcp_msdu_count;
+ rx_user_status->udp_msdu_count =
+ ppdu_info->udp_msdu_count;
+ rx_user_status->other_msdu_count =
+ ppdu_info->other_msdu_count;
+ rx_user_status->frame_control = ppdu_info->frame_control;
+ rx_user_status->frame_control_info_valid =
+ ppdu_info->frame_control_info_valid;
+ rx_user_status->data_sequence_control_info_valid =
+ ppdu_info->data_sequence_control_info_valid;
+ rx_user_status->first_data_seq_ctrl =
+ ppdu_info->first_data_seq_ctrl;
+ rx_user_status->preamble_type = ppdu_info->preamble_type;
+ rx_user_status->ht_flags = ppdu_info->ht_flags;
+ rx_user_status->vht_flags = ppdu_info->vht_flags;
+ rx_user_status->he_flags = ppdu_info->he_flags;
+ rx_user_status->rs_flags = ppdu_info->rs_flags;
+
+ rx_user_status->mpdu_cnt_fcs_ok =
+ ppdu_info->num_mpdu_fcs_ok;
+ rx_user_status->mpdu_cnt_fcs_err =
+ ppdu_info->num_mpdu_fcs_err;
+
+ ath11k_hal_rx_populate_byte_count(rx_tlv, ppdu_info, rx_user_status);
+}
+
+static u16 ath11k_hal_rx_mpduinfo_get_peerid(struct ath11k_base *ab,
+ struct hal_rx_mpdu_info *mpdu_info)
+{
+ return ab->hw_params.hw_ops->mpdu_info_get_peerid(mpdu_info);
+}
+
+static enum hal_rx_mon_status
+ath11k_hal_rx_parse_mon_status_tlv(struct ath11k_base *ab,
+ struct hal_rx_mon_ppdu_info *ppdu_info,
+ u32 tlv_tag, u8 *tlv_data, u32 userid)
+{
+ u32 info0, info1, value;
+ u8 he_dcm = 0, he_stbc = 0;
+ u16 he_gi = 0, he_ltf = 0;
+
+ switch (tlv_tag) {
+ case HAL_RX_PPDU_START: {
+ struct hal_rx_ppdu_start *ppdu_start =
+ (struct hal_rx_ppdu_start *)tlv_data;
+
+ ppdu_info->ppdu_id =
+ FIELD_GET(HAL_RX_PPDU_START_INFO0_PPDU_ID,
+ __le32_to_cpu(ppdu_start->info0));
+ ppdu_info->chan_num = __le32_to_cpu(ppdu_start->chan_num);
+ ppdu_info->ppdu_ts = __le32_to_cpu(ppdu_start->ppdu_start_ts);
+ break;
+ }
+ case HAL_RX_PPDU_END_USER_STATS: {
+ struct hal_rx_ppdu_end_user_stats *eu_stats =
+ (struct hal_rx_ppdu_end_user_stats *)tlv_data;
+
+ info0 = __le32_to_cpu(eu_stats->info0);
+ info1 = __le32_to_cpu(eu_stats->info1);
+
+ ppdu_info->ast_index =
+ FIELD_GET(HAL_RX_PPDU_END_USER_STATS_INFO2_AST_INDEX,
+ __le32_to_cpu(eu_stats->info2));
+ ppdu_info->tid =
+ ffs(FIELD_GET(HAL_RX_PPDU_END_USER_STATS_INFO7_TID_BITMAP,
+ __le32_to_cpu(eu_stats->info7))) - 1;
+ ppdu_info->tcp_msdu_count =
+ FIELD_GET(HAL_RX_PPDU_END_USER_STATS_INFO4_TCP_MSDU_CNT,
+ __le32_to_cpu(eu_stats->info4));
+ ppdu_info->udp_msdu_count =
+ FIELD_GET(HAL_RX_PPDU_END_USER_STATS_INFO4_UDP_MSDU_CNT,
+ __le32_to_cpu(eu_stats->info4));
+ ppdu_info->other_msdu_count =
+ FIELD_GET(HAL_RX_PPDU_END_USER_STATS_INFO5_OTHER_MSDU_CNT,
+ __le32_to_cpu(eu_stats->info5));
+ ppdu_info->tcp_ack_msdu_count =
+ FIELD_GET(HAL_RX_PPDU_END_USER_STATS_INFO5_TCP_ACK_MSDU_CNT,
+ __le32_to_cpu(eu_stats->info5));
+ ppdu_info->preamble_type =
+ FIELD_GET(HAL_RX_PPDU_END_USER_STATS_INFO1_PKT_TYPE, info1);
+ ppdu_info->num_mpdu_fcs_ok =
+ FIELD_GET(HAL_RX_PPDU_END_USER_STATS_INFO1_MPDU_CNT_FCS_OK,
+ info1);
+ ppdu_info->num_mpdu_fcs_err =
+ FIELD_GET(HAL_RX_PPDU_END_USER_STATS_INFO0_MPDU_CNT_FCS_ERR,
+ info0);
+ switch (ppdu_info->preamble_type) {
+ case HAL_RX_PREAMBLE_11N:
+ ppdu_info->ht_flags = 1;
+ break;
+ case HAL_RX_PREAMBLE_11AC:
+ ppdu_info->vht_flags = 1;
+ break;
+ case HAL_RX_PREAMBLE_11AX:
+ ppdu_info->he_flags = 1;
+ break;
+ default:
+ break;
+ }
+
+ if (userid < HAL_MAX_UL_MU_USERS) {
+ struct hal_rx_user_status *rxuser_stats =
+ &ppdu_info->userstats;
+
+ ath11k_hal_rx_handle_ofdma_info(tlv_data, rxuser_stats);
+ ath11k_hal_rx_populate_mu_user_info(tlv_data, ppdu_info,
+ rxuser_stats);
+ }
+ ppdu_info->userstats.mpdu_fcs_ok_bitmap[0] =
+ __le32_to_cpu(eu_stats->rsvd1[0]);
+ ppdu_info->userstats.mpdu_fcs_ok_bitmap[1] =
+ __le32_to_cpu(eu_stats->rsvd1[1]);
+
+ break;
+ }
+ case HAL_RX_PPDU_END_USER_STATS_EXT: {
+ struct hal_rx_ppdu_end_user_stats_ext *eu_stats =
+ (struct hal_rx_ppdu_end_user_stats_ext *)tlv_data;
+
+ ppdu_info->userstats.mpdu_fcs_ok_bitmap[2] = eu_stats->info1;
+ ppdu_info->userstats.mpdu_fcs_ok_bitmap[3] = eu_stats->info2;
+ ppdu_info->userstats.mpdu_fcs_ok_bitmap[4] = eu_stats->info3;
+ ppdu_info->userstats.mpdu_fcs_ok_bitmap[5] = eu_stats->info4;
+ ppdu_info->userstats.mpdu_fcs_ok_bitmap[6] = eu_stats->info5;
+ ppdu_info->userstats.mpdu_fcs_ok_bitmap[7] = eu_stats->info6;
+ break;
+ }
+ case HAL_PHYRX_HT_SIG: {
+ struct hal_rx_ht_sig_info *ht_sig =
+ (struct hal_rx_ht_sig_info *)tlv_data;
+
+ info0 = __le32_to_cpu(ht_sig->info0);
+ info1 = __le32_to_cpu(ht_sig->info1);
+
+ ppdu_info->mcs = FIELD_GET(HAL_RX_HT_SIG_INFO_INFO0_MCS, info0);
+ ppdu_info->bw = FIELD_GET(HAL_RX_HT_SIG_INFO_INFO0_BW, info0);
+ ppdu_info->is_stbc = FIELD_GET(HAL_RX_HT_SIG_INFO_INFO1_STBC,
+ info1);
+ ppdu_info->ldpc = FIELD_GET(HAL_RX_HT_SIG_INFO_INFO1_FEC_CODING, info1);
+ ppdu_info->gi = info1 & HAL_RX_HT_SIG_INFO_INFO1_GI;
+
+ switch (ppdu_info->mcs) {
+ case 0 ... 7:
+ ppdu_info->nss = 1;
+ break;
+ case 8 ... 15:
+ ppdu_info->nss = 2;
+ break;
+ case 16 ... 23:
+ ppdu_info->nss = 3;
+ break;
+ case 24 ... 31:
+ ppdu_info->nss = 4;
+ break;
+ }
+
+ if (ppdu_info->nss > 1)
+ ppdu_info->mcs = ppdu_info->mcs % 8;
+
+ ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU;
+ break;
+ }
+ case HAL_PHYRX_L_SIG_B: {
+ struct hal_rx_lsig_b_info *lsigb =
+ (struct hal_rx_lsig_b_info *)tlv_data;
+
+ ppdu_info->rate = FIELD_GET(HAL_RX_LSIG_B_INFO_INFO0_RATE,
+ __le32_to_cpu(lsigb->info0));
+ ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU;
+ break;
+ }
+ case HAL_PHYRX_L_SIG_A: {
+ struct hal_rx_lsig_a_info *lsiga =
+ (struct hal_rx_lsig_a_info *)tlv_data;
+
+ ppdu_info->rate = FIELD_GET(HAL_RX_LSIG_A_INFO_INFO0_RATE,
+ __le32_to_cpu(lsiga->info0));
+ ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU;
+ break;
+ }
+ case HAL_PHYRX_VHT_SIG_A: {
+ struct hal_rx_vht_sig_a_info *vht_sig =
+ (struct hal_rx_vht_sig_a_info *)tlv_data;
+ u32 nsts;
+ u32 group_id;
+ u8 gi_setting;
+
+ info0 = __le32_to_cpu(vht_sig->info0);
+ info1 = __le32_to_cpu(vht_sig->info1);
+
+ ppdu_info->ldpc = FIELD_GET(HAL_RX_VHT_SIG_A_INFO_INFO1_SU_MU_CODING,
+ info1);
+ ppdu_info->mcs = FIELD_GET(HAL_RX_VHT_SIG_A_INFO_INFO1_MCS,
+ info1);
+ gi_setting = FIELD_GET(HAL_RX_VHT_SIG_A_INFO_INFO1_GI_SETTING,
+ info1);
+ switch (gi_setting) {
+ case HAL_RX_VHT_SIG_A_NORMAL_GI:
+ ppdu_info->gi = HAL_RX_GI_0_8_US;
+ break;
+ case HAL_RX_VHT_SIG_A_SHORT_GI:
+ case HAL_RX_VHT_SIG_A_SHORT_GI_AMBIGUITY:
+ ppdu_info->gi = HAL_RX_GI_0_4_US;
+ break;
+ }
+
+ ppdu_info->is_stbc = info0 & HAL_RX_VHT_SIG_A_INFO_INFO0_STBC;
+ nsts = FIELD_GET(HAL_RX_VHT_SIG_A_INFO_INFO0_NSTS, info0);
+ if (ppdu_info->is_stbc && nsts > 0)
+ nsts = ((nsts + 1) >> 1) - 1;
+
+ ppdu_info->nss = (nsts & VHT_SIG_SU_NSS_MASK) + 1;
+ ppdu_info->bw = FIELD_GET(HAL_RX_VHT_SIG_A_INFO_INFO0_BW,
+ info0);
+ ppdu_info->beamformed = info1 &
+ HAL_RX_VHT_SIG_A_INFO_INFO1_BEAMFORMED;
+ group_id = FIELD_GET(HAL_RX_VHT_SIG_A_INFO_INFO0_GROUP_ID,
+ info0);
+ if (group_id == 0 || group_id == 63)
+ ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU;
+ else
+ ppdu_info->reception_type =
+ HAL_RX_RECEPTION_TYPE_MU_MIMO;
+ ppdu_info->vht_flag_values5 = group_id;
+ ppdu_info->vht_flag_values3[0] = (((ppdu_info->mcs) << 4) |
+ ppdu_info->nss);
+ ppdu_info->vht_flag_values2 = ppdu_info->bw;
+ ppdu_info->vht_flag_values4 =
+ FIELD_GET(HAL_RX_VHT_SIG_A_INFO_INFO1_SU_MU_CODING, info1);
+ break;
+ }
+ case HAL_PHYRX_HE_SIG_A_SU: {
+ struct hal_rx_he_sig_a_su_info *he_sig_a =
+ (struct hal_rx_he_sig_a_su_info *)tlv_data;
+
+ ppdu_info->he_flags = 1;
+ info0 = __le32_to_cpu(he_sig_a->info0);
+ info1 = __le32_to_cpu(he_sig_a->info1);
+
+ value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_FORMAT_IND, info0);
+
+ if (value == 0)
+ ppdu_info->he_data1 = IEEE80211_RADIOTAP_HE_DATA1_FORMAT_TRIG;
+ else
+ ppdu_info->he_data1 = IEEE80211_RADIOTAP_HE_DATA1_FORMAT_SU;
+
+ ppdu_info->he_data1 |=
+ IEEE80211_RADIOTAP_HE_DATA1_BSS_COLOR_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_BEAM_CHANGE_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_UL_DL_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_DATA_DCM_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_CODING_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_LDPC_XSYMSEG_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_STBC_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_DOPPLER_KNOWN;
+
+ ppdu_info->he_data2 |=
+ IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA2_TXBF_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA2_PE_DISAMBIG_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA2_TXOP_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA2_NUM_LTF_SYMS_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA2_PRE_FEC_PAD_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA2_MIDAMBLE_KNOWN;
+
+ value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_BSS_COLOR, info0);
+ ppdu_info->he_data3 =
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_BSS_COLOR, value);
+ value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_BEAM_CHANGE, info0);
+ ppdu_info->he_data3 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_BEAM_CHANGE, value);
+ value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_DL_UL_FLAG, info0);
+ ppdu_info->he_data3 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_UL_DL, value);
+ value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_TRANSMIT_MCS, info0);
+ ppdu_info->mcs = value;
+ ppdu_info->he_data3 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_DATA_MCS, value);
+
+ he_dcm = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_DCM, info0);
+ ppdu_info->dcm = he_dcm;
+ ppdu_info->he_data3 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_DATA_DCM, he_dcm);
+ value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO1_CODING, info1);
+ ppdu_info->ldpc = (value == HAL_RX_SU_MU_CODING_LDPC) ? 1 : 0;
+ ppdu_info->he_data3 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_CODING, value);
+ value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO1_LDPC_EXTRA, info1);
+ ppdu_info->he_data3 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_LDPC_XSYMSEG, value);
+ he_stbc = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO1_STBC, info1);
+ ppdu_info->is_stbc = he_stbc;
+ ppdu_info->he_data3 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_STBC, he_stbc);
+
+ /* data4 */
+ value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_SPATIAL_REUSE, info0);
+ ppdu_info->he_data4 =
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA4_SU_MU_SPTL_REUSE, value);
+
+ /* data5 */
+ value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_TRANSMIT_BW, info0);
+ ppdu_info->bw = value;
+ ppdu_info->he_data5 =
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC, value);
+ value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_CP_LTF_SIZE, info0);
+ switch (value) {
+ case 0:
+ he_gi = HE_GI_0_8;
+ he_ltf = HE_LTF_1_X;
+ break;
+ case 1:
+ he_gi = HE_GI_0_8;
+ he_ltf = HE_LTF_2_X;
+ break;
+ case 2:
+ he_gi = HE_GI_1_6;
+ he_ltf = HE_LTF_2_X;
+ break;
+ case 3:
+ if (he_dcm && he_stbc) {
+ he_gi = HE_GI_0_8;
+ he_ltf = HE_LTF_4_X;
+ } else {
+ he_gi = HE_GI_3_2;
+ he_ltf = HE_LTF_4_X;
+ }
+ break;
+ }
+ ppdu_info->gi = he_gi;
+ he_gi = (he_gi != 0) ? he_gi - 1 : 0;
+ ppdu_info->he_data5 |= FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_GI, he_gi);
+ ppdu_info->ltf_size = he_ltf;
+ ppdu_info->he_data5 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE,
+ (he_ltf == HE_LTF_4_X) ? he_ltf - 1 : he_ltf);
+
+ value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_NSTS, info0);
+ ppdu_info->he_data5 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS, value);
+
+ value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO1_PKT_EXT_FACTOR, info1);
+ ppdu_info->he_data5 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_PRE_FEC_PAD, value);
+
+ value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO1_TXBF, info1);
+ ppdu_info->beamformed = value;
+ ppdu_info->he_data5 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_TXBF, value);
+ value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO1_PKT_EXT_PE_DISAM, info1);
+ ppdu_info->he_data5 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_PE_DISAMBIG, value);
+
+ /* data6 */
+ value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_NSTS, info0);
+ value++;
+ ppdu_info->nss = value;
+ ppdu_info->he_data6 =
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA6_NSTS, value);
+ value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO1_DOPPLER_IND, info1);
+ ppdu_info->he_data6 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA6_DOPPLER, value);
+ value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO1_TXOP_DURATION, info1);
+ ppdu_info->he_data6 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA6_TXOP, value);
+
+ ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU;
+ break;
+ }
+ case HAL_PHYRX_HE_SIG_A_MU_DL: {
+ struct hal_rx_he_sig_a_mu_dl_info *he_sig_a_mu_dl =
+ (struct hal_rx_he_sig_a_mu_dl_info *)tlv_data;
+
+ info0 = __le32_to_cpu(he_sig_a_mu_dl->info0);
+ info1 = __le32_to_cpu(he_sig_a_mu_dl->info1);
+
+ ppdu_info->he_mu_flags = 1;
+
+ ppdu_info->he_data1 = IEEE80211_RADIOTAP_HE_DATA1_FORMAT_MU;
+ ppdu_info->he_data1 |=
+ IEEE80211_RADIOTAP_HE_DATA1_BSS_COLOR_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_UL_DL_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_LDPC_XSYMSEG_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_STBC_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_DOPPLER_KNOWN;
+
+ ppdu_info->he_data2 =
+ IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA2_NUM_LTF_SYMS_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA2_PRE_FEC_PAD_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA2_PE_DISAMBIG_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA2_TXOP_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA2_MIDAMBLE_KNOWN;
+
+ /* data3 */
+ value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_BSS_COLOR, info0);
+ ppdu_info->he_data3 =
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_BSS_COLOR, value);
+
+ value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_UL_FLAG, info0);
+ ppdu_info->he_data3 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_UL_DL, value);
+
+ value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_LDPC_EXTRA, info1);
+ ppdu_info->he_data3 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_LDPC_XSYMSEG, value);
+
+ value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_STBC, info1);
+ he_stbc = value;
+ ppdu_info->he_data3 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_STBC, value);
+
+ /* data4 */
+ value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_SPATIAL_REUSE, info0);
+ ppdu_info->he_data4 =
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA4_SU_MU_SPTL_REUSE, value);
+
+ /* data5 */
+ value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_TRANSMIT_BW, info0);
+ ppdu_info->bw = value;
+ ppdu_info->he_data5 =
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC, value);
+
+ value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_CP_LTF_SIZE, info0);
+ switch (value) {
+ case 0:
+ he_gi = HE_GI_0_8;
+ he_ltf = HE_LTF_4_X;
+ break;
+ case 1:
+ he_gi = HE_GI_0_8;
+ he_ltf = HE_LTF_2_X;
+ break;
+ case 2:
+ he_gi = HE_GI_1_6;
+ he_ltf = HE_LTF_2_X;
+ break;
+ case 3:
+ he_gi = HE_GI_3_2;
+ he_ltf = HE_LTF_4_X;
+ break;
+ }
+ ppdu_info->gi = he_gi;
+ he_gi = (he_gi != 0) ? he_gi - 1 : 0;
+ ppdu_info->he_data5 |= FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_GI, he_gi);
+ ppdu_info->ltf_size = he_ltf;
+ ppdu_info->he_data5 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE,
+ (he_ltf == HE_LTF_4_X) ? he_ltf - 1 : he_ltf);
+
+ value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_NUM_LTF_SYMB, info1);
+ ppdu_info->he_data5 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS, value);
+
+ value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_PKT_EXT_FACTOR,
+ info1);
+ ppdu_info->he_data5 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_PRE_FEC_PAD, value);
+
+ value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_PKT_EXT_PE_DISAM,
+ info1);
+ ppdu_info->he_data5 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_PE_DISAMBIG, value);
+
+ /* data6 */
+ value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_DOPPLER_INDICATION,
+ info0);
+ ppdu_info->he_data6 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA6_DOPPLER, value);
+
+ value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_TXOP_DURATION, info1);
+ ppdu_info->he_data6 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA6_TXOP, value);
+
+ /* HE-MU Flags */
+ /* HE-MU-flags1 */
+ ppdu_info->he_flags1 =
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS_KNOWN |
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM_KNOWN |
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_COMP_KNOWN |
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_SYMS_USERS_KNOWN |
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH1_RU_KNOWN;
+
+ value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_MCS_OF_SIGB, info0);
+ ppdu_info->he_flags1 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS_KNOWN,
+ value);
+ value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_DCM_OF_SIGB, info0);
+ ppdu_info->he_flags1 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM_KNOWN,
+ value);
+
+ /* HE-MU-flags2 */
+ ppdu_info->he_flags2 =
+ IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_KNOWN;
+
+ value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_TRANSMIT_BW, info0);
+ ppdu_info->he_flags2 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW,
+ value);
+ value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_COMP_MODE_SIGB, info0);
+ ppdu_info->he_flags2 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_COMP, value);
+ value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_NUM_SIGB_SYMB, info0);
+ value = value - 1;
+ ppdu_info->he_flags2 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_SYMS_USERS,
+ value);
+
+ ppdu_info->is_stbc = info1 &
+ HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_STBC;
+ ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_MIMO;
+ break;
+ }
+ case HAL_PHYRX_HE_SIG_B1_MU: {
+ struct hal_rx_he_sig_b1_mu_info *he_sig_b1_mu =
+ (struct hal_rx_he_sig_b1_mu_info *)tlv_data;
+ u16 ru_tones;
+
+ info0 = __le32_to_cpu(he_sig_b1_mu->info0);
+
+ ru_tones = FIELD_GET(HAL_RX_HE_SIG_B1_MU_INFO_INFO0_RU_ALLOCATION,
+ info0);
+ ppdu_info->ru_alloc =
+ ath11k_mac_phy_he_ru_to_nl80211_he_ru_alloc(ru_tones);
+ ppdu_info->he_RU[0] = ru_tones;
+ ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_MIMO;
+ break;
+ }
+ case HAL_PHYRX_HE_SIG_B2_MU: {
+ struct hal_rx_he_sig_b2_mu_info *he_sig_b2_mu =
+ (struct hal_rx_he_sig_b2_mu_info *)tlv_data;
+
+ info0 = __le32_to_cpu(he_sig_b2_mu->info0);
+
+ ppdu_info->he_data1 |= IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_CODING_KNOWN;
+
+ ppdu_info->mcs =
+ FIELD_GET(HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_MCS, info0);
+ ppdu_info->he_data3 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_DATA_MCS, ppdu_info->mcs);
+
+ value = FIELD_GET(HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_CODING, info0);
+ ppdu_info->ldpc = value;
+ ppdu_info->he_data3 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_CODING, value);
+
+ value = FIELD_GET(HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_ID, info0);
+ ppdu_info->he_data4 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA4_MU_STA_ID, value);
+
+ ppdu_info->nss =
+ FIELD_GET(HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_NSTS, info0) + 1;
+ break;
+ }
+ case HAL_PHYRX_HE_SIG_B2_OFDMA: {
+ struct hal_rx_he_sig_b2_ofdma_info *he_sig_b2_ofdma =
+ (struct hal_rx_he_sig_b2_ofdma_info *)tlv_data;
+
+ info0 = __le32_to_cpu(he_sig_b2_ofdma->info0);
+
+ ppdu_info->he_data1 |=
+ IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_DATA_DCM_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_CODING_KNOWN;
+
+ /* HE-data2 */
+ ppdu_info->he_data2 |= IEEE80211_RADIOTAP_HE_DATA2_TXBF_KNOWN;
+
+ ppdu_info->mcs =
+ FIELD_GET(HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_MCS,
+ info0);
+ ppdu_info->he_data3 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_DATA_MCS, ppdu_info->mcs);
+
+ value = FIELD_GET(HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_DCM, info0);
+ he_dcm = value;
+ ppdu_info->he_data3 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_DATA_DCM, value);
+
+ value = FIELD_GET(HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_CODING, info0);
+ ppdu_info->ldpc = value;
+ ppdu_info->he_data3 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_CODING, value);
+
+ /* HE-data4 */
+ value = FIELD_GET(HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_ID, info0);
+ ppdu_info->he_data4 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA4_MU_STA_ID, value);
+
+ ppdu_info->nss =
+ FIELD_GET(HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_NSTS,
+ info0) + 1;
+ ppdu_info->beamformed =
+ info0 & HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_TXBF;
+ ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_OFDMA;
+ break;
+ }
+ case HAL_PHYRX_RSSI_LEGACY: {
+ int i;
+ bool db2dbm = test_bit(WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT,
+ ab->wmi_ab.svc_map);
+ struct hal_rx_phyrx_rssi_legacy_info *rssi =
+ (struct hal_rx_phyrx_rssi_legacy_info *)tlv_data;
+
+ /* TODO: Note that the combined RSSI is not accurate in the MU
+ * case; there it needs to be retrieved from the
+ * PHYRX_OTHER_RECEIVE_INFO TLV.
+ */
+ ppdu_info->rssi_comb =
+ FIELD_GET(HAL_RX_PHYRX_RSSI_LEGACY_INFO_INFO0_RSSI_COMB,
+ __le32_to_cpu(rssi->info0));
+
+ if (db2dbm) {
+ for (i = 0; i < ARRAY_SIZE(rssi->preamble); i++) {
+ ppdu_info->rssi_chain_pri20[i] =
+ le32_get_bits(rssi->preamble[i].rssi_2040,
+ HAL_RX_PHYRX_RSSI_PREAMBLE_PRI20);
+ }
+ }
+ break;
+ }
+ case HAL_RX_MPDU_START: {
+ struct hal_rx_mpdu_info *mpdu_info =
+ (struct hal_rx_mpdu_info *)tlv_data;
+ u16 peer_id;
+
+ peer_id = ath11k_hal_rx_mpduinfo_get_peerid(ab, mpdu_info);
+ if (peer_id)
+ ppdu_info->peer_id = peer_id;
+ break;
+ }
+ case HAL_RXPCU_PPDU_END_INFO: {
+ struct hal_rx_ppdu_end_duration *ppdu_rx_duration =
+ (struct hal_rx_ppdu_end_duration *)tlv_data;
+
+ ppdu_info->rx_duration =
+ FIELD_GET(HAL_RX_PPDU_END_DURATION,
+ __le32_to_cpu(ppdu_rx_duration->info0));
+ ppdu_info->tsft = __le32_to_cpu(ppdu_rx_duration->rsvd0[1]);
+ ppdu_info->tsft = (ppdu_info->tsft << 32) |
+ __le32_to_cpu(ppdu_rx_duration->rsvd0[0]);
+ break;
+ }
+ case HAL_DUMMY:
+ return HAL_RX_MON_STATUS_BUF_DONE;
+ case HAL_RX_PPDU_END_STATUS_DONE:
+ case 0:
+ return HAL_RX_MON_STATUS_PPDU_DONE;
+ default:
+ break;
+ }
+
+ return HAL_RX_MON_STATUS_PPDU_NOT_DONE;
+}
+
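+/* Walk the TLV chain in a monitor status buffer, feeding each TLV to
+ * the parser above until it reports that the PPDU (or the buffer) is
+ * done or the end of the buffer is reached.
+ */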
+enum hal_rx_mon_status
+ath11k_hal_rx_parse_mon_status(struct ath11k_base *ab,
+ struct hal_rx_mon_ppdu_info *ppdu_info,
+ struct sk_buff *skb)
+{
+ struct hal_tlv_hdr *tlv;
+ enum hal_rx_mon_status hal_status = HAL_RX_MON_STATUS_BUF_DONE;
+ u16 tlv_tag;
+ u16 tlv_len;
+ u32 tlv_userid = 0;
+ u8 *ptr = skb->data;
+
+ do {
+ tlv = (struct hal_tlv_hdr *)ptr;
+ tlv_tag = FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl);
+ tlv_len = FIELD_GET(HAL_TLV_HDR_LEN, tlv->tl);
+ tlv_userid = FIELD_GET(HAL_TLV_USR_ID, tlv->tl);
+ ptr += sizeof(*tlv);
+
+ /* The actual length of PPDU_END is the combined length of many PHY
+ * TLVs that follow. Skip the TLV header and
+ * rx_rxpcu_classification_overview that follows the header to get to
+ * the next TLV.
+ */
+ if (tlv_tag == HAL_RX_PPDU_END)
+ tlv_len = sizeof(struct hal_rx_rxpcu_classification_overview);
+
+ hal_status = ath11k_hal_rx_parse_mon_status_tlv(ab, ppdu_info,
+ tlv_tag, ptr, tlv_userid);
+ ptr += tlv_len;
+ ptr = PTR_ALIGN(ptr, HAL_TLV_ALIGN);
+
+ if ((ptr - skb->data) >= DP_RX_BUFFER_SIZE)
+ break;
+ } while (hal_status == HAL_RX_MON_STATUS_PPDU_NOT_DONE);
+
+ return hal_status;
+}
+
+void ath11k_hal_rx_reo_ent_buf_paddr_get(void *rx_desc, dma_addr_t *paddr,
+ u32 *sw_cookie, void **pp_buf_addr,
+ u8 *rbm, u32 *msdu_cnt)
+{
+ struct hal_reo_entrance_ring *reo_ent_ring = rx_desc;
+ struct ath11k_buffer_addr *buf_addr_info;
+ struct rx_mpdu_desc *rx_mpdu_desc_info_details;
+
+ rx_mpdu_desc_info_details =
+ (struct rx_mpdu_desc *)&reo_ent_ring->rx_mpdu_info;
+
+ *msdu_cnt = FIELD_GET(RX_MPDU_DESC_INFO0_MSDU_COUNT,
+ rx_mpdu_desc_info_details->info0);
+
+ buf_addr_info = (struct ath11k_buffer_addr *)&reo_ent_ring->buf_addr_info;
+
+ *paddr = (((u64)FIELD_GET(BUFFER_ADDR_INFO1_ADDR,
+ buf_addr_info->info1)) << 32) |
+ FIELD_GET(BUFFER_ADDR_INFO0_ADDR,
+ buf_addr_info->info0);
+
+ *sw_cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
+ buf_addr_info->info1);
+ *rbm = FIELD_GET(BUFFER_ADDR_INFO1_RET_BUF_MGR,
+ buf_addr_info->info1);
+
+ *pp_buf_addr = (void *)buf_addr_info;
+}
+
+void
+ath11k_hal_rx_sw_mon_ring_buf_paddr_get(void *rx_desc,
+ struct hal_sw_mon_ring_entries *sw_mon_entries)
+{
+ struct hal_sw_monitor_ring *sw_mon_ring = rx_desc;
+ struct ath11k_buffer_addr *buf_addr_info;
+ struct ath11k_buffer_addr *status_buf_addr_info;
+ struct rx_mpdu_desc *rx_mpdu_desc_info_details;
+
+ rx_mpdu_desc_info_details = &sw_mon_ring->rx_mpdu_info;
+
+ sw_mon_entries->msdu_cnt = FIELD_GET(RX_MPDU_DESC_INFO0_MSDU_COUNT,
+ rx_mpdu_desc_info_details->info0);
+
+ buf_addr_info = &sw_mon_ring->buf_addr_info;
+ status_buf_addr_info = &sw_mon_ring->status_buf_addr_info;
+
+ sw_mon_entries->mon_dst_paddr = (((u64)FIELD_GET(BUFFER_ADDR_INFO1_ADDR,
+ buf_addr_info->info1)) << 32) |
+ FIELD_GET(BUFFER_ADDR_INFO0_ADDR,
+ buf_addr_info->info0);
+
+ sw_mon_entries->mon_status_paddr =
+ (((u64)FIELD_GET(BUFFER_ADDR_INFO1_ADDR,
+ status_buf_addr_info->info1)) << 32) |
+ FIELD_GET(BUFFER_ADDR_INFO0_ADDR,
+ status_buf_addr_info->info0);
+
+ sw_mon_entries->mon_dst_sw_cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
+ buf_addr_info->info1);
+
+ sw_mon_entries->mon_status_sw_cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
+ status_buf_addr_info->info1);
+
+ sw_mon_entries->status_buf_count = FIELD_GET(HAL_SW_MON_RING_INFO0_STATUS_BUF_CNT,
+ sw_mon_ring->info0);
+
+ sw_mon_entries->dst_buf_addr_info = buf_addr_info;
+ sw_mon_entries->status_buf_addr_info = status_buf_addr_info;
+
+ sw_mon_entries->ppdu_id =
+ FIELD_GET(HAL_SW_MON_RING_INFO1_PHY_PPDU_ID, sw_mon_ring->info1);
+}
diff --git a/drivers/net/wireless/ath/ath11k/hal_rx.h b/drivers/net/wireless/ath/ath11k/hal_rx.h
new file mode 100644
index 000000000000..0fa9aef9d533
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/hal_rx.h
@@ -0,0 +1,520 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef ATH11K_HAL_RX_H
+#define ATH11K_HAL_RX_H
+
+struct hal_rx_wbm_rel_info {
+ u32 cookie;
+ enum hal_wbm_rel_src_module err_rel_src;
+ enum hal_reo_dest_ring_push_reason push_reason;
+ u32 err_code;
+ bool first_msdu;
+ bool last_msdu;
+};
+
+#define HAL_INVALID_PEERID 0xffff
+#define VHT_SIG_SU_NSS_MASK 0x7
+
+#define HAL_RX_MAX_MCS 12
+#define HAL_RX_MAX_NSS 8
+
+struct hal_rx_mon_status_tlv_hdr {
+ u32 hdr;
+ u8 value[];
+};
+
+enum hal_rx_su_mu_coding {
+ HAL_RX_SU_MU_CODING_BCC,
+ HAL_RX_SU_MU_CODING_LDPC,
+ HAL_RX_SU_MU_CODING_MAX,
+};
+
+enum hal_rx_gi {
+ HAL_RX_GI_0_8_US,
+ HAL_RX_GI_0_4_US,
+ HAL_RX_GI_1_6_US,
+ HAL_RX_GI_3_2_US,
+ HAL_RX_GI_MAX,
+};
+
+enum hal_rx_bw {
+ HAL_RX_BW_20MHZ,
+ HAL_RX_BW_40MHZ,
+ HAL_RX_BW_80MHZ,
+ HAL_RX_BW_160MHZ,
+ HAL_RX_BW_MAX,
+};
+
+enum hal_rx_preamble {
+ HAL_RX_PREAMBLE_11A,
+ HAL_RX_PREAMBLE_11B,
+ HAL_RX_PREAMBLE_11N,
+ HAL_RX_PREAMBLE_11AC,
+ HAL_RX_PREAMBLE_11AX,
+ HAL_RX_PREAMBLE_MAX,
+};
+
+enum hal_rx_reception_type {
+ HAL_RX_RECEPTION_TYPE_SU,
+ HAL_RX_RECEPTION_TYPE_MU_MIMO,
+ HAL_RX_RECEPTION_TYPE_MU_OFDMA,
+ HAL_RX_RECEPTION_TYPE_MU_OFDMA_MIMO,
+ HAL_RX_RECEPTION_TYPE_MAX,
+};
+
+#define HAL_RX_FCS_LEN 4
+
+enum hal_rx_mon_status {
+ HAL_RX_MON_STATUS_PPDU_NOT_DONE,
+ HAL_RX_MON_STATUS_PPDU_DONE,
+ HAL_RX_MON_STATUS_BUF_DONE,
+};
+
+struct hal_rx_user_status {
+ u32 mcs:4,
+ nss:3,
+ ofdma_info_valid:1,
+ dl_ofdma_ru_start_index:7,
+ dl_ofdma_ru_width:7,
+ dl_ofdma_ru_size:8;
+ u32 ul_ofdma_user_v0_word0;
+ u32 ul_ofdma_user_v0_word1;
+ u32 ast_index;
+ u32 tid;
+ u16 tcp_msdu_count;
+ u16 udp_msdu_count;
+ u16 other_msdu_count;
+ u16 frame_control;
+ u8 frame_control_info_valid;
+ u8 data_sequence_control_info_valid;
+ u16 first_data_seq_ctrl;
+ u32 preamble_type;
+ u16 ht_flags;
+ u16 vht_flags;
+ u16 he_flags;
+ u8 rs_flags;
+ u32 mpdu_cnt_fcs_ok;
+ u32 mpdu_cnt_fcs_err;
+ u32 mpdu_fcs_ok_bitmap[8];
+ u32 mpdu_ok_byte_count;
+ u32 mpdu_err_byte_count;
+};
+
+#define HAL_TLV_STATUS_PPDU_NOT_DONE HAL_RX_MON_STATUS_PPDU_NOT_DONE
+#define HAL_TLV_STATUS_PPDU_DONE HAL_RX_MON_STATUS_PPDU_DONE
+#define HAL_TLV_STATUS_BUF_DONE HAL_RX_MON_STATUS_BUF_DONE
+
+struct hal_sw_mon_ring_entries {
+ dma_addr_t mon_dst_paddr;
+ dma_addr_t mon_status_paddr;
+ u32 mon_dst_sw_cookie;
+ u32 mon_status_sw_cookie;
+ void *dst_buf_addr_info;
+ void *status_buf_addr_info;
+ u16 ppdu_id;
+ u8 status_buf_count;
+ u8 msdu_cnt;
+ bool end_of_ppdu;
+ bool drop_ppdu;
+};
+
+struct hal_rx_mon_ppdu_info {
+ u32 ppdu_id;
+ u32 ppdu_ts;
+ u32 num_mpdu_fcs_ok;
+ u32 num_mpdu_fcs_err;
+ u32 preamble_type;
+ u16 chan_num;
+ u16 tcp_msdu_count;
+ u16 tcp_ack_msdu_count;
+ u16 udp_msdu_count;
+ u16 other_msdu_count;
+ u16 peer_id;
+ u8 rate;
+ u8 mcs;
+ u8 nss;
+ u8 bw;
+ u8 vht_flag_values1;
+ u8 vht_flag_values2;
+ u8 vht_flag_values3[4];
+ u8 vht_flag_values4;
+ u8 vht_flag_values5;
+ u16 vht_flag_values6;
+ u8 is_stbc;
+ u8 gi;
+ u8 ldpc;
+ u8 beamformed;
+ u8 rssi_comb;
+ u8 rssi_chain_pri20[HAL_RX_MAX_NSS];
+ u16 tid;
+ u16 ht_flags;
+ u16 vht_flags;
+ u16 he_flags;
+ u16 he_mu_flags;
+ u8 dcm;
+ u8 ru_alloc;
+ u8 reception_type;
+ u64 tsft;
+ u64 rx_duration;
+ u16 frame_control;
+ u32 ast_index;
+ u8 rs_fcs_err;
+ u8 rs_flags;
+ u8 cck_flag;
+ u8 ofdm_flag;
+ u8 ulofdma_flag;
+ u8 frame_control_info_valid;
+ u16 he_per_user_1;
+ u16 he_per_user_2;
+ u8 he_per_user_position;
+ u8 he_per_user_known;
+ u16 he_flags1;
+ u16 he_flags2;
+ u8 he_RU[4];
+ u16 he_data1;
+ u16 he_data2;
+ u16 he_data3;
+ u16 he_data4;
+ u16 he_data5;
+ u16 he_data6;
+ u32 ppdu_len;
+ u32 prev_ppdu_id;
+ u32 device_id;
+ u16 first_data_seq_ctrl;
+ u8 monitor_direct_used;
+ u8 data_sequence_control_info_valid;
+ u8 ltf_size;
+ u8 rxpcu_filter_pass;
+ char rssi_chain[8][8];
+ struct hal_rx_user_status userstats;
+};
+
+#define HAL_RX_PPDU_START_INFO0_PPDU_ID GENMASK(15, 0)
+
+struct hal_rx_ppdu_start {
+ __le32 info0;
+ __le32 chan_num;
+ __le32 ppdu_start_ts;
+} __packed;
+
+#define HAL_RX_PPDU_END_USER_STATS_INFO0_MPDU_CNT_FCS_ERR GENMASK(25, 16)
+
+#define HAL_RX_PPDU_END_USER_STATS_INFO1_MPDU_CNT_FCS_OK GENMASK(8, 0)
+#define HAL_RX_PPDU_END_USER_STATS_INFO1_FC_VALID BIT(9)
+#define HAL_RX_PPDU_END_USER_STATS_INFO1_QOS_CTRL_VALID BIT(10)
+#define HAL_RX_PPDU_END_USER_STATS_INFO1_HT_CTRL_VALID BIT(11)
+#define HAL_RX_PPDU_END_USER_STATS_INFO1_PKT_TYPE GENMASK(23, 20)
+
+#define HAL_RX_PPDU_END_USER_STATS_INFO2_AST_INDEX GENMASK(15, 0)
+#define HAL_RX_PPDU_END_USER_STATS_INFO2_FRAME_CTRL GENMASK(31, 16)
+
+#define HAL_RX_PPDU_END_USER_STATS_INFO3_QOS_CTRL GENMASK(31, 16)
+
+#define HAL_RX_PPDU_END_USER_STATS_INFO4_UDP_MSDU_CNT GENMASK(15, 0)
+#define HAL_RX_PPDU_END_USER_STATS_INFO4_TCP_MSDU_CNT GENMASK(31, 16)
+
+#define HAL_RX_PPDU_END_USER_STATS_INFO5_OTHER_MSDU_CNT GENMASK(15, 0)
+#define HAL_RX_PPDU_END_USER_STATS_INFO5_TCP_ACK_MSDU_CNT GENMASK(31, 16)
+
+#define HAL_RX_PPDU_END_USER_STATS_INFO7_TID_BITMAP GENMASK(15, 0)
+#define HAL_RX_PPDU_END_USER_STATS_INFO7_TID_EOSP_BITMAP GENMASK(31, 16)
+
+#define HAL_RX_PPDU_END_USER_STATS_INFO8_MPDU_OK_BYTE_COUNT GENMASK(24, 0)
+#define HAL_RX_PPDU_END_USER_STATS_INFO9_MPDU_ERR_BYTE_COUNT GENMASK(24, 0)
+
+struct hal_rx_ppdu_end_user_stats {
+ __le32 rsvd0[2];
+ __le32 info0;
+ __le32 info1;
+ __le32 info2;
+ __le32 info3;
+ __le32 ht_ctrl;
+ __le32 rsvd1[2];
+ __le32 info4;
+ __le32 info5;
+ __le32 info6;
+ __le32 info7;
+ __le32 rsvd2[4];
+ __le32 info8;
+ __le32 rsvd3;
+ __le32 info9;
+ __le32 rsvd4[2];
+ __le32 info10;
+} __packed;
+
+struct hal_rx_ppdu_end_user_stats_ext {
+ u32 info0;
+ u32 info1;
+ u32 info2;
+ u32 info3;
+ u32 info4;
+ u32 info5;
+ u32 info6;
+} __packed;
+
+#define HAL_RX_HT_SIG_INFO_INFO0_MCS GENMASK(6, 0)
+#define HAL_RX_HT_SIG_INFO_INFO0_BW BIT(7)
+
+#define HAL_RX_HT_SIG_INFO_INFO1_STBC GENMASK(5, 4)
+#define HAL_RX_HT_SIG_INFO_INFO1_FEC_CODING BIT(6)
+#define HAL_RX_HT_SIG_INFO_INFO1_GI BIT(7)
+
+struct hal_rx_ht_sig_info {
+ __le32 info0;
+ __le32 info1;
+} __packed;
+
+#define HAL_RX_LSIG_B_INFO_INFO0_RATE GENMASK(3, 0)
+#define HAL_RX_LSIG_B_INFO_INFO0_LEN GENMASK(15, 4)
+
+struct hal_rx_lsig_b_info {
+ __le32 info0;
+} __packed;
+
+#define HAL_RX_LSIG_A_INFO_INFO0_RATE GENMASK(3, 0)
+#define HAL_RX_LSIG_A_INFO_INFO0_LEN GENMASK(16, 5)
+#define HAL_RX_LSIG_A_INFO_INFO0_PKT_TYPE GENMASK(27, 24)
+
+struct hal_rx_lsig_a_info {
+ __le32 info0;
+} __packed;
+
+#define HAL_RX_VHT_SIG_A_INFO_INFO0_BW GENMASK(1, 0)
+#define HAL_RX_VHT_SIG_A_INFO_INFO0_STBC BIT(3)
+#define HAL_RX_VHT_SIG_A_INFO_INFO0_GROUP_ID GENMASK(9, 4)
+#define HAL_RX_VHT_SIG_A_INFO_INFO0_NSTS GENMASK(21, 10)
+
+#define HAL_RX_VHT_SIG_A_INFO_INFO1_GI_SETTING GENMASK(1, 0)
+#define HAL_RX_VHT_SIG_A_INFO_INFO1_SU_MU_CODING BIT(2)
+#define HAL_RX_VHT_SIG_A_INFO_INFO1_MCS GENMASK(7, 4)
+#define HAL_RX_VHT_SIG_A_INFO_INFO1_BEAMFORMED BIT(8)
+
+struct hal_rx_vht_sig_a_info {
+ __le32 info0;
+ __le32 info1;
+} __packed;
+
+enum hal_rx_vht_sig_a_gi_setting {
+ HAL_RX_VHT_SIG_A_NORMAL_GI = 0,
+ HAL_RX_VHT_SIG_A_SHORT_GI = 1,
+ HAL_RX_VHT_SIG_A_SHORT_GI_AMBIGUITY = 3,
+};
+
+#define HAL_RX_SU_MU_CODING_LDPC 0x01
+
+#define HE_GI_0_8 0
+#define HE_GI_0_4 1
+#define HE_GI_1_6 2
+#define HE_GI_3_2 3
+
+#define HE_LTF_1_X 0
+#define HE_LTF_2_X 1
+#define HE_LTF_4_X 2
+#define HE_LTF_UNKNOWN 3
+
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_TRANSMIT_MCS GENMASK(6, 3)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_DCM BIT(7)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_TRANSMIT_BW GENMASK(20, 19)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_CP_LTF_SIZE GENMASK(22, 21)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_NSTS GENMASK(25, 23)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_BSS_COLOR GENMASK(13, 8)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_SPATIAL_REUSE GENMASK(18, 15)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_FORMAT_IND BIT(0)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_BEAM_CHANGE BIT(1)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_DL_UL_FLAG BIT(2)
+
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO1_TXOP_DURATION GENMASK(6, 0)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO1_CODING BIT(7)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO1_LDPC_EXTRA BIT(8)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO1_STBC BIT(9)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO1_TXBF BIT(10)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO1_PKT_EXT_FACTOR GENMASK(12, 11)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO1_PKT_EXT_PE_DISAM BIT(13)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO1_DOPPLER_IND BIT(15)
+
+struct hal_rx_he_sig_a_su_info {
+ __le32 info0;
+ __le32 info1;
+} __packed;
+
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_UL_FLAG BIT(0)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_MCS_OF_SIGB GENMASK(3, 1)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_DCM_OF_SIGB BIT(4)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_BSS_COLOR GENMASK(10, 5)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_SPATIAL_REUSE GENMASK(14, 11)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_TRANSMIT_BW GENMASK(17, 15)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_NUM_SIGB_SYMB GENMASK(21, 18)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_COMP_MODE_SIGB BIT(22)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_CP_LTF_SIZE GENMASK(24, 23)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_DOPPLER_INDICATION BIT(25)
+
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_TXOP_DURATION GENMASK(6, 0)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_CODING BIT(7)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_NUM_LTF_SYMB GENMASK(10, 8)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_LDPC_EXTRA BIT(11)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_STBC BIT(12)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_TXBF BIT(10)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_PKT_EXT_FACTOR GENMASK(14, 13)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_PKT_EXT_PE_DISAM BIT(15)
+
+struct hal_rx_he_sig_a_mu_dl_info {
+ __le32 info0;
+ __le32 info1;
+} __packed;
+
+#define HAL_RX_HE_SIG_B1_MU_INFO_INFO0_RU_ALLOCATION GENMASK(7, 0)
+
+struct hal_rx_he_sig_b1_mu_info {
+ __le32 info0;
+} __packed;
+
+#define HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_ID GENMASK(10, 0)
+#define HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_MCS GENMASK(18, 15)
+#define HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_CODING BIT(20)
+#define HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_NSTS GENMASK(31, 29)
+
+struct hal_rx_he_sig_b2_mu_info {
+ __le32 info0;
+} __packed;
+
+#define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_ID GENMASK(10, 0)
+#define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_NSTS GENMASK(13, 11)
+#define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_TXBF BIT(14)
+#define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_MCS GENMASK(18, 15)
+#define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_DCM BIT(19)
+#define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_CODING BIT(20)
+
+struct hal_rx_he_sig_b2_ofdma_info {
+ __le32 info0;
+} __packed;
+
+#define HAL_RX_PHYRX_RSSI_LEGACY_INFO_INFO0_RSSI_COMB GENMASK(15, 8)
+
+#define HAL_RX_PHYRX_RSSI_PREAMBLE_PRI20 GENMASK(7, 0)
+
+struct hal_rx_phyrx_chain_rssi {
+ __le32 rssi_2040;
+ __le32 rssi_80;
+} __packed;
+
+struct hal_rx_phyrx_rssi_legacy_info {
+ __le32 rsvd[3];
+ struct hal_rx_phyrx_chain_rssi pre_rssi[HAL_RX_MAX_NSS];
+ struct hal_rx_phyrx_chain_rssi preamble[HAL_RX_MAX_NSS];
+ __le32 info0;
+} __packed;
+
+#define HAL_RX_MPDU_INFO_INFO0_PEERID GENMASK(31, 16)
+#define HAL_RX_MPDU_INFO_INFO0_PEERID_WCN6855 GENMASK(15, 0)
+#define HAL_RX_MPDU_INFO_INFO1_MPDU_LEN GENMASK(13, 0)
+
+struct hal_rx_mpdu_info_ipq8074 {
+ __le32 rsvd0;
+ __le32 info0;
+ __le32 rsvd1[11];
+ __le32 info1;
+ __le32 rsvd2[9];
+} __packed;
+
+struct hal_rx_mpdu_info_qcn9074 {
+ __le32 rsvd0[10];
+ __le32 info0;
+ __le32 rsvd1[2];
+ __le32 info1;
+ __le32 rsvd2[9];
+} __packed;
+
+struct hal_rx_mpdu_info_wcn6855 {
+ __le32 rsvd0[8];
+ __le32 info0;
+ __le32 rsvd1[14];
+} __packed;
+
+struct hal_rx_mpdu_info {
+ union {
+ struct hal_rx_mpdu_info_ipq8074 ipq8074;
+ struct hal_rx_mpdu_info_qcn9074 qcn9074;
+ struct hal_rx_mpdu_info_wcn6855 wcn6855;
+ } u;
+} __packed;
+
+#define HAL_RX_PPDU_END_DURATION GENMASK(23, 0)
+struct hal_rx_ppdu_end_duration {
+ __le32 rsvd0[9];
+ __le32 info0;
+ __le32 rsvd1[4];
+} __packed;
+
+struct hal_rx_rxpcu_classification_overview {
+ u32 rsvd0;
+} __packed;
+
+struct hal_rx_msdu_desc_info {
+ u32 msdu_flags;
+ u16 msdu_len; /* 14 bits for length */
+};
+
+#define HAL_RX_NUM_MSDU_DESC 6
+struct hal_rx_msdu_list {
+ struct hal_rx_msdu_desc_info msdu_info[HAL_RX_NUM_MSDU_DESC];
+ u32 sw_cookie[HAL_RX_NUM_MSDU_DESC];
+ u8 rbm[HAL_RX_NUM_MSDU_DESC];
+};
+
+void ath11k_hal_reo_status_queue_stats(struct ath11k_base *ab, u32 *reo_desc,
+ struct hal_reo_status *status);
+void ath11k_hal_reo_flush_queue_status(struct ath11k_base *ab, u32 *reo_desc,
+ struct hal_reo_status *status);
+void ath11k_hal_reo_flush_cache_status(struct ath11k_base *ab, u32 *reo_desc,
+ struct hal_reo_status *status);
+void ath11k_hal_reo_unblk_cache_status(struct ath11k_base *ab, u32 *reo_desc,
+ struct hal_reo_status *status);
+void ath11k_hal_reo_flush_timeout_list_status(struct ath11k_base *ab,
+ u32 *reo_desc,
+ struct hal_reo_status *status);
+void ath11k_hal_reo_desc_thresh_reached_status(struct ath11k_base *ab,
+ u32 *reo_desc,
+ struct hal_reo_status *status);
+void ath11k_hal_reo_update_rx_reo_queue_status(struct ath11k_base *ab,
+ u32 *reo_desc,
+ struct hal_reo_status *status);
+int ath11k_hal_reo_process_status(u8 *reo_desc, u8 *status);
+void ath11k_hal_rx_msdu_link_info_get(void *link_desc, u32 *num_msdus,
+ u32 *msdu_cookies,
+ enum hal_rx_buf_return_buf_manager *rbm);
+void ath11k_hal_rx_msdu_link_desc_set(struct ath11k_base *ab, void *desc,
+ void *link_desc,
+ enum hal_wbm_rel_bm_act action);
+void ath11k_hal_rx_buf_addr_info_set(void *desc, dma_addr_t paddr,
+ u32 cookie, u8 manager);
+void ath11k_hal_rx_buf_addr_info_get(void *desc, dma_addr_t *paddr,
+ u32 *cookie, u8 *rbm);
+int ath11k_hal_desc_reo_parse_err(struct ath11k_base *ab, u32 *rx_desc,
+ dma_addr_t *paddr, u32 *desc_bank);
+int ath11k_hal_wbm_desc_parse_err(struct ath11k_base *ab, void *desc,
+ struct hal_rx_wbm_rel_info *rel_info);
+void ath11k_hal_rx_reo_ent_paddr_get(struct ath11k_base *ab, void *desc,
+ dma_addr_t *paddr, u32 *desc_bank);
+void ath11k_hal_rx_reo_ent_buf_paddr_get(void *rx_desc,
+ dma_addr_t *paddr, u32 *sw_cookie,
+ void **pp_buf_addr_info, u8 *rbm,
+ u32 *msdu_cnt);
+void
+ath11k_hal_rx_sw_mon_ring_buf_paddr_get(void *rx_desc,
+ struct hal_sw_mon_ring_entries *sw_mon_entries);
+enum hal_rx_mon_status
+ath11k_hal_rx_parse_mon_status(struct ath11k_base *ab,
+ struct hal_rx_mon_ppdu_info *ppdu_info,
+ struct sk_buff *skb);
+
+#define REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_0 0xDDBEEF
+#define REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_1 0xADBEEF
+#define REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_2 0xBDBEEF
+#define REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_3 0xCDBEEF
+#endif
diff --git a/drivers/net/wireless/ath/ath11k/hal_tx.c b/drivers/net/wireless/ath/ath11k/hal_tx.c
new file mode 100644
index 000000000000..b919df6ce743
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/hal_tx.c
@@ -0,0 +1,160 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include "hal_desc.h"
+#include "hal.h"
+#include "hal_tx.h"
+#include "hif.h"
+
+#define DSCP_TID_MAP_TBL_ENTRY_SIZE 64
+
+/* dscp_tid_map - Default DSCP-TID mapping
+ *
+ * DSCP TID
+ * 000000 0
+ * 001000 1
+ * 010000 2
+ * 011000 3
+ * 100000 4
+ * 101000 5
+ * 110000 6
+ * 111000 7
+ */
+static const u8 dscp_tid_map[DSCP_TID_MAP_TBL_ENTRY_SIZE] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 2, 2, 2, 2, 2, 2, 2, 2,
+ 3, 3, 3, 3, 3, 3, 3, 3,
+ 4, 4, 4, 4, 4, 4, 4, 4,
+ 5, 5, 5, 5, 5, 5, 5, 5,
+ 6, 6, 6, 6, 6, 6, 6, 6,
+ 7, 7, 7, 7, 7, 7, 7, 7,
+};
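+
+/* Worked example: DSCP 46 (binary 101110, the common EF voice
+ * codepoint) falls in the 40-47 block of the table above and thus
+ * maps to TID 5, while DSCP 0 (best effort) maps to TID 0.
+ */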
+
+void ath11k_hal_tx_cmd_desc_setup(struct ath11k_base *ab, void *cmd,
+ struct hal_tx_info *ti)
+{
+ struct hal_tcl_data_cmd *tcl_cmd = cmd;
+
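+ /* Mirror of the rx side: the low 32 bits of the buffer's DMA
+ * address go into info0, and the remaining MSBs, shifted down by
+ * HAL_ADDR_MSB_REG_SHIFT, share info1 with the sw cookie and the
+ * return buffer manager id.
+ */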
+ tcl_cmd->buf_addr_info.info0 =
+ FIELD_PREP(BUFFER_ADDR_INFO0_ADDR, ti->paddr);
+ tcl_cmd->buf_addr_info.info1 =
+ FIELD_PREP(BUFFER_ADDR_INFO1_ADDR,
+ ((uint64_t)ti->paddr >> HAL_ADDR_MSB_REG_SHIFT));
+ tcl_cmd->buf_addr_info.info1 |=
+ FIELD_PREP(BUFFER_ADDR_INFO1_RET_BUF_MGR, ti->rbm_id) |
+ FIELD_PREP(BUFFER_ADDR_INFO1_SW_COOKIE, ti->desc_id);
+
+ tcl_cmd->info0 =
+ FIELD_PREP(HAL_TCL_DATA_CMD_INFO0_DESC_TYPE, ti->type) |
+ FIELD_PREP(HAL_TCL_DATA_CMD_INFO0_ENCAP_TYPE, ti->encap_type) |
+ FIELD_PREP(HAL_TCL_DATA_CMD_INFO0_ENCRYPT_TYPE,
+ ti->encrypt_type) |
+ FIELD_PREP(HAL_TCL_DATA_CMD_INFO0_SEARCH_TYPE,
+ ti->search_type) |
+ FIELD_PREP(HAL_TCL_DATA_CMD_INFO0_ADDR_EN,
+ ti->addr_search_flags) |
+ FIELD_PREP(HAL_TCL_DATA_CMD_INFO0_CMD_NUM,
+ ti->meta_data_flags);
+
+ tcl_cmd->info1 = ti->flags0 |
+ FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_DATA_LEN, ti->data_len) |
+ FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_PKT_OFFSET, ti->pkt_offset);
+
+ tcl_cmd->info2 = ti->flags1 |
+ FIELD_PREP(HAL_TCL_DATA_CMD_INFO2_TID, ti->tid) |
+ FIELD_PREP(HAL_TCL_DATA_CMD_INFO2_LMAC_ID, ti->lmac_id);
+
+ tcl_cmd->info3 = FIELD_PREP(HAL_TCL_DATA_CMD_INFO3_DSCP_TID_TABLE_IDX,
+ ti->dscp_tid_tbl_idx) |
+ FIELD_PREP(HAL_TCL_DATA_CMD_INFO3_SEARCH_INDEX,
+ ti->bss_ast_idx) |
+ FIELD_PREP(HAL_TCL_DATA_CMD_INFO3_CACHE_SET_NUM,
+ ti->bss_ast_hash);
+ tcl_cmd->info4 = 0;
+
+ if (ti->enable_mesh)
+ ab->hw_params.hw_ops->tx_mesh_enable(ab, tcl_cmd);
+}
+
+void ath11k_hal_tx_set_dscp_tid_map(struct ath11k_base *ab, int id)
+{
+ u32 ctrl_reg_val;
+ u32 addr;
+ u8 hw_map_val[HAL_DSCP_TID_TBL_SIZE];
+ int i;
+ u32 value;
+ int cnt = 0;
+
+ ctrl_reg_val = ath11k_hif_read32(ab, HAL_SEQ_WCSS_UMAC_TCL_REG +
+ HAL_TCL1_RING_CMN_CTRL_REG);
+ /* Enable read/write access */
+ ctrl_reg_val |= HAL_TCL1_RING_CMN_CTRL_DSCP_TID_MAP_PROG_EN;
+ ath11k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_TCL_REG +
+ HAL_TCL1_RING_CMN_CTRL_REG, ctrl_reg_val);
+
+ addr = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_DSCP_TID_MAP +
+ (4 * id * (HAL_DSCP_TID_TBL_SIZE / 4));
+
+ /* Each DSCP-TID mapping is three bits wide, so one iteration packs
+ * eight mappings (24 bits) into 'value' and emits three bytes; eight
+ * iterations cover all 64 codepoints in 24 bytes.
+ */
+ for (i = 0; i < DSCP_TID_MAP_TBL_ENTRY_SIZE; i += 8) {
+ value = FIELD_PREP(HAL_TCL1_RING_FIELD_DSCP_TID_MAP0,
+ dscp_tid_map[i]) |
+ FIELD_PREP(HAL_TCL1_RING_FIELD_DSCP_TID_MAP1,
+ dscp_tid_map[i + 1]) |
+ FIELD_PREP(HAL_TCL1_RING_FIELD_DSCP_TID_MAP2,
+ dscp_tid_map[i + 2]) |
+ FIELD_PREP(HAL_TCL1_RING_FIELD_DSCP_TID_MAP3,
+ dscp_tid_map[i + 3]) |
+ FIELD_PREP(HAL_TCL1_RING_FIELD_DSCP_TID_MAP4,
+ dscp_tid_map[i + 4]) |
+ FIELD_PREP(HAL_TCL1_RING_FIELD_DSCP_TID_MAP5,
+ dscp_tid_map[i + 5]) |
+ FIELD_PREP(HAL_TCL1_RING_FIELD_DSCP_TID_MAP6,
+ dscp_tid_map[i + 6]) |
+ FIELD_PREP(HAL_TCL1_RING_FIELD_DSCP_TID_MAP7,
+ dscp_tid_map[i + 7]);
+ memcpy(&hw_map_val[cnt], (u8 *)&value, 3);
+ cnt += 3;
+ }
+
+ for (i = 0; i < HAL_DSCP_TID_TBL_SIZE; i += 4) {
+ ath11k_hif_write32(ab, addr, *(u32 *)&hw_map_val[i]);
+ addr += 4;
+ }
+
+ /* Disable read/write access */
+ ctrl_reg_val = ath11k_hif_read32(ab, HAL_SEQ_WCSS_UMAC_TCL_REG +
+ HAL_TCL1_RING_CMN_CTRL_REG);
+ ctrl_reg_val &= ~HAL_TCL1_RING_CMN_CTRL_DSCP_TID_MAP_PROG_EN;
+ ath11k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_TCL_REG +
+ HAL_TCL1_RING_CMN_CTRL_REG,
+ ctrl_reg_val);
+}
+
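+/* Pre-write a TLV header into every slot of a TCL data ring so each
+ * entry is already tagged as a HAL_TCL_DATA_CMD of the expected
+ * length; senders later fill in only the command body.
+ */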
+void ath11k_hal_tx_init_data_ring(struct ath11k_base *ab, struct hal_srng *srng)
+{
+ struct hal_srng_params params;
+ struct hal_tlv_hdr *tlv;
+ int i, entry_size;
+ u8 *desc;
+
+ memset(&params, 0, sizeof(params));
+
+ entry_size = ath11k_hal_srng_get_entrysize(ab, HAL_TCL_DATA);
+ ath11k_hal_srng_get_params(ab, srng, &params);
+ desc = (u8 *)params.ring_base_vaddr;
+
+ for (i = 0; i < params.num_entries; i++) {
+ tlv = (struct hal_tlv_hdr *)desc;
+ tlv->tl = FIELD_PREP(HAL_TLV_HDR_TAG, HAL_TCL_DATA_CMD) |
+ FIELD_PREP(HAL_TLV_HDR_LEN,
+ sizeof(struct hal_tcl_data_cmd));
+ desc += entry_size;
+ }
+}
diff --git a/drivers/net/wireless/ath/ath11k/hal_tx.h b/drivers/net/wireless/ath/ath11k/hal_tx.h
new file mode 100644
index 000000000000..46d17abd808b
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/hal_tx.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef ATH11K_HAL_TX_H
+#define ATH11K_HAL_TX_H
+
+#include "hal_desc.h"
+#include "core.h"
+
+#define HAL_TX_ADDRX_EN 1
+#define HAL_TX_ADDRY_EN 2
+
+#define HAL_TX_ADDR_SEARCH_DEFAULT 0
+#define HAL_TX_ADDR_SEARCH_INDEX 1
+
+struct hal_tx_info {
+ u16 meta_data_flags; /* %HAL_TCL_DATA_CMD_INFO0_META_ */
+ u8 ring_id;
+ u32 desc_id;
+ enum hal_tcl_desc_type type;
+ enum hal_tcl_encap_type encap_type;
+ dma_addr_t paddr;
+ u32 data_len;
+ u32 pkt_offset;
+ enum hal_encrypt_type encrypt_type;
+ u32 flags0; /* %HAL_TCL_DATA_CMD_INFO1_ */
+ u32 flags1; /* %HAL_TCL_DATA_CMD_INFO2_ */
+ u16 addr_search_flags; /* %HAL_TCL_DATA_CMD_INFO0_ADDR(X/Y)_ */
+ u16 bss_ast_hash;
+ u16 bss_ast_idx;
+ u8 tid;
+ u8 search_type; /* %HAL_TX_ADDR_SEARCH_ */
+ u8 lmac_id;
+ u8 dscp_tid_tbl_idx;
+ bool enable_mesh;
+ u8 rbm_id;
+};
+
+/* TODO: Check if the actual desc macros can be used instead */
+#define HAL_TX_STATUS_FLAGS_FIRST_MSDU BIT(0)
+#define HAL_TX_STATUS_FLAGS_LAST_MSDU BIT(1)
+#define HAL_TX_STATUS_FLAGS_MSDU_IN_AMSDU BIT(2)
+#define HAL_TX_STATUS_FLAGS_RATE_STATS_VALID BIT(3)
+#define HAL_TX_STATUS_FLAGS_RATE_LDPC BIT(4)
+#define HAL_TX_STATUS_FLAGS_RATE_STBC BIT(5)
+#define HAL_TX_STATUS_FLAGS_OFDMA BIT(6)
+
+#define HAL_TX_STATUS_DESC_LEN sizeof(struct hal_wbm_release_ring)
+
+/* Tx status parsed from srng desc */
+struct hal_tx_status {
+ enum hal_wbm_rel_src_module buf_rel_source;
+ enum hal_wbm_tqm_rel_reason status;
+ s8 ack_rssi;
+ u32 flags; /* %HAL_TX_STATUS_FLAGS_ */
+ u32 ppdu_id;
+ u8 try_cnt;
+ u8 tid;
+ u16 peer_id;
+ u32 rate_stats;
+};
+
+void ath11k_hal_tx_cmd_desc_setup(struct ath11k_base *ab, void *cmd,
+ struct hal_tx_info *ti);
+void ath11k_hal_tx_set_dscp_tid_map(struct ath11k_base *ab, int id);
+int ath11k_hal_reo_cmd_send(struct ath11k_base *ab, struct hal_srng *srng,
+ enum hal_reo_cmd_type type,
+ struct ath11k_hal_reo_cmd *cmd);
+void ath11k_hal_tx_init_data_ring(struct ath11k_base *ab,
+ struct hal_srng *srng);
+#endif
diff --git a/drivers/net/wireless/ath/ath11k/hif.h b/drivers/net/wireless/ath/ath11k/hif.h
new file mode 100644
index 000000000000..cd9c4b838246
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/hif.h
@@ -0,0 +1,162 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _HIF_H_
+#define _HIF_H_
+
+#include "core.h"
+
+struct ath11k_hif_ops {
+ u32 (*read32)(struct ath11k_base *ab, u32 address);
+ void (*write32)(struct ath11k_base *ab, u32 address, u32 data);
+ int (*read)(struct ath11k_base *ab, void *buf, u32 start, u32 end);
+ void (*irq_enable)(struct ath11k_base *ab);
+ void (*irq_disable)(struct ath11k_base *ab);
+ int (*start)(struct ath11k_base *ab);
+ void (*stop)(struct ath11k_base *ab);
+ int (*power_up)(struct ath11k_base *ab);
+ void (*power_down)(struct ath11k_base *ab, bool is_suspend);
+ int (*suspend)(struct ath11k_base *ab);
+ int (*resume)(struct ath11k_base *ab);
+ int (*map_service_to_pipe)(struct ath11k_base *ab, u16 service_id,
+ u8 *ul_pipe, u8 *dl_pipe);
+ int (*get_user_msi_vector)(struct ath11k_base *ab, char *user_name,
+ int *num_vectors, u32 *user_base_data,
+ u32 *base_vector);
+ void (*get_msi_address)(struct ath11k_base *ab, u32 *msi_addr_lo,
+ u32 *msi_addr_hi);
+ void (*ce_irq_enable)(struct ath11k_base *ab);
+ void (*ce_irq_disable)(struct ath11k_base *ab);
+ void (*get_ce_msi_idx)(struct ath11k_base *ab, u32 ce_id, u32 *msi_idx);
+ void (*coredump_download)(struct ath11k_base *ab);
+};
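+
+/* Note: read32/write32, start/stop, irq_enable/irq_disable and
+ * map_service_to_pipe are called unconditionally by the inline
+ * wrappers below; every other op is optional and either NULL-checked
+ * or mapped to -EOPNOTSUPP when absent.
+ */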
+
+static inline void ath11k_hif_ce_irq_enable(struct ath11k_base *ab)
+{
+ if (ab->hif.ops->ce_irq_enable)
+ ab->hif.ops->ce_irq_enable(ab);
+}
+
+static inline void ath11k_hif_ce_irq_disable(struct ath11k_base *ab)
+{
+ if (ab->hif.ops->ce_irq_disable)
+ ab->hif.ops->ce_irq_disable(ab);
+}
+
+static inline int ath11k_hif_start(struct ath11k_base *ab)
+{
+ return ab->hif.ops->start(ab);
+}
+
+static inline void ath11k_hif_stop(struct ath11k_base *ab)
+{
+ ab->hif.ops->stop(ab);
+}
+
+static inline void ath11k_hif_irq_enable(struct ath11k_base *ab)
+{
+ ab->hif.ops->irq_enable(ab);
+}
+
+static inline void ath11k_hif_irq_disable(struct ath11k_base *ab)
+{
+ ab->hif.ops->irq_disable(ab);
+}
+
+static inline int ath11k_hif_power_up(struct ath11k_base *ab)
+{
+ if (!ab->hif.ops->power_up)
+ return -EOPNOTSUPP;
+
+ return ab->hif.ops->power_up(ab);
+}
+
+static inline void ath11k_hif_power_down(struct ath11k_base *ab, bool is_suspend)
+{
+ if (!ab->hif.ops->power_down)
+ return;
+
+ ab->hif.ops->power_down(ab, is_suspend);
+}
+
+static inline int ath11k_hif_suspend(struct ath11k_base *ab)
+{
+ if (ab->hif.ops->suspend)
+ return ab->hif.ops->suspend(ab);
+
+ return 0;
+}
+
+static inline int ath11k_hif_resume(struct ath11k_base *ab)
+{
+ if (ab->hif.ops->resume)
+ return ab->hif.ops->resume(ab);
+
+ return 0;
+}
+
+static inline u32 ath11k_hif_read32(struct ath11k_base *ab, u32 address)
+{
+ return ab->hif.ops->read32(ab, address);
+}
+
+static inline void ath11k_hif_write32(struct ath11k_base *ab, u32 address, u32 data)
+{
+ ab->hif.ops->write32(ab, address, data);
+}
+
+static inline int ath11k_hif_read(struct ath11k_base *ab, void *buf,
+ u32 start, u32 end)
+{
+ if (!ab->hif.ops->read)
+ return -EOPNOTSUPP;
+
+ return ab->hif.ops->read(ab, buf, start, end);
+}
+
+static inline int ath11k_hif_map_service_to_pipe(struct ath11k_base *ab, u16 service_id,
+ u8 *ul_pipe, u8 *dl_pipe)
+{
+ return ab->hif.ops->map_service_to_pipe(ab, service_id, ul_pipe, dl_pipe);
+}
+
+static inline int ath11k_get_user_msi_vector(struct ath11k_base *ab, char *user_name,
+ int *num_vectors, u32 *user_base_data,
+ u32 *base_vector)
+{
+ if (!ab->hif.ops->get_user_msi_vector)
+ return -EOPNOTSUPP;
+
+ return ab->hif.ops->get_user_msi_vector(ab, user_name, num_vectors,
+ user_base_data,
+ base_vector);
+}
+
+static inline void ath11k_get_msi_address(struct ath11k_base *ab, u32 *msi_addr_lo,
+ u32 *msi_addr_hi)
+{
+ if (!ab->hif.ops->get_msi_address)
+ return;
+
+ ab->hif.ops->get_msi_address(ab, msi_addr_lo, msi_addr_hi);
+}
+
+static inline void ath11k_get_ce_msi_idx(struct ath11k_base *ab, u32 ce_id,
+ u32 *msi_data_idx)
+{
+ if (ab->hif.ops->get_ce_msi_idx)
+ ab->hif.ops->get_ce_msi_idx(ab, ce_id, msi_data_idx);
+ else
+ *msi_data_idx = ce_id;
+}
+
+static inline void ath11k_hif_coredump_download(struct ath11k_base *ab)
+{
+ if (ab->hif.ops->coredump_download)
+ ab->hif.ops->coredump_download(ab);
+}
+
+#endif /* _HIF_H_ */
diff --git a/drivers/net/wireless/ath/ath11k/htc.c b/drivers/net/wireless/ath/ath11k/htc.c
new file mode 100644
index 000000000000..4571d01cc33d
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/htc.c
@@ -0,0 +1,845 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+#include <linux/skbuff.h>
+#include <linux/ctype.h>
+
+#include "debug.h"
+#include "hif.h"
+
+struct sk_buff *ath11k_htc_alloc_skb(struct ath11k_base *ab, int size)
+{
+ struct sk_buff *skb;
+
+ skb = dev_alloc_skb(size + sizeof(struct ath11k_htc_hdr));
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, sizeof(struct ath11k_htc_hdr));
+
+ /* FW/HTC requires 4-byte aligned streams */
+ if (!IS_ALIGNED((unsigned long)skb->data, 4))
+ ath11k_warn(ab, "Unaligned HTC tx skb\n");
+
+ return skb;
+}
+
+static void ath11k_htc_control_tx_complete(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ kfree_skb(skb);
+}
+
+static struct sk_buff *ath11k_htc_build_tx_ctrl_skb(void *ab)
+{
+ struct sk_buff *skb;
+ struct ath11k_skb_cb *skb_cb;
+
+ skb = dev_alloc_skb(ATH11K_HTC_CONTROL_BUFFER_SIZE);
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, sizeof(struct ath11k_htc_hdr));
+ WARN_ON_ONCE(!IS_ALIGNED((unsigned long)skb->data, 4));
+
+ skb_cb = ATH11K_SKB_CB(skb);
+ memset(skb_cb, 0, sizeof(*skb_cb));
+
+ return skb;
+}
+
+static void ath11k_htc_prepare_tx_skb(struct ath11k_htc_ep *ep,
+ struct sk_buff *skb)
+{
+ struct ath11k_htc_hdr *hdr;
+
+ hdr = (struct ath11k_htc_hdr *)skb->data;
+
+ memset(hdr, 0, sizeof(*hdr));
+ hdr->htc_info = FIELD_PREP(HTC_HDR_ENDPOINTID, ep->eid) |
+ FIELD_PREP(HTC_HDR_PAYLOADLEN,
+ (skb->len - sizeof(*hdr)));
+
+ if (ep->tx_credit_flow_enabled)
+ hdr->htc_info |= FIELD_PREP(HTC_HDR_FLAGS,
+ ATH11K_HTC_FLAG_NEED_CREDIT_UPDATE);
+
+ spin_lock_bh(&ep->htc->tx_lock);
+ hdr->ctrl_info = FIELD_PREP(HTC_HDR_CONTROLBYTES1, ep->seq_no++);
+ spin_unlock_bh(&ep->htc->tx_lock);
+}
+
+int ath11k_htc_send(struct ath11k_htc *htc,
+ enum ath11k_htc_ep_id eid,
+ struct sk_buff *skb)
+{
+ struct ath11k_htc_ep *ep = &htc->endpoint[eid];
+ struct ath11k_skb_cb *skb_cb = ATH11K_SKB_CB(skb);
+ struct device *dev = htc->ab->dev;
+ struct ath11k_base *ab = htc->ab;
+ int credits = 0;
+ int ret;
+ bool credit_flow_enabled = (ab->hw_params.credit_flow &&
+ ep->tx_credit_flow_enabled);
+
+ if (eid >= ATH11K_HTC_EP_COUNT) {
+ ath11k_warn(ab, "Invalid endpoint id: %d\n", eid);
+ return -ENOENT;
+ }
+
+ skb_push(skb, sizeof(struct ath11k_htc_hdr));
+
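+ /* Credit accounting sketch (sizes illustrative only): with a
+ * target credit size of 1792 bytes, a 2000-byte frame costs
+ * DIV_ROUND_UP(2000, 1792) = 2 credits, which the err_credits
+ * path below returns if the send fails.
+ */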
+ if (credit_flow_enabled) {
+ credits = DIV_ROUND_UP(skb->len, htc->target_credit_size);
+ spin_lock_bh(&htc->tx_lock);
+ if (ep->tx_credits < credits) {
+ ath11k_dbg(ab, ATH11K_DBG_HTC,
+ "ep %d insufficient credits required %d total %d\n",
+ eid, credits, ep->tx_credits);
+ spin_unlock_bh(&htc->tx_lock);
+ ret = -EAGAIN;
+ goto err_pull;
+ }
+ ep->tx_credits -= credits;
+ ath11k_dbg(ab, ATH11K_DBG_HTC,
+ "ep %d credits consumed %d total %d\n",
+ eid, credits, ep->tx_credits);
+ spin_unlock_bh(&htc->tx_lock);
+ }
+
+ ath11k_htc_prepare_tx_skb(ep, skb);
+
+ skb_cb->eid = eid;
+ skb_cb->paddr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
+ ret = dma_mapping_error(dev, skb_cb->paddr);
+ if (ret) {
+ ret = -EIO;
+ goto err_credits;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_HTC, "tx skb %p eid %d paddr %pad\n",
+ skb, skb_cb->eid, &skb_cb->paddr);
+
+ ret = ath11k_ce_send(htc->ab, skb, ep->ul_pipe_id, ep->eid);
+ if (ret)
+ goto err_unmap;
+
+ return 0;
+
+err_unmap:
+ dma_unmap_single(dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE);
+err_credits:
+ if (credit_flow_enabled) {
+ spin_lock_bh(&htc->tx_lock);
+ ep->tx_credits += credits;
+ ath11k_dbg(ab, ATH11K_DBG_HTC,
+ "ep %d credits reverted %d total %d\n",
+ eid, credits, ep->tx_credits);
+ spin_unlock_bh(&htc->tx_lock);
+
+ if (ep->ep_ops.ep_tx_credits)
+ ep->ep_ops.ep_tx_credits(htc->ab);
+ }
+err_pull:
+ skb_pull(skb, sizeof(struct ath11k_htc_hdr));
+ return ret;
+}
+
+static void
+ath11k_htc_process_credit_report(struct ath11k_htc *htc,
+ const struct ath11k_htc_credit_report *report,
+ int len,
+ enum ath11k_htc_ep_id eid)
+{
+ struct ath11k_base *ab = htc->ab;
+ struct ath11k_htc_ep *ep;
+ int i, n_reports;
+
+ if (len % sizeof(*report))
+ ath11k_warn(ab, "Uneven credit report len %d", len);
+
+ n_reports = len / sizeof(*report);
+
+ spin_lock_bh(&htc->tx_lock);
+ for (i = 0; i < n_reports; i++, report++) {
+ if (report->eid >= ATH11K_HTC_EP_COUNT)
+ break;
+
+ ep = &htc->endpoint[report->eid];
+ ep->tx_credits += report->credits;
+
+ ath11k_dbg(ab, ATH11K_DBG_HTC, "ep %d credits got %d total %d\n",
+ report->eid, report->credits, ep->tx_credits);
+
+ if (ep->ep_ops.ep_tx_credits) {
+ spin_unlock_bh(&htc->tx_lock);
+ ep->ep_ops.ep_tx_credits(htc->ab);
+ spin_lock_bh(&htc->tx_lock);
+ }
+ }
+ spin_unlock_bh(&htc->tx_lock);
+}
+
+static int ath11k_htc_process_trailer(struct ath11k_htc *htc,
+ u8 *buffer,
+ int length,
+ enum ath11k_htc_ep_id src_eid)
+{
+ struct ath11k_base *ab = htc->ab;
+ int status = 0;
+ struct ath11k_htc_record *record;
+ size_t len;
+
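+ /* A trailer is a packed sequence of records: a 4-byte
+ * ath11k_htc_record_hdr (id, len, two pad bytes) followed by
+ * hdr.len bytes of payload. A trailer carrying one credit report
+ * therefore spans 4 + 4 = 8 bytes.
+ */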
+ while (length > 0) {
+ record = (struct ath11k_htc_record *)buffer;
+
+ if (length < sizeof(record->hdr)) {
+ status = -EINVAL;
+ break;
+ }
+
+ if (record->hdr.len > length) {
+ /* no room left in buffer for record */
+ ath11k_warn(ab, "Invalid record length: %d\n",
+ record->hdr.len);
+ status = -EINVAL;
+ break;
+ }
+
+ if (ab->hw_params.credit_flow) {
+ switch (record->hdr.id) {
+ case ATH11K_HTC_RECORD_CREDITS:
+ len = sizeof(struct ath11k_htc_credit_report);
+ if (record->hdr.len < len) {
+ ath11k_warn(ab, "Credit report too long\n");
+ status = -EINVAL;
+ break;
+ }
+ ath11k_htc_process_credit_report(htc,
+ record->credit_report,
+ record->hdr.len,
+ src_eid);
+ break;
+ default:
+ ath11k_warn(ab, "Unhandled record: id:%d length:%d\n",
+ record->hdr.id, record->hdr.len);
+ break;
+ }
+ }
+
+ if (status)
+ break;
+
+ /* multiple records may be present in a trailer */
+ buffer += sizeof(record->hdr) + record->hdr.len;
+ length -= sizeof(record->hdr) + record->hdr.len;
+ }
+
+ return status;
+}
+
+static void ath11k_htc_suspend_complete(struct ath11k_base *ab, bool ack)
+{
+ ath11k_dbg(ab, ATH11K_DBG_BOOT, "suspend complete %d\n", ack);
+
+ if (ack)
+ set_bit(ATH11K_FLAG_HTC_SUSPEND_COMPLETE, &ab->dev_flags);
+ else
+ clear_bit(ATH11K_FLAG_HTC_SUSPEND_COMPLETE, &ab->dev_flags);
+
+ complete(&ab->htc_suspend);
+}
+
+void ath11k_htc_tx_completion_handler(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ struct ath11k_htc *htc = &ab->htc;
+ struct ath11k_htc_ep *ep;
+ void (*ep_tx_complete)(struct ath11k_base *, struct sk_buff *);
+ u8 eid;
+
+ eid = ATH11K_SKB_CB(skb)->eid;
+ if (eid >= ATH11K_HTC_EP_COUNT) {
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
+ ep = &htc->endpoint[eid];
+ spin_lock_bh(&htc->tx_lock);
+ ep_tx_complete = ep->ep_ops.ep_tx_complete;
+ spin_unlock_bh(&htc->tx_lock);
+ if (!ep_tx_complete) {
+ dev_kfree_skb_any(skb);
+ return;
+ }
+ ep_tx_complete(htc->ab, skb);
+}
+
+static void ath11k_htc_wakeup_from_suspend(struct ath11k_base *ab)
+{
+ ath11k_dbg(ab, ATH11K_DBG_BOOT, "wakeup from suspend is received\n");
+}
+
+void ath11k_htc_rx_completion_handler(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ int status = 0;
+ struct ath11k_htc *htc = &ab->htc;
+ struct ath11k_htc_hdr *hdr;
+ struct ath11k_htc_ep *ep;
+ u16 payload_len;
+ u32 message_id, trailer_len = 0;
+ size_t min_len;
+ u8 eid;
+ bool trailer_present;
+
+ hdr = (struct ath11k_htc_hdr *)skb->data;
+ skb_pull(skb, sizeof(*hdr));
+
+ eid = FIELD_GET(HTC_HDR_ENDPOINTID, hdr->htc_info);
+
+ if (eid >= ATH11K_HTC_EP_COUNT) {
+ ath11k_warn(ab, "HTC Rx: invalid eid %d\n", eid);
+ goto out;
+ }
+
+ ep = &htc->endpoint[eid];
+
+ payload_len = FIELD_GET(HTC_HDR_PAYLOADLEN, hdr->htc_info);
+
+ if (payload_len + sizeof(*hdr) > ATH11K_HTC_MAX_LEN) {
+ ath11k_warn(ab, "HTC rx frame too long, len: %zu\n",
+ payload_len + sizeof(*hdr));
+ goto out;
+ }
+
+ if (skb->len < payload_len) {
+ ath11k_warn(ab, "HTC Rx: insufficient length, got %d, expected %d\n",
+ skb->len, payload_len);
+ goto out;
+ }
+
+ /* get flags to check for trailer */
+ trailer_present = (FIELD_GET(HTC_HDR_FLAGS, hdr->htc_info)) &
+ ATH11K_HTC_FLAG_TRAILER_PRESENT;
+
+ ath11k_dbg(ab, ATH11K_DBG_HTC, "rx ep %d skb %p trailer_present %d\n",
+ eid, skb, trailer_present);
+
+ if (trailer_present) {
+ u8 *trailer;
+
+ trailer_len = FIELD_GET(HTC_HDR_CONTROLBYTES0, hdr->ctrl_info);
+ min_len = sizeof(struct ath11k_htc_record_hdr);
+
+ if ((trailer_len < min_len) ||
+ (trailer_len > payload_len)) {
+ ath11k_warn(ab, "Invalid trailer length: %d\n",
+ trailer_len);
+ goto out;
+ }
+
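+ /* The trailer occupies the last trailer_len bytes of the payload,
+ * so walk to the end of the frame and step back by trailer_len.
+ */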
+ trailer = (u8 *)hdr;
+ trailer += sizeof(*hdr);
+ trailer += payload_len;
+ trailer -= trailer_len;
+ status = ath11k_htc_process_trailer(htc, trailer,
+ trailer_len, eid);
+ if (status)
+ goto out;
+
+ skb_trim(skb, skb->len - trailer_len);
+ }
+
+ if (trailer_len >= payload_len)
+ /* zero length packet with trailer data, just drop these */
+ goto out;
+
+ if (eid == ATH11K_HTC_EP_0) {
+ struct ath11k_htc_msg *msg = (struct ath11k_htc_msg *)skb->data;
+
+ message_id = FIELD_GET(HTC_MSG_MESSAGEID, msg->msg_svc_id);
+
+ ath11k_dbg(ab, ATH11K_DBG_HTC, "rx ep %d skb %p message_id %d\n",
+ eid, skb, message_id);
+
+ switch (message_id) {
+ case ATH11K_HTC_MSG_READY_ID:
+ case ATH11K_HTC_MSG_CONNECT_SERVICE_RESP_ID:
+ /* handle HTC control message */
+ if (completion_done(&htc->ctl_resp)) {
+ /* this is a fatal error, target should not be
+ * sending unsolicited messages on the ep 0
+ */
+ ath11k_warn(ab, "HTC rx ctrl still processing\n");
+ complete(&htc->ctl_resp);
+ goto out;
+ }
+
+ htc->control_resp_len =
+ min_t(int, skb->len,
+ ATH11K_HTC_MAX_CTRL_MSG_LEN);
+
+ memcpy(htc->control_resp_buffer, skb->data,
+ htc->control_resp_len);
+
+ complete(&htc->ctl_resp);
+ break;
+ case ATH11K_HTC_MSG_SEND_SUSPEND_COMPLETE:
+ ath11k_htc_suspend_complete(ab, true);
+ break;
+ case ATH11K_HTC_MSG_NACK_SUSPEND:
+ ath11k_htc_suspend_complete(ab, false);
+ break;
+ case ATH11K_HTC_MSG_WAKEUP_FROM_SUSPEND_ID:
+ ath11k_htc_wakeup_from_suspend(ab);
+ break;
+ default:
+ ath11k_warn(ab, "ignoring unsolicited htc ep0 event %ld\n",
+ FIELD_GET(HTC_MSG_MESSAGEID, msg->msg_svc_id));
+ break;
+ }
+ goto out;
+ }
+
+ ep->ep_ops.ep_rx_complete(ab, skb);
+
+ /* poll tx completion for interrupt disabled CE's */
+ ath11k_ce_poll_send_completed(ab, ep->ul_pipe_id);
+
+ /* skb is now owned by the rx completion handler */
+ skb = NULL;
+out:
+ kfree_skb(skb);
+}
+
+static void ath11k_htc_control_rx_complete(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ /* This is unexpected. FW is not supposed to send regular rx on this
+ * endpoint.
+ */
+ ath11k_warn(ab, "unexpected htc rx\n");
+ kfree_skb(skb);
+}
+
+static const char *htc_service_name(enum ath11k_htc_svc_id id)
+{
+ switch (id) {
+ case ATH11K_HTC_SVC_ID_RESERVED:
+ return "Reserved";
+ case ATH11K_HTC_SVC_ID_RSVD_CTRL:
+ return "Control";
+ case ATH11K_HTC_SVC_ID_WMI_CONTROL:
+ return "WMI";
+ case ATH11K_HTC_SVC_ID_WMI_DATA_BE:
+ return "DATA BE";
+ case ATH11K_HTC_SVC_ID_WMI_DATA_BK:
+ return "DATA BK";
+ case ATH11K_HTC_SVC_ID_WMI_DATA_VI:
+ return "DATA VI";
+ case ATH11K_HTC_SVC_ID_WMI_DATA_VO:
+ return "DATA VO";
+ case ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC1:
+ return "WMI MAC1";
+ case ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC2:
+ return "WMI MAC2";
+ case ATH11K_HTC_SVC_ID_NMI_CONTROL:
+ return "NMI Control";
+ case ATH11K_HTC_SVC_ID_NMI_DATA:
+ return "NMI Data";
+ case ATH11K_HTC_SVC_ID_HTT_DATA_MSG:
+ return "HTT Data";
+ case ATH11K_HTC_SVC_ID_TEST_RAW_STREAMS:
+ return "RAW";
+ case ATH11K_HTC_SVC_ID_IPA_TX:
+ return "IPA TX";
+ case ATH11K_HTC_SVC_ID_PKT_LOG:
+ return "PKT LOG";
+ }
+
+ return "Unknown";
+}
+
+static void ath11k_htc_reset_endpoint_states(struct ath11k_htc *htc)
+{
+ struct ath11k_htc_ep *ep;
+ int i;
+
+ for (i = ATH11K_HTC_EP_0; i < ATH11K_HTC_EP_COUNT; i++) {
+ ep = &htc->endpoint[i];
+ ep->service_id = ATH11K_HTC_SVC_ID_UNUSED;
+ ep->max_ep_message_len = 0;
+ ep->max_tx_queue_depth = 0;
+ ep->eid = i;
+ ep->htc = htc;
+ ep->tx_credit_flow_enabled = true;
+ }
+}
+
+static u8 ath11k_htc_get_credit_allocation(struct ath11k_htc *htc,
+ u16 service_id)
+{
+ u8 i, allocation = 0;
+
+ for (i = 0; i < ATH11K_HTC_MAX_SERVICE_ALLOC_ENTRIES; i++) {
+ if (htc->service_alloc_table[i].service_id == service_id) {
+ allocation =
+ htc->service_alloc_table[i].credit_allocation;
+ }
+ }
+
+ return allocation;
+}
+
+static int ath11k_htc_setup_target_buffer_assignments(struct ath11k_htc *htc)
+{
+ struct ath11k_htc_svc_tx_credits *serv_entry;
+ static const u32 svc_id[] = {
+ ATH11K_HTC_SVC_ID_WMI_CONTROL,
+ ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC1,
+ ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC2,
+ };
+ int i, credits;
+
+ credits = htc->total_transmit_credits;
+ serv_entry = htc->service_alloc_table;
+
+ if ((htc->wmi_ep_count == 0) ||
+ (htc->wmi_ep_count > ARRAY_SIZE(svc_id)))
+ return -EINVAL;
+
+ /* Divide credits among number of endpoints for WMI */
+ credits = credits / htc->wmi_ep_count;
+ for (i = 0; i < htc->wmi_ep_count; i++) {
+ serv_entry[i].service_id = svc_id[i];
+ serv_entry[i].credit_allocation = credits;
+ }
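+
+ /* Example (numbers illustrative only): 112 transmit credits shared
+ * by wmi_ep_count = 2 endpoints yields 56 credits each; the integer
+ * division silently discards any remainder.
+ */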
+
+ return 0;
+}
+
+int ath11k_htc_wait_target(struct ath11k_htc *htc)
+{
+ int i, status = 0;
+ struct ath11k_base *ab = htc->ab;
+ unsigned long time_left;
+ struct ath11k_htc_ready *ready;
+ u16 message_id;
+ u16 credit_count;
+ u16 credit_size;
+
+ time_left = wait_for_completion_timeout(&htc->ctl_resp,
+ ATH11K_HTC_WAIT_TIMEOUT_HZ);
+ if (!time_left) {
+ ath11k_warn(ab, "failed to receive control response completion, polling..\n");
+
+ for (i = 0; i < ab->hw_params.ce_count; i++)
+ ath11k_ce_per_engine_service(htc->ab, i);
+
+ time_left =
+ wait_for_completion_timeout(&htc->ctl_resp,
+ ATH11K_HTC_WAIT_TIMEOUT_HZ);
+
+ if (!time_left)
+ status = -ETIMEDOUT;
+ }
+
+ if (status < 0) {
+ ath11k_warn(ab, "ctl_resp never came in (%d)\n", status);
+ return status;
+ }
+
+ if (htc->control_resp_len < sizeof(*ready)) {
+ ath11k_warn(ab, "Invalid HTC ready msg len:%d\n",
+ htc->control_resp_len);
+ return -ECOMM;
+ }
+
+ ready = (struct ath11k_htc_ready *)htc->control_resp_buffer;
+ message_id = FIELD_GET(HTC_MSG_MESSAGEID, ready->id_credit_count);
+ credit_count = FIELD_GET(HTC_READY_MSG_CREDITCOUNT,
+ ready->id_credit_count);
+ credit_size = FIELD_GET(HTC_READY_MSG_CREDITSIZE, ready->size_ep);
+
+ if (message_id != ATH11K_HTC_MSG_READY_ID) {
+ ath11k_warn(ab, "Invalid HTC ready msg: 0x%x\n", message_id);
+ return -ECOMM;
+ }
+
+ htc->total_transmit_credits = credit_count;
+ htc->target_credit_size = credit_size;
+
+ ath11k_dbg(ab, ATH11K_DBG_HTC,
+ "target ready total_transmit_credits %d target_credit_size %d\n",
+ htc->total_transmit_credits, htc->target_credit_size);
+
+ if ((htc->total_transmit_credits == 0) ||
+ (htc->target_credit_size == 0)) {
+ ath11k_warn(ab, "Invalid credit size received\n");
+ return -ECOMM;
+ }
+
+ /* For QCA6390, wmi endpoint uses 1 credit to avoid
+ * back-to-back write.
+ */
+ if (ab->hw_params.supports_shadow_regs)
+ htc->total_transmit_credits = 1;
+
+ ath11k_htc_setup_target_buffer_assignments(htc);
+
+ return 0;
+}
+
+int ath11k_htc_connect_service(struct ath11k_htc *htc,
+ struct ath11k_htc_svc_conn_req *conn_req,
+ struct ath11k_htc_svc_conn_resp *conn_resp)
+{
+ struct ath11k_base *ab = htc->ab;
+ struct ath11k_htc_conn_svc *req_msg;
+ struct ath11k_htc_conn_svc_resp resp_msg_dummy;
+ struct ath11k_htc_conn_svc_resp *resp_msg = &resp_msg_dummy;
+ enum ath11k_htc_ep_id assigned_eid = ATH11K_HTC_EP_COUNT;
+ struct ath11k_htc_ep *ep;
+ struct sk_buff *skb;
+ unsigned int max_msg_size = 0;
+ int length, status;
+ unsigned long time_left;
+ bool disable_credit_flow_ctrl = false;
+ u16 message_id, service_id, flags = 0;
+ u8 tx_alloc = 0;
+
+ /* special case for HTC pseudo control service */
+ if (conn_req->service_id == ATH11K_HTC_SVC_ID_RSVD_CTRL) {
+ disable_credit_flow_ctrl = true;
+ assigned_eid = ATH11K_HTC_EP_0;
+ max_msg_size = ATH11K_HTC_MAX_CTRL_MSG_LEN;
+ memset(&resp_msg_dummy, 0, sizeof(resp_msg_dummy));
+ goto setup;
+ }
+
+ tx_alloc = ath11k_htc_get_credit_allocation(htc,
+ conn_req->service_id);
+ if (!tx_alloc)
+ ath11k_dbg(ab, ATH11K_DBG_BOOT,
+ "htc service %s does not allocate target credits\n",
+ htc_service_name(conn_req->service_id));
+
+ skb = ath11k_htc_build_tx_ctrl_skb(htc->ab);
+ if (!skb) {
+ ath11k_warn(ab, "Failed to allocate HTC packet\n");
+ return -ENOMEM;
+ }
+
+ length = sizeof(*req_msg);
+ skb_put(skb, length);
+ memset(skb->data, 0, length);
+
+ req_msg = (struct ath11k_htc_conn_svc *)skb->data;
+ req_msg->msg_svc_id = FIELD_PREP(HTC_MSG_MESSAGEID,
+ ATH11K_HTC_MSG_CONNECT_SERVICE_ID);
+
+ flags |= FIELD_PREP(ATH11K_HTC_CONN_FLAGS_RECV_ALLOC, tx_alloc);
+
+ /* Only enable credit flow control for WMI ctrl service */
+ if (!(conn_req->service_id == ATH11K_HTC_SVC_ID_WMI_CONTROL ||
+ conn_req->service_id == ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC1 ||
+ conn_req->service_id == ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC2)) {
+ flags |= ATH11K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
+ disable_credit_flow_ctrl = true;
+ }
+
+ if (!ab->hw_params.credit_flow) {
+ flags |= ATH11K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
+ disable_credit_flow_ctrl = true;
+ }
+
+ req_msg->flags_len = FIELD_PREP(HTC_SVC_MSG_CONNECTIONFLAGS, flags);
+ req_msg->msg_svc_id |= FIELD_PREP(HTC_SVC_MSG_SERVICE_ID,
+ conn_req->service_id);
+
+ reinit_completion(&htc->ctl_resp);
+
+ status = ath11k_htc_send(htc, ATH11K_HTC_EP_0, skb);
+ if (status) {
+ kfree_skb(skb);
+ return status;
+ }
+
+ /* wait for response */
+ time_left = wait_for_completion_timeout(&htc->ctl_resp,
+ ATH11K_HTC_CONN_SVC_TIMEOUT_HZ);
+ if (!time_left) {
+ ath11k_err(ab, "Service connect timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ /* we controlled the buffer creation, it's aligned */
+ resp_msg = (struct ath11k_htc_conn_svc_resp *)htc->control_resp_buffer;
+ message_id = FIELD_GET(HTC_MSG_MESSAGEID, resp_msg->msg_svc_id);
+ service_id = FIELD_GET(HTC_SVC_RESP_MSG_SERVICEID,
+ resp_msg->msg_svc_id);
+
+ if ((message_id != ATH11K_HTC_MSG_CONNECT_SERVICE_RESP_ID) ||
+ (htc->control_resp_len < sizeof(*resp_msg))) {
+ ath11k_err(ab, "Invalid resp message ID 0x%x", message_id);
+ return -EPROTO;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_HTC,
+ "service %s connect response status 0x%lx assigned ep 0x%lx\n",
+ htc_service_name(service_id),
+ FIELD_GET(HTC_SVC_RESP_MSG_STATUS, resp_msg->flags_len),
+ FIELD_GET(HTC_SVC_RESP_MSG_ENDPOINTID, resp_msg->flags_len));
+
+ conn_resp->connect_resp_code = FIELD_GET(HTC_SVC_RESP_MSG_STATUS,
+ resp_msg->flags_len);
+
+ /* check response status */
+ if (conn_resp->connect_resp_code != ATH11K_HTC_CONN_SVC_STATUS_SUCCESS) {
+ ath11k_err(ab, "HTC Service %s connect request failed: 0x%x)\n",
+ htc_service_name(service_id),
+ conn_resp->connect_resp_code);
+ return -EPROTO;
+ }
+
+ assigned_eid = (enum ath11k_htc_ep_id)FIELD_GET(
+ HTC_SVC_RESP_MSG_ENDPOINTID,
+ resp_msg->flags_len);
+
+ max_msg_size = FIELD_GET(HTC_SVC_RESP_MSG_MAXMSGSIZE,
+ resp_msg->flags_len);
+
+setup:
+
+ if (assigned_eid >= ATH11K_HTC_EP_COUNT)
+ return -EPROTO;
+
+ if (max_msg_size == 0)
+ return -EPROTO;
+
+ ep = &htc->endpoint[assigned_eid];
+ ep->eid = assigned_eid;
+
+ if (ep->service_id != ATH11K_HTC_SVC_ID_UNUSED)
+ return -EPROTO;
+
+ /* return assigned endpoint to caller */
+ conn_resp->eid = assigned_eid;
+ conn_resp->max_msg_len = FIELD_GET(HTC_SVC_RESP_MSG_MAXMSGSIZE,
+ resp_msg->flags_len);
+
+ /* setup the endpoint */
+ ep->service_id = conn_req->service_id;
+ ep->max_tx_queue_depth = conn_req->max_send_queue_depth;
+ ep->max_ep_message_len = FIELD_GET(HTC_SVC_RESP_MSG_MAXMSGSIZE,
+ resp_msg->flags_len);
+ ep->tx_credits = tx_alloc;
+
+ /* copy all the callbacks */
+ ep->ep_ops = conn_req->ep_ops;
+
+ status = ath11k_hif_map_service_to_pipe(htc->ab,
+ ep->service_id,
+ &ep->ul_pipe_id,
+ &ep->dl_pipe_id);
+ if (status)
+ return status;
+
+ ath11k_dbg(ab, ATH11K_DBG_BOOT,
+ "htc service '%s' ul pipe %d dl pipe %d eid %d ready\n",
+ htc_service_name(ep->service_id), ep->ul_pipe_id,
+ ep->dl_pipe_id, ep->eid);
+
+ if (disable_credit_flow_ctrl && ep->tx_credit_flow_enabled) {
+ ep->tx_credit_flow_enabled = false;
+ ath11k_dbg(ab, ATH11K_DBG_BOOT,
+ "htc service '%s' eid %d tx flow control disabled\n",
+ htc_service_name(ep->service_id), assigned_eid);
+ }
+
+ return status;
+}
+
+int ath11k_htc_start(struct ath11k_htc *htc)
+{
+ struct sk_buff *skb;
+ int status = 0;
+ struct ath11k_base *ab = htc->ab;
+ struct ath11k_htc_setup_complete_extended *msg;
+
+ skb = ath11k_htc_build_tx_ctrl_skb(htc->ab);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put(skb, sizeof(*msg));
+ memset(skb->data, 0, skb->len);
+
+ msg = (struct ath11k_htc_setup_complete_extended *)skb->data;
+ msg->msg_id = FIELD_PREP(HTC_MSG_MESSAGEID,
+ ATH11K_HTC_MSG_SETUP_COMPLETE_EX_ID);
+
+ if (ab->hw_params.credit_flow)
+ ath11k_dbg(ab, ATH11K_DBG_HTC, "using tx credit flow control\n");
+ else
+ msg->flags |= ATH11K_GLOBAL_DISABLE_CREDIT_FLOW;
+
+ status = ath11k_htc_send(htc, ATH11K_HTC_EP_0, skb);
+ if (status) {
+ kfree_skb(skb);
+ return status;
+ }
+
+ return 0;
+}
+
+int ath11k_htc_init(struct ath11k_base *ab)
+{
+ struct ath11k_htc *htc = &ab->htc;
+ struct ath11k_htc_svc_conn_req conn_req;
+ struct ath11k_htc_svc_conn_resp conn_resp;
+ int ret;
+
+ spin_lock_init(&htc->tx_lock);
+
+ ath11k_htc_reset_endpoint_states(htc);
+
+ htc->ab = ab;
+
+ switch (ab->wmi_ab.preferred_hw_mode) {
+ case WMI_HOST_HW_MODE_SINGLE:
+ htc->wmi_ep_count = 1;
+ break;
+ case WMI_HOST_HW_MODE_DBS:
+ case WMI_HOST_HW_MODE_DBS_OR_SBS:
+ htc->wmi_ep_count = 2;
+ break;
+ case WMI_HOST_HW_MODE_DBS_SBS:
+ htc->wmi_ep_count = 3;
+ break;
+ default:
+ htc->wmi_ep_count = ab->hw_params.max_radios;
+ break;
+ }
+
+ /* setup our pseudo HTC control endpoint connection */
+ memset(&conn_req, 0, sizeof(conn_req));
+ memset(&conn_resp, 0, sizeof(conn_resp));
+ conn_req.ep_ops.ep_tx_complete = ath11k_htc_control_tx_complete;
+ conn_req.ep_ops.ep_rx_complete = ath11k_htc_control_rx_complete;
+ conn_req.max_send_queue_depth = ATH11K_NUM_CONTROL_TX_BUFFERS;
+ conn_req.service_id = ATH11K_HTC_SVC_ID_RSVD_CTRL;
+
+ /* connect fake service */
+ ret = ath11k_htc_connect_service(htc, &conn_req, &conn_resp);
+ if (ret) {
+ ath11k_err(ab, "could not connect to htc service (%d)\n", ret);
+ return ret;
+ }
+
+ init_completion(&htc->ctl_resp);
+
+ return 0;
+}
diff --git a/drivers/net/wireless/ath/ath11k/htc.h b/drivers/net/wireless/ath/ath11k/htc.h
new file mode 100644
index 000000000000..86f77eacaea7
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/htc.h
@@ -0,0 +1,298 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef ATH11K_HTC_H
+#define ATH11K_HTC_H
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/bug.h>
+#include <linux/skbuff.h>
+#include <linux/timer.h>
+
+struct ath11k_base;
+
+#define HTC_HDR_ENDPOINTID GENMASK(7, 0)
+#define HTC_HDR_FLAGS GENMASK(15, 8)
+#define HTC_HDR_PAYLOADLEN GENMASK(31, 16)
+#define HTC_HDR_CONTROLBYTES0 GENMASK(7, 0)
+#define HTC_HDR_CONTROLBYTES1 GENMASK(15, 8)
+#define HTC_HDR_RESERVED GENMASK(31, 16)
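+
+/* Example encoding (illustrative): a header for endpoint 2 carrying a
+ * 100-byte payload packs to
+ * FIELD_PREP(HTC_HDR_ENDPOINTID, 2) | FIELD_PREP(HTC_HDR_PAYLOADLEN, 100),
+ * i.e. 0x00640002.
+ */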
+
+#define HTC_SVC_MSG_SERVICE_ID GENMASK(31, 16)
+#define HTC_SVC_MSG_CONNECTIONFLAGS GENMASK(15, 0)
+#define HTC_SVC_MSG_SERVICEMETALENGTH GENMASK(23, 16)
+#define HTC_READY_MSG_CREDITCOUNT GENMASK(31, 16)
+#define HTC_READY_MSG_CREDITSIZE GENMASK(15, 0)
+#define HTC_READY_MSG_MAXENDPOINTS GENMASK(23, 16)
+
+#define HTC_READY_EX_MSG_HTCVERSION GENMASK(7, 0)
+#define HTC_READY_EX_MSG_MAXMSGSPERHTCBUNDLE GENMASK(15, 8)
+
+#define HTC_SVC_RESP_MSG_SERVICEID GENMASK(31, 16)
+#define HTC_SVC_RESP_MSG_STATUS GENMASK(7, 0)
+#define HTC_SVC_RESP_MSG_ENDPOINTID GENMASK(15, 8)
+#define HTC_SVC_RESP_MSG_MAXMSGSIZE GENMASK(31, 16)
+#define HTC_SVC_RESP_MSG_SERVICEMETALENGTH GENMASK(7, 0)
+
+#define HTC_MSG_MESSAGEID GENMASK(15, 0)
+#define HTC_SETUP_COMPLETE_EX_MSG_SETUPFLAGS GENMASK(31, 0)
+#define HTC_SETUP_COMPLETE_EX_MSG_MAXMSGSPERBUNDLEDRECV GENMASK(7, 0)
+#define HTC_SETUP_COMPLETE_EX_MSG_RSVD0 GENMASK(15, 8)
+#define HTC_SETUP_COMPLETE_EX_MSG_RSVD1 GENMASK(23, 16)
+#define HTC_SETUP_COMPLETE_EX_MSG_RSVD2 GENMASK(31, 24)
+
+enum ath11k_htc_tx_flags {
+ ATH11K_HTC_FLAG_NEED_CREDIT_UPDATE = 0x01,
+ ATH11K_HTC_FLAG_SEND_BUNDLE = 0x02
+};
+
+enum ath11k_htc_rx_flags {
+ ATH11K_HTC_FLAG_TRAILER_PRESENT = 0x02,
+ ATH11K_HTC_FLAG_BUNDLE_MASK = 0xF0
+};
+
+struct ath11k_htc_hdr {
+ u32 htc_info;
+ u32 ctrl_info;
+} __packed __aligned(4);
+
+enum ath11k_htc_msg_id {
+ ATH11K_HTC_MSG_READY_ID = 1,
+ ATH11K_HTC_MSG_CONNECT_SERVICE_ID = 2,
+ ATH11K_HTC_MSG_CONNECT_SERVICE_RESP_ID = 3,
+ ATH11K_HTC_MSG_SETUP_COMPLETE_ID = 4,
+ ATH11K_HTC_MSG_SETUP_COMPLETE_EX_ID = 5,
+ ATH11K_HTC_MSG_SEND_SUSPEND_COMPLETE = 6,
+ ATH11K_HTC_MSG_NACK_SUSPEND = 7,
+ ATH11K_HTC_MSG_WAKEUP_FROM_SUSPEND_ID = 8,
+};
+
+enum ath11k_htc_version {
+ ATH11K_HTC_VERSION_2P0 = 0x00, /* 2.0 */
+ ATH11K_HTC_VERSION_2P1 = 0x01, /* 2.1 */
+};
+
+#define ATH11K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_MASK GENMASK(1, 0)
+#define ATH11K_HTC_CONN_FLAGS_RECV_ALLOC GENMASK(15, 8)
+
+enum ath11k_htc_conn_flags {
+ ATH11K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_ONE_FOURTH = 0x0,
+ ATH11K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_ONE_HALF = 0x1,
+ ATH11K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_THREE_FOURTHS = 0x2,
+ ATH11K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_UNITY = 0x3,
+ ATH11K_HTC_CONN_FLAGS_REDUCE_CREDIT_DRIBBLE = 0x4,
+ ATH11K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL = 0x8,
+};
+
+enum ath11k_htc_conn_svc_status {
+ ATH11K_HTC_CONN_SVC_STATUS_SUCCESS = 0,
+ ATH11K_HTC_CONN_SVC_STATUS_NOT_FOUND = 1,
+ ATH11K_HTC_CONN_SVC_STATUS_FAILED = 2,
+ ATH11K_HTC_CONN_SVC_STATUS_NO_RESOURCES = 3,
+ ATH11K_HTC_CONN_SVC_STATUS_NO_MORE_EP = 4
+};
+
+struct ath11k_htc_ready {
+ u32 id_credit_count;
+ u32 size_ep;
+} __packed;
+
+struct ath11k_htc_ready_extended {
+ struct ath11k_htc_ready base;
+ u32 ver_bundle;
+} __packed;
+
+struct ath11k_htc_conn_svc {
+ u32 msg_svc_id;
+ u32 flags_len;
+} __packed;
+
+struct ath11k_htc_conn_svc_resp {
+ u32 msg_svc_id;
+ u32 flags_len;
+ u32 svc_meta_pad;
+} __packed;
+
+#define ATH11K_GLOBAL_DISABLE_CREDIT_FLOW BIT(1)
+
+struct ath11k_htc_setup_complete_extended {
+ u32 msg_id;
+ u32 flags;
+ u32 max_msgs_per_bundled_recv;
+} __packed;
+
+struct ath11k_htc_msg {
+ u32 msg_svc_id;
+ u32 flags_len;
+} __packed __aligned(4);
+
+enum ath11k_htc_record_id {
+ ATH11K_HTC_RECORD_NULL = 0,
+ ATH11K_HTC_RECORD_CREDITS = 1
+};
+
+struct ath11k_htc_record_hdr {
+ u8 id; /* @enum ath11k_htc_record_id */
+ u8 len;
+ u8 pad0;
+ u8 pad1;
+} __packed;
+
+struct ath11k_htc_credit_report {
+ u8 eid; /* @enum ath11k_htc_ep_id */
+ u8 credits;
+ u8 pad0;
+ u8 pad1;
+} __packed;
+
+struct ath11k_htc_record {
+ struct ath11k_htc_record_hdr hdr;
+ struct ath11k_htc_credit_report credit_report[];
+} __packed __aligned(4);
+
+enum ath11k_htc_svc_gid {
+ ATH11K_HTC_SVC_GRP_RSVD = 0,
+ ATH11K_HTC_SVC_GRP_WMI = 1,
+ ATH11K_HTC_SVC_GRP_NMI = 2,
+ ATH11K_HTC_SVC_GRP_HTT = 3,
+ ATH11K_HTC_SVC_GRP_CFG = 4,
+ ATH11K_HTC_SVC_GRP_IPA = 5,
+ ATH11K_HTC_SVC_GRP_PKTLOG = 6,
+
+ ATH11K_HTC_SVC_GRP_TEST = 254,
+ ATH11K_HTC_SVC_GRP_LAST = 255,
+};
+
+#define SVC(group, idx) \
+ (int)(((int)(group) << 8) | (int)(idx))
+
+enum ath11k_htc_svc_id {
+ /* NOTE: service ID of 0x0000 is reserved and should never be used */
+ ATH11K_HTC_SVC_ID_RESERVED = 0x0000,
+ ATH11K_HTC_SVC_ID_UNUSED = ATH11K_HTC_SVC_ID_RESERVED,
+
+ ATH11K_HTC_SVC_ID_RSVD_CTRL = SVC(ATH11K_HTC_SVC_GRP_RSVD, 1),
+ ATH11K_HTC_SVC_ID_WMI_CONTROL = SVC(ATH11K_HTC_SVC_GRP_WMI, 0),
+ ATH11K_HTC_SVC_ID_WMI_DATA_BE = SVC(ATH11K_HTC_SVC_GRP_WMI, 1),
+ ATH11K_HTC_SVC_ID_WMI_DATA_BK = SVC(ATH11K_HTC_SVC_GRP_WMI, 2),
+ ATH11K_HTC_SVC_ID_WMI_DATA_VI = SVC(ATH11K_HTC_SVC_GRP_WMI, 3),
+ ATH11K_HTC_SVC_ID_WMI_DATA_VO = SVC(ATH11K_HTC_SVC_GRP_WMI, 4),
+ ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC1 = SVC(ATH11K_HTC_SVC_GRP_WMI, 5),
+ ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC2 = SVC(ATH11K_HTC_SVC_GRP_WMI, 6),
+
+ ATH11K_HTC_SVC_ID_NMI_CONTROL = SVC(ATH11K_HTC_SVC_GRP_NMI, 0),
+ ATH11K_HTC_SVC_ID_NMI_DATA = SVC(ATH11K_HTC_SVC_GRP_NMI, 1),
+
+ ATH11K_HTC_SVC_ID_HTT_DATA_MSG = SVC(ATH11K_HTC_SVC_GRP_HTT, 0),
+
+ /* raw stream service (i.e. flash, tcmd, calibration apps) */
+ ATH11K_HTC_SVC_ID_TEST_RAW_STREAMS = SVC(ATH11K_HTC_SVC_GRP_TEST, 0),
+ ATH11K_HTC_SVC_ID_IPA_TX = SVC(ATH11K_HTC_SVC_GRP_IPA, 0),
+ ATH11K_HTC_SVC_ID_PKT_LOG = SVC(ATH11K_HTC_SVC_GRP_PKTLOG, 0),
+};
+
+#undef SVC
+
+enum ath11k_htc_ep_id {
+ ATH11K_HTC_EP_UNUSED = -1,
+ ATH11K_HTC_EP_0 = 0,
+ ATH11K_HTC_EP_1 = 1,
+ ATH11K_HTC_EP_2,
+ ATH11K_HTC_EP_3,
+ ATH11K_HTC_EP_4,
+ ATH11K_HTC_EP_5,
+ ATH11K_HTC_EP_6,
+ ATH11K_HTC_EP_7,
+ ATH11K_HTC_EP_8,
+ ATH11K_HTC_EP_COUNT,
+};
+
+struct ath11k_htc_ep_ops {
+ void (*ep_tx_complete)(struct ath11k_base *, struct sk_buff *);
+ void (*ep_rx_complete)(struct ath11k_base *, struct sk_buff *);
+ void (*ep_tx_credits)(struct ath11k_base *);
+};
+
+/* service connection information */
+struct ath11k_htc_svc_conn_req {
+ u16 service_id;
+ struct ath11k_htc_ep_ops ep_ops;
+ int max_send_queue_depth;
+};
+
+/* service connection response information */
+struct ath11k_htc_svc_conn_resp {
+ u8 buffer_len;
+ u8 actual_len;
+ enum ath11k_htc_ep_id eid;
+ unsigned int max_msg_len;
+ u8 connect_resp_code;
+};
+
+#define ATH11K_NUM_CONTROL_TX_BUFFERS 2
+#define ATH11K_HTC_MAX_LEN 4096
+#define ATH11K_HTC_MAX_CTRL_MSG_LEN 256
+#define ATH11K_HTC_WAIT_TIMEOUT_HZ (1 * HZ)
+#define ATH11K_HTC_CONTROL_BUFFER_SIZE (ATH11K_HTC_MAX_CTRL_MSG_LEN + \
+ sizeof(struct ath11k_htc_hdr))
+#define ATH11K_HTC_CONN_SVC_TIMEOUT_HZ (1 * HZ)
+#define ATH11K_HTC_MAX_SERVICE_ALLOC_ENTRIES 8
+
+struct ath11k_htc_ep {
+ struct ath11k_htc *htc;
+ enum ath11k_htc_ep_id eid;
+ enum ath11k_htc_svc_id service_id;
+ struct ath11k_htc_ep_ops ep_ops;
+
+ int max_tx_queue_depth;
+ int max_ep_message_len;
+ u8 ul_pipe_id;
+ u8 dl_pipe_id;
+
+ u8 seq_no; /* for debugging */
+ int tx_credits;
+ bool tx_credit_flow_enabled;
+};
+
+struct ath11k_htc_svc_tx_credits {
+ u16 service_id;
+ u8 credit_allocation;
+};
+
+struct ath11k_htc {
+ struct ath11k_base *ab;
+ struct ath11k_htc_ep endpoint[ATH11K_HTC_EP_COUNT];
+
+ /* protects endpoints */
+ spinlock_t tx_lock;
+
+ u8 control_resp_buffer[ATH11K_HTC_MAX_CTRL_MSG_LEN];
+ int control_resp_len;
+
+ struct completion ctl_resp;
+
+ int total_transmit_credits;
+ struct ath11k_htc_svc_tx_credits
+ service_alloc_table[ATH11K_HTC_MAX_SERVICE_ALLOC_ENTRIES];
+ int target_credit_size;
+ u8 wmi_ep_count;
+};
+
+int ath11k_htc_init(struct ath11k_base *ab);
+int ath11k_htc_wait_target(struct ath11k_htc *htc);
+int ath11k_htc_start(struct ath11k_htc *htc);
+int ath11k_htc_connect_service(struct ath11k_htc *htc,
+ struct ath11k_htc_svc_conn_req *conn_req,
+ struct ath11k_htc_svc_conn_resp *conn_resp);
+int ath11k_htc_send(struct ath11k_htc *htc, enum ath11k_htc_ep_id eid,
+ struct sk_buff *packet);
+struct sk_buff *ath11k_htc_alloc_skb(struct ath11k_base *ab, int size);
+void ath11k_htc_rx_completion_handler(struct ath11k_base *ab,
+ struct sk_buff *skb);
+void ath11k_htc_tx_completion_handler(struct ath11k_base *ab,
+ struct sk_buff *skb);
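+
+/* Usage sketch (illustrative only): the typical bring-up order implied
+ * by the prototypes above, assuming the base struct embeds the HTC
+ * state as ab->htc and with error handling omitted. The WMI control
+ * service and the tx-complete handler are example choices, not
+ * requirements; my_wmi_tx_complete is hypothetical.
+ *
+ *   struct ath11k_htc_svc_conn_req conn_req = { };
+ *   struct ath11k_htc_svc_conn_resp conn_resp = { };
+ *
+ *   ath11k_htc_init(ab);
+ *   ath11k_htc_wait_target(&ab->htc);
+ *   conn_req.service_id = ATH11K_HTC_SVC_ID_WMI_CONTROL;
+ *   conn_req.ep_ops.ep_tx_complete = my_wmi_tx_complete;
+ *   ath11k_htc_connect_service(&ab->htc, &conn_req, &conn_resp);
+ *   ath11k_htc_start(&ab->htc);
+ *   ath11k_htc_send(&ab->htc, conn_resp.eid, skb);
+ */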
+#endif
diff --git a/drivers/net/wireless/ath/ath11k/hw.c b/drivers/net/wireless/ath/ath11k/hw.c
new file mode 100644
index 000000000000..caa6dc12a790
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/hw.c
@@ -0,0 +1,2858 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2020 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/bitfield.h>
+
+#include "core.h"
+#include "ce.h"
+#include "hif.h"
+#include "hal.h"
+#include "hw.h"
+
+/* Map from pdev index to hw mac index */
+static u8 ath11k_hw_ipq8074_mac_from_pdev_id(int pdev_idx)
+{
+ switch (pdev_idx) {
+ case 0:
+ return 0;
+ case 1:
+ return 2;
+ case 2:
+ return 1;
+ default:
+ return ATH11K_INVALID_HW_MAC_ID;
+ }
+}
+
+static u8 ath11k_hw_ipq6018_mac_from_pdev_id(int pdev_idx)
+{
+ return pdev_idx;
+}
+
+static void ath11k_hw_ipq8074_tx_mesh_enable(struct ath11k_base *ab,
+ struct hal_tcl_data_cmd *tcl_cmd)
+{
+ tcl_cmd->info2 |= FIELD_PREP(HAL_IPQ8074_TCL_DATA_CMD_INFO2_MESH_ENABLE,
+ true);
+}
+
+static void ath11k_hw_qcn9074_tx_mesh_enable(struct ath11k_base *ab,
+ struct hal_tcl_data_cmd *tcl_cmd)
+{
+ tcl_cmd->info3 |= FIELD_PREP(HAL_QCN9074_TCL_DATA_CMD_INFO3_MESH_ENABLE,
+ true);
+}
+
+static void ath11k_hw_wcn6855_tx_mesh_enable(struct ath11k_base *ab,
+ struct hal_tcl_data_cmd *tcl_cmd)
+{
+ tcl_cmd->info3 |= FIELD_PREP(HAL_QCN9074_TCL_DATA_CMD_INFO3_MESH_ENABLE,
+ true);
+}
+
+static void ath11k_init_wmi_config_qca6390(struct ath11k_base *ab,
+ struct target_resource_config *config)
+{
+ config->num_vdevs = ab->hw_params.num_vdevs;
+ config->num_peers = 16;
+ config->num_tids = 32;
+
+ config->num_offload_peers = 3;
+ config->num_offload_reorder_buffs = 3;
+ config->num_peer_keys = TARGET_NUM_PEER_KEYS;
+ config->ast_skid_limit = TARGET_AST_SKID_LIMIT;
+ config->tx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
+ config->rx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
+ config->rx_timeout_pri[0] = TARGET_RX_TIMEOUT_LO_PRI;
+ config->rx_timeout_pri[1] = TARGET_RX_TIMEOUT_LO_PRI;
+ config->rx_timeout_pri[2] = TARGET_RX_TIMEOUT_LO_PRI;
+ config->rx_timeout_pri[3] = TARGET_RX_TIMEOUT_HI_PRI;
+ config->rx_decap_mode = TARGET_DECAP_MODE_NATIVE_WIFI;
+ config->scan_max_pending_req = TARGET_SCAN_MAX_PENDING_REQS;
+ config->bmiss_offload_max_vdev = TARGET_BMISS_OFFLOAD_MAX_VDEV;
+ config->roam_offload_max_vdev = TARGET_ROAM_OFFLOAD_MAX_VDEV;
+ config->roam_offload_max_ap_profiles = TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES;
+ config->num_mcast_groups = 0;
+ config->num_mcast_table_elems = 0;
+ config->mcast2ucast_mode = 0;
+ config->tx_dbg_log_size = TARGET_TX_DBG_LOG_SIZE;
+ config->num_wds_entries = 0;
+ config->dma_burst_size = 0;
+ config->rx_skip_defrag_timeout_dup_detection_check = 0;
+ config->vow_config = TARGET_VOW_CONFIG;
+ config->gtk_offload_max_vdev = 2;
+ config->num_msdu_desc = 0x400;
+ config->beacon_tx_offload_max_vdev = 2;
+ config->rx_batchmode = TARGET_RX_BATCHMODE;
+
+ config->peer_map_unmap_v2_support = 0;
+ config->use_pdev_id = 1;
+ config->max_frag_entries = 0xa;
+ config->num_tdls_vdevs = 0x1;
+ config->num_tdls_conn_table_entries = 8;
+ config->num_multicast_filter_entries = 0x20;
+ config->num_wow_filters = 0x16;
+ config->num_keep_alive_pattern = 0;
+ config->flag1 |= WMI_RSRC_CFG_FLAG1_BSS_CHANNEL_INFO_64;
+}
+
+static void ath11k_hw_ipq8074_reo_setup(struct ath11k_base *ab)
+{
+ u32 reo_base = HAL_SEQ_WCSS_UMAC_REO_REG;
+ u32 val;
+ /* Each hash entry uses three bits to map to a particular ring. */
+ u32 ring_hash_map = HAL_HASH_ROUTING_RING_SW1 << 0 |
+ HAL_HASH_ROUTING_RING_SW2 << 3 |
+ HAL_HASH_ROUTING_RING_SW3 << 6 |
+ HAL_HASH_ROUTING_RING_SW4 << 9 |
+ HAL_HASH_ROUTING_RING_SW1 << 12 |
+ HAL_HASH_ROUTING_RING_SW2 << 15 |
+ HAL_HASH_ROUTING_RING_SW3 << 18 |
+ HAL_HASH_ROUTING_RING_SW4 << 21;
+
+ val = ath11k_hif_read32(ab, reo_base + HAL_REO1_GEN_ENABLE);
+
+ val &= ~HAL_REO1_GEN_ENABLE_FRAG_DST_RING;
+ val |= FIELD_PREP(HAL_REO1_GEN_ENABLE_FRAG_DST_RING,
+ HAL_SRNG_RING_ID_REO2SW1) |
+ FIELD_PREP(HAL_REO1_GEN_ENABLE_AGING_LIST_ENABLE, 1) |
+ FIELD_PREP(HAL_REO1_GEN_ENABLE_AGING_FLUSH_ENABLE, 1);
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_GEN_ENABLE, val);
+
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_0(ab),
+ HAL_DEFAULT_REO_TIMEOUT_USEC);
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_1(ab),
+ HAL_DEFAULT_REO_TIMEOUT_USEC);
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_2(ab),
+ HAL_DEFAULT_REO_TIMEOUT_USEC);
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_3(ab),
+ HAL_DEFAULT_REO_TIMEOUT_USEC);
+
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_0,
+ FIELD_PREP(HAL_REO_DEST_RING_CTRL_HASH_RING_MAP,
+ ring_hash_map));
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_1,
+ FIELD_PREP(HAL_REO_DEST_RING_CTRL_HASH_RING_MAP,
+ ring_hash_map));
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_2,
+ FIELD_PREP(HAL_REO_DEST_RING_CTRL_HASH_RING_MAP,
+ ring_hash_map));
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_3,
+ FIELD_PREP(HAL_REO_DEST_RING_CTRL_HASH_RING_MAP,
+ ring_hash_map));
+}
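+
+/* Illustration (derived from the 3-bit packing above): given a REO
+ * destination hash value 0-7, the ring an MSDU lands on can be read
+ * back out of ring_hash_map as
+ *
+ *   ring = (ring_hash_map >> (hash * 3)) & 0x7;
+ *
+ * so hashes 0-3 map to SW1-SW4 and hashes 4-7 wrap around to SW1-SW4
+ * again.
+ */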
+
+static void ath11k_init_wmi_config_ipq8074(struct ath11k_base *ab,
+ struct target_resource_config *config)
+{
+ config->num_vdevs = ab->num_radios * TARGET_NUM_VDEVS(ab);
+
+ if (ab->num_radios == 2) {
+ config->num_peers = TARGET_NUM_PEERS(ab, DBS);
+ config->num_tids = TARGET_NUM_TIDS(ab, DBS);
+ } else if (ab->num_radios == 3) {
+ config->num_peers = TARGET_NUM_PEERS(ab, DBS_SBS);
+ config->num_tids = TARGET_NUM_TIDS(ab, DBS_SBS);
+ } else {
+ /* Control should not reach here */
+ config->num_peers = TARGET_NUM_PEERS(ab, SINGLE);
+ config->num_tids = TARGET_NUM_TIDS(ab, SINGLE);
+ }
+ config->num_offload_peers = TARGET_NUM_OFFLD_PEERS;
+ config->num_offload_reorder_buffs = TARGET_NUM_OFFLD_REORDER_BUFFS;
+ config->num_peer_keys = TARGET_NUM_PEER_KEYS;
+ config->ast_skid_limit = TARGET_AST_SKID_LIMIT;
+ config->tx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
+ config->rx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
+ config->rx_timeout_pri[0] = TARGET_RX_TIMEOUT_LO_PRI;
+ config->rx_timeout_pri[1] = TARGET_RX_TIMEOUT_LO_PRI;
+ config->rx_timeout_pri[2] = TARGET_RX_TIMEOUT_LO_PRI;
+ config->rx_timeout_pri[3] = TARGET_RX_TIMEOUT_HI_PRI;
+
+ if (test_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags))
+ config->rx_decap_mode = TARGET_DECAP_MODE_RAW;
+ else
+ config->rx_decap_mode = TARGET_DECAP_MODE_NATIVE_WIFI;
+
+ config->scan_max_pending_req = TARGET_SCAN_MAX_PENDING_REQS;
+ config->bmiss_offload_max_vdev = TARGET_BMISS_OFFLOAD_MAX_VDEV;
+ config->roam_offload_max_vdev = TARGET_ROAM_OFFLOAD_MAX_VDEV;
+ config->roam_offload_max_ap_profiles = TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES;
+ config->num_mcast_groups = TARGET_NUM_MCAST_GROUPS;
+ config->num_mcast_table_elems = TARGET_NUM_MCAST_TABLE_ELEMS;
+ config->mcast2ucast_mode = TARGET_MCAST2UCAST_MODE;
+ config->tx_dbg_log_size = TARGET_TX_DBG_LOG_SIZE;
+ config->num_wds_entries = TARGET_NUM_WDS_ENTRIES;
+ config->dma_burst_size = TARGET_DMA_BURST_SIZE;
+ config->rx_skip_defrag_timeout_dup_detection_check =
+ TARGET_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
+ config->vow_config = TARGET_VOW_CONFIG;
+ config->gtk_offload_max_vdev = TARGET_GTK_OFFLOAD_MAX_VDEV;
+ config->num_msdu_desc = TARGET_NUM_MSDU_DESC;
+ config->beacon_tx_offload_max_vdev = ab->num_radios * TARGET_MAX_BCN_OFFLD;
+ config->rx_batchmode = TARGET_RX_BATCHMODE;
+ config->peer_map_unmap_v2_support = 1;
+ config->twt_ap_pdev_count = ab->num_radios;
+ config->twt_ap_sta_count = 1000;
+ config->flag1 |= WMI_RSRC_CFG_FLAG1_BSS_CHANNEL_INFO_64;
+ config->flag1 |= WMI_RSRC_CFG_FLAG1_ACK_RSSI;
+ config->ema_max_vap_cnt = ab->num_radios;
+ config->ema_max_profile_period = TARGET_EMA_MAX_PROFILE_PERIOD;
+ config->beacon_tx_offload_max_vdev += config->ema_max_vap_cnt;
+}
+
+static int ath11k_hw_mac_id_to_pdev_id_ipq8074(struct ath11k_hw_params *hw,
+ int mac_id)
+{
+ return mac_id;
+}
+
+static int ath11k_hw_mac_id_to_srng_id_ipq8074(struct ath11k_hw_params *hw,
+ int mac_id)
+{
+ return 0;
+}
+
+static int ath11k_hw_mac_id_to_pdev_id_qca6390(struct ath11k_hw_params *hw,
+ int mac_id)
+{
+ return 0;
+}
+
+static int ath11k_hw_mac_id_to_srng_id_qca6390(struct ath11k_hw_params *hw,
+ int mac_id)
+{
+ return mac_id;
+}
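+
+/* A note on the four helpers above, as implied by their bodies: on
+ * multi-pdev chips such as IPQ8074 each MAC owns its own pdev, so
+ * mac_id passes through as the pdev id and every pdev uses srng index
+ * 0; on single-pdev chips such as QCA6390 everything hangs off pdev 0
+ * and mac_id selects the srng index instead. For example (inferred
+ * from the code, not stated elsewhere in this patch):
+ *
+ *   IPQ8074: mac_id 1 -> pdev 1, srng 0
+ *   QCA6390: mac_id 1 -> pdev 0, srng 1
+ */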
+
+static bool ath11k_hw_ipq8074_rx_desc_get_first_msdu(struct hal_rx_desc *desc)
+{
+ return !!FIELD_GET(RX_MSDU_END_INFO2_FIRST_MSDU,
+ __le32_to_cpu(desc->u.ipq8074.msdu_end.info2));
+}
+
+static bool ath11k_hw_ipq8074_rx_desc_get_last_msdu(struct hal_rx_desc *desc)
+{
+ return !!FIELD_GET(RX_MSDU_END_INFO2_LAST_MSDU,
+ __le32_to_cpu(desc->u.ipq8074.msdu_end.info2));
+}
+
+static u8 ath11k_hw_ipq8074_rx_desc_get_l3_pad_bytes(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_END_INFO2_L3_HDR_PADDING,
+ __le32_to_cpu(desc->u.ipq8074.msdu_end.info2));
+}
+
+static u8 *ath11k_hw_ipq8074_rx_desc_get_hdr_status(struct hal_rx_desc *desc)
+{
+ return desc->u.ipq8074.hdr_status;
+}
+
+static bool ath11k_hw_ipq8074_rx_desc_encrypt_valid(struct hal_rx_desc *desc)
+{
+ return __le32_to_cpu(desc->u.ipq8074.mpdu_start.info1) &
+ RX_MPDU_START_INFO1_ENCRYPT_INFO_VALID;
+}
+
+static u32 ath11k_hw_ipq8074_rx_desc_get_encrypt_type(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MPDU_START_INFO2_ENC_TYPE,
+ __le32_to_cpu(desc->u.ipq8074.mpdu_start.info2));
+}
+
+static u8 ath11k_hw_ipq8074_rx_desc_get_decap_type(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO2_DECAP_FORMAT,
+ __le32_to_cpu(desc->u.ipq8074.msdu_start.info2));
+}
+
+static u8 ath11k_hw_ipq8074_rx_desc_get_mesh_ctl(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO2_MESH_CTRL_PRESENT,
+ __le32_to_cpu(desc->u.ipq8074.msdu_start.info2));
+}
+
+static bool ath11k_hw_ipq8074_rx_desc_get_ldpc_support(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO2_LDPC,
+ __le32_to_cpu(desc->u.ipq8074.msdu_start.info2));
+}
+
+static bool ath11k_hw_ipq8074_rx_desc_get_mpdu_seq_ctl_vld(struct hal_rx_desc *desc)
+{
+ return !!FIELD_GET(RX_MPDU_START_INFO1_MPDU_SEQ_CTRL_VALID,
+ __le32_to_cpu(desc->u.ipq8074.mpdu_start.info1));
+}
+
+static bool ath11k_hw_ipq8074_rx_desc_get_mpdu_fc_valid(struct hal_rx_desc *desc)
+{
+ return !!FIELD_GET(RX_MPDU_START_INFO1_MPDU_FCTRL_VALID,
+ __le32_to_cpu(desc->u.ipq8074.mpdu_start.info1));
+}
+
+static u16 ath11k_hw_ipq8074_rx_desc_get_mpdu_start_seq_no(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MPDU_START_INFO1_MPDU_SEQ_NUM,
+ __le32_to_cpu(desc->u.ipq8074.mpdu_start.info1));
+}
+
+static u16 ath11k_hw_ipq8074_rx_desc_get_msdu_len(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO1_MSDU_LENGTH,
+ __le32_to_cpu(desc->u.ipq8074.msdu_start.info1));
+}
+
+static u8 ath11k_hw_ipq8074_rx_desc_get_msdu_sgi(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO3_SGI,
+ __le32_to_cpu(desc->u.ipq8074.msdu_start.info3));
+}
+
+static u8 ath11k_hw_ipq8074_rx_desc_get_msdu_rate_mcs(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO3_RATE_MCS,
+ __le32_to_cpu(desc->u.ipq8074.msdu_start.info3));
+}
+
+static u8 ath11k_hw_ipq8074_rx_desc_get_msdu_rx_bw(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO3_RECV_BW,
+ __le32_to_cpu(desc->u.ipq8074.msdu_start.info3));
+}
+
+static u32 ath11k_hw_ipq8074_rx_desc_get_msdu_freq(struct hal_rx_desc *desc)
+{
+ return __le32_to_cpu(desc->u.ipq8074.msdu_start.phy_meta_data);
+}
+
+static u8 ath11k_hw_ipq8074_rx_desc_get_msdu_pkt_type(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO3_PKT_TYPE,
+ __le32_to_cpu(desc->u.ipq8074.msdu_start.info3));
+}
+
+static u8 ath11k_hw_ipq8074_rx_desc_get_msdu_nss(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO3_MIMO_SS_BITMAP,
+ __le32_to_cpu(desc->u.ipq8074.msdu_start.info3));
+}
+
+static u8 ath11k_hw_ipq8074_rx_desc_get_mpdu_tid(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MPDU_START_INFO2_TID,
+ __le32_to_cpu(desc->u.ipq8074.mpdu_start.info2));
+}
+
+static u16 ath11k_hw_ipq8074_rx_desc_get_mpdu_peer_id(struct hal_rx_desc *desc)
+{
+ return __le16_to_cpu(desc->u.ipq8074.mpdu_start.sw_peer_id);
+}
+
+static void ath11k_hw_ipq8074_rx_desc_copy_attn_end(struct hal_rx_desc *fdesc,
+ struct hal_rx_desc *ldesc)
+{
+ memcpy((u8 *)&fdesc->u.ipq8074.msdu_end, (u8 *)&ldesc->u.ipq8074.msdu_end,
+ sizeof(struct rx_msdu_end_ipq8074));
+ memcpy((u8 *)&fdesc->u.ipq8074.attention, (u8 *)&ldesc->u.ipq8074.attention,
+ sizeof(struct rx_attention));
+ memcpy((u8 *)&fdesc->u.ipq8074.mpdu_end, (u8 *)&ldesc->u.ipq8074.mpdu_end,
+ sizeof(struct rx_mpdu_end));
+}
+
+static u32 ath11k_hw_ipq8074_rx_desc_get_mpdu_start_tag(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(HAL_TLV_HDR_TAG,
+ __le32_to_cpu(desc->u.ipq8074.mpdu_start_tag));
+}
+
+static u32 ath11k_hw_ipq8074_rx_desc_get_mpdu_ppdu_id(struct hal_rx_desc *desc)
+{
+ return __le16_to_cpu(desc->u.ipq8074.mpdu_start.phy_ppdu_id);
+}
+
+static void ath11k_hw_ipq8074_rx_desc_set_msdu_len(struct hal_rx_desc *desc, u16 len)
+{
+ u32 info = __le32_to_cpu(desc->u.ipq8074.msdu_start.info1);
+
+ info &= ~RX_MSDU_START_INFO1_MSDU_LENGTH;
+ info |= FIELD_PREP(RX_MSDU_START_INFO1_MSDU_LENGTH, len);
+
+ desc->u.ipq8074.msdu_start.info1 = __cpu_to_le32(info);
+}
+
+static bool ath11k_hw_ipq8074_rx_desc_mac_addr2_valid(struct hal_rx_desc *desc)
+{
+ return __le32_to_cpu(desc->u.ipq8074.mpdu_start.info1) &
+ RX_MPDU_START_INFO1_MAC_ADDR2_VALID;
+}
+
+static u8 *ath11k_hw_ipq8074_rx_desc_mpdu_start_addr2(struct hal_rx_desc *desc)
+{
+ return desc->u.ipq8074.mpdu_start.addr2;
+}
+
+static
+struct rx_attention *ath11k_hw_ipq8074_rx_desc_get_attention(struct hal_rx_desc *desc)
+{
+ return &desc->u.ipq8074.attention;
+}
+
+static u8 *ath11k_hw_ipq8074_rx_desc_get_msdu_payload(struct hal_rx_desc *desc)
+{
+ return &desc->u.ipq8074.msdu_payload[0];
+}
+
+static bool ath11k_hw_qcn9074_rx_desc_get_first_msdu(struct hal_rx_desc *desc)
+{
+ return !!FIELD_GET(RX_MSDU_END_INFO4_FIRST_MSDU,
+ __le16_to_cpu(desc->u.qcn9074.msdu_end.info4));
+}
+
+static bool ath11k_hw_qcn9074_rx_desc_get_last_msdu(struct hal_rx_desc *desc)
+{
+ return !!FIELD_GET(RX_MSDU_END_INFO4_LAST_MSDU,
+ __le16_to_cpu(desc->u.qcn9074.msdu_end.info4));
+}
+
+static u8 ath11k_hw_qcn9074_rx_desc_get_l3_pad_bytes(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_END_INFO4_L3_HDR_PADDING,
+ __le16_to_cpu(desc->u.qcn9074.msdu_end.info4));
+}
+
+static u8 *ath11k_hw_qcn9074_rx_desc_get_hdr_status(struct hal_rx_desc *desc)
+{
+ return desc->u.qcn9074.hdr_status;
+}
+
+static bool ath11k_hw_qcn9074_rx_desc_encrypt_valid(struct hal_rx_desc *desc)
+{
+ return __le32_to_cpu(desc->u.qcn9074.mpdu_start.info11) &
+ RX_MPDU_START_INFO11_ENCRYPT_INFO_VALID;
+}
+
+static u32 ath11k_hw_qcn9074_rx_desc_get_encrypt_type(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MPDU_START_INFO9_ENC_TYPE,
+ __le32_to_cpu(desc->u.qcn9074.mpdu_start.info9));
+}
+
+static u8 ath11k_hw_qcn9074_rx_desc_get_decap_type(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO2_DECAP_FORMAT,
+ __le32_to_cpu(desc->u.qcn9074.msdu_start.info2));
+}
+
+static u8 ath11k_hw_qcn9074_rx_desc_get_mesh_ctl(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO2_MESH_CTRL_PRESENT,
+ __le32_to_cpu(desc->u.qcn9074.msdu_start.info2));
+}
+
+static bool ath11k_hw_qcn9074_rx_desc_get_ldpc_support(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO2_LDPC,
+ __le32_to_cpu(desc->u.qcn9074.msdu_start.info2));
+}
+
+static bool ath11k_hw_qcn9074_rx_desc_get_mpdu_seq_ctl_vld(struct hal_rx_desc *desc)
+{
+ return !!FIELD_GET(RX_MPDU_START_INFO11_MPDU_SEQ_CTRL_VALID,
+ __le32_to_cpu(desc->u.qcn9074.mpdu_start.info11));
+}
+
+static bool ath11k_hw_qcn9074_rx_desc_get_mpdu_fc_valid(struct hal_rx_desc *desc)
+{
+ return !!FIELD_GET(RX_MPDU_START_INFO11_MPDU_FCTRL_VALID,
+ __le32_to_cpu(desc->u.qcn9074.mpdu_start.info11));
+}
+
+static u16 ath11k_hw_qcn9074_rx_desc_get_mpdu_start_seq_no(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MPDU_START_INFO11_MPDU_SEQ_NUM,
+ __le32_to_cpu(desc->u.qcn9074.mpdu_start.info11));
+}
+
+static u16 ath11k_hw_qcn9074_rx_desc_get_msdu_len(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO1_MSDU_LENGTH,
+ __le32_to_cpu(desc->u.qcn9074.msdu_start.info1));
+}
+
+static u8 ath11k_hw_qcn9074_rx_desc_get_msdu_sgi(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO3_SGI,
+ __le32_to_cpu(desc->u.qcn9074.msdu_start.info3));
+}
+
+static u8 ath11k_hw_qcn9074_rx_desc_get_msdu_rate_mcs(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO3_RATE_MCS,
+ __le32_to_cpu(desc->u.qcn9074.msdu_start.info3));
+}
+
+static u8 ath11k_hw_qcn9074_rx_desc_get_msdu_rx_bw(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO3_RECV_BW,
+ __le32_to_cpu(desc->u.qcn9074.msdu_start.info3));
+}
+
+static u32 ath11k_hw_qcn9074_rx_desc_get_msdu_freq(struct hal_rx_desc *desc)
+{
+ return __le32_to_cpu(desc->u.qcn9074.msdu_start.phy_meta_data);
+}
+
+static u8 ath11k_hw_qcn9074_rx_desc_get_msdu_pkt_type(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO3_PKT_TYPE,
+ __le32_to_cpu(desc->u.qcn9074.msdu_start.info3));
+}
+
+static u8 ath11k_hw_qcn9074_rx_desc_get_msdu_nss(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO3_MIMO_SS_BITMAP,
+ __le32_to_cpu(desc->u.qcn9074.msdu_start.info3));
+}
+
+static u8 ath11k_hw_qcn9074_rx_desc_get_mpdu_tid(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MPDU_START_INFO9_TID,
+ __le32_to_cpu(desc->u.qcn9074.mpdu_start.info9));
+}
+
+static u16 ath11k_hw_qcn9074_rx_desc_get_mpdu_peer_id(struct hal_rx_desc *desc)
+{
+ return __le16_to_cpu(desc->u.qcn9074.mpdu_start.sw_peer_id);
+}
+
+static void ath11k_hw_qcn9074_rx_desc_copy_attn_end(struct hal_rx_desc *fdesc,
+ struct hal_rx_desc *ldesc)
+{
+ memcpy((u8 *)&fdesc->u.qcn9074.msdu_end, (u8 *)&ldesc->u.qcn9074.msdu_end,
+ sizeof(struct rx_msdu_end_qcn9074));
+ memcpy((u8 *)&fdesc->u.qcn9074.attention, (u8 *)&ldesc->u.qcn9074.attention,
+ sizeof(struct rx_attention));
+ memcpy((u8 *)&fdesc->u.qcn9074.mpdu_end, (u8 *)&ldesc->u.qcn9074.mpdu_end,
+ sizeof(struct rx_mpdu_end));
+}
+
+static u32 ath11k_hw_qcn9074_rx_desc_get_mpdu_start_tag(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(HAL_TLV_HDR_TAG,
+ __le32_to_cpu(desc->u.qcn9074.mpdu_start_tag));
+}
+
+static u32 ath11k_hw_qcn9074_rx_desc_get_mpdu_ppdu_id(struct hal_rx_desc *desc)
+{
+ return __le16_to_cpu(desc->u.qcn9074.mpdu_start.phy_ppdu_id);
+}
+
+static void ath11k_hw_qcn9074_rx_desc_set_msdu_len(struct hal_rx_desc *desc, u16 len)
+{
+ u32 info = __le32_to_cpu(desc->u.qcn9074.msdu_start.info1);
+
+ info &= ~RX_MSDU_START_INFO1_MSDU_LENGTH;
+ info |= FIELD_PREP(RX_MSDU_START_INFO1_MSDU_LENGTH, len);
+
+ desc->u.qcn9074.msdu_start.info1 = __cpu_to_le32(info);
+}
+
+static
+struct rx_attention *ath11k_hw_qcn9074_rx_desc_get_attention(struct hal_rx_desc *desc)
+{
+ return &desc->u.qcn9074.attention;
+}
+
+static u8 *ath11k_hw_qcn9074_rx_desc_get_msdu_payload(struct hal_rx_desc *desc)
+{
+ return &desc->u.qcn9074.msdu_payload[0];
+}
+
+static bool ath11k_hw_qcn9074_rx_desc_mac_addr2_valid(struct hal_rx_desc *desc)
+{
+ return __le32_to_cpu(desc->u.qcn9074.mpdu_start.info11) &
+ RX_MPDU_START_INFO11_MAC_ADDR2_VALID;
+}
+
+static u8 *ath11k_hw_qcn9074_rx_desc_mpdu_start_addr2(struct hal_rx_desc *desc)
+{
+ return desc->u.qcn9074.mpdu_start.addr2;
+}
+
+static bool ath11k_hw_wcn6855_rx_desc_get_first_msdu(struct hal_rx_desc *desc)
+{
+ return !!FIELD_GET(RX_MSDU_END_INFO2_FIRST_MSDU_WCN6855,
+ __le32_to_cpu(desc->u.wcn6855.msdu_end.info2));
+}
+
+static bool ath11k_hw_wcn6855_rx_desc_get_last_msdu(struct hal_rx_desc *desc)
+{
+ return !!FIELD_GET(RX_MSDU_END_INFO2_LAST_MSDU_WCN6855,
+ __le32_to_cpu(desc->u.wcn6855.msdu_end.info2));
+}
+
+static u8 ath11k_hw_wcn6855_rx_desc_get_l3_pad_bytes(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_END_INFO2_L3_HDR_PADDING,
+ __le32_to_cpu(desc->u.wcn6855.msdu_end.info2));
+}
+
+static u8 *ath11k_hw_wcn6855_rx_desc_get_hdr_status(struct hal_rx_desc *desc)
+{
+ return desc->u.wcn6855.hdr_status;
+}
+
+static bool ath11k_hw_wcn6855_rx_desc_encrypt_valid(struct hal_rx_desc *desc)
+{
+ return __le32_to_cpu(desc->u.wcn6855.mpdu_start.info1) &
+ RX_MPDU_START_INFO1_ENCRYPT_INFO_VALID;
+}
+
+static u32 ath11k_hw_wcn6855_rx_desc_get_encrypt_type(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MPDU_START_INFO2_ENC_TYPE,
+ __le32_to_cpu(desc->u.wcn6855.mpdu_start.info2));
+}
+
+static u8 ath11k_hw_wcn6855_rx_desc_get_decap_type(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO2_DECAP_FORMAT,
+ __le32_to_cpu(desc->u.wcn6855.msdu_start.info2));
+}
+
+static u8 ath11k_hw_wcn6855_rx_desc_get_mesh_ctl(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO2_MESH_CTRL_PRESENT,
+ __le32_to_cpu(desc->u.wcn6855.msdu_start.info2));
+}
+
+static bool ath11k_hw_wcn6855_rx_desc_get_mpdu_seq_ctl_vld(struct hal_rx_desc *desc)
+{
+ return !!FIELD_GET(RX_MPDU_START_INFO1_MPDU_SEQ_CTRL_VALID,
+ __le32_to_cpu(desc->u.wcn6855.mpdu_start.info1));
+}
+
+static bool ath11k_hw_wcn6855_rx_desc_get_mpdu_fc_valid(struct hal_rx_desc *desc)
+{
+ return !!FIELD_GET(RX_MPDU_START_INFO1_MPDU_FCTRL_VALID,
+ __le32_to_cpu(desc->u.wcn6855.mpdu_start.info1));
+}
+
+static u16 ath11k_hw_wcn6855_rx_desc_get_mpdu_start_seq_no(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MPDU_START_INFO1_MPDU_SEQ_NUM,
+ __le32_to_cpu(desc->u.wcn6855.mpdu_start.info1));
+}
+
+static u16 ath11k_hw_wcn6855_rx_desc_get_msdu_len(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO1_MSDU_LENGTH,
+ __le32_to_cpu(desc->u.wcn6855.msdu_start.info1));
+}
+
+static u8 ath11k_hw_wcn6855_rx_desc_get_msdu_sgi(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO3_SGI,
+ __le32_to_cpu(desc->u.wcn6855.msdu_start.info3));
+}
+
+static u8 ath11k_hw_wcn6855_rx_desc_get_msdu_rate_mcs(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO3_RATE_MCS,
+ __le32_to_cpu(desc->u.wcn6855.msdu_start.info3));
+}
+
+static u8 ath11k_hw_wcn6855_rx_desc_get_msdu_rx_bw(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO3_RECV_BW,
+ __le32_to_cpu(desc->u.wcn6855.msdu_start.info3));
+}
+
+static u32 ath11k_hw_wcn6855_rx_desc_get_msdu_freq(struct hal_rx_desc *desc)
+{
+ return __le32_to_cpu(desc->u.wcn6855.msdu_start.phy_meta_data);
+}
+
+static u8 ath11k_hw_wcn6855_rx_desc_get_msdu_pkt_type(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO3_PKT_TYPE,
+ __le32_to_cpu(desc->u.wcn6855.msdu_start.info3));
+}
+
+static u8 ath11k_hw_wcn6855_rx_desc_get_msdu_nss(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO3_MIMO_SS_BITMAP,
+ __le32_to_cpu(desc->u.wcn6855.msdu_start.info3));
+}
+
+static u8 ath11k_hw_wcn6855_rx_desc_get_mpdu_tid(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MPDU_START_INFO2_TID_WCN6855,
+ __le32_to_cpu(desc->u.wcn6855.mpdu_start.info2));
+}
+
+static u16 ath11k_hw_wcn6855_rx_desc_get_mpdu_peer_id(struct hal_rx_desc *desc)
+{
+ return __le16_to_cpu(desc->u.wcn6855.mpdu_start.sw_peer_id);
+}
+
+static void ath11k_hw_wcn6855_rx_desc_copy_attn_end(struct hal_rx_desc *fdesc,
+ struct hal_rx_desc *ldesc)
+{
+ memcpy((u8 *)&fdesc->u.wcn6855.msdu_end, (u8 *)&ldesc->u.wcn6855.msdu_end,
+ sizeof(struct rx_msdu_end_wcn6855));
+ memcpy((u8 *)&fdesc->u.wcn6855.attention, (u8 *)&ldesc->u.wcn6855.attention,
+ sizeof(struct rx_attention));
+ memcpy((u8 *)&fdesc->u.wcn6855.mpdu_end, (u8 *)&ldesc->u.wcn6855.mpdu_end,
+ sizeof(struct rx_mpdu_end));
+}
+
+static u32 ath11k_hw_wcn6855_rx_desc_get_mpdu_start_tag(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(HAL_TLV_HDR_TAG,
+ __le32_to_cpu(desc->u.wcn6855.mpdu_start_tag));
+}
+
+static u32 ath11k_hw_wcn6855_rx_desc_get_mpdu_ppdu_id(struct hal_rx_desc *desc)
+{
+ return __le16_to_cpu(desc->u.wcn6855.mpdu_start.phy_ppdu_id);
+}
+
+static void ath11k_hw_wcn6855_rx_desc_set_msdu_len(struct hal_rx_desc *desc, u16 len)
+{
+ u32 info = __le32_to_cpu(desc->u.wcn6855.msdu_start.info1);
+
+ info &= ~RX_MSDU_START_INFO1_MSDU_LENGTH;
+ info |= FIELD_PREP(RX_MSDU_START_INFO1_MSDU_LENGTH, len);
+
+ desc->u.wcn6855.msdu_start.info1 = __cpu_to_le32(info);
+}
+
+static
+struct rx_attention *ath11k_hw_wcn6855_rx_desc_get_attention(struct hal_rx_desc *desc)
+{
+ return &desc->u.wcn6855.attention;
+}
+
+static u8 *ath11k_hw_wcn6855_rx_desc_get_msdu_payload(struct hal_rx_desc *desc)
+{
+ return &desc->u.wcn6855.msdu_payload[0];
+}
+
+static bool ath11k_hw_wcn6855_rx_desc_mac_addr2_valid(struct hal_rx_desc *desc)
+{
+ return __le32_to_cpu(desc->u.wcn6855.mpdu_start.info1) &
+ RX_MPDU_START_INFO1_MAC_ADDR2_VALID;
+}
+
+static u8 *ath11k_hw_wcn6855_rx_desc_mpdu_start_addr2(struct hal_rx_desc *desc)
+{
+ return desc->u.wcn6855.mpdu_start.addr2;
+}
+
+static void ath11k_hw_wcn6855_reo_setup(struct ath11k_base *ab)
+{
+ u32 reo_base = HAL_SEQ_WCSS_UMAC_REO_REG;
+ u32 val;
+ /* Each hash entry uses four bits to map to a particular ring. */
+ u32 ring_hash_map = HAL_HASH_ROUTING_RING_SW1 << 0 |
+ HAL_HASH_ROUTING_RING_SW2 << 4 |
+ HAL_HASH_ROUTING_RING_SW3 << 8 |
+ HAL_HASH_ROUTING_RING_SW4 << 12 |
+ HAL_HASH_ROUTING_RING_SW1 << 16 |
+ HAL_HASH_ROUTING_RING_SW2 << 20 |
+ HAL_HASH_ROUTING_RING_SW3 << 24 |
+ HAL_HASH_ROUTING_RING_SW4 << 28;
+
+ val = ath11k_hif_read32(ab, reo_base + HAL_REO1_GEN_ENABLE);
+ val |= FIELD_PREP(HAL_REO1_GEN_ENABLE_AGING_LIST_ENABLE, 1) |
+ FIELD_PREP(HAL_REO1_GEN_ENABLE_AGING_FLUSH_ENABLE, 1);
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_GEN_ENABLE, val);
+
+ val = ath11k_hif_read32(ab, reo_base + HAL_REO1_MISC_CTL(ab));
+ val &= ~HAL_REO1_MISC_CTL_FRAGMENT_DST_RING;
+ val |= FIELD_PREP(HAL_REO1_MISC_CTL_FRAGMENT_DST_RING, HAL_SRNG_RING_ID_REO2SW1);
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_MISC_CTL(ab), val);
+
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_0(ab),
+ HAL_DEFAULT_REO_TIMEOUT_USEC);
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_1(ab),
+ HAL_DEFAULT_REO_TIMEOUT_USEC);
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_2(ab),
+ HAL_DEFAULT_REO_TIMEOUT_USEC);
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_3(ab),
+ HAL_DEFAULT_REO_TIMEOUT_USEC);
+
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_2,
+ ring_hash_map);
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_3,
+ ring_hash_map);
+}
+
+static void ath11k_hw_ipq5018_reo_setup(struct ath11k_base *ab)
+{
+ u32 reo_base = HAL_SEQ_WCSS_UMAC_REO_REG;
+ u32 val;
+
+ /* Each hash entry uses four bits to map to a particular ring. */
+ u32 ring_hash_map = HAL_HASH_ROUTING_RING_SW1 << 0 |
+ HAL_HASH_ROUTING_RING_SW2 << 4 |
+ HAL_HASH_ROUTING_RING_SW3 << 8 |
+ HAL_HASH_ROUTING_RING_SW4 << 12 |
+ HAL_HASH_ROUTING_RING_SW1 << 16 |
+ HAL_HASH_ROUTING_RING_SW2 << 20 |
+ HAL_HASH_ROUTING_RING_SW3 << 24 |
+ HAL_HASH_ROUTING_RING_SW4 << 28;
+
+ val = ath11k_hif_read32(ab, reo_base + HAL_REO1_GEN_ENABLE);
+
+ val &= ~HAL_REO1_GEN_ENABLE_FRAG_DST_RING;
+ val |= FIELD_PREP(HAL_REO1_GEN_ENABLE_FRAG_DST_RING,
+ HAL_SRNG_RING_ID_REO2SW1) |
+ FIELD_PREP(HAL_REO1_GEN_ENABLE_AGING_LIST_ENABLE, 1) |
+ FIELD_PREP(HAL_REO1_GEN_ENABLE_AGING_FLUSH_ENABLE, 1);
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_GEN_ENABLE, val);
+
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_0(ab),
+ HAL_DEFAULT_REO_TIMEOUT_USEC);
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_1(ab),
+ HAL_DEFAULT_REO_TIMEOUT_USEC);
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_2(ab),
+ HAL_DEFAULT_REO_TIMEOUT_USEC);
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_3(ab),
+ HAL_DEFAULT_REO_TIMEOUT_USEC);
+
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_0,
+ ring_hash_map);
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_1,
+ ring_hash_map);
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_2,
+ ring_hash_map);
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_3,
+ ring_hash_map);
+}
+
+static u16
+ath11k_hw_ipq8074_mpdu_info_get_peerid(struct hal_rx_mpdu_info *mpdu_info)
+{
+ return FIELD_GET(HAL_RX_MPDU_INFO_INFO0_PEERID,
+ __le32_to_cpu(mpdu_info->u.ipq8074.info0));
+}
+
+static u16
+ath11k_hw_qcn9074_mpdu_info_get_peerid(struct hal_rx_mpdu_info *mpdu_info)
+{
+ return FIELD_GET(HAL_RX_MPDU_INFO_INFO0_PEERID,
+ __le32_to_cpu(mpdu_info->u.qcn9074.info0));
+}
+
+static u16
+ath11k_hw_wcn6855_mpdu_info_get_peerid(struct hal_rx_mpdu_info *mpdu_info)
+{
+ return FIELD_GET(HAL_RX_MPDU_INFO_INFO0_PEERID_WCN6855,
+ __le32_to_cpu(mpdu_info->u.wcn6855.info0));
+}
+
+static bool ath11k_hw_wcn6855_rx_desc_get_ldpc_support(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO2_LDPC,
+ __le32_to_cpu(desc->u.wcn6855.msdu_start.info2));
+}
+
+static u32 ath11k_hw_ipq8074_get_tcl_ring_selector(struct sk_buff *skb)
+{
+ /* Let the default ring selection be based on the current processor
+ * number: one of the three TCL rings is selected from the
+ * smp_processor_id(). In case that ring is full/busy, we resort
+ * to the other available rings. If all rings are full, we drop
+ * the packet.
+ *
+ * TODO: Add throttling logic when all rings are full
+ */
+ return smp_processor_id();
+}
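+
+/* Illustration (an assumption about how callers consume the raw
+ * selector; num_tcl_rings is a hypothetical name): the value is
+ * reduced modulo the number of TCL rings,
+ *
+ *   ring_id = ath11k_hw_ipq8074_get_tcl_ring_selector(skb) %
+ *             num_tcl_rings;
+ *
+ * and on a full ring the caller would try (ring_id + 1) %
+ * num_tcl_rings, and so on, dropping the packet only once every ring
+ * has been tried.
+ */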
+
+static u32 ath11k_hw_wcn6750_get_tcl_ring_selector(struct sk_buff *skb)
+{
+ /* Select the TCL ring based on the flow hash of the SKB instead
+ * of the CPU ID. Since applications pumping the traffic can be
+ * scheduled on multiple CPUs, packets of the same flow could
+ * otherwise end up on different TCL rings, which can sometimes
+ * result in out-of-order arrival of the packets at the receiver.
+ */
+ return skb_get_hash(skb);
+}
+
+const struct ath11k_hw_ops ipq8074_ops = {
+ .get_hw_mac_from_pdev_id = ath11k_hw_ipq8074_mac_from_pdev_id,
+ .wmi_init_config = ath11k_init_wmi_config_ipq8074,
+ .mac_id_to_pdev_id = ath11k_hw_mac_id_to_pdev_id_ipq8074,
+ .mac_id_to_srng_id = ath11k_hw_mac_id_to_srng_id_ipq8074,
+ .tx_mesh_enable = ath11k_hw_ipq8074_tx_mesh_enable,
+ .rx_desc_get_first_msdu = ath11k_hw_ipq8074_rx_desc_get_first_msdu,
+ .rx_desc_get_last_msdu = ath11k_hw_ipq8074_rx_desc_get_last_msdu,
+ .rx_desc_get_l3_pad_bytes = ath11k_hw_ipq8074_rx_desc_get_l3_pad_bytes,
+ .rx_desc_get_hdr_status = ath11k_hw_ipq8074_rx_desc_get_hdr_status,
+ .rx_desc_encrypt_valid = ath11k_hw_ipq8074_rx_desc_encrypt_valid,
+ .rx_desc_get_encrypt_type = ath11k_hw_ipq8074_rx_desc_get_encrypt_type,
+ .rx_desc_get_decap_type = ath11k_hw_ipq8074_rx_desc_get_decap_type,
+ .rx_desc_get_mesh_ctl = ath11k_hw_ipq8074_rx_desc_get_mesh_ctl,
+ .rx_desc_get_ldpc_support = ath11k_hw_ipq8074_rx_desc_get_ldpc_support,
+ .rx_desc_get_mpdu_seq_ctl_vld = ath11k_hw_ipq8074_rx_desc_get_mpdu_seq_ctl_vld,
+ .rx_desc_get_mpdu_fc_valid = ath11k_hw_ipq8074_rx_desc_get_mpdu_fc_valid,
+ .rx_desc_get_mpdu_start_seq_no = ath11k_hw_ipq8074_rx_desc_get_mpdu_start_seq_no,
+ .rx_desc_get_msdu_len = ath11k_hw_ipq8074_rx_desc_get_msdu_len,
+ .rx_desc_get_msdu_sgi = ath11k_hw_ipq8074_rx_desc_get_msdu_sgi,
+ .rx_desc_get_msdu_rate_mcs = ath11k_hw_ipq8074_rx_desc_get_msdu_rate_mcs,
+ .rx_desc_get_msdu_rx_bw = ath11k_hw_ipq8074_rx_desc_get_msdu_rx_bw,
+ .rx_desc_get_msdu_freq = ath11k_hw_ipq8074_rx_desc_get_msdu_freq,
+ .rx_desc_get_msdu_pkt_type = ath11k_hw_ipq8074_rx_desc_get_msdu_pkt_type,
+ .rx_desc_get_msdu_nss = ath11k_hw_ipq8074_rx_desc_get_msdu_nss,
+ .rx_desc_get_mpdu_tid = ath11k_hw_ipq8074_rx_desc_get_mpdu_tid,
+ .rx_desc_get_mpdu_peer_id = ath11k_hw_ipq8074_rx_desc_get_mpdu_peer_id,
+ .rx_desc_copy_attn_end_tlv = ath11k_hw_ipq8074_rx_desc_copy_attn_end,
+ .rx_desc_get_mpdu_start_tag = ath11k_hw_ipq8074_rx_desc_get_mpdu_start_tag,
+ .rx_desc_get_mpdu_ppdu_id = ath11k_hw_ipq8074_rx_desc_get_mpdu_ppdu_id,
+ .rx_desc_set_msdu_len = ath11k_hw_ipq8074_rx_desc_set_msdu_len,
+ .rx_desc_get_attention = ath11k_hw_ipq8074_rx_desc_get_attention,
+ .rx_desc_get_msdu_payload = ath11k_hw_ipq8074_rx_desc_get_msdu_payload,
+ .reo_setup = ath11k_hw_ipq8074_reo_setup,
+ .mpdu_info_get_peerid = ath11k_hw_ipq8074_mpdu_info_get_peerid,
+ .rx_desc_mac_addr2_valid = ath11k_hw_ipq8074_rx_desc_mac_addr2_valid,
+ .rx_desc_mpdu_start_addr2 = ath11k_hw_ipq8074_rx_desc_mpdu_start_addr2,
+ .get_ring_selector = ath11k_hw_ipq8074_get_tcl_ring_selector,
+};
+
+const struct ath11k_hw_ops ipq6018_ops = {
+ .get_hw_mac_from_pdev_id = ath11k_hw_ipq6018_mac_from_pdev_id,
+ .wmi_init_config = ath11k_init_wmi_config_ipq8074,
+ .mac_id_to_pdev_id = ath11k_hw_mac_id_to_pdev_id_ipq8074,
+ .mac_id_to_srng_id = ath11k_hw_mac_id_to_srng_id_ipq8074,
+ .tx_mesh_enable = ath11k_hw_ipq8074_tx_mesh_enable,
+ .rx_desc_get_first_msdu = ath11k_hw_ipq8074_rx_desc_get_first_msdu,
+ .rx_desc_get_last_msdu = ath11k_hw_ipq8074_rx_desc_get_last_msdu,
+ .rx_desc_get_l3_pad_bytes = ath11k_hw_ipq8074_rx_desc_get_l3_pad_bytes,
+ .rx_desc_get_hdr_status = ath11k_hw_ipq8074_rx_desc_get_hdr_status,
+ .rx_desc_encrypt_valid = ath11k_hw_ipq8074_rx_desc_encrypt_valid,
+ .rx_desc_get_encrypt_type = ath11k_hw_ipq8074_rx_desc_get_encrypt_type,
+ .rx_desc_get_decap_type = ath11k_hw_ipq8074_rx_desc_get_decap_type,
+ .rx_desc_get_mesh_ctl = ath11k_hw_ipq8074_rx_desc_get_mesh_ctl,
+ .rx_desc_get_ldpc_support = ath11k_hw_ipq8074_rx_desc_get_ldpc_support,
+ .rx_desc_get_mpdu_seq_ctl_vld = ath11k_hw_ipq8074_rx_desc_get_mpdu_seq_ctl_vld,
+ .rx_desc_get_mpdu_fc_valid = ath11k_hw_ipq8074_rx_desc_get_mpdu_fc_valid,
+ .rx_desc_get_mpdu_start_seq_no = ath11k_hw_ipq8074_rx_desc_get_mpdu_start_seq_no,
+ .rx_desc_get_msdu_len = ath11k_hw_ipq8074_rx_desc_get_msdu_len,
+ .rx_desc_get_msdu_sgi = ath11k_hw_ipq8074_rx_desc_get_msdu_sgi,
+ .rx_desc_get_msdu_rate_mcs = ath11k_hw_ipq8074_rx_desc_get_msdu_rate_mcs,
+ .rx_desc_get_msdu_rx_bw = ath11k_hw_ipq8074_rx_desc_get_msdu_rx_bw,
+ .rx_desc_get_msdu_freq = ath11k_hw_ipq8074_rx_desc_get_msdu_freq,
+ .rx_desc_get_msdu_pkt_type = ath11k_hw_ipq8074_rx_desc_get_msdu_pkt_type,
+ .rx_desc_get_msdu_nss = ath11k_hw_ipq8074_rx_desc_get_msdu_nss,
+ .rx_desc_get_mpdu_tid = ath11k_hw_ipq8074_rx_desc_get_mpdu_tid,
+ .rx_desc_get_mpdu_peer_id = ath11k_hw_ipq8074_rx_desc_get_mpdu_peer_id,
+ .rx_desc_copy_attn_end_tlv = ath11k_hw_ipq8074_rx_desc_copy_attn_end,
+ .rx_desc_get_mpdu_start_tag = ath11k_hw_ipq8074_rx_desc_get_mpdu_start_tag,
+ .rx_desc_get_mpdu_ppdu_id = ath11k_hw_ipq8074_rx_desc_get_mpdu_ppdu_id,
+ .rx_desc_set_msdu_len = ath11k_hw_ipq8074_rx_desc_set_msdu_len,
+ .rx_desc_get_attention = ath11k_hw_ipq8074_rx_desc_get_attention,
+ .rx_desc_get_msdu_payload = ath11k_hw_ipq8074_rx_desc_get_msdu_payload,
+ .reo_setup = ath11k_hw_ipq8074_reo_setup,
+ .mpdu_info_get_peerid = ath11k_hw_ipq8074_mpdu_info_get_peerid,
+ .rx_desc_mac_addr2_valid = ath11k_hw_ipq8074_rx_desc_mac_addr2_valid,
+ .rx_desc_mpdu_start_addr2 = ath11k_hw_ipq8074_rx_desc_mpdu_start_addr2,
+ .get_ring_selector = ath11k_hw_ipq8074_get_tcl_ring_selector,
+};
+
+const struct ath11k_hw_ops qca6390_ops = {
+ .get_hw_mac_from_pdev_id = ath11k_hw_ipq8074_mac_from_pdev_id,
+ .wmi_init_config = ath11k_init_wmi_config_qca6390,
+ .mac_id_to_pdev_id = ath11k_hw_mac_id_to_pdev_id_qca6390,
+ .mac_id_to_srng_id = ath11k_hw_mac_id_to_srng_id_qca6390,
+ .tx_mesh_enable = ath11k_hw_ipq8074_tx_mesh_enable,
+ .rx_desc_get_first_msdu = ath11k_hw_ipq8074_rx_desc_get_first_msdu,
+ .rx_desc_get_last_msdu = ath11k_hw_ipq8074_rx_desc_get_last_msdu,
+ .rx_desc_get_l3_pad_bytes = ath11k_hw_ipq8074_rx_desc_get_l3_pad_bytes,
+ .rx_desc_get_hdr_status = ath11k_hw_ipq8074_rx_desc_get_hdr_status,
+ .rx_desc_encrypt_valid = ath11k_hw_ipq8074_rx_desc_encrypt_valid,
+ .rx_desc_get_encrypt_type = ath11k_hw_ipq8074_rx_desc_get_encrypt_type,
+ .rx_desc_get_decap_type = ath11k_hw_ipq8074_rx_desc_get_decap_type,
+ .rx_desc_get_mesh_ctl = ath11k_hw_ipq8074_rx_desc_get_mesh_ctl,
+ .rx_desc_get_ldpc_support = ath11k_hw_ipq8074_rx_desc_get_ldpc_support,
+ .rx_desc_get_mpdu_seq_ctl_vld = ath11k_hw_ipq8074_rx_desc_get_mpdu_seq_ctl_vld,
+ .rx_desc_get_mpdu_fc_valid = ath11k_hw_ipq8074_rx_desc_get_mpdu_fc_valid,
+ .rx_desc_get_mpdu_start_seq_no = ath11k_hw_ipq8074_rx_desc_get_mpdu_start_seq_no,
+ .rx_desc_get_msdu_len = ath11k_hw_ipq8074_rx_desc_get_msdu_len,
+ .rx_desc_get_msdu_sgi = ath11k_hw_ipq8074_rx_desc_get_msdu_sgi,
+ .rx_desc_get_msdu_rate_mcs = ath11k_hw_ipq8074_rx_desc_get_msdu_rate_mcs,
+ .rx_desc_get_msdu_rx_bw = ath11k_hw_ipq8074_rx_desc_get_msdu_rx_bw,
+ .rx_desc_get_msdu_freq = ath11k_hw_ipq8074_rx_desc_get_msdu_freq,
+ .rx_desc_get_msdu_pkt_type = ath11k_hw_ipq8074_rx_desc_get_msdu_pkt_type,
+ .rx_desc_get_msdu_nss = ath11k_hw_ipq8074_rx_desc_get_msdu_nss,
+ .rx_desc_get_mpdu_tid = ath11k_hw_ipq8074_rx_desc_get_mpdu_tid,
+ .rx_desc_get_mpdu_peer_id = ath11k_hw_ipq8074_rx_desc_get_mpdu_peer_id,
+ .rx_desc_copy_attn_end_tlv = ath11k_hw_ipq8074_rx_desc_copy_attn_end,
+ .rx_desc_get_mpdu_start_tag = ath11k_hw_ipq8074_rx_desc_get_mpdu_start_tag,
+ .rx_desc_get_mpdu_ppdu_id = ath11k_hw_ipq8074_rx_desc_get_mpdu_ppdu_id,
+ .rx_desc_set_msdu_len = ath11k_hw_ipq8074_rx_desc_set_msdu_len,
+ .rx_desc_get_attention = ath11k_hw_ipq8074_rx_desc_get_attention,
+ .rx_desc_get_msdu_payload = ath11k_hw_ipq8074_rx_desc_get_msdu_payload,
+ .reo_setup = ath11k_hw_ipq8074_reo_setup,
+ .mpdu_info_get_peerid = ath11k_hw_ipq8074_mpdu_info_get_peerid,
+ .rx_desc_mac_addr2_valid = ath11k_hw_ipq8074_rx_desc_mac_addr2_valid,
+ .rx_desc_mpdu_start_addr2 = ath11k_hw_ipq8074_rx_desc_mpdu_start_addr2,
+ .get_ring_selector = ath11k_hw_ipq8074_get_tcl_ring_selector,
+};
+
+const struct ath11k_hw_ops qcn9074_ops = {
+ .get_hw_mac_from_pdev_id = ath11k_hw_ipq6018_mac_from_pdev_id,
+ .wmi_init_config = ath11k_init_wmi_config_ipq8074,
+ .mac_id_to_pdev_id = ath11k_hw_mac_id_to_pdev_id_ipq8074,
+ .mac_id_to_srng_id = ath11k_hw_mac_id_to_srng_id_ipq8074,
+ .tx_mesh_enable = ath11k_hw_qcn9074_tx_mesh_enable,
+ .rx_desc_get_first_msdu = ath11k_hw_qcn9074_rx_desc_get_first_msdu,
+ .rx_desc_get_last_msdu = ath11k_hw_qcn9074_rx_desc_get_last_msdu,
+ .rx_desc_get_l3_pad_bytes = ath11k_hw_qcn9074_rx_desc_get_l3_pad_bytes,
+ .rx_desc_get_hdr_status = ath11k_hw_qcn9074_rx_desc_get_hdr_status,
+ .rx_desc_encrypt_valid = ath11k_hw_qcn9074_rx_desc_encrypt_valid,
+ .rx_desc_get_encrypt_type = ath11k_hw_qcn9074_rx_desc_get_encrypt_type,
+ .rx_desc_get_decap_type = ath11k_hw_qcn9074_rx_desc_get_decap_type,
+ .rx_desc_get_mesh_ctl = ath11k_hw_qcn9074_rx_desc_get_mesh_ctl,
+ .rx_desc_get_ldpc_support = ath11k_hw_qcn9074_rx_desc_get_ldpc_support,
+ .rx_desc_get_mpdu_seq_ctl_vld = ath11k_hw_qcn9074_rx_desc_get_mpdu_seq_ctl_vld,
+ .rx_desc_get_mpdu_fc_valid = ath11k_hw_qcn9074_rx_desc_get_mpdu_fc_valid,
+ .rx_desc_get_mpdu_start_seq_no = ath11k_hw_qcn9074_rx_desc_get_mpdu_start_seq_no,
+ .rx_desc_get_msdu_len = ath11k_hw_qcn9074_rx_desc_get_msdu_len,
+ .rx_desc_get_msdu_sgi = ath11k_hw_qcn9074_rx_desc_get_msdu_sgi,
+ .rx_desc_get_msdu_rate_mcs = ath11k_hw_qcn9074_rx_desc_get_msdu_rate_mcs,
+ .rx_desc_get_msdu_rx_bw = ath11k_hw_qcn9074_rx_desc_get_msdu_rx_bw,
+ .rx_desc_get_msdu_freq = ath11k_hw_qcn9074_rx_desc_get_msdu_freq,
+ .rx_desc_get_msdu_pkt_type = ath11k_hw_qcn9074_rx_desc_get_msdu_pkt_type,
+ .rx_desc_get_msdu_nss = ath11k_hw_qcn9074_rx_desc_get_msdu_nss,
+ .rx_desc_get_mpdu_tid = ath11k_hw_qcn9074_rx_desc_get_mpdu_tid,
+ .rx_desc_get_mpdu_peer_id = ath11k_hw_qcn9074_rx_desc_get_mpdu_peer_id,
+ .rx_desc_copy_attn_end_tlv = ath11k_hw_qcn9074_rx_desc_copy_attn_end,
+ .rx_desc_get_mpdu_start_tag = ath11k_hw_qcn9074_rx_desc_get_mpdu_start_tag,
+ .rx_desc_get_mpdu_ppdu_id = ath11k_hw_qcn9074_rx_desc_get_mpdu_ppdu_id,
+ .rx_desc_set_msdu_len = ath11k_hw_qcn9074_rx_desc_set_msdu_len,
+ .rx_desc_get_attention = ath11k_hw_qcn9074_rx_desc_get_attention,
+ .rx_desc_get_msdu_payload = ath11k_hw_qcn9074_rx_desc_get_msdu_payload,
+ .reo_setup = ath11k_hw_ipq8074_reo_setup,
+ .mpdu_info_get_peerid = ath11k_hw_qcn9074_mpdu_info_get_peerid,
+ .rx_desc_mac_addr2_valid = ath11k_hw_qcn9074_rx_desc_mac_addr2_valid,
+ .rx_desc_mpdu_start_addr2 = ath11k_hw_qcn9074_rx_desc_mpdu_start_addr2,
+ .get_ring_selector = ath11k_hw_ipq8074_get_tcl_ring_selector,
+};
+
+const struct ath11k_hw_ops wcn6855_ops = {
+ .get_hw_mac_from_pdev_id = ath11k_hw_ipq8074_mac_from_pdev_id,
+ .wmi_init_config = ath11k_init_wmi_config_qca6390,
+ .mac_id_to_pdev_id = ath11k_hw_mac_id_to_pdev_id_qca6390,
+ .mac_id_to_srng_id = ath11k_hw_mac_id_to_srng_id_qca6390,
+ .tx_mesh_enable = ath11k_hw_wcn6855_tx_mesh_enable,
+ .rx_desc_get_first_msdu = ath11k_hw_wcn6855_rx_desc_get_first_msdu,
+ .rx_desc_get_last_msdu = ath11k_hw_wcn6855_rx_desc_get_last_msdu,
+ .rx_desc_get_l3_pad_bytes = ath11k_hw_wcn6855_rx_desc_get_l3_pad_bytes,
+ .rx_desc_get_hdr_status = ath11k_hw_wcn6855_rx_desc_get_hdr_status,
+ .rx_desc_encrypt_valid = ath11k_hw_wcn6855_rx_desc_encrypt_valid,
+ .rx_desc_get_encrypt_type = ath11k_hw_wcn6855_rx_desc_get_encrypt_type,
+ .rx_desc_get_decap_type = ath11k_hw_wcn6855_rx_desc_get_decap_type,
+ .rx_desc_get_mesh_ctl = ath11k_hw_wcn6855_rx_desc_get_mesh_ctl,
+ .rx_desc_get_ldpc_support = ath11k_hw_wcn6855_rx_desc_get_ldpc_support,
+ .rx_desc_get_mpdu_seq_ctl_vld = ath11k_hw_wcn6855_rx_desc_get_mpdu_seq_ctl_vld,
+ .rx_desc_get_mpdu_fc_valid = ath11k_hw_wcn6855_rx_desc_get_mpdu_fc_valid,
+ .rx_desc_get_mpdu_start_seq_no = ath11k_hw_wcn6855_rx_desc_get_mpdu_start_seq_no,
+ .rx_desc_get_msdu_len = ath11k_hw_wcn6855_rx_desc_get_msdu_len,
+ .rx_desc_get_msdu_sgi = ath11k_hw_wcn6855_rx_desc_get_msdu_sgi,
+ .rx_desc_get_msdu_rate_mcs = ath11k_hw_wcn6855_rx_desc_get_msdu_rate_mcs,
+ .rx_desc_get_msdu_rx_bw = ath11k_hw_wcn6855_rx_desc_get_msdu_rx_bw,
+ .rx_desc_get_msdu_freq = ath11k_hw_wcn6855_rx_desc_get_msdu_freq,
+ .rx_desc_get_msdu_pkt_type = ath11k_hw_wcn6855_rx_desc_get_msdu_pkt_type,
+ .rx_desc_get_msdu_nss = ath11k_hw_wcn6855_rx_desc_get_msdu_nss,
+ .rx_desc_get_mpdu_tid = ath11k_hw_wcn6855_rx_desc_get_mpdu_tid,
+ .rx_desc_get_mpdu_peer_id = ath11k_hw_wcn6855_rx_desc_get_mpdu_peer_id,
+ .rx_desc_copy_attn_end_tlv = ath11k_hw_wcn6855_rx_desc_copy_attn_end,
+ .rx_desc_get_mpdu_start_tag = ath11k_hw_wcn6855_rx_desc_get_mpdu_start_tag,
+ .rx_desc_get_mpdu_ppdu_id = ath11k_hw_wcn6855_rx_desc_get_mpdu_ppdu_id,
+ .rx_desc_set_msdu_len = ath11k_hw_wcn6855_rx_desc_set_msdu_len,
+ .rx_desc_get_attention = ath11k_hw_wcn6855_rx_desc_get_attention,
+ .rx_desc_get_msdu_payload = ath11k_hw_wcn6855_rx_desc_get_msdu_payload,
+ .reo_setup = ath11k_hw_wcn6855_reo_setup,
+ .mpdu_info_get_peerid = ath11k_hw_wcn6855_mpdu_info_get_peerid,
+ .rx_desc_mac_addr2_valid = ath11k_hw_wcn6855_rx_desc_mac_addr2_valid,
+ .rx_desc_mpdu_start_addr2 = ath11k_hw_wcn6855_rx_desc_mpdu_start_addr2,
+ .get_ring_selector = ath11k_hw_ipq8074_get_tcl_ring_selector,
+};
+
+const struct ath11k_hw_ops wcn6750_ops = {
+ .get_hw_mac_from_pdev_id = ath11k_hw_ipq8074_mac_from_pdev_id,
+ .wmi_init_config = ath11k_init_wmi_config_qca6390,
+ .mac_id_to_pdev_id = ath11k_hw_mac_id_to_pdev_id_qca6390,
+ .mac_id_to_srng_id = ath11k_hw_mac_id_to_srng_id_qca6390,
+ .tx_mesh_enable = ath11k_hw_qcn9074_tx_mesh_enable,
+ .rx_desc_get_first_msdu = ath11k_hw_qcn9074_rx_desc_get_first_msdu,
+ .rx_desc_get_last_msdu = ath11k_hw_qcn9074_rx_desc_get_last_msdu,
+ .rx_desc_get_l3_pad_bytes = ath11k_hw_qcn9074_rx_desc_get_l3_pad_bytes,
+ .rx_desc_get_hdr_status = ath11k_hw_qcn9074_rx_desc_get_hdr_status,
+ .rx_desc_encrypt_valid = ath11k_hw_qcn9074_rx_desc_encrypt_valid,
+ .rx_desc_get_encrypt_type = ath11k_hw_qcn9074_rx_desc_get_encrypt_type,
+ .rx_desc_get_decap_type = ath11k_hw_qcn9074_rx_desc_get_decap_type,
+ .rx_desc_get_mesh_ctl = ath11k_hw_qcn9074_rx_desc_get_mesh_ctl,
+ .rx_desc_get_ldpc_support = ath11k_hw_qcn9074_rx_desc_get_ldpc_support,
+ .rx_desc_get_mpdu_seq_ctl_vld = ath11k_hw_qcn9074_rx_desc_get_mpdu_seq_ctl_vld,
+ .rx_desc_get_mpdu_fc_valid = ath11k_hw_qcn9074_rx_desc_get_mpdu_fc_valid,
+ .rx_desc_get_mpdu_start_seq_no = ath11k_hw_qcn9074_rx_desc_get_mpdu_start_seq_no,
+ .rx_desc_get_msdu_len = ath11k_hw_qcn9074_rx_desc_get_msdu_len,
+ .rx_desc_get_msdu_sgi = ath11k_hw_qcn9074_rx_desc_get_msdu_sgi,
+ .rx_desc_get_msdu_rate_mcs = ath11k_hw_qcn9074_rx_desc_get_msdu_rate_mcs,
+ .rx_desc_get_msdu_rx_bw = ath11k_hw_qcn9074_rx_desc_get_msdu_rx_bw,
+ .rx_desc_get_msdu_freq = ath11k_hw_qcn9074_rx_desc_get_msdu_freq,
+ .rx_desc_get_msdu_pkt_type = ath11k_hw_qcn9074_rx_desc_get_msdu_pkt_type,
+ .rx_desc_get_msdu_nss = ath11k_hw_qcn9074_rx_desc_get_msdu_nss,
+ .rx_desc_get_mpdu_tid = ath11k_hw_qcn9074_rx_desc_get_mpdu_tid,
+ .rx_desc_get_mpdu_peer_id = ath11k_hw_qcn9074_rx_desc_get_mpdu_peer_id,
+ .rx_desc_copy_attn_end_tlv = ath11k_hw_qcn9074_rx_desc_copy_attn_end,
+ .rx_desc_get_mpdu_start_tag = ath11k_hw_qcn9074_rx_desc_get_mpdu_start_tag,
+ .rx_desc_get_mpdu_ppdu_id = ath11k_hw_qcn9074_rx_desc_get_mpdu_ppdu_id,
+ .rx_desc_set_msdu_len = ath11k_hw_qcn9074_rx_desc_set_msdu_len,
+ .rx_desc_get_attention = ath11k_hw_qcn9074_rx_desc_get_attention,
+ .rx_desc_get_msdu_payload = ath11k_hw_qcn9074_rx_desc_get_msdu_payload,
+ .reo_setup = ath11k_hw_wcn6855_reo_setup,
+ .mpdu_info_get_peerid = ath11k_hw_ipq8074_mpdu_info_get_peerid,
+ .rx_desc_mac_addr2_valid = ath11k_hw_qcn9074_rx_desc_mac_addr2_valid,
+ .rx_desc_mpdu_start_addr2 = ath11k_hw_qcn9074_rx_desc_mpdu_start_addr2,
+ .get_ring_selector = ath11k_hw_wcn6750_get_tcl_ring_selector,
+};
+
+/* The IPQ5018 hw ops are similar to QCN9074's except for the dest ring remap */
+const struct ath11k_hw_ops ipq5018_ops = {
+ .get_hw_mac_from_pdev_id = ath11k_hw_ipq6018_mac_from_pdev_id,
+ .wmi_init_config = ath11k_init_wmi_config_ipq8074,
+ .mac_id_to_pdev_id = ath11k_hw_mac_id_to_pdev_id_ipq8074,
+ .mac_id_to_srng_id = ath11k_hw_mac_id_to_srng_id_ipq8074,
+ .tx_mesh_enable = ath11k_hw_qcn9074_tx_mesh_enable,
+ .rx_desc_get_first_msdu = ath11k_hw_qcn9074_rx_desc_get_first_msdu,
+ .rx_desc_get_last_msdu = ath11k_hw_qcn9074_rx_desc_get_last_msdu,
+ .rx_desc_get_l3_pad_bytes = ath11k_hw_qcn9074_rx_desc_get_l3_pad_bytes,
+ .rx_desc_get_hdr_status = ath11k_hw_qcn9074_rx_desc_get_hdr_status,
+ .rx_desc_encrypt_valid = ath11k_hw_qcn9074_rx_desc_encrypt_valid,
+ .rx_desc_get_encrypt_type = ath11k_hw_qcn9074_rx_desc_get_encrypt_type,
+ .rx_desc_get_decap_type = ath11k_hw_qcn9074_rx_desc_get_decap_type,
+ .rx_desc_get_mesh_ctl = ath11k_hw_qcn9074_rx_desc_get_mesh_ctl,
+ .rx_desc_get_ldpc_support = ath11k_hw_qcn9074_rx_desc_get_ldpc_support,
+ .rx_desc_get_mpdu_seq_ctl_vld = ath11k_hw_qcn9074_rx_desc_get_mpdu_seq_ctl_vld,
+ .rx_desc_get_mpdu_fc_valid = ath11k_hw_qcn9074_rx_desc_get_mpdu_fc_valid,
+ .rx_desc_get_mpdu_start_seq_no = ath11k_hw_qcn9074_rx_desc_get_mpdu_start_seq_no,
+ .rx_desc_get_msdu_len = ath11k_hw_qcn9074_rx_desc_get_msdu_len,
+ .rx_desc_get_msdu_sgi = ath11k_hw_qcn9074_rx_desc_get_msdu_sgi,
+ .rx_desc_get_msdu_rate_mcs = ath11k_hw_qcn9074_rx_desc_get_msdu_rate_mcs,
+ .rx_desc_get_msdu_rx_bw = ath11k_hw_qcn9074_rx_desc_get_msdu_rx_bw,
+ .rx_desc_get_msdu_freq = ath11k_hw_qcn9074_rx_desc_get_msdu_freq,
+ .rx_desc_get_msdu_pkt_type = ath11k_hw_qcn9074_rx_desc_get_msdu_pkt_type,
+ .rx_desc_get_msdu_nss = ath11k_hw_qcn9074_rx_desc_get_msdu_nss,
+ .rx_desc_get_mpdu_tid = ath11k_hw_qcn9074_rx_desc_get_mpdu_tid,
+ .rx_desc_get_mpdu_peer_id = ath11k_hw_qcn9074_rx_desc_get_mpdu_peer_id,
+ .rx_desc_copy_attn_end_tlv = ath11k_hw_qcn9074_rx_desc_copy_attn_end,
+ .rx_desc_get_mpdu_start_tag = ath11k_hw_qcn9074_rx_desc_get_mpdu_start_tag,
+ .rx_desc_get_mpdu_ppdu_id = ath11k_hw_qcn9074_rx_desc_get_mpdu_ppdu_id,
+ .rx_desc_set_msdu_len = ath11k_hw_qcn9074_rx_desc_set_msdu_len,
+ .rx_desc_get_attention = ath11k_hw_qcn9074_rx_desc_get_attention,
+ .rx_desc_get_msdu_payload = ath11k_hw_qcn9074_rx_desc_get_msdu_payload,
+ .reo_setup = ath11k_hw_ipq5018_reo_setup,
+ .mpdu_info_get_peerid = ath11k_hw_ipq8074_mpdu_info_get_peerid,
+ .rx_desc_mac_addr2_valid = ath11k_hw_qcn9074_rx_desc_mac_addr2_valid,
+ .rx_desc_mpdu_start_addr2 = ath11k_hw_qcn9074_rx_desc_mpdu_start_addr2,
+ .get_ring_selector = ath11k_hw_ipq8074_get_tcl_ring_selector,
+};
+
+#define ATH11K_TX_RING_MASK_0 BIT(0)
+#define ATH11K_TX_RING_MASK_1 BIT(1)
+#define ATH11K_TX_RING_MASK_2 BIT(2)
+#define ATH11K_TX_RING_MASK_3 BIT(3)
+#define ATH11K_TX_RING_MASK_4 BIT(4)
+
+#define ATH11K_RX_RING_MASK_0 0x1
+#define ATH11K_RX_RING_MASK_1 0x2
+#define ATH11K_RX_RING_MASK_2 0x4
+#define ATH11K_RX_RING_MASK_3 0x8
+
+#define ATH11K_RX_ERR_RING_MASK_0 0x1
+
+#define ATH11K_RX_WBM_REL_RING_MASK_0 0x1
+
+#define ATH11K_REO_STATUS_RING_MASK_0 0x1
+
+#define ATH11K_RXDMA2HOST_RING_MASK_0 0x1
+#define ATH11K_RXDMA2HOST_RING_MASK_1 0x2
+#define ATH11K_RXDMA2HOST_RING_MASK_2 0x4
+
+#define ATH11K_HOST2RXDMA_RING_MASK_0 0x1
+#define ATH11K_HOST2RXDMA_RING_MASK_1 0x2
+#define ATH11K_HOST2RXDMA_RING_MASK_2 0x4
+
+#define ATH11K_RX_MON_STATUS_RING_MASK_0 0x1
+#define ATH11K_RX_MON_STATUS_RING_MASK_1 0x2
+#define ATH11K_RX_MON_STATUS_RING_MASK_2 0x4
+
+const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_ipq8074 = {
+ .tx = {
+ ATH11K_TX_RING_MASK_0,
+ ATH11K_TX_RING_MASK_1,
+ ATH11K_TX_RING_MASK_2,
+ },
+ .rx_mon_status = {
+ 0, 0, 0, 0,
+ ATH11K_RX_MON_STATUS_RING_MASK_0,
+ ATH11K_RX_MON_STATUS_RING_MASK_1,
+ ATH11K_RX_MON_STATUS_RING_MASK_2,
+ },
+ .rx = {
+ 0, 0, 0, 0, 0, 0, 0,
+ ATH11K_RX_RING_MASK_0,
+ ATH11K_RX_RING_MASK_1,
+ ATH11K_RX_RING_MASK_2,
+ ATH11K_RX_RING_MASK_3,
+ },
+ .rx_err = {
+ ATH11K_RX_ERR_RING_MASK_0,
+ },
+ .rx_wbm_rel = {
+ ATH11K_RX_WBM_REL_RING_MASK_0,
+ },
+ .reo_status = {
+ 0, 0, 0,
+ ATH11K_REO_STATUS_RING_MASK_0,
+ },
+ .rxdma2host = {
+ ATH11K_RXDMA2HOST_RING_MASK_0,
+ ATH11K_RXDMA2HOST_RING_MASK_1,
+ ATH11K_RXDMA2HOST_RING_MASK_2,
+ },
+ .host2rxdma = {
+ ATH11K_HOST2RXDMA_RING_MASK_0,
+ ATH11K_HOST2RXDMA_RING_MASK_1,
+ ATH11K_HOST2RXDMA_RING_MASK_2,
+ },
+};
+
+const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_qca6390 = {
+ .tx = {
+ ATH11K_TX_RING_MASK_0,
+ },
+ .rx_mon_status = {
+ 0, 0, 0, 0,
+ ATH11K_RX_MON_STATUS_RING_MASK_0,
+ ATH11K_RX_MON_STATUS_RING_MASK_1,
+ ATH11K_RX_MON_STATUS_RING_MASK_2,
+ },
+ .rx = {
+ 0, 0, 0, 0, 0, 0, 0,
+ ATH11K_RX_RING_MASK_0,
+ ATH11K_RX_RING_MASK_1,
+ ATH11K_RX_RING_MASK_2,
+ ATH11K_RX_RING_MASK_3,
+ },
+ .rx_err = {
+ ATH11K_RX_ERR_RING_MASK_0,
+ },
+ .rx_wbm_rel = {
+ ATH11K_RX_WBM_REL_RING_MASK_0,
+ },
+ .reo_status = {
+ ATH11K_REO_STATUS_RING_MASK_0,
+ },
+ .rxdma2host = {
+ ATH11K_RXDMA2HOST_RING_MASK_0,
+ ATH11K_RXDMA2HOST_RING_MASK_1,
+ ATH11K_RXDMA2HOST_RING_MASK_2,
+ },
+ .host2rxdma = {
+ },
+};
+
+/* Target firmware's Copy Engine configuration. */
+const struct ce_pipe_config ath11k_target_ce_config_wlan_ipq8074[] = {
+ /* CE0: host->target HTC control and raw streams */
+ {
+ .pipenum = __cpu_to_le32(0),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE1: target->host HTT + HTC control */
+ {
+ .pipenum = __cpu_to_le32(1),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE2: target->host WMI */
+ {
+ .pipenum = __cpu_to_le32(2),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE3: host->target WMI */
+ {
+ .pipenum = __cpu_to_le32(3),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE4: host->target HTT */
+ {
+ .pipenum = __cpu_to_le32(4),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(256),
+ .nbytes_max = __cpu_to_le32(256),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE5: target->host Pktlog */
+ {
+ .pipenum = __cpu_to_le32(5),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(0),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE6: Reserved for target autonomous hif_memcpy */
+ {
+ .pipenum = __cpu_to_le32(6),
+ .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(65535),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE7 used only by Host */
+ {
+ .pipenum = __cpu_to_le32(7),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE8 target->host used only by IPA */
+ {
+ .pipenum = __cpu_to_le32(8),
+ .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(65535),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE9 host->target HTT */
+ {
+ .pipenum = __cpu_to_le32(9),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE10 target->host HTT */
+ {
+ .pipenum = __cpu_to_le32(10),
+ .pipedir = __cpu_to_le32(PIPEDIR_INOUT_H2H),
+ .nentries = __cpu_to_le32(0),
+ .nbytes_max = __cpu_to_le32(0),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE11 Not used */
+};
+
+/* Map from service/endpoint to Copy Engine.
+ * This table is derived from the Target CE configuration table above.
+ * It is passed to the Target at startup for use by firmware.
+ */
+const struct service_to_pipe ath11k_target_service_to_ce_map_wlan_ipq8074[] = {
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VO),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(3),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VO),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(2),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BK),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(3),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BK),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(2),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BE),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(3),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BE),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(2),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VI),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(3),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VI),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(2),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(3),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(2),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC1),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(7),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC1),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(2),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC2),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(9),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC2),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(2),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_RSVD_CTRL),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(0),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_RSVD_CTRL),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(1),
+ },
+ { /* not used */
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_TEST_RAW_STREAMS),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(0),
+ },
+ { /* not used */
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_TEST_RAW_STREAMS),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(1),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_HTT_DATA_MSG),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(4),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_HTT_DATA_MSG),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(1),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_PKT_LOG),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(5),
+ },
+
+ /* (Additions here) */
+
+ { /* terminator entry */ }
+};
+
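The all-zero row that closes each of these maps doubles as a sentinel: rather than carrying an explicit length, a consumer can walk the entries until it reaches the zeroed terminator. A minimal sketch of that lookup pattern, assuming simplified host-endian fields and made-up values (this is not the driver's actual lookup path):

#include <stdio.h>

enum pipedir { PIPEDIR_NONE = 0, PIPEDIR_IN, PIPEDIR_OUT, PIPEDIR_INOUT };

struct svc_map {
	unsigned int service_id;
	unsigned int pipedir;
	unsigned int pipenum;
};

/* Illustrative table; an all-zero row terminates it, like the maps above. */
static const struct svc_map map[] = {
	{ 1, PIPEDIR_OUT, 3 },
	{ 1, PIPEDIR_IN, 2 },
	{ 0, 0, 0 }, /* terminator */
};

static int svc_to_pipe(unsigned int svc, unsigned int dir, unsigned int *pipe)
{
	const struct svc_map *e;

	/* Scan until the all-zero sentinel instead of tracking a length. */
	for (e = map; e->service_id || e->pipedir || e->pipenum; e++) {
		if (e->service_id == svc && e->pipedir == dir) {
			*pipe = e->pipenum;
			return 0;
		}
	}
	return -1; /* no mapping for this service/direction */
}

int main(void)
{
	unsigned int pipe;

	if (svc_to_pipe(1, PIPEDIR_OUT, &pipe) == 0)
		printf("UL pipe: %u\n", pipe); /* prints "UL pipe: 3" */
	return 0;
}
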
+const struct service_to_pipe ath11k_target_service_to_ce_map_wlan_ipq6018[] = {
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VO),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(3),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VO),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(2),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BK),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(3),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BK),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(2),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BE),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(3),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BE),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(2),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VI),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(3),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VI),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(2),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(3),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(2),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC1),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(7),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC1),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(2),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_RSVD_CTRL),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(0),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_RSVD_CTRL),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(1),
+ },
+ { /* not used */
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_TEST_RAW_STREAMS),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(0),
+ },
+ { /* not used */
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_TEST_RAW_STREAMS),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(1),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_HTT_DATA_MSG),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(4),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_HTT_DATA_MSG),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(1),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_PKT_LOG),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(5),
+ },
+
+ /* (Additions here) */
+
+ { /* terminator entry */ }
+};
+
+/* Target firmware's Copy Engine configuration. */
+const struct ce_pipe_config ath11k_target_ce_config_wlan_qca6390[] = {
+ /* CE0: host->target HTC control and raw streams */
+ {
+ .pipenum = __cpu_to_le32(0),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE1: target->host HTT + HTC control */
+ {
+ .pipenum = __cpu_to_le32(1),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE2: target->host WMI */
+ {
+ .pipenum = __cpu_to_le32(2),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE3: host->target WMI */
+ {
+ .pipenum = __cpu_to_le32(3),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE4: host->target HTT */
+ {
+ .pipenum = __cpu_to_le32(4),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(256),
+ .nbytes_max = __cpu_to_le32(256),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE5: target->host Pktlog */
+ {
+ .pipenum = __cpu_to_le32(5),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE6: Reserved for target autonomous hif_memcpy */
+ {
+ .pipenum = __cpu_to_le32(6),
+ .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(16384),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE7: used only by the host */
+ {
+ .pipenum = __cpu_to_le32(7),
+ .pipedir = __cpu_to_le32(PIPEDIR_INOUT_H2H),
+ .nentries = __cpu_to_le32(0),
+ .nbytes_max = __cpu_to_le32(0),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE8: target->host, used only by IPA */
+ {
+ .pipenum = __cpu_to_le32(8),
+ .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(16384),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+ /* CE9, CE10 and CE11 are used by the MHI driver */
+};
+
+/* Map from service/endpoint to Copy Engine.
+ * This table is derived from the CE_PCI table above.
+ * It is passed to the Target at startup for use by firmware.
+ */
+const struct service_to_pipe ath11k_target_service_to_ce_map_wlan_qca6390[] = {
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VO),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VO),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BK),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BK),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BE),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BE),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VI),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VI),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_RSVD_CTRL),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(0),
+ },
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_RSVD_CTRL),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_HTT_DATA_MSG),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(4),
+ },
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_HTT_DATA_MSG),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(1),
+ },
+
+ /* (Additions here) */
+
+ { /* must be last */
+ __cpu_to_le32(0),
+ __cpu_to_le32(0),
+ __cpu_to_le32(0),
+ },
+};
+
+/* Target firmware's Copy Engine configuration. */
+const struct ce_pipe_config ath11k_target_ce_config_wlan_qcn9074[] = {
+ /* CE0: host->target HTC control and raw streams */
+ {
+ .pipenum = __cpu_to_le32(0),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE1: target->host HTT + HTC control */
+ {
+ .pipenum = __cpu_to_le32(1),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE2: target->host WMI */
+ {
+ .pipenum = __cpu_to_le32(2),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE3: host->target WMI */
+ {
+ .pipenum = __cpu_to_le32(3),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE4: host->target HTT */
+ {
+ .pipenum = __cpu_to_le32(4),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(256),
+ .nbytes_max = __cpu_to_le32(256),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE5: target->host Pktlog */
+ {
+ .pipenum = __cpu_to_le32(5),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE6: Reserved for target autonomous hif_memcpy */
+ {
+ .pipenum = __cpu_to_le32(6),
+ .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(16384),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE7: used only by the host */
+ {
+ .pipenum = __cpu_to_le32(7),
+ .pipedir = __cpu_to_le32(PIPEDIR_INOUT_H2H),
+ .nentries = __cpu_to_le32(0),
+ .nbytes_max = __cpu_to_le32(0),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE8: target->host, used only by IPA */
+ {
+ .pipenum = __cpu_to_le32(8),
+ .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(16384),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+ /* CE9, CE10 and CE11 are used by the MHI driver */
+};
+
+/* Map from service/endpoint to Copy Engine.
+ * This table is derived from the CE_PCI table above.
+ * It is passed to the Target at startup for use by firmware.
+ */
+const struct service_to_pipe ath11k_target_service_to_ce_map_wlan_qcn9074[] = {
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VO),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VO),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BK),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BK),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BE),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BE),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VI),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VI),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_RSVD_CTRL),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(0),
+ },
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_RSVD_CTRL),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(1),
+ },
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_TEST_RAW_STREAMS),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(0),
+ },
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_TEST_RAW_STREAMS),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(1),
+ },
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_HTT_DATA_MSG),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(4),
+ },
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_HTT_DATA_MSG),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(1),
+ },
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_PKT_LOG),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(5),
+ },
+
+ /* (Additions here) */
+
+ { /* must be last */
+ __cpu_to_le32(0),
+ __cpu_to_le32(0),
+ __cpu_to_le32(0),
+ },
+};
+
+const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_qcn9074 = {
+ .tx = {
+ ATH11K_TX_RING_MASK_0,
+ ATH11K_TX_RING_MASK_1,
+ ATH11K_TX_RING_MASK_2,
+ },
+ .rx_mon_status = {
+ 0, 0, 0,
+ ATH11K_RX_MON_STATUS_RING_MASK_0,
+ ATH11K_RX_MON_STATUS_RING_MASK_1,
+ ATH11K_RX_MON_STATUS_RING_MASK_2,
+ },
+ .rx = {
+ 0, 0, 0, 0,
+ ATH11K_RX_RING_MASK_0,
+ ATH11K_RX_RING_MASK_1,
+ ATH11K_RX_RING_MASK_2,
+ ATH11K_RX_RING_MASK_3,
+ },
+ .rx_err = {
+ 0, 0, 0,
+ ATH11K_RX_ERR_RING_MASK_0,
+ },
+ .rx_wbm_rel = {
+ 0, 0, 0,
+ ATH11K_RX_WBM_REL_RING_MASK_0,
+ },
+ .reo_status = {
+ 0, 0, 0,
+ ATH11K_REO_STATUS_RING_MASK_0,
+ },
+ .rxdma2host = {
+ 0, 0, 0,
+ ATH11K_RXDMA2HOST_RING_MASK_0,
+ },
+ .host2rxdma = {
+ 0, 0, 0,
+ ATH11K_HOST2RXDMA_RING_MASK_0,
+ },
+};
+
+const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_wcn6750 = {
+ .tx = {
+ ATH11K_TX_RING_MASK_0,
+ 0,
+ ATH11K_TX_RING_MASK_2,
+ 0,
+ ATH11K_TX_RING_MASK_4,
+ },
+ .rx_mon_status = {
+ 0, 0, 0, 0, 0, 0,
+ ATH11K_RX_MON_STATUS_RING_MASK_0,
+ },
+ .rx = {
+ 0, 0, 0, 0, 0, 0, 0,
+ ATH11K_RX_RING_MASK_0,
+ ATH11K_RX_RING_MASK_1,
+ ATH11K_RX_RING_MASK_2,
+ ATH11K_RX_RING_MASK_3,
+ },
+ .rx_err = {
+ 0, ATH11K_RX_ERR_RING_MASK_0,
+ },
+ .rx_wbm_rel = {
+ 0, ATH11K_RX_WBM_REL_RING_MASK_0,
+ },
+ .reo_status = {
+ 0, ATH11K_REO_STATUS_RING_MASK_0,
+ },
+ .rxdma2host = {
+ ATH11K_RXDMA2HOST_RING_MASK_0,
+ ATH11K_RXDMA2HOST_RING_MASK_1,
+ ATH11K_RXDMA2HOST_RING_MASK_2,
+ },
+ .host2rxdma = {
+ },
+};
+
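In these masks, the array index is the external interrupt group and the value is a bitmap of rings serviced by that group, so the runs of leading zeros steer a ring's handling onto a later group. A small self-contained sketch of consuming such a table, with an illustrative group count and mask values (not the driver's real interrupt code):

#include <stdio.h>

#define NUM_IRQ_GROUPS 11 /* mirrors ATH11K_EXT_IRQ_GRP_NUM_MAX */

struct ring_mask {
	unsigned char tx[NUM_IRQ_GROUPS];
	unsigned char rx[NUM_IRQ_GROUPS];
};

static const struct ring_mask mask = {
	.tx = { 0x1, 0x2, 0x4 },            /* groups 0-2 service TX rings 0-2 */
	.rx = { [4] = 0x1, 0x2, 0x4, 0x8 }, /* groups 4-7 service RX rings 0-3 */
};

int main(void)
{
	int grp;

	/* Each group handles exactly the rings whose bits are set at its index. */
	for (grp = 0; grp < NUM_IRQ_GROUPS; grp++)
		if (mask.tx[grp] || mask.rx[grp])
			printf("group %d: tx 0x%x rx 0x%x\n",
			       grp, mask.tx[grp], mask.rx[grp]);
	return 0;
}
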
+/* Target firmware's Copy Engine configuration for IPQ5018 */
+const struct ce_pipe_config ath11k_target_ce_config_wlan_ipq5018[] = {
+ /* CE0: host->target HTC control and raw streams */
+ {
+ .pipenum = __cpu_to_le32(0),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE1: target->host HTT + HTC control */
+ {
+ .pipenum = __cpu_to_le32(1),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE2: target->host WMI */
+ {
+ .pipenum = __cpu_to_le32(2),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE3: host->target WMI */
+ {
+ .pipenum = __cpu_to_le32(3),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE4: host->target HTT */
+ {
+ .pipenum = __cpu_to_le32(4),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(256),
+ .nbytes_max = __cpu_to_le32(256),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE5: target->host Pktlog */
+ {
+ .pipenum = __cpu_to_le32(5),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE6: Reserved for target autonomous hif_memcpy */
+ {
+ .pipenum = __cpu_to_le32(6),
+ .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(16384),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE7: used only by the host */
+ {
+ .pipenum = __cpu_to_le32(7),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(0x2000),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE8: target->host, used only by IPA */
+ {
+ .pipenum = __cpu_to_le32(8),
+ .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(16384),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+};
+
+/* Map from service/endpoint to Copy Engine for IPQ5018.
+ * This table is derived from the CE table above.
+ * It is passed to the Target at startup for use by firmware.
+ */
+const struct service_to_pipe ath11k_target_service_to_ce_map_wlan_ipq5018[] = {
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VO),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(3),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VO),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(2),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BK),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(3),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BK),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(2),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BE),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(3),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BE),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(2),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VI),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(3),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VI),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(2),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(3),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(2),
+ },
+
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_RSVD_CTRL),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(0),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_RSVD_CTRL),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(1),
+ },
+
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_TEST_RAW_STREAMS),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(0),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_TEST_RAW_STREAMS),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(1),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_HTT_DATA_MSG),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(4),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_HTT_DATA_MSG),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(1),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_PKT_LOG),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(5),
+ },
+
+ /* (Additions here) */
+
+ { /* terminator entry */ }
+};
+
+const struct ce_ie_addr ath11k_ce_ie_addr_ipq8074 = {
+ .ie1_reg_addr = CE_HOST_IE_ADDRESS,
+ .ie2_reg_addr = CE_HOST_IE_2_ADDRESS,
+ .ie3_reg_addr = CE_HOST_IE_3_ADDRESS,
+};
+
+const struct ce_ie_addr ath11k_ce_ie_addr_ipq5018 = {
+ .ie1_reg_addr = CE_HOST_IPQ5018_IE_ADDRESS - HAL_IPQ5018_CE_WFSS_REG_BASE,
+ .ie2_reg_addr = CE_HOST_IPQ5018_IE_2_ADDRESS - HAL_IPQ5018_CE_WFSS_REG_BASE,
+ .ie3_reg_addr = CE_HOST_IPQ5018_IE_3_ADDRESS - HAL_IPQ5018_CE_WFSS_REG_BASE,
+};
+
+const struct ce_remap ath11k_ce_remap_ipq5018 = {
+ .base = HAL_IPQ5018_CE_WFSS_REG_BASE,
+ .size = HAL_IPQ5018_CE_SIZE,
+};
+
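The IPQ5018 entries above store interrupt-enable addresses relative to HAL_IPQ5018_CE_WFSS_REG_BASE: per the ce_remap entry, the CE register block sits in a separately remapped window, and the driver adds the window base back at access time. A tiny sketch of that arithmetic with illustrative constants (the real values come from the HAL headers referenced above):

#include <stdio.h>
#include <stdint.h>

#define WFSS_REG_BASE 0x08400000u /* stand-in for HAL_IPQ5018_CE_WFSS_REG_BASE */
#define IE_ADDRESS    0x08400c30u /* hypothetical absolute register address */

int main(void)
{
	/* The table stores the offset within the remapped window... */
	uint32_t rel = IE_ADDRESS - WFSS_REG_BASE;

	/* ...and an access would then be readl(remapped_window_base + rel). */
	printf("window-relative offset: 0x%x\n", rel);
	return 0;
}
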
+const struct ath11k_hw_regs ipq8074_regs = {
+ /* SW2TCL(x) R0 ring configuration address */
+ .hal_tcl1_ring_base_lsb = 0x00000510,
+ .hal_tcl1_ring_base_msb = 0x00000514,
+ .hal_tcl1_ring_id = 0x00000518,
+ .hal_tcl1_ring_misc = 0x00000520,
+ .hal_tcl1_ring_tp_addr_lsb = 0x0000052c,
+ .hal_tcl1_ring_tp_addr_msb = 0x00000530,
+ .hal_tcl1_ring_consumer_int_setup_ix0 = 0x00000540,
+ .hal_tcl1_ring_consumer_int_setup_ix1 = 0x00000544,
+ .hal_tcl1_ring_msi1_base_lsb = 0x00000558,
+ .hal_tcl1_ring_msi1_base_msb = 0x0000055c,
+ .hal_tcl1_ring_msi1_data = 0x00000560,
+ .hal_tcl2_ring_base_lsb = 0x00000568,
+ .hal_tcl_ring_base_lsb = 0x00000618,
+
+ /* TCL STATUS ring address */
+ .hal_tcl_status_ring_base_lsb = 0x00000720,
+
+ /* REO2SW(x) R0 ring configuration address */
+ .hal_reo1_ring_base_lsb = 0x0000029c,
+ .hal_reo1_ring_base_msb = 0x000002a0,
+ .hal_reo1_ring_id = 0x000002a4,
+ .hal_reo1_ring_misc = 0x000002ac,
+ .hal_reo1_ring_hp_addr_lsb = 0x000002b0,
+ .hal_reo1_ring_hp_addr_msb = 0x000002b4,
+ .hal_reo1_ring_producer_int_setup = 0x000002c0,
+ .hal_reo1_ring_msi1_base_lsb = 0x000002e4,
+ .hal_reo1_ring_msi1_base_msb = 0x000002e8,
+ .hal_reo1_ring_msi1_data = 0x000002ec,
+ .hal_reo2_ring_base_lsb = 0x000002f4,
+ .hal_reo1_aging_thresh_ix_0 = 0x00000564,
+ .hal_reo1_aging_thresh_ix_1 = 0x00000568,
+ .hal_reo1_aging_thresh_ix_2 = 0x0000056c,
+ .hal_reo1_aging_thresh_ix_3 = 0x00000570,
+
+ /* REO2SW(x) R2 ring pointers (head/tail) address */
+ .hal_reo1_ring_hp = 0x00003038,
+ .hal_reo1_ring_tp = 0x0000303c,
+ .hal_reo2_ring_hp = 0x00003040,
+
+ /* REO2TCL R0 ring configuration address */
+ .hal_reo_tcl_ring_base_lsb = 0x000003fc,
+ .hal_reo_tcl_ring_hp = 0x00003058,
+
+ /* REO CMD ring address */
+ .hal_reo_cmd_ring_base_lsb = 0x00000194,
+ .hal_reo_cmd_ring_hp = 0x00003020,
+
+ /* REO status address */
+ .hal_reo_status_ring_base_lsb = 0x00000504,
+ .hal_reo_status_hp = 0x00003070,
+
+ /* SW2REO ring address */
+ .hal_sw2reo_ring_base_lsb = 0x000001ec,
+ .hal_sw2reo_ring_hp = 0x00003028,
+
+ /* WCSS relative address */
+ .hal_seq_wcss_umac_ce0_src_reg = 0x00a00000,
+ .hal_seq_wcss_umac_ce0_dst_reg = 0x00a01000,
+ .hal_seq_wcss_umac_ce1_src_reg = 0x00a02000,
+ .hal_seq_wcss_umac_ce1_dst_reg = 0x00a03000,
+
+ /* WBM Idle address */
+ .hal_wbm_idle_link_ring_base_lsb = 0x00000860,
+ .hal_wbm_idle_link_ring_misc = 0x00000870,
+
+ /* SW2WBM release address */
+ .hal_wbm_release_ring_base_lsb = 0x000001d8,
+
+ /* WBM2SW release address */
+ .hal_wbm0_release_ring_base_lsb = 0x00000910,
+ .hal_wbm1_release_ring_base_lsb = 0x00000968,
+
+ /* PCIe base address */
+ .pcie_qserdes_sysclk_en_sel = 0x0,
+ .pcie_pcs_osc_dtct_config_base = 0x0,
+
+ /* Shadow register area */
+ .hal_shadow_base_addr = 0x0,
+
+ /* REO misc control register, not used in IPQ8074 */
+ .hal_reo1_misc_ctl = 0x0,
+};
+
+const struct ath11k_hw_regs qca6390_regs = {
+ /* SW2TCL(x) R0 ring configuration address */
+ .hal_tcl1_ring_base_lsb = 0x00000684,
+ .hal_tcl1_ring_base_msb = 0x00000688,
+ .hal_tcl1_ring_id = 0x0000068c,
+ .hal_tcl1_ring_misc = 0x00000694,
+ .hal_tcl1_ring_tp_addr_lsb = 0x000006a0,
+ .hal_tcl1_ring_tp_addr_msb = 0x000006a4,
+ .hal_tcl1_ring_consumer_int_setup_ix0 = 0x000006b4,
+ .hal_tcl1_ring_consumer_int_setup_ix1 = 0x000006b8,
+ .hal_tcl1_ring_msi1_base_lsb = 0x000006cc,
+ .hal_tcl1_ring_msi1_base_msb = 0x000006d0,
+ .hal_tcl1_ring_msi1_data = 0x000006d4,
+ .hal_tcl2_ring_base_lsb = 0x000006dc,
+ .hal_tcl_ring_base_lsb = 0x0000078c,
+
+ /* TCL STATUS ring address */
+ .hal_tcl_status_ring_base_lsb = 0x00000894,
+
+ /* REO2SW(x) R0 ring configuration address */
+ .hal_reo1_ring_base_lsb = 0x00000244,
+ .hal_reo1_ring_base_msb = 0x00000248,
+ .hal_reo1_ring_id = 0x0000024c,
+ .hal_reo1_ring_misc = 0x00000254,
+ .hal_reo1_ring_hp_addr_lsb = 0x00000258,
+ .hal_reo1_ring_hp_addr_msb = 0x0000025c,
+ .hal_reo1_ring_producer_int_setup = 0x00000268,
+ .hal_reo1_ring_msi1_base_lsb = 0x0000028c,
+ .hal_reo1_ring_msi1_base_msb = 0x00000290,
+ .hal_reo1_ring_msi1_data = 0x00000294,
+ .hal_reo2_ring_base_lsb = 0x0000029c,
+ .hal_reo1_aging_thresh_ix_0 = 0x0000050c,
+ .hal_reo1_aging_thresh_ix_1 = 0x00000510,
+ .hal_reo1_aging_thresh_ix_2 = 0x00000514,
+ .hal_reo1_aging_thresh_ix_3 = 0x00000518,
+
+ /* REO2SW(x) R2 ring pointers (head/tail) address */
+ .hal_reo1_ring_hp = 0x00003030,
+ .hal_reo1_ring_tp = 0x00003034,
+ .hal_reo2_ring_hp = 0x00003038,
+
+ /* REO2TCL R0 ring configuration address */
+ .hal_reo_tcl_ring_base_lsb = 0x000003a4,
+ .hal_reo_tcl_ring_hp = 0x00003050,
+
+ /* REO CMD ring address */
+ .hal_reo_cmd_ring_base_lsb = 0x00000194,
+ .hal_reo_cmd_ring_hp = 0x00003020,
+
+ /* REO status address */
+ .hal_reo_status_ring_base_lsb = 0x000004ac,
+ .hal_reo_status_hp = 0x00003068,
+
+ /* SW2REO ring address */
+ .hal_sw2reo_ring_base_lsb = 0x000001ec,
+ .hal_sw2reo_ring_hp = 0x00003028,
+
+ /* WCSS relative address */
+ .hal_seq_wcss_umac_ce0_src_reg = 0x00a00000,
+ .hal_seq_wcss_umac_ce0_dst_reg = 0x00a01000,
+ .hal_seq_wcss_umac_ce1_src_reg = 0x00a02000,
+ .hal_seq_wcss_umac_ce1_dst_reg = 0x00a03000,
+
+ /* WBM Idle address */
+ .hal_wbm_idle_link_ring_base_lsb = 0x00000860,
+ .hal_wbm_idle_link_ring_misc = 0x00000870,
+
+ /* SW2WBM release address */
+ .hal_wbm_release_ring_base_lsb = 0x000001d8,
+
+ /* WBM2SW release address */
+ .hal_wbm0_release_ring_base_lsb = 0x00000910,
+ .hal_wbm1_release_ring_base_lsb = 0x00000968,
+
+ /* PCIe base address */
+ .pcie_qserdes_sysclk_en_sel = 0x01e0c0ac,
+ .pcie_pcs_osc_dtct_config_base = 0x01e0c628,
+
+ /* Shadow register area */
+ .hal_shadow_base_addr = 0x000008fc,
+
+ /* REO misc control register, not used in QCA6390 */
+ .hal_reo1_misc_ctl = 0x0,
+};
+
+const struct ath11k_hw_regs qcn9074_regs = {
+ /* SW2TCL(x) R0 ring configuration address */
+ .hal_tcl1_ring_base_lsb = 0x000004f0,
+ .hal_tcl1_ring_base_msb = 0x000004f4,
+ .hal_tcl1_ring_id = 0x000004f8,
+ .hal_tcl1_ring_misc = 0x00000500,
+ .hal_tcl1_ring_tp_addr_lsb = 0x0000050c,
+ .hal_tcl1_ring_tp_addr_msb = 0x00000510,
+ .hal_tcl1_ring_consumer_int_setup_ix0 = 0x00000520,
+ .hal_tcl1_ring_consumer_int_setup_ix1 = 0x00000524,
+ .hal_tcl1_ring_msi1_base_lsb = 0x00000538,
+ .hal_tcl1_ring_msi1_base_msb = 0x0000053c,
+ .hal_tcl1_ring_msi1_data = 0x00000540,
+ .hal_tcl2_ring_base_lsb = 0x00000548,
+ .hal_tcl_ring_base_lsb = 0x000005f8,
+
+ /* TCL STATUS ring address */
+ .hal_tcl_status_ring_base_lsb = 0x00000700,
+
+ /* REO2SW(x) R0 ring configuration address */
+ .hal_reo1_ring_base_lsb = 0x0000029c,
+ .hal_reo1_ring_base_msb = 0x000002a0,
+ .hal_reo1_ring_id = 0x000002a4,
+ .hal_reo1_ring_misc = 0x000002ac,
+ .hal_reo1_ring_hp_addr_lsb = 0x000002b0,
+ .hal_reo1_ring_hp_addr_msb = 0x000002b4,
+ .hal_reo1_ring_producer_int_setup = 0x000002c0,
+ .hal_reo1_ring_msi1_base_lsb = 0x000002e4,
+ .hal_reo1_ring_msi1_base_msb = 0x000002e8,
+ .hal_reo1_ring_msi1_data = 0x000002ec,
+ .hal_reo2_ring_base_lsb = 0x000002f4,
+ .hal_reo1_aging_thresh_ix_0 = 0x00000564,
+ .hal_reo1_aging_thresh_ix_1 = 0x00000568,
+ .hal_reo1_aging_thresh_ix_2 = 0x0000056c,
+ .hal_reo1_aging_thresh_ix_3 = 0x00000570,
+
+ /* REO2SW(x) R2 ring pointers (head/tail) address */
+ .hal_reo1_ring_hp = 0x00003038,
+ .hal_reo1_ring_tp = 0x0000303c,
+ .hal_reo2_ring_hp = 0x00003040,
+
+ /* REO2TCL R0 ring configuration address */
+ .hal_reo_tcl_ring_base_lsb = 0x000003fc,
+ .hal_reo_tcl_ring_hp = 0x00003058,
+
+ /* REO CMD ring address */
+ .hal_reo_cmd_ring_base_lsb = 0x00000194,
+ .hal_reo_cmd_ring_hp = 0x00003020,
+
+ /* REO status address */
+ .hal_reo_status_ring_base_lsb = 0x00000504,
+ .hal_reo_status_hp = 0x00003070,
+
+ /* SW2REO ring address */
+ .hal_sw2reo_ring_base_lsb = 0x000001ec,
+ .hal_sw2reo_ring_hp = 0x00003028,
+
+ /* WCSS relative address */
+ .hal_seq_wcss_umac_ce0_src_reg = 0x01b80000,
+ .hal_seq_wcss_umac_ce0_dst_reg = 0x01b81000,
+ .hal_seq_wcss_umac_ce1_src_reg = 0x01b82000,
+ .hal_seq_wcss_umac_ce1_dst_reg = 0x01b83000,
+
+ /* WBM Idle address */
+ .hal_wbm_idle_link_ring_base_lsb = 0x00000874,
+ .hal_wbm_idle_link_ring_misc = 0x00000884,
+
+ /* SW2WBM release address */
+ .hal_wbm_release_ring_base_lsb = 0x000001ec,
+
+ /* WBM2SW release address */
+ .hal_wbm0_release_ring_base_lsb = 0x00000924,
+ .hal_wbm1_release_ring_base_lsb = 0x0000097c,
+
+ /* PCIe base address */
+ .pcie_qserdes_sysclk_en_sel = 0x01e0e0a8,
+ .pcie_pcs_osc_dtct_config_base = 0x01e0f45c,
+
+ /* Shadow register area */
+ .hal_shadow_base_addr = 0x0,
+
+ /* REO misc control register, not used in QCN9074 */
+ .hal_reo1_misc_ctl = 0x0,
+};
+
+const struct ath11k_hw_regs wcn6855_regs = {
+ /* SW2TCL(x) R0 ring configuration address */
+ .hal_tcl1_ring_base_lsb = 0x00000690,
+ .hal_tcl1_ring_base_msb = 0x00000694,
+ .hal_tcl1_ring_id = 0x00000698,
+ .hal_tcl1_ring_misc = 0x000006a0,
+ .hal_tcl1_ring_tp_addr_lsb = 0x000006ac,
+ .hal_tcl1_ring_tp_addr_msb = 0x000006b0,
+ .hal_tcl1_ring_consumer_int_setup_ix0 = 0x000006c0,
+ .hal_tcl1_ring_consumer_int_setup_ix1 = 0x000006c4,
+ .hal_tcl1_ring_msi1_base_lsb = 0x000006d8,
+ .hal_tcl1_ring_msi1_base_msb = 0x000006dc,
+ .hal_tcl1_ring_msi1_data = 0x000006e0,
+ .hal_tcl2_ring_base_lsb = 0x000006e8,
+ .hal_tcl_ring_base_lsb = 0x00000798,
+
+ /* TCL STATUS ring address */
+ .hal_tcl_status_ring_base_lsb = 0x000008a0,
+
+ /* REO2SW(x) R0 ring configuration address */
+ .hal_reo1_ring_base_lsb = 0x00000244,
+ .hal_reo1_ring_base_msb = 0x00000248,
+ .hal_reo1_ring_id = 0x0000024c,
+ .hal_reo1_ring_misc = 0x00000254,
+ .hal_reo1_ring_hp_addr_lsb = 0x00000258,
+ .hal_reo1_ring_hp_addr_msb = 0x0000025c,
+ .hal_reo1_ring_producer_int_setup = 0x00000268,
+ .hal_reo1_ring_msi1_base_lsb = 0x0000028c,
+ .hal_reo1_ring_msi1_base_msb = 0x00000290,
+ .hal_reo1_ring_msi1_data = 0x00000294,
+ .hal_reo2_ring_base_lsb = 0x0000029c,
+ .hal_reo1_aging_thresh_ix_0 = 0x000005bc,
+ .hal_reo1_aging_thresh_ix_1 = 0x000005c0,
+ .hal_reo1_aging_thresh_ix_2 = 0x000005c4,
+ .hal_reo1_aging_thresh_ix_3 = 0x000005c8,
+
+ /* REO2SW(x) R2 ring pointers (head/tail) address */
+ .hal_reo1_ring_hp = 0x00003030,
+ .hal_reo1_ring_tp = 0x00003034,
+ .hal_reo2_ring_hp = 0x00003038,
+
+ /* REO2TCL R0 ring configuration address */
+ .hal_reo_tcl_ring_base_lsb = 0x00000454,
+ .hal_reo_tcl_ring_hp = 0x00003060,
+
+ /* REO CMD ring address */
+ .hal_reo_cmd_ring_base_lsb = 0x00000194,
+ .hal_reo_cmd_ring_hp = 0x00003020,
+
+ /* REO status address */
+ .hal_reo_status_ring_base_lsb = 0x0000055c,
+ .hal_reo_status_hp = 0x00003078,
+
+ /* SW2REO ring address */
+ .hal_sw2reo_ring_base_lsb = 0x000001ec,
+ .hal_sw2reo_ring_hp = 0x00003028,
+
+ /* WCSS relative address */
+ .hal_seq_wcss_umac_ce0_src_reg = 0x01b80000,
+ .hal_seq_wcss_umac_ce0_dst_reg = 0x01b81000,
+ .hal_seq_wcss_umac_ce1_src_reg = 0x01b82000,
+ .hal_seq_wcss_umac_ce1_dst_reg = 0x01b83000,
+
+ /* WBM Idle address */
+ .hal_wbm_idle_link_ring_base_lsb = 0x00000870,
+ .hal_wbm_idle_link_ring_misc = 0x00000880,
+
+ /* SW2WBM release address */
+ .hal_wbm_release_ring_base_lsb = 0x000001e8,
+
+ /* WBM2SW release address */
+ .hal_wbm0_release_ring_base_lsb = 0x00000920,
+ .hal_wbm1_release_ring_base_lsb = 0x00000978,
+
+ /* PCIe base address */
+ .pcie_qserdes_sysclk_en_sel = 0x01e0c0ac,
+ .pcie_pcs_osc_dtct_config_base = 0x01e0c628,
+
+ /* Shadow register area */
+ .hal_shadow_base_addr = 0x000008fc,
+
+ /* REO misc control register, used for fragment
+ * destination ring config in WCN6855.
+ */
+ .hal_reo1_misc_ctl = 0x00000630,
+};
+
+const struct ath11k_hw_regs wcn6750_regs = {
+ /* SW2TCL(x) R0 ring configuration address */
+ .hal_tcl1_ring_base_lsb = 0x00000694,
+ .hal_tcl1_ring_base_msb = 0x00000698,
+ .hal_tcl1_ring_id = 0x0000069c,
+ .hal_tcl1_ring_misc = 0x000006a4,
+ .hal_tcl1_ring_tp_addr_lsb = 0x000006b0,
+ .hal_tcl1_ring_tp_addr_msb = 0x000006b4,
+ .hal_tcl1_ring_consumer_int_setup_ix0 = 0x000006c4,
+ .hal_tcl1_ring_consumer_int_setup_ix1 = 0x000006c8,
+ .hal_tcl1_ring_msi1_base_lsb = 0x000006dc,
+ .hal_tcl1_ring_msi1_base_msb = 0x000006e0,
+ .hal_tcl1_ring_msi1_data = 0x000006e4,
+ .hal_tcl2_ring_base_lsb = 0x000006ec,
+ .hal_tcl_ring_base_lsb = 0x0000079c,
+
+ /* TCL STATUS ring address */
+ .hal_tcl_status_ring_base_lsb = 0x000008a4,
+
+ /* REO2SW(x) R0 ring configuration address */
+ .hal_reo1_ring_base_lsb = 0x000001ec,
+ .hal_reo1_ring_base_msb = 0x000001f0,
+ .hal_reo1_ring_id = 0x000001f4,
+ .hal_reo1_ring_misc = 0x000001fc,
+ .hal_reo1_ring_hp_addr_lsb = 0x00000200,
+ .hal_reo1_ring_hp_addr_msb = 0x00000204,
+ .hal_reo1_ring_producer_int_setup = 0x00000210,
+ .hal_reo1_ring_msi1_base_lsb = 0x00000234,
+ .hal_reo1_ring_msi1_base_msb = 0x00000238,
+ .hal_reo1_ring_msi1_data = 0x0000023c,
+ .hal_reo2_ring_base_lsb = 0x00000244,
+ .hal_reo1_aging_thresh_ix_0 = 0x00000564,
+ .hal_reo1_aging_thresh_ix_1 = 0x00000568,
+ .hal_reo1_aging_thresh_ix_2 = 0x0000056c,
+ .hal_reo1_aging_thresh_ix_3 = 0x00000570,
+
+ /* REO2SW(x) R2 ring pointers (head/tail) address */
+ .hal_reo1_ring_hp = 0x00003028,
+ .hal_reo1_ring_tp = 0x0000302c,
+ .hal_reo2_ring_hp = 0x00003030,
+
+ /* REO2TCL R0 ring configuration address */
+ .hal_reo_tcl_ring_base_lsb = 0x000003fc,
+ .hal_reo_tcl_ring_hp = 0x00003058,
+
+ /* REO CMD ring address */
+ .hal_reo_cmd_ring_base_lsb = 0x000000e4,
+ .hal_reo_cmd_ring_hp = 0x00003010,
+
+ /* REO status address */
+ .hal_reo_status_ring_base_lsb = 0x00000504,
+ .hal_reo_status_hp = 0x00003070,
+
+ /* SW2REO ring address */
+ .hal_sw2reo_ring_base_lsb = 0x0000013c,
+ .hal_sw2reo_ring_hp = 0x00003018,
+
+ /* WCSS relative address */
+ .hal_seq_wcss_umac_ce0_src_reg = 0x01b80000,
+ .hal_seq_wcss_umac_ce0_dst_reg = 0x01b81000,
+ .hal_seq_wcss_umac_ce1_src_reg = 0x01b82000,
+ .hal_seq_wcss_umac_ce1_dst_reg = 0x01b83000,
+
+ /* WBM Idle address */
+ .hal_wbm_idle_link_ring_base_lsb = 0x00000874,
+ .hal_wbm_idle_link_ring_misc = 0x00000884,
+
+ /* SW2WBM release address */
+ .hal_wbm_release_ring_base_lsb = 0x000001ec,
+
+ /* WBM2SW release address */
+ .hal_wbm0_release_ring_base_lsb = 0x00000924,
+ .hal_wbm1_release_ring_base_lsb = 0x0000097c,
+
+ /* PCIe base address */
+ .pcie_qserdes_sysclk_en_sel = 0x0,
+ .pcie_pcs_osc_dtct_config_base = 0x0,
+
+ /* Shadow register area */
+ .hal_shadow_base_addr = 0x00000504,
+
+ /* REO misc control register, used for fragment
+ * destination ring config in WCN6750.
+ */
+ .hal_reo1_misc_ctl = 0x000005d8,
+};
+
+static const struct ath11k_hw_tcl2wbm_rbm_map ath11k_hw_tcl2wbm_rbm_map_ipq8074[] = {
+ {
+ .tcl_ring_num = 0,
+ .wbm_ring_num = 0,
+ .rbm_id = HAL_RX_BUF_RBM_SW0_BM,
+ },
+ {
+ .tcl_ring_num = 1,
+ .wbm_ring_num = 1,
+ .rbm_id = HAL_RX_BUF_RBM_SW1_BM,
+ },
+ {
+ .tcl_ring_num = 2,
+ .wbm_ring_num = 2,
+ .rbm_id = HAL_RX_BUF_RBM_SW2_BM,
+ },
+};
+
+static const struct ath11k_hw_tcl2wbm_rbm_map ath11k_hw_tcl2wbm_rbm_map_wcn6750[] = {
+ {
+ .tcl_ring_num = 0,
+ .wbm_ring_num = 0,
+ .rbm_id = HAL_RX_BUF_RBM_SW0_BM,
+ },
+ {
+ .tcl_ring_num = 1,
+ .wbm_ring_num = 4,
+ .rbm_id = HAL_RX_BUF_RBM_SW4_BM,
+ },
+ {
+ .tcl_ring_num = 2,
+ .wbm_ring_num = 2,
+ .rbm_id = HAL_RX_BUF_RBM_SW2_BM,
+ },
+};
+
+const struct ath11k_hw_regs ipq5018_regs = {
+ /* SW2TCL(x) R0 ring configuration address */
+ .hal_tcl1_ring_base_lsb = 0x00000694,
+ .hal_tcl1_ring_base_msb = 0x00000698,
+ .hal_tcl1_ring_id = 0x0000069c,
+ .hal_tcl1_ring_misc = 0x000006a4,
+ .hal_tcl1_ring_tp_addr_lsb = 0x000006b0,
+ .hal_tcl1_ring_tp_addr_msb = 0x000006b4,
+ .hal_tcl1_ring_consumer_int_setup_ix0 = 0x000006c4,
+ .hal_tcl1_ring_consumer_int_setup_ix1 = 0x000006c8,
+ .hal_tcl1_ring_msi1_base_lsb = 0x000006dc,
+ .hal_tcl1_ring_msi1_base_msb = 0x000006e0,
+ .hal_tcl1_ring_msi1_data = 0x000006e4,
+ .hal_tcl2_ring_base_lsb = 0x000006ec,
+ .hal_tcl_ring_base_lsb = 0x0000079c,
+
+ /* TCL STATUS ring address */
+ .hal_tcl_status_ring_base_lsb = 0x000008a4,
+
+ /* REO2SW(x) R0 ring configuration address */
+ .hal_reo1_ring_base_lsb = 0x000001ec,
+ .hal_reo1_ring_base_msb = 0x000001f0,
+ .hal_reo1_ring_id = 0x000001f4,
+ .hal_reo1_ring_misc = 0x000001fc,
+ .hal_reo1_ring_hp_addr_lsb = 0x00000200,
+ .hal_reo1_ring_hp_addr_msb = 0x00000204,
+ .hal_reo1_ring_producer_int_setup = 0x00000210,
+ .hal_reo1_ring_msi1_base_lsb = 0x00000234,
+ .hal_reo1_ring_msi1_base_msb = 0x00000238,
+ .hal_reo1_ring_msi1_data = 0x0000023c,
+ .hal_reo2_ring_base_lsb = 0x00000244,
+ .hal_reo1_aging_thresh_ix_0 = 0x00000564,
+ .hal_reo1_aging_thresh_ix_1 = 0x00000568,
+ .hal_reo1_aging_thresh_ix_2 = 0x0000056c,
+ .hal_reo1_aging_thresh_ix_3 = 0x00000570,
+
+ /* REO2SW(x) R2 ring pointers (head/tail) address */
+ .hal_reo1_ring_hp = 0x00003028,
+ .hal_reo1_ring_tp = 0x0000302c,
+ .hal_reo2_ring_hp = 0x00003030,
+
+ /* REO2TCL R0 ring configuration address */
+ .hal_reo_tcl_ring_base_lsb = 0x000003fc,
+ .hal_reo_tcl_ring_hp = 0x00003058,
+
+ /* SW2REO ring address */
+ .hal_sw2reo_ring_base_lsb = 0x0000013c,
+ .hal_sw2reo_ring_hp = 0x00003018,
+
+ /* REO CMD ring address */
+ .hal_reo_cmd_ring_base_lsb = 0x000000e4,
+ .hal_reo_cmd_ring_hp = 0x00003010,
+
+ /* REO status address */
+ .hal_reo_status_ring_base_lsb = 0x00000504,
+ .hal_reo_status_hp = 0x00003070,
+
+ /* WCSS relative address */
+ .hal_seq_wcss_umac_ce0_src_reg = 0x08400000
+ - HAL_IPQ5018_CE_WFSS_REG_BASE,
+ .hal_seq_wcss_umac_ce0_dst_reg = 0x08401000
+ - HAL_IPQ5018_CE_WFSS_REG_BASE,
+ .hal_seq_wcss_umac_ce1_src_reg = 0x08402000
+ - HAL_IPQ5018_CE_WFSS_REG_BASE,
+ .hal_seq_wcss_umac_ce1_dst_reg = 0x08403000
+ - HAL_IPQ5018_CE_WFSS_REG_BASE,
+
+ /* WBM Idle address */
+ .hal_wbm_idle_link_ring_base_lsb = 0x00000874,
+ .hal_wbm_idle_link_ring_misc = 0x00000884,
+
+ /* SW2WBM release address */
+ .hal_wbm_release_ring_base_lsb = 0x000001ec,
+
+ /* WBM2SW release address */
+ .hal_wbm0_release_ring_base_lsb = 0x00000924,
+ .hal_wbm1_release_ring_base_lsb = 0x0000097c,
+};
+
+const struct ath11k_hw_hal_params ath11k_hw_hal_params_ipq8074 = {
+ .rx_buf_rbm = HAL_RX_BUF_RBM_SW3_BM,
+ .tcl2wbm_rbm_map = ath11k_hw_tcl2wbm_rbm_map_ipq8074,
+};
+
+const struct ath11k_hw_hal_params ath11k_hw_hal_params_qca6390 = {
+ .rx_buf_rbm = HAL_RX_BUF_RBM_SW1_BM,
+ .tcl2wbm_rbm_map = ath11k_hw_tcl2wbm_rbm_map_ipq8074,
+};
+
+const struct ath11k_hw_hal_params ath11k_hw_hal_params_wcn6750 = {
+ .rx_buf_rbm = HAL_RX_BUF_RBM_SW1_BM,
+ .tcl2wbm_rbm_map = ath11k_hw_tcl2wbm_rbm_map_wcn6750,
+};
+
+static const struct cfg80211_sar_freq_ranges ath11k_hw_sar_freq_ranges_wcn6855[] = {
+ { .start_freq = 2402, .end_freq = 2482 }, /* 2G ch1~ch13 */
+ { .start_freq = 5150, .end_freq = 5250 }, /* 5G UNII-1 ch32~ch48 */
+ { .start_freq = 5250, .end_freq = 5725 }, /* 5G UNII-2 ch50~ch144 */
+ { .start_freq = 5725, .end_freq = 5810 }, /* 5G UNII-3 ch149~ch161 */
+ { .start_freq = 5815, .end_freq = 5895 }, /* 5G UNII-4 ch163~ch177 */
+ { .start_freq = 5925, .end_freq = 6165 }, /* 6G UNII-5 ch1, ch2~ch41 */
+ { .start_freq = 6165, .end_freq = 6425 }, /* 6G UNII-5 ch45~ch93 */
+ { .start_freq = 6425, .end_freq = 6525 }, /* 6G UNII-6 ch97~ch113 */
+ { .start_freq = 6525, .end_freq = 6705 }, /* 6G UNII-7 ch117~ch149 */
+ { .start_freq = 6705, .end_freq = 6875 }, /* 6G UNII-7 ch153~ch185 */
+ { .start_freq = 6875, .end_freq = 7125 }, /* 6G UNII-8 ch189~ch233 */
+};
+
+const struct cfg80211_sar_capa ath11k_hw_sar_capa_wcn6855 = {
+ .type = NL80211_SAR_TYPE_POWER,
+ .num_freq_ranges = ARRAY_SIZE(ath11k_hw_sar_freq_ranges_wcn6855),
+ .freq_ranges = ath11k_hw_sar_freq_ranges_wcn6855,
+};
diff --git a/drivers/net/wireless/ath/ath11k/hw.h b/drivers/net/wireless/ath/ath11k/hw.h
new file mode 100644
index 000000000000..52d9f4c13b13
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/hw.h
@@ -0,0 +1,439 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef ATH11K_HW_H
+#define ATH11K_HW_H
+
+#include "hal.h"
+#include "wmi.h"
+
+/* Target configuration defines */
+
+/* Num VDEVS per radio */
+#define TARGET_NUM_VDEVS(ab) ((ab)->hw_params.num_vdevs)
+
+#define TARGET_NUM_PEERS_PDEV(ab) ((ab)->hw_params.num_peers + TARGET_NUM_VDEVS(ab))
+
+/* Num of peers for Single Radio mode */
+#define TARGET_NUM_PEERS_SINGLE(ab) (TARGET_NUM_PEERS_PDEV(ab))
+
+/* Num of peers for DBS */
+#define TARGET_NUM_PEERS_DBS(ab) (2 * TARGET_NUM_PEERS_PDEV(ab))
+
+/* Num of peers for DBS_SBS */
+#define TARGET_NUM_PEERS_DBS_SBS(ab) (3 * TARGET_NUM_PEERS_PDEV(ab))
+
+/* Max num of stations (per radio) */
+#define TARGET_NUM_STATIONS(ab) ((ab)->hw_params.num_peers)
+
+#define TARGET_NUM_PEERS(ab, x) TARGET_NUM_PEERS_##x(ab)
+#define TARGET_NUM_PEER_KEYS 2
+#define TARGET_NUM_TIDS(ab, x) (2 * TARGET_NUM_PEERS(ab, x) + \
+ 4 * TARGET_NUM_VDEVS(ab) + 8)
+
+#define TARGET_AST_SKID_LIMIT 16
+#define TARGET_NUM_OFFLD_PEERS 4
+#define TARGET_NUM_OFFLD_REORDER_BUFFS 4
+
+#define TARGET_TX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2) | BIT(4))
+#define TARGET_RX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2) | BIT(4))
+#define TARGET_RX_TIMEOUT_LO_PRI 100
+#define TARGET_RX_TIMEOUT_HI_PRI 40
+
+#define TARGET_DECAP_MODE_RAW 0
+#define TARGET_DECAP_MODE_NATIVE_WIFI 1
+#define TARGET_DECAP_MODE_ETH 2
+
+#define TARGET_SCAN_MAX_PENDING_REQS 4
+#define TARGET_BMISS_OFFLOAD_MAX_VDEV 3
+#define TARGET_ROAM_OFFLOAD_MAX_VDEV 3
+#define TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES 8
+#define TARGET_GTK_OFFLOAD_MAX_VDEV 3
+#define TARGET_NUM_MCAST_GROUPS 12
+#define TARGET_NUM_MCAST_TABLE_ELEMS 64
+#define TARGET_MCAST2UCAST_MODE 2
+#define TARGET_TX_DBG_LOG_SIZE 1024
+#define TARGET_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK 1
+#define TARGET_VOW_CONFIG 0
+#define TARGET_NUM_MSDU_DESC (2500)
+#define TARGET_MAX_FRAG_ENTRIES 6
+#define TARGET_MAX_BCN_OFFLD 16
+#define TARGET_NUM_WDS_ENTRIES 32
+#define TARGET_DMA_BURST_SIZE 1
+#define TARGET_RX_BATCHMODE 1
+#define TARGET_EMA_MAX_PROFILE_PERIOD 8
+
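To make the sizing arithmetic concrete: with hypothetical hw_params of 16 vdevs and 512 peers, TARGET_NUM_PEERS_PDEV is 528, TARGET_NUM_PEERS_DBS is 1056, and TARGET_NUM_TIDS for DBS is 2 * 1056 + 4 * 16 + 8 = 2184. A standalone sketch with the macros inlined as plain arithmetic, since no ath11k_base exists outside the driver:

#include <stdio.h>

int main(void)
{
	/* Hypothetical hw_params values; the real ones come from ath11k_base. */
	int num_vdevs = 16, num_peers = 512;

	int peers_pdev = num_peers + num_vdevs;           /* TARGET_NUM_PEERS_PDEV: 528 */
	int peers_dbs = 2 * peers_pdev;                   /* TARGET_NUM_PEERS_DBS: 1056 */
	int tids_dbs = 2 * peers_dbs + 4 * num_vdevs + 8; /* TARGET_NUM_TIDS(DBS): 2184 */

	printf("pdev peers %d, DBS peers %d, DBS tids %d\n",
	       peers_pdev, peers_dbs, tids_dbs);
	return 0;
}
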
+#define ATH11K_HW_MAX_QUEUES 4
+#define ATH11K_QUEUE_LEN 4096
+
+#define ATH11k_HW_RATECODE_CCK_SHORT_PREAM_MASK 0x4
+
+#define ATH11K_FW_DIR "ath11k"
+
+#define ATH11K_BOARD_MAGIC "QCA-ATH11K-BOARD"
+#define ATH11K_BOARD_API2_FILE "board-2.bin"
+#define ATH11K_DEFAULT_BOARD_FILE "board.bin"
+#define ATH11K_DEFAULT_CAL_FILE "caldata.bin"
+#define ATH11K_AMSS_FILE "amss.bin"
+#define ATH11K_M3_FILE "m3.bin"
+#define ATH11K_REGDB_FILE_NAME "regdb.bin"
+
+#define ATH11K_CE_OFFSET(ab) ((ab)->mem_ce - (ab)->mem)
+
+enum ath11k_hw_rate_cck {
+ ATH11K_HW_RATE_CCK_LP_11M = 0,
+ ATH11K_HW_RATE_CCK_LP_5_5M,
+ ATH11K_HW_RATE_CCK_LP_2M,
+ ATH11K_HW_RATE_CCK_LP_1M,
+ ATH11K_HW_RATE_CCK_SP_11M,
+ ATH11K_HW_RATE_CCK_SP_5_5M,
+ ATH11K_HW_RATE_CCK_SP_2M,
+};
+
+enum ath11k_hw_rate_ofdm {
+ ATH11K_HW_RATE_OFDM_48M = 0,
+ ATH11K_HW_RATE_OFDM_24M,
+ ATH11K_HW_RATE_OFDM_12M,
+ ATH11K_HW_RATE_OFDM_6M,
+ ATH11K_HW_RATE_OFDM_54M,
+ ATH11K_HW_RATE_OFDM_36M,
+ ATH11K_HW_RATE_OFDM_18M,
+ ATH11K_HW_RATE_OFDM_9M,
+};
+
+enum ath11k_bus {
+ ATH11K_BUS_AHB,
+ ATH11K_BUS_PCI,
+};
+
+#define ATH11K_EXT_IRQ_GRP_NUM_MAX 11
+
+struct hal_rx_desc;
+struct hal_tcl_data_cmd;
+
+struct ath11k_hw_ring_mask {
+ u8 tx[ATH11K_EXT_IRQ_GRP_NUM_MAX];
+ u8 rx_mon_status[ATH11K_EXT_IRQ_GRP_NUM_MAX];
+ u8 rx[ATH11K_EXT_IRQ_GRP_NUM_MAX];
+ u8 rx_err[ATH11K_EXT_IRQ_GRP_NUM_MAX];
+ u8 rx_wbm_rel[ATH11K_EXT_IRQ_GRP_NUM_MAX];
+ u8 reo_status[ATH11K_EXT_IRQ_GRP_NUM_MAX];
+ u8 rxdma2host[ATH11K_EXT_IRQ_GRP_NUM_MAX];
+ u8 host2rxdma[ATH11K_EXT_IRQ_GRP_NUM_MAX];
+};
+
+struct ath11k_hw_tcl2wbm_rbm_map {
+ u8 tcl_ring_num;
+ u8 wbm_ring_num;
+ u8 rbm_id;
+};
+
+struct ath11k_hw_hal_params {
+ enum hal_rx_buf_return_buf_manager rx_buf_rbm;
+ const struct ath11k_hw_tcl2wbm_rbm_map *tcl2wbm_rbm_map;
+};
+
+struct ath11k_hw_params {
+ const char *name;
+ u16 hw_rev;
+ u8 max_radios;
+ u32 bdf_addr;
+
+ struct {
+ const char *dir;
+ size_t board_size;
+ size_t cal_offset;
+ } fw;
+
+ const struct ath11k_hw_ops *hw_ops;
+ const struct ath11k_hw_ring_mask *ring_mask;
+
+ bool internal_sleep_clock;
+
+ const struct ath11k_hw_regs *regs;
+ u32 qmi_service_ins_id;
+ const struct ce_attr *host_ce_config;
+ u32 ce_count;
+ const struct ce_pipe_config *target_ce_config;
+ u32 target_ce_count;
+ const struct service_to_pipe *svc_to_ce_map;
+ u32 svc_to_ce_map_len;
+ const struct ce_ie_addr *ce_ie_addr;
+ const struct ce_remap *ce_remap;
+
+ bool single_pdev_only;
+
+ bool rxdma1_enable;
+ int num_rxdma_per_pdev;
+ bool rx_mac_buf_ring;
+ bool vdev_start_delay;
+ bool htt_peer_map_v2;
+
+ struct {
+ u8 fft_sz;
+ u8 fft_pad_sz;
+ u8 summary_pad_sz;
+ u8 fft_hdr_len;
+ u16 max_fft_bins;
+ bool fragment_160mhz;
+ } spectral;
+
+ u16 interface_modes;
+ bool supports_monitor;
+ bool full_monitor_mode;
+ bool supports_shadow_regs;
+ bool idle_ps;
+ bool supports_sta_ps;
+ bool coldboot_cal_mm;
+ bool coldboot_cal_ftm;
+ bool cbcal_restart_fw;
+ int fw_mem_mode;
+ u32 num_vdevs;
+ u32 num_peers;
+ bool supports_suspend;
+ u32 hal_desc_sz;
+ bool supports_regdb;
+ bool fix_l1ss;
+ bool credit_flow;
+ u8 max_tx_ring;
+ const struct ath11k_hw_hal_params *hal_params;
+ bool supports_dynamic_smps_6ghz;
+ bool alloc_cacheable_memory;
+ bool supports_rssi_stats;
+ bool fw_wmi_diag_event;
+ bool current_cc_support;
+ bool dbr_debug_support;
+ bool global_reset;
+ const struct cfg80211_sar_capa *bios_sar_capa;
+ bool m3_fw_support;
+ bool fixed_bdf_addr;
+ bool fixed_mem_region;
+ bool static_window_map;
+ bool hybrid_bus_type;
+ bool fixed_fw_mem;
+ bool support_off_channel_tx;
+ bool supports_multi_bssid;
+
+ struct {
+ u32 start;
+ u32 end;
+ } sram_dump;
+
+ bool tcl_ring_retry;
+ u32 tx_ring_size;
+ bool smp2p_wow_exit;
+ bool support_fw_mac_sequence;
+ bool support_dual_stations;
+ bool pdev_suspend;
+};
+
+struct ath11k_hw_ops {
+ u8 (*get_hw_mac_from_pdev_id)(int pdev_id);
+ void (*wmi_init_config)(struct ath11k_base *ab,
+ struct target_resource_config *config);
+ int (*mac_id_to_pdev_id)(struct ath11k_hw_params *hw, int mac_id);
+ int (*mac_id_to_srng_id)(struct ath11k_hw_params *hw, int mac_id);
+ void (*tx_mesh_enable)(struct ath11k_base *ab,
+ struct hal_tcl_data_cmd *tcl_cmd);
+ bool (*rx_desc_get_first_msdu)(struct hal_rx_desc *desc);
+ bool (*rx_desc_get_last_msdu)(struct hal_rx_desc *desc);
+ u8 (*rx_desc_get_l3_pad_bytes)(struct hal_rx_desc *desc);
+ u8 *(*rx_desc_get_hdr_status)(struct hal_rx_desc *desc);
+ bool (*rx_desc_encrypt_valid)(struct hal_rx_desc *desc);
+ u32 (*rx_desc_get_encrypt_type)(struct hal_rx_desc *desc);
+ u8 (*rx_desc_get_decap_type)(struct hal_rx_desc *desc);
+ u8 (*rx_desc_get_mesh_ctl)(struct hal_rx_desc *desc);
+ bool (*rx_desc_get_ldpc_support)(struct hal_rx_desc *desc);
+ bool (*rx_desc_get_mpdu_seq_ctl_vld)(struct hal_rx_desc *desc);
+ bool (*rx_desc_get_mpdu_fc_valid)(struct hal_rx_desc *desc);
+ u16 (*rx_desc_get_mpdu_start_seq_no)(struct hal_rx_desc *desc);
+ u16 (*rx_desc_get_msdu_len)(struct hal_rx_desc *desc);
+ u8 (*rx_desc_get_msdu_sgi)(struct hal_rx_desc *desc);
+ u8 (*rx_desc_get_msdu_rate_mcs)(struct hal_rx_desc *desc);
+ u8 (*rx_desc_get_msdu_rx_bw)(struct hal_rx_desc *desc);
+ u32 (*rx_desc_get_msdu_freq)(struct hal_rx_desc *desc);
+ u8 (*rx_desc_get_msdu_pkt_type)(struct hal_rx_desc *desc);
+ u8 (*rx_desc_get_msdu_nss)(struct hal_rx_desc *desc);
+ u8 (*rx_desc_get_mpdu_tid)(struct hal_rx_desc *desc);
+ u16 (*rx_desc_get_mpdu_peer_id)(struct hal_rx_desc *desc);
+ void (*rx_desc_copy_attn_end_tlv)(struct hal_rx_desc *fdesc,
+ struct hal_rx_desc *ldesc);
+ u32 (*rx_desc_get_mpdu_start_tag)(struct hal_rx_desc *desc);
+ u32 (*rx_desc_get_mpdu_ppdu_id)(struct hal_rx_desc *desc);
+ void (*rx_desc_set_msdu_len)(struct hal_rx_desc *desc, u16 len);
+ struct rx_attention *(*rx_desc_get_attention)(struct hal_rx_desc *desc);
+ u8 *(*rx_desc_get_msdu_payload)(struct hal_rx_desc *desc);
+ void (*reo_setup)(struct ath11k_base *ab);
+ u16 (*mpdu_info_get_peerid)(struct hal_rx_mpdu_info *mpdu_info);
+ bool (*rx_desc_mac_addr2_valid)(struct hal_rx_desc *desc);
+ u8 *(*rx_desc_mpdu_start_addr2)(struct hal_rx_desc *desc);
+ u32 (*get_ring_selector)(struct sk_buff *skb);
+};
+
+extern const struct ath11k_hw_ops ipq8074_ops;
+extern const struct ath11k_hw_ops ipq6018_ops;
+extern const struct ath11k_hw_ops qca6390_ops;
+extern const struct ath11k_hw_ops qcn9074_ops;
+extern const struct ath11k_hw_ops wcn6855_ops;
+extern const struct ath11k_hw_ops wcn6750_ops;
+extern const struct ath11k_hw_ops ipq5018_ops;
+
+extern const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_ipq8074;
+extern const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_qca6390;
+extern const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_qcn9074;
+extern const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_wcn6750;
+
+extern const struct ce_ie_addr ath11k_ce_ie_addr_ipq8074;
+extern const struct ce_ie_addr ath11k_ce_ie_addr_ipq5018;
+
+extern const struct ce_remap ath11k_ce_remap_ipq5018;
+
+extern const struct ath11k_hw_hal_params ath11k_hw_hal_params_ipq8074;
+extern const struct ath11k_hw_hal_params ath11k_hw_hal_params_qca6390;
+extern const struct ath11k_hw_hal_params ath11k_hw_hal_params_wcn6750;
+
+static inline
+int ath11k_hw_get_mac_from_pdev_id(struct ath11k_hw_params *hw,
+ int pdev_idx)
+{
+ if (hw->hw_ops->get_hw_mac_from_pdev_id)
+ return hw->hw_ops->get_hw_mac_from_pdev_id(pdev_idx);
+
+ return 0;
+}
+
+static inline int ath11k_hw_mac_id_to_pdev_id(struct ath11k_hw_params *hw,
+ int mac_id)
+{
+ if (hw->hw_ops->mac_id_to_pdev_id)
+ return hw->hw_ops->mac_id_to_pdev_id(hw, mac_id);
+
+ return 0;
+}
+
+static inline int ath11k_hw_mac_id_to_srng_id(struct ath11k_hw_params *hw,
+ int mac_id)
+{
+ if (hw->hw_ops->mac_id_to_srng_id)
+ return hw->hw_ops->mac_id_to_srng_id(hw, mac_id);
+
+ return 0;
+}
+
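These wrappers implement an optional-op dispatch idiom: a chip's ops table may leave a hook NULL, and the inline wrapper supplies the default (id 0). A self-contained sketch of the pattern with simplified types and made-up ops tables:

#include <stdio.h>

struct hw_ops {
	int (*mac_id_to_pdev_id)(int mac_id); /* may be NULL */
};

static int dbs_map(int mac_id) { return mac_id; }

static const struct hw_ops single_pdev_ops = { .mac_id_to_pdev_id = NULL };
static const struct hw_ops multi_pdev_ops = { .mac_id_to_pdev_id = dbs_map };

static int mac_id_to_pdev_id(const struct hw_ops *ops, int mac_id)
{
	/* Dispatch through the hook if the chip provides one... */
	if (ops->mac_id_to_pdev_id)
		return ops->mac_id_to_pdev_id(mac_id);

	/* ...otherwise fold everything onto pdev 0. */
	return 0;
}

int main(void)
{
	printf("%d %d\n",
	       mac_id_to_pdev_id(&single_pdev_ops, 1),  /* prints 0 */
	       mac_id_to_pdev_id(&multi_pdev_ops, 1));  /* prints 1 */
	return 0;
}
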
+struct ath11k_fw_ie {
+ __le32 id;
+ __le32 len;
+ u8 data[];
+};
+
+enum ath11k_bd_ie_board_type {
+ ATH11K_BD_IE_BOARD_NAME = 0,
+ ATH11K_BD_IE_BOARD_DATA = 1,
+};
+
+enum ath11k_bd_ie_regdb_type {
+ ATH11K_BD_IE_REGDB_NAME = 0,
+ ATH11K_BD_IE_REGDB_DATA = 1,
+};
+
+enum ath11k_bd_ie_type {
+ /* contains sub IEs of enum ath11k_bd_ie_board_type */
+ ATH11K_BD_IE_BOARD = 0,
+ /* contains sub IEs of enum ath11k_bd_ie_regdb_type */
+ ATH11K_BD_IE_REGDB = 1,
+};
+
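struct ath11k_fw_ie is a TLV record: a 32-bit id, a 32-bit length, then len bytes of payload, repeated until the buffer is exhausted (an ATH11K_BD_IE_BOARD record, for instance, carries the board-type sub-IEs named above). A minimal sketch of walking that layout, using host-endian stand-ins for the __le32 fields and a hand-built buffer:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct fw_ie {
	uint32_t id;
	uint32_t len;
	uint8_t data[];
};

int main(void)
{
	/* One hand-built IE: id 0 (board name), len 4, payload "ABCD". */
	uint32_t buf[3] = { 0, 4 };
	const uint8_t *p = (const uint8_t *)buf;
	size_t total = sizeof(buf), off = 0;

	memcpy(&buf[2], "ABCD", 4);

	while (off + sizeof(struct fw_ie) <= total) {
		const struct fw_ie *ie = (const struct fw_ie *)(p + off);

		if (off + sizeof(*ie) + ie->len > total)
			break; /* truncated record */
		printf("ie id %u len %u\n", ie->id, ie->len);
		off += sizeof(*ie) + ie->len; /* real parsers also pad/align */
	}
	return 0;
}
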
+struct ath11k_hw_regs {
+ u32 hal_tcl1_ring_base_lsb;
+ u32 hal_tcl1_ring_base_msb;
+ u32 hal_tcl1_ring_id;
+ u32 hal_tcl1_ring_misc;
+ u32 hal_tcl1_ring_tp_addr_lsb;
+ u32 hal_tcl1_ring_tp_addr_msb;
+ u32 hal_tcl1_ring_consumer_int_setup_ix0;
+ u32 hal_tcl1_ring_consumer_int_setup_ix1;
+ u32 hal_tcl1_ring_msi1_base_lsb;
+ u32 hal_tcl1_ring_msi1_base_msb;
+ u32 hal_tcl1_ring_msi1_data;
+ u32 hal_tcl2_ring_base_lsb;
+ u32 hal_tcl_ring_base_lsb;
+
+ u32 hal_tcl_status_ring_base_lsb;
+
+ u32 hal_reo1_ring_base_lsb;
+ u32 hal_reo1_ring_base_msb;
+ u32 hal_reo1_ring_id;
+ u32 hal_reo1_ring_misc;
+ u32 hal_reo1_ring_hp_addr_lsb;
+ u32 hal_reo1_ring_hp_addr_msb;
+ u32 hal_reo1_ring_producer_int_setup;
+ u32 hal_reo1_ring_msi1_base_lsb;
+ u32 hal_reo1_ring_msi1_base_msb;
+ u32 hal_reo1_ring_msi1_data;
+ u32 hal_reo2_ring_base_lsb;
+ u32 hal_reo1_aging_thresh_ix_0;
+ u32 hal_reo1_aging_thresh_ix_1;
+ u32 hal_reo1_aging_thresh_ix_2;
+ u32 hal_reo1_aging_thresh_ix_3;
+
+ u32 hal_reo1_ring_hp;
+ u32 hal_reo1_ring_tp;
+ u32 hal_reo2_ring_hp;
+
+ u32 hal_reo_tcl_ring_base_lsb;
+ u32 hal_reo_tcl_ring_hp;
+
+ u32 hal_reo_status_ring_base_lsb;
+ u32 hal_reo_status_hp;
+
+ u32 hal_reo_cmd_ring_base_lsb;
+ u32 hal_reo_cmd_ring_hp;
+
+ u32 hal_sw2reo_ring_base_lsb;
+ u32 hal_sw2reo_ring_hp;
+
+ u32 hal_seq_wcss_umac_ce0_src_reg;
+ u32 hal_seq_wcss_umac_ce0_dst_reg;
+ u32 hal_seq_wcss_umac_ce1_src_reg;
+ u32 hal_seq_wcss_umac_ce1_dst_reg;
+
+ u32 hal_wbm_idle_link_ring_base_lsb;
+ u32 hal_wbm_idle_link_ring_misc;
+
+ u32 hal_wbm_release_ring_base_lsb;
+
+ u32 hal_wbm0_release_ring_base_lsb;
+ u32 hal_wbm1_release_ring_base_lsb;
+
+ u32 pcie_qserdes_sysclk_en_sel;
+ u32 pcie_pcs_osc_dtct_config_base;
+
+ u32 hal_shadow_base_addr;
+ u32 hal_reo1_misc_ctl;
+};
+
+extern const struct ath11k_hw_regs ipq8074_regs;
+extern const struct ath11k_hw_regs qca6390_regs;
+extern const struct ath11k_hw_regs qcn9074_regs;
+extern const struct ath11k_hw_regs wcn6855_regs;
+extern const struct ath11k_hw_regs wcn6750_regs;
+extern const struct ath11k_hw_regs ipq5018_regs;
+
+static inline const char *ath11k_bd_ie_type_str(enum ath11k_bd_ie_type type)
+{
+ switch (type) {
+ case ATH11K_BD_IE_BOARD:
+ return "board data";
+ case ATH11K_BD_IE_REGDB:
+ return "regdb data";
+ }
+
+ return "unknown";
+}
+
+extern const struct cfg80211_sar_capa ath11k_hw_sar_capa_wcn6855;
+
+#endif
diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
new file mode 100644
index 000000000000..3276fe443502
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/mac.c
@@ -0,0 +1,10873 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#include <net/mac80211.h>
+#include <net/cfg80211.h>
+#include <linux/etherdevice.h>
+#include <linux/bitfield.h>
+#include <linux/inetdevice.h>
+#include <net/if_inet6.h>
+#include <net/ipv6.h>
+
+#include "mac.h"
+#include "core.h"
+#include "debug.h"
+#include "wmi.h"
+#include "hw.h"
+#include "dp_tx.h"
+#include "dp_rx.h"
+#include "testmode.h"
+#include "peer.h"
+#include "debugfs_sta.h"
+#include "hif.h"
+#include "wow.h"
+
+#define CHAN2G(_channel, _freq, _flags) { \
+ .band = NL80211_BAND_2GHZ, \
+ .hw_value = (_channel), \
+ .center_freq = (_freq), \
+ .flags = (_flags), \
+ .max_antenna_gain = 0, \
+ .max_power = 30, \
+}
+
+#define CHAN5G(_channel, _freq, _flags) { \
+ .band = NL80211_BAND_5GHZ, \
+ .hw_value = (_channel), \
+ .center_freq = (_freq), \
+ .flags = (_flags), \
+ .max_antenna_gain = 0, \
+ .max_power = 30, \
+}
+
+#define CHAN6G(_channel, _freq, _flags) { \
+ .band = NL80211_BAND_6GHZ, \
+ .hw_value = (_channel), \
+ .center_freq = (_freq), \
+ .flags = (_flags), \
+ .max_antenna_gain = 0, \
+ .max_power = 30, \
+}
+
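Each CHAN*G(...) invocation below expands to a complete ieee80211_channel initializer, which keeps the per-band tables to one line per channel. A stripped-down, compilable illustration of the pattern, using a simplified stand-in for mac80211's channel struct:

#include <stdio.h>

/* Simplified stand-in for struct ieee80211_channel. */
struct chan {
	int band;
	int hw_value;
	int center_freq;
	int max_power;
};

#define CHAN2G(_channel, _freq) { \
	.band = 0 /* 2 GHz */, \
	.hw_value = (_channel), \
	.center_freq = (_freq), \
	.max_power = 30, \
}

static const struct chan chans[] = {
	CHAN2G(1, 2412),
	CHAN2G(6, 2437),
	CHAN2G(11, 2462),
};

int main(void)
{
	unsigned int i;

	for (i = 0; i < sizeof(chans) / sizeof(chans[0]); i++)
		printf("ch %d -> %d MHz\n", chans[i].hw_value, chans[i].center_freq);
	return 0;
}
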
+static const struct ieee80211_channel ath11k_2ghz_channels[] = {
+ CHAN2G(1, 2412, 0),
+ CHAN2G(2, 2417, 0),
+ CHAN2G(3, 2422, 0),
+ CHAN2G(4, 2427, 0),
+ CHAN2G(5, 2432, 0),
+ CHAN2G(6, 2437, 0),
+ CHAN2G(7, 2442, 0),
+ CHAN2G(8, 2447, 0),
+ CHAN2G(9, 2452, 0),
+ CHAN2G(10, 2457, 0),
+ CHAN2G(11, 2462, 0),
+ CHAN2G(12, 2467, 0),
+ CHAN2G(13, 2472, 0),
+ CHAN2G(14, 2484, 0),
+};
+
+static const struct ieee80211_channel ath11k_5ghz_channels[] = {
+ CHAN5G(36, 5180, 0),
+ CHAN5G(40, 5200, 0),
+ CHAN5G(44, 5220, 0),
+ CHAN5G(48, 5240, 0),
+ CHAN5G(52, 5260, 0),
+ CHAN5G(56, 5280, 0),
+ CHAN5G(60, 5300, 0),
+ CHAN5G(64, 5320, 0),
+ CHAN5G(100, 5500, 0),
+ CHAN5G(104, 5520, 0),
+ CHAN5G(108, 5540, 0),
+ CHAN5G(112, 5560, 0),
+ CHAN5G(116, 5580, 0),
+ CHAN5G(120, 5600, 0),
+ CHAN5G(124, 5620, 0),
+ CHAN5G(128, 5640, 0),
+ CHAN5G(132, 5660, 0),
+ CHAN5G(136, 5680, 0),
+ CHAN5G(140, 5700, 0),
+ CHAN5G(144, 5720, 0),
+ CHAN5G(149, 5745, 0),
+ CHAN5G(153, 5765, 0),
+ CHAN5G(157, 5785, 0),
+ CHAN5G(161, 5805, 0),
+ CHAN5G(165, 5825, 0),
+ CHAN5G(169, 5845, 0),
+ CHAN5G(173, 5865, 0),
+ CHAN5G(177, 5885, 0),
+};
+
+static const struct ieee80211_channel ath11k_6ghz_channels[] = {
+ CHAN6G(1, 5955, 0),
+ CHAN6G(5, 5975, 0),
+ CHAN6G(9, 5995, 0),
+ CHAN6G(13, 6015, 0),
+ CHAN6G(17, 6035, 0),
+ CHAN6G(21, 6055, 0),
+ CHAN6G(25, 6075, 0),
+ CHAN6G(29, 6095, 0),
+ CHAN6G(33, 6115, 0),
+ CHAN6G(37, 6135, 0),
+ CHAN6G(41, 6155, 0),
+ CHAN6G(45, 6175, 0),
+ CHAN6G(49, 6195, 0),
+ CHAN6G(53, 6215, 0),
+ CHAN6G(57, 6235, 0),
+ CHAN6G(61, 6255, 0),
+ CHAN6G(65, 6275, 0),
+ CHAN6G(69, 6295, 0),
+ CHAN6G(73, 6315, 0),
+ CHAN6G(77, 6335, 0),
+ CHAN6G(81, 6355, 0),
+ CHAN6G(85, 6375, 0),
+ CHAN6G(89, 6395, 0),
+ CHAN6G(93, 6415, 0),
+ CHAN6G(97, 6435, 0),
+ CHAN6G(101, 6455, 0),
+ CHAN6G(105, 6475, 0),
+ CHAN6G(109, 6495, 0),
+ CHAN6G(113, 6515, 0),
+ CHAN6G(117, 6535, 0),
+ CHAN6G(121, 6555, 0),
+ CHAN6G(125, 6575, 0),
+ CHAN6G(129, 6595, 0),
+ CHAN6G(133, 6615, 0),
+ CHAN6G(137, 6635, 0),
+ CHAN6G(141, 6655, 0),
+ CHAN6G(145, 6675, 0),
+ CHAN6G(149, 6695, 0),
+ CHAN6G(153, 6715, 0),
+ CHAN6G(157, 6735, 0),
+ CHAN6G(161, 6755, 0),
+ CHAN6G(165, 6775, 0),
+ CHAN6G(169, 6795, 0),
+ CHAN6G(173, 6815, 0),
+ CHAN6G(177, 6835, 0),
+ CHAN6G(181, 6855, 0),
+ CHAN6G(185, 6875, 0),
+ CHAN6G(189, 6895, 0),
+ CHAN6G(193, 6915, 0),
+ CHAN6G(197, 6935, 0),
+ CHAN6G(201, 6955, 0),
+ CHAN6G(205, 6975, 0),
+ CHAN6G(209, 6995, 0),
+ CHAN6G(213, 7015, 0),
+ CHAN6G(217, 7035, 0),
+ CHAN6G(221, 7055, 0),
+ CHAN6G(225, 7075, 0),
+ CHAN6G(229, 7095, 0),
+ CHAN6G(233, 7115, 0),
+
+ /* new addition in IEEE Std 802.11ax-2021 */
+ CHAN6G(2, 5935, 0),
+};
+
+static struct ieee80211_rate ath11k_legacy_rates[] = {
+ { .bitrate = 10,
+ .hw_value = ATH11K_HW_RATE_CCK_LP_1M },
+ { .bitrate = 20,
+ .hw_value = ATH11K_HW_RATE_CCK_LP_2M,
+ .hw_value_short = ATH11K_HW_RATE_CCK_SP_2M,
+ .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+ { .bitrate = 55,
+ .hw_value = ATH11K_HW_RATE_CCK_LP_5_5M,
+ .hw_value_short = ATH11K_HW_RATE_CCK_SP_5_5M,
+ .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+ { .bitrate = 110,
+ .hw_value = ATH11K_HW_RATE_CCK_LP_11M,
+ .hw_value_short = ATH11K_HW_RATE_CCK_SP_11M,
+ .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+
+ { .bitrate = 60, .hw_value = ATH11K_HW_RATE_OFDM_6M },
+ { .bitrate = 90, .hw_value = ATH11K_HW_RATE_OFDM_9M },
+ { .bitrate = 120, .hw_value = ATH11K_HW_RATE_OFDM_12M },
+ { .bitrate = 180, .hw_value = ATH11K_HW_RATE_OFDM_18M },
+ { .bitrate = 240, .hw_value = ATH11K_HW_RATE_OFDM_24M },
+ { .bitrate = 360, .hw_value = ATH11K_HW_RATE_OFDM_36M },
+ { .bitrate = 480, .hw_value = ATH11K_HW_RATE_OFDM_48M },
+ { .bitrate = 540, .hw_value = ATH11K_HW_RATE_OFDM_54M },
+};
+
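+/* Per-band mapping from nl80211 channel width to the preferred WMI HE
+ * phy mode; widths a band cannot support are left as MODE_UNKNOWN.
+ */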
+static const int
+ath11k_phymodes[NUM_NL80211_BANDS][ATH11K_CHAN_WIDTH_NUM] = {
+ [NL80211_BAND_2GHZ] = {
+ [NL80211_CHAN_WIDTH_5] = MODE_UNKNOWN,
+ [NL80211_CHAN_WIDTH_10] = MODE_UNKNOWN,
+ [NL80211_CHAN_WIDTH_20_NOHT] = MODE_11AX_HE20_2G,
+ [NL80211_CHAN_WIDTH_20] = MODE_11AX_HE20_2G,
+ [NL80211_CHAN_WIDTH_40] = MODE_11AX_HE40_2G,
+ [NL80211_CHAN_WIDTH_80] = MODE_11AX_HE80_2G,
+ [NL80211_CHAN_WIDTH_80P80] = MODE_UNKNOWN,
+ [NL80211_CHAN_WIDTH_160] = MODE_UNKNOWN,
+ },
+ [NL80211_BAND_5GHZ] = {
+ [NL80211_CHAN_WIDTH_5] = MODE_UNKNOWN,
+ [NL80211_CHAN_WIDTH_10] = MODE_UNKNOWN,
+ [NL80211_CHAN_WIDTH_20_NOHT] = MODE_11AX_HE20,
+ [NL80211_CHAN_WIDTH_20] = MODE_11AX_HE20,
+ [NL80211_CHAN_WIDTH_40] = MODE_11AX_HE40,
+ [NL80211_CHAN_WIDTH_80] = MODE_11AX_HE80,
+ [NL80211_CHAN_WIDTH_160] = MODE_11AX_HE160,
+ [NL80211_CHAN_WIDTH_80P80] = MODE_11AX_HE80_80,
+ },
+ [NL80211_BAND_6GHZ] = {
+ [NL80211_CHAN_WIDTH_5] = MODE_UNKNOWN,
+ [NL80211_CHAN_WIDTH_10] = MODE_UNKNOWN,
+ [NL80211_CHAN_WIDTH_20_NOHT] = MODE_11AX_HE20,
+ [NL80211_CHAN_WIDTH_20] = MODE_11AX_HE20,
+ [NL80211_CHAN_WIDTH_40] = MODE_11AX_HE40,
+ [NL80211_CHAN_WIDTH_80] = MODE_11AX_HE80,
+ [NL80211_CHAN_WIDTH_160] = MODE_11AX_HE160,
+ [NL80211_CHAN_WIDTH_80P80] = MODE_11AX_HE80_80,
+ },
+
+};
+
+const struct htt_rx_ring_tlv_filter ath11k_mac_mon_status_filter_default = {
+ .rx_filter = HTT_RX_FILTER_TLV_FLAGS_MPDU_START |
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END |
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE,
+ .pkt_filter_flags0 = HTT_RX_FP_MGMT_FILTER_FLAGS0,
+ .pkt_filter_flags1 = HTT_RX_FP_MGMT_FILTER_FLAGS1,
+ .pkt_filter_flags2 = HTT_RX_FP_CTRL_FILTER_FLASG2,
+ .pkt_filter_flags3 = HTT_RX_FP_DATA_FILTER_FLASG3 |
+ HTT_RX_FP_CTRL_FILTER_FLASG3
+};
+
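+/* The first four entries in ath11k_legacy_rates are the 2 GHz-only CCK
+ * rates; OFDM rates start at index 4, so the 5 GHz ("a") rate table
+ * simply skips the CCK entries.
+ */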
+#define ATH11K_MAC_FIRST_OFDM_RATE_IDX 4
+#define ath11k_g_rates ath11k_legacy_rates
+#define ath11k_g_rates_size (ARRAY_SIZE(ath11k_legacy_rates))
+#define ath11k_a_rates (ath11k_legacy_rates + 4)
+#define ath11k_a_rates_size (ARRAY_SIZE(ath11k_legacy_rates) - 4)
+
+#define ATH11K_MAC_SCAN_CMD_EVT_OVERHEAD 200 /* in msecs */
+
+/* Overhead due to the processing of channel switch events from FW */
+#define ATH11K_SCAN_CHANNEL_SWITCH_WMI_EVT_OVERHEAD 10 /* in msecs */
+
+static const u32 ath11k_smps_map[] = {
+ [WLAN_HT_CAP_SM_PS_STATIC] = WMI_PEER_SMPS_STATIC,
+ [WLAN_HT_CAP_SM_PS_DYNAMIC] = WMI_PEER_SMPS_DYNAMIC,
+ [WLAN_HT_CAP_SM_PS_INVALID] = WMI_PEER_SMPS_PS_NONE,
+ [WLAN_HT_CAP_SM_PS_DISABLED] = WMI_PEER_SMPS_PS_NONE,
+};
+
+enum nl80211_he_ru_alloc ath11k_mac_phy_he_ru_to_nl80211_he_ru_alloc(u16 ru_phy)
+{
+ enum nl80211_he_ru_alloc ret;
+
+ switch (ru_phy) {
+ case RU_26:
+ ret = NL80211_RATE_INFO_HE_RU_ALLOC_26;
+ break;
+ case RU_52:
+ ret = NL80211_RATE_INFO_HE_RU_ALLOC_52;
+ break;
+ case RU_106:
+ ret = NL80211_RATE_INFO_HE_RU_ALLOC_106;
+ break;
+ case RU_242:
+ ret = NL80211_RATE_INFO_HE_RU_ALLOC_242;
+ break;
+ case RU_484:
+ ret = NL80211_RATE_INFO_HE_RU_ALLOC_484;
+ break;
+ case RU_996:
+ ret = NL80211_RATE_INFO_HE_RU_ALLOC_996;
+ break;
+ default:
+ ret = NL80211_RATE_INFO_HE_RU_ALLOC_26;
+ break;
+ }
+
+ return ret;
+}
+
+enum nl80211_he_ru_alloc ath11k_mac_he_ru_tones_to_nl80211_he_ru_alloc(u16 ru_tones)
+{
+ enum nl80211_he_ru_alloc ret;
+
+ switch (ru_tones) {
+ case 26:
+ ret = NL80211_RATE_INFO_HE_RU_ALLOC_26;
+ break;
+ case 52:
+ ret = NL80211_RATE_INFO_HE_RU_ALLOC_52;
+ break;
+ case 106:
+ ret = NL80211_RATE_INFO_HE_RU_ALLOC_106;
+ break;
+ case 242:
+ ret = NL80211_RATE_INFO_HE_RU_ALLOC_242;
+ break;
+ case 484:
+ ret = NL80211_RATE_INFO_HE_RU_ALLOC_484;
+ break;
+ case 996:
+ ret = NL80211_RATE_INFO_HE_RU_ALLOC_996;
+ break;
+ case (996 * 2):
+ ret = NL80211_RATE_INFO_HE_RU_ALLOC_2x996;
+ break;
+ default:
+ ret = NL80211_RATE_INFO_HE_RU_ALLOC_26;
+ break;
+ }
+
+ return ret;
+}
+
+enum nl80211_he_gi ath11k_mac_he_gi_to_nl80211_he_gi(u8 sgi)
+{
+ enum nl80211_he_gi ret;
+
+ switch (sgi) {
+ case RX_MSDU_START_SGI_0_8_US:
+ ret = NL80211_RATE_INFO_HE_GI_0_8;
+ break;
+ case RX_MSDU_START_SGI_1_6_US:
+ ret = NL80211_RATE_INFO_HE_GI_1_6;
+ break;
+ case RX_MSDU_START_SGI_3_2_US:
+ ret = NL80211_RATE_INFO_HE_GI_3_2;
+ break;
+ default:
+ ret = NL80211_RATE_INFO_HE_GI_0_8;
+ break;
+ }
+
+ return ret;
+}
+
+u8 ath11k_mac_bw_to_mac80211_bw(u8 bw)
+{
+ u8 ret = 0;
+
+ switch (bw) {
+ case ATH11K_BW_20:
+ ret = RATE_INFO_BW_20;
+ break;
+ case ATH11K_BW_40:
+ ret = RATE_INFO_BW_40;
+ break;
+ case ATH11K_BW_80:
+ ret = RATE_INFO_BW_80;
+ break;
+ case ATH11K_BW_160:
+ ret = RATE_INFO_BW_160;
+ break;
+ }
+
+ return ret;
+}
+
+enum ath11k_supported_bw ath11k_mac_mac80211_bw_to_ath11k_bw(enum rate_info_bw bw)
+{
+ switch (bw) {
+ case RATE_INFO_BW_20:
+ return ATH11K_BW_20;
+ case RATE_INFO_BW_40:
+ return ATH11K_BW_40;
+ case RATE_INFO_BW_80:
+ return ATH11K_BW_80;
+ case RATE_INFO_BW_160:
+ return ATH11K_BW_160;
+ default:
+ return ATH11K_BW_20;
+ }
+}
+
+int ath11k_mac_hw_ratecode_to_legacy_rate(u8 hw_rc, u8 preamble, u8 *rateidx,
+ u16 *rate)
+{
+ /* By default, assume OFDM rates */
+ int i = ATH11K_MAC_FIRST_OFDM_RATE_IDX;
+ int max_rates_idx = ath11k_g_rates_size;
+
+ if (preamble == WMI_RATE_PREAMBLE_CCK) {
+ hw_rc &= ~ATH11k_HW_RATECODE_CCK_SHORT_PREAM_MASK;
+ i = 0;
+ max_rates_idx = ATH11K_MAC_FIRST_OFDM_RATE_IDX;
+ }
+
+ while (i < max_rates_idx) {
+ if (hw_rc == ath11k_legacy_rates[i].hw_value) {
+ *rateidx = i;
+ *rate = ath11k_legacy_rates[i].bitrate;
+ return 0;
+ }
+ i++;
+ }
+
+ return -EINVAL;
+}
+
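+/* Count the set bits in a chainmask, i.e. the number of active chains. */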
+static int get_num_chains(u32 mask)
+{
+ int num_chains = 0;
+
+ while (mask) {
+ if (mask & BIT(0))
+ num_chains++;
+ mask >>= 1;
+ }
+
+ return num_chains;
+}
+
+u8 ath11k_mac_bitrate_to_idx(const struct ieee80211_supported_band *sband,
+ u32 bitrate)
+{
+ int i;
+
+ for (i = 0; i < sband->n_bitrates; i++)
+ if (sband->bitrates[i].bitrate == bitrate)
+ return i;
+
+ return 0;
+}
+
+static u32
+ath11k_mac_max_ht_nss(const u8 *ht_mcs_mask)
+{
+ int nss;
+
+ for (nss = IEEE80211_HT_MCS_MASK_LEN - 1; nss >= 0; nss--)
+ if (ht_mcs_mask[nss])
+ return nss + 1;
+
+ return 1;
+}
+
+static u32
+ath11k_mac_max_vht_nss(const u16 *vht_mcs_mask)
+{
+ int nss;
+
+ for (nss = NL80211_VHT_NSS_MAX - 1; nss >= 0; nss--)
+ if (vht_mcs_mask[nss])
+ return nss + 1;
+
+ return 1;
+}
+
+static u32
+ath11k_mac_max_he_nss(const u16 *he_mcs_mask)
+{
+ int nss;
+
+ for (nss = NL80211_HE_NSS_MAX - 1; nss >= 0; nss--)
+ if (he_mcs_mask[nss])
+ return nss + 1;
+
+ return 1;
+}
+
+static u8 ath11k_parse_mpdudensity(u8 mpdudensity)
+{
+/* 802.11n D2.0 defined values for "Minimum MPDU Start Spacing":
+ * 0 for no restriction
+ * 1 for 1/4 us
+ * 2 for 1/2 us
+ * 3 for 1 us
+ * 4 for 2 us
+ * 5 for 4 us
+ * 6 for 8 us
+ * 7 for 16 us
+ */
+ switch (mpdudensity) {
+ case 0:
+ return 0;
+ case 1:
+ case 2:
+ case 3:
+ /* Our lower layer calculations limit our precision to
+ * 1 microsecond
+ */
+ return 1;
+ case 4:
+ return 2;
+ case 5:
+ return 4;
+ case 6:
+ return 8;
+ case 7:
+ return 16;
+ default:
+ return 0;
+ }
+}
+
+static int ath11k_mac_vif_chan(struct ieee80211_vif *vif,
+ struct cfg80211_chan_def *def)
+{
+ struct ieee80211_chanctx_conf *conf;
+
+ rcu_read_lock();
+ conf = rcu_dereference(vif->bss_conf.chanctx_conf);
+ if (!conf) {
+ rcu_read_unlock();
+ return -ENOENT;
+ }
+
+ *def = conf->def;
+ rcu_read_unlock();
+
+ return 0;
+}
+
+static bool ath11k_mac_bitrate_is_cck(int bitrate)
+{
+ switch (bitrate) {
+ case 10:
+ case 20:
+ case 55:
+ case 110:
+ return true;
+ }
+
+ return false;
+}
+
+u8 ath11k_mac_hw_rate_to_idx(const struct ieee80211_supported_band *sband,
+ u8 hw_rate, bool cck)
+{
+ const struct ieee80211_rate *rate;
+ int i;
+
+ for (i = 0; i < sband->n_bitrates; i++) {
+ rate = &sband->bitrates[i];
+
+ if (ath11k_mac_bitrate_is_cck(rate->bitrate) != cck)
+ continue;
+
+ if (rate->hw_value == hw_rate)
+ return i;
+ else if (rate->flags & IEEE80211_RATE_SHORT_PREAMBLE &&
+ rate->hw_value_short == hw_rate)
+ return i;
+ }
+
+ return 0;
+}
+
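+/* Convert a bitrate from units of 100 kbps to the 802.11 encoding in
+ * units of 500 kbps, with BIT(7) flagging CCK rates.
+ */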
+static u8 ath11k_mac_bitrate_to_rate(int bitrate)
+{
+ return DIV_ROUND_UP(bitrate, 5) |
+ (ath11k_mac_bitrate_is_cck(bitrate) ? BIT(7) : 0);
+}
+
+static void ath11k_get_arvif_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct ath11k_vif_iter *arvif_iter = data;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+
+ if (arvif->vdev_id == arvif_iter->vdev_id)
+ arvif_iter->arvif = arvif;
+}
+
+struct ath11k_vif *ath11k_mac_get_arvif(struct ath11k *ar, u32 vdev_id)
+{
+ struct ath11k_vif_iter arvif_iter;
+ u32 flags;
+
+ memset(&arvif_iter, 0, sizeof(struct ath11k_vif_iter));
+ arvif_iter.vdev_id = vdev_id;
+
+ flags = IEEE80211_IFACE_ITER_RESUME_ALL;
+ ieee80211_iterate_active_interfaces_atomic(ar->hw,
+ flags,
+ ath11k_get_arvif_iter,
+ &arvif_iter);
+ if (!arvif_iter.arvif) {
+ ath11k_warn(ar->ab, "No VIF found for vdev %d\n", vdev_id);
+ return NULL;
+ }
+
+ return arvif_iter.arvif;
+}
+
+struct ath11k_vif *ath11k_mac_get_arvif_by_vdev_id(struct ath11k_base *ab,
+ u32 vdev_id)
+{
+ int i;
+ struct ath11k_pdev *pdev;
+ struct ath11k_vif *arvif;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = rcu_dereference(ab->pdevs_active[i]);
+ if (pdev && pdev->ar &&
+ (pdev->ar->allocated_vdev_map & (1LL << vdev_id))) {
+ arvif = ath11k_mac_get_arvif(pdev->ar, vdev_id);
+ if (arvif)
+ return arvif;
+ }
+ }
+
+ return NULL;
+}
+
+struct ath11k *ath11k_mac_get_ar_by_vdev_id(struct ath11k_base *ab, u32 vdev_id)
+{
+ int i;
+ struct ath11k_pdev *pdev;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = rcu_dereference(ab->pdevs_active[i]);
+ if (pdev && pdev->ar) {
+ if (pdev->ar->allocated_vdev_map & (1LL << vdev_id))
+ return pdev->ar;
+ }
+ }
+
+ return NULL;
+}
+
+struct ath11k *ath11k_mac_get_ar_by_pdev_id(struct ath11k_base *ab, u32 pdev_id)
+{
+ int i;
+ struct ath11k_pdev *pdev;
+
+ if (ab->hw_params.single_pdev_only) {
+ pdev = rcu_dereference(ab->pdevs_active[0]);
+ return pdev ? pdev->ar : NULL;
+ }
+
+ if (WARN_ON(pdev_id > ab->num_radios))
+ return NULL;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ if (ab->fw_mode == ATH11K_FIRMWARE_MODE_FTM)
+ pdev = &ab->pdevs[i];
+ else
+ pdev = rcu_dereference(ab->pdevs_active[i]);
+
+ if (pdev && pdev->pdev_id == pdev_id)
+ return (pdev->ar ? pdev->ar : NULL);
+ }
+
+ return NULL;
+}
+
+struct ath11k_vif *ath11k_mac_get_vif_up(struct ath11k_base *ab)
+{
+ struct ath11k *ar;
+ struct ath11k_pdev *pdev;
+ struct ath11k_vif *arvif;
+ int i;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = &ab->pdevs[i];
+ ar = pdev->ar;
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ if (arvif->is_up)
+ return arvif;
+ }
+ }
+
+ return NULL;
+}
+
+static bool ath11k_mac_band_match(enum nl80211_band band1, enum WMI_HOST_WLAN_BAND band2)
+{
+ return (((band1 == NL80211_BAND_2GHZ) && (band2 & WMI_HOST_WLAN_2G_CAP)) ||
+ (((band1 == NL80211_BAND_5GHZ) || (band1 == NL80211_BAND_6GHZ)) &&
+ (band2 & WMI_HOST_WLAN_5G_CAP)));
+}
+
+u8 ath11k_mac_get_target_pdev_id_from_vif(struct ath11k_vif *arvif)
+{
+ struct ath11k *ar = arvif->ar;
+ struct ath11k_base *ab = ar->ab;
+ struct ieee80211_vif *vif = arvif->vif;
+ struct cfg80211_chan_def def;
+ enum nl80211_band band;
+ u8 pdev_id = ab->target_pdev_ids[0].pdev_id;
+ int i;
+
+ if (WARN_ON(ath11k_mac_vif_chan(vif, &def)))
+ return pdev_id;
+
+ band = def.chan->band;
+
+ for (i = 0; i < ab->target_pdev_count; i++) {
+ if (ath11k_mac_band_match(band, ab->target_pdev_ids[i].supported_bands))
+ return ab->target_pdev_ids[i].pdev_id;
+ }
+
+ return pdev_id;
+}
+
+u8 ath11k_mac_get_target_pdev_id(struct ath11k *ar)
+{
+ struct ath11k_vif *arvif;
+
+ arvif = ath11k_mac_get_vif_up(ar->ab);
+
+ if (arvif)
+ return ath11k_mac_get_target_pdev_id_from_vif(arvif);
+ else
+ return ar->ab->target_pdev_ids[0].pdev_id;
+}
+
+static void ath11k_pdev_caps_update(struct ath11k *ar)
+{
+ struct ath11k_base *ab = ar->ab;
+
+ ar->max_tx_power = ab->target_caps.hw_max_tx_power;
+
+ /* FIXME: Set min_tx_power to ab->target_caps.hw_min_tx_power.
+ * But since the value received in svcrdy is the same as
+ * hw_max_tx_power, set ar->min_tx_power to 0 for now, until
+ * this is fixed in firmware.
+ */
+ ar->min_tx_power = 0;
+
+ ar->txpower_limit_2g = ar->max_tx_power;
+ ar->txpower_limit_5g = ar->max_tx_power;
+ ar->txpower_scale = WMI_HOST_TP_SCALE_MAX;
+}
+
+static int ath11k_mac_txpower_recalc(struct ath11k *ar)
+{
+ struct ath11k_pdev *pdev = ar->pdev;
+ struct ath11k_vif *arvif;
+ int ret, txpower = -1;
+ u32 param;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ if (arvif->txpower <= 0)
+ continue;
+
+ if (txpower == -1)
+ txpower = arvif->txpower;
+ else
+ txpower = min(txpower, arvif->txpower);
+ }
+
+ if (txpower == -1)
+ return 0;
+
+ /* txpower is specified in FW as 2 units per dBm (value = dBm * 2) */
+ txpower = min_t(u32, max_t(u32, ar->min_tx_power, txpower),
+ ar->max_tx_power) * 2;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "txpower to set in hw %d\n",
+ txpower / 2);
+
+ if ((pdev->cap.supported_bands & WMI_HOST_WLAN_2G_CAP) &&
+ ar->txpower_limit_2g != txpower) {
+ param = WMI_PDEV_PARAM_TXPOWER_LIMIT2G;
+ ret = ath11k_wmi_pdev_set_param(ar, param,
+ txpower, ar->pdev->pdev_id);
+ if (ret)
+ goto fail;
+ ar->txpower_limit_2g = txpower;
+ }
+
+ if ((pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP) &&
+ ar->txpower_limit_5g != txpower) {
+ param = WMI_PDEV_PARAM_TXPOWER_LIMIT5G;
+ ret = ath11k_wmi_pdev_set_param(ar, param,
+ txpower, ar->pdev->pdev_id);
+ if (ret)
+ goto fail;
+ ar->txpower_limit_5g = txpower;
+ }
+
+ return 0;
+
+fail:
+ ath11k_warn(ar->ab, "failed to recalc txpower limit %d using pdev param %d: %d\n",
+ txpower / 2, param, ret);
+ return ret;
+}
+
+static int ath11k_recalc_rtscts_prot(struct ath11k_vif *arvif)
+{
+ struct ath11k *ar = arvif->ar;
+ u32 vdev_param, rts_cts = 0;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ vdev_param = WMI_VDEV_PARAM_ENABLE_RTSCTS;
+
+ /* Enable RTS/CTS protection for SW retries (when legacy stations
+ * are present in the BSS) or, by default, only for the second rate
+ * series.
+ * TODO: check whether CTS-to-self needs to be enabled in any case.
+ */
+ rts_cts = WMI_USE_RTS_CTS;
+
+ if (arvif->num_legacy_stations > 0)
+ rts_cts |= WMI_RTSCTS_ACROSS_SW_RETRIES << 4;
+ else
+ rts_cts |= WMI_RTSCTS_FOR_SECOND_RATESERIES << 4;
+
+ /* Need not send duplicate param value to firmware */
+ if (arvif->rtscts_prot_mode == rts_cts)
+ return 0;
+
+ arvif->rtscts_prot_mode = rts_cts;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "vdev %d recalc rts/cts prot %d\n",
+ arvif->vdev_id, rts_cts);
+
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ vdev_param, rts_cts);
+ if (ret)
+ ath11k_warn(ar->ab, "failed to recalculate rts/cts prot for vdev %d: %d\n",
+ arvif->vdev_id, ret);
+
+ return ret;
+}
+
+static int ath11k_mac_set_kickout(struct ath11k_vif *arvif)
+{
+ struct ath11k *ar = arvif->ar;
+ u32 param;
+ int ret;
+
+ ret = ath11k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_STA_KICKOUT_TH,
+ ATH11K_KICKOUT_THRESHOLD,
+ ar->pdev->pdev_id);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set kickout threshold on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ param = WMI_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, param,
+ ATH11K_KEEPALIVE_MIN_IDLE);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set keepalive minimum idle time on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ param = WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, param,
+ ATH11K_KEEPALIVE_MAX_IDLE);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set keepalive maximum idle time on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ param = WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, param,
+ ATH11K_KEEPALIVE_MAX_UNRESPONSIVE);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set keepalive maximum unresponsive time on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+void ath11k_mac_peer_cleanup_all(struct ath11k *ar)
+{
+ struct ath11k_peer *peer, *tmp;
+ struct ath11k_base *ab = ar->ab;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ mutex_lock(&ab->tbl_mtx_lock);
+ spin_lock_bh(&ab->base_lock);
+ list_for_each_entry_safe(peer, tmp, &ab->peers, list) {
+ ath11k_peer_rx_tid_cleanup(ar, peer);
+ ath11k_peer_rhash_delete(ab, peer);
+ list_del(&peer->list);
+ kfree(peer);
+ }
+ spin_unlock_bh(&ab->base_lock);
+ mutex_unlock(&ab->tbl_mtx_lock);
+
+ ar->num_peers = 0;
+ ar->num_stations = 0;
+}
+
+static inline int ath11k_mac_vdev_setup_sync(struct ath11k *ar)
+{
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (test_bit(ATH11K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags))
+ return -ESHUTDOWN;
+
+ if (!wait_for_completion_timeout(&ar->vdev_setup_done,
+ ATH11K_VDEV_SETUP_TIMEOUT_HZ))
+ return -ETIMEDOUT;
+
+ return ar->last_wmi_vdev_start_status ? -EINVAL : 0;
+}
+
+static void
+ath11k_mac_get_any_chandef_iter(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *conf,
+ void *data)
+{
+ struct cfg80211_chan_def **def = data;
+
+ *def = &conf->def;
+}
+
+static int ath11k_mac_monitor_vdev_start(struct ath11k *ar, int vdev_id,
+ struct cfg80211_chan_def *chandef)
+{
+ struct ieee80211_channel *channel;
+ struct wmi_vdev_start_req_arg arg = {};
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ channel = chandef->chan;
+
+ arg.vdev_id = vdev_id;
+ arg.channel.freq = channel->center_freq;
+ arg.channel.band_center_freq1 = chandef->center_freq1;
+ arg.channel.band_center_freq2 = chandef->center_freq2;
+
+ arg.channel.mode = ath11k_phymodes[chandef->chan->band][chandef->width];
+ arg.channel.chan_radar = !!(channel->flags & IEEE80211_CHAN_RADAR);
+
+ arg.channel.min_power = 0;
+ arg.channel.max_power = channel->max_power;
+ arg.channel.max_reg_power = channel->max_reg_power;
+ arg.channel.max_antenna_gain = channel->max_antenna_gain;
+
+ arg.pref_tx_streams = ar->num_tx_chains;
+ arg.pref_rx_streams = ar->num_rx_chains;
+
+ arg.channel.passive = !!(chandef->chan->flags & IEEE80211_CHAN_NO_IR);
+
+ reinit_completion(&ar->vdev_setup_done);
+ reinit_completion(&ar->vdev_delete_done);
+
+ ret = ath11k_wmi_vdev_start(ar, &arg, false);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to request monitor vdev %i start: %d\n",
+ vdev_id, ret);
+ return ret;
+ }
+
+ ret = ath11k_mac_vdev_setup_sync(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to synchronize setup for monitor vdev %i start: %d\n",
+ vdev_id, ret);
+ return ret;
+ }
+
+ ret = ath11k_wmi_vdev_up(ar, vdev_id, 0, ar->mac_addr, NULL, 0, 0);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to put up monitor vdev %i: %d\n",
+ vdev_id, ret);
+ goto vdev_stop;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "monitor vdev %i started\n",
+ vdev_id);
+
+ return 0;
+
+vdev_stop:
+ reinit_completion(&ar->vdev_setup_done);
+
+ ret = ath11k_wmi_vdev_stop(ar, vdev_id);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to stop monitor vdev %i after start failure: %d\n",
+ vdev_id, ret);
+ return ret;
+ }
+
+ ret = ath11k_mac_vdev_setup_sync(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to synchronize setup for vdev %i stop: %d\n",
+ vdev_id, ret);
+ return ret;
+ }
+
+ return -EIO;
+}
+
+static int ath11k_mac_monitor_vdev_stop(struct ath11k *ar)
+{
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ reinit_completion(&ar->vdev_setup_done);
+
+ ret = ath11k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to request monitor vdev %i stop: %d\n",
+ ar->monitor_vdev_id, ret);
+ return ret;
+ }
+
+ ret = ath11k_mac_vdev_setup_sync(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to synchronize monitor vdev %i stop: %d\n",
+ ar->monitor_vdev_id, ret);
+ return ret;
+ }
+
+ ret = ath11k_wmi_vdev_down(ar, ar->monitor_vdev_id);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to put down monitor vdev %i: %d\n",
+ ar->monitor_vdev_id, ret);
+ return ret;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "monitor vdev %i stopped\n",
+ ar->monitor_vdev_id);
+
+ return 0;
+}
+
+static int ath11k_mac_monitor_vdev_create(struct ath11k *ar)
+{
+ struct ath11k_pdev *pdev = ar->pdev;
+ struct vdev_create_params param = {};
+ int bit, ret;
+ u8 tmp_addr[6] = {};
+ u16 nss;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (test_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags))
+ return 0;
+
+ if (ar->ab->free_vdev_map == 0) {
+ ath11k_warn(ar->ab, "failed to find free vdev id for monitor vdev\n");
+ return -ENOMEM;
+ }
+
+ bit = __ffs64(ar->ab->free_vdev_map);
+
+ ar->monitor_vdev_id = bit;
+
+ param.if_id = ar->monitor_vdev_id;
+ param.type = WMI_VDEV_TYPE_MONITOR;
+ param.subtype = WMI_VDEV_SUBTYPE_NONE;
+ param.pdev_id = pdev->pdev_id;
+
+ if (pdev->cap.supported_bands & WMI_HOST_WLAN_2G_CAP) {
+ param.chains[NL80211_BAND_2GHZ].tx = ar->num_tx_chains;
+ param.chains[NL80211_BAND_2GHZ].rx = ar->num_rx_chains;
+ }
+ if (pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP) {
+ param.chains[NL80211_BAND_5GHZ].tx = ar->num_tx_chains;
+ param.chains[NL80211_BAND_5GHZ].rx = ar->num_rx_chains;
+ }
+
+ ret = ath11k_wmi_vdev_create(ar, tmp_addr, &param);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to request monitor vdev %i creation: %d\n",
+ ar->monitor_vdev_id, ret);
+ ar->monitor_vdev_id = -1;
+ return ret;
+ }
+
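+ /* Derive NSS from the configured TX chainmask, falling back to 1
+ * when no chains are set.
+ */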
+ nss = get_num_chains(ar->cfg_tx_chainmask) ? : 1;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, ar->monitor_vdev_id,
+ WMI_VDEV_PARAM_NSS, nss);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set vdev %d chainmask 0x%x, nss %d :%d\n",
+ ar->monitor_vdev_id, ar->cfg_tx_chainmask, nss, ret);
+ goto err_vdev_del;
+ }
+
+ ret = ath11k_mac_txpower_recalc(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to recalc txpower for monitor vdev %d: %d\n",
+ ar->monitor_vdev_id, ret);
+ goto err_vdev_del;
+ }
+
+ ar->allocated_vdev_map |= 1LL << ar->monitor_vdev_id;
+ ar->ab->free_vdev_map &= ~(1LL << ar->monitor_vdev_id);
+ ar->num_created_vdevs++;
+ set_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags);
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "monitor vdev %d created\n",
+ ar->monitor_vdev_id);
+
+ return 0;
+
+err_vdev_del:
+ ath11k_wmi_vdev_delete(ar, ar->monitor_vdev_id);
+ ar->monitor_vdev_id = -1;
+ return ret;
+}
+
+static int ath11k_mac_monitor_vdev_delete(struct ath11k *ar)
+{
+ int ret;
+ unsigned long time_left;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (!test_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags))
+ return 0;
+
+ reinit_completion(&ar->vdev_delete_done);
+
+ ret = ath11k_wmi_vdev_delete(ar, ar->monitor_vdev_id);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to request wmi monitor vdev %i removal: %d\n",
+ ar->monitor_vdev_id, ret);
+ return ret;
+ }
+
+ time_left = wait_for_completion_timeout(&ar->vdev_delete_done,
+ ATH11K_VDEV_DELETE_TIMEOUT_HZ);
+ if (time_left == 0) {
+ ath11k_warn(ar->ab, "Timeout in receiving vdev delete response\n");
+ } else {
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "monitor vdev %d deleted\n",
+ ar->monitor_vdev_id);
+
+ ar->allocated_vdev_map &= ~(1LL << ar->monitor_vdev_id);
+ ar->ab->free_vdev_map |= 1LL << (ar->monitor_vdev_id);
+ ar->num_created_vdevs--;
+ ar->monitor_vdev_id = -1;
+ clear_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags);
+ }
+
+ return ret;
+}
+
+static int ath11k_mac_monitor_start(struct ath11k *ar)
+{
+ struct cfg80211_chan_def *chandef = NULL;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags))
+ return 0;
+
+ ieee80211_iter_chan_contexts_atomic(ar->hw,
+ ath11k_mac_get_any_chandef_iter,
+ &chandef);
+ if (!chandef)
+ return 0;
+
+ ret = ath11k_mac_monitor_vdev_start(ar, ar->monitor_vdev_id, chandef);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to start monitor vdev: %d\n", ret);
+ ath11k_mac_monitor_vdev_delete(ar);
+ return ret;
+ }
+
+ set_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags);
+
+ ar->num_started_vdevs++;
+ ret = ath11k_dp_tx_htt_monitor_mode_ring_config(ar, false);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to configure htt monitor mode ring during start: %d",
+ ret);
+ return ret;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "monitor started\n");
+
+ return 0;
+}
+
+static int ath11k_mac_monitor_stop(struct ath11k *ar)
+{
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (!test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags))
+ return 0;
+
+ ret = ath11k_mac_monitor_vdev_stop(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to stop monitor vdev: %d\n", ret);
+ return ret;
+ }
+
+ clear_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags);
+ ar->num_started_vdevs--;
+
+ ret = ath11k_dp_tx_htt_monitor_mode_ring_config(ar, true);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to configure htt monitor mode ring during stop: %d",
+ ret);
+ return ret;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "monitor stopped ret %d\n", ret);
+
+ return 0;
+}
+
+static int ath11k_mac_vif_setup_ps(struct ath11k_vif *arvif)
+{
+ struct ath11k *ar = arvif->ar;
+ struct ieee80211_vif *vif = arvif->vif;
+ struct ieee80211_conf *conf = &ar->hw->conf;
+ enum wmi_sta_powersave_param param;
+ enum wmi_sta_ps_mode psmode;
+ int ret;
+ int timeout;
+ bool enable_ps;
+
+ lockdep_assert_held(&arvif->ar->conf_mutex);
+
+ if (arvif->vif->type != NL80211_IFTYPE_STATION)
+ return 0;
+
+ enable_ps = arvif->ps;
+
+ if (enable_ps) {
+ psmode = WMI_STA_PS_MODE_ENABLED;
+ param = WMI_STA_PS_PARAM_INACTIVITY_TIME;
+
+ timeout = conf->dynamic_ps_timeout;
+ if (timeout == 0) {
+ /* firmware doesn't like 0 */
+ timeout = ieee80211_tu_to_usec(vif->bss_conf.beacon_int) / 1000;
+ }
+
+ ret = ath11k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param,
+ timeout);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set inactivity time for vdev %d: %i\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ } else {
+ psmode = WMI_STA_PS_MODE_DISABLED;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "vdev %d psmode %s\n",
+ arvif->vdev_id, psmode ? "enable" : "disable");
+
+ ret = ath11k_wmi_pdev_set_ps_mode(ar, arvif->vdev_id, psmode);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set sta power save mode %d for vdev %d: %d\n",
+ psmode, arvif->vdev_id, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath11k_mac_config_ps(struct ath11k *ar)
+{
+ struct ath11k_vif *arvif;
+ int ret = 0;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ ret = ath11k_mac_vif_setup_ps(arvif);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to setup powersave: %d\n", ret);
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static int ath11k_mac_op_config(struct ieee80211_hw *hw, int radio_idx, u32 changed)
+{
+ struct ath11k *ar = hw->priv;
+ struct ieee80211_conf *conf = &hw->conf;
+ int ret = 0;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
+ if (conf->flags & IEEE80211_CONF_MONITOR) {
+ set_bit(ATH11K_FLAG_MONITOR_CONF_ENABLED, &ar->monitor_flags);
+
+ if (test_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED,
+ &ar->monitor_flags))
+ goto out;
+
+ ret = ath11k_mac_monitor_vdev_create(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to create monitor vdev: %d",
+ ret);
+ goto out;
+ }
+
+ ret = ath11k_mac_monitor_start(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to start monitor: %d",
+ ret);
+ goto err_mon_del;
+ }
+ } else {
+ clear_bit(ATH11K_FLAG_MONITOR_CONF_ENABLED, &ar->monitor_flags);
+
+ if (!test_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED,
+ &ar->monitor_flags))
+ goto out;
+
+ ret = ath11k_mac_monitor_stop(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to stop monitor: %d",
+ ret);
+ goto out;
+ }
+
+ ret = ath11k_mac_monitor_vdev_delete(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to delete monitor vdev: %d",
+ ret);
+ goto out;
+ }
+ }
+ }
+
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+
+err_mon_del:
+ ath11k_mac_monitor_vdev_delete(ar);
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static void ath11k_mac_setup_nontx_vif_rsnie(struct ath11k_vif *arvif,
+ bool tx_arvif_rsnie_present,
+ const u8 *profile, u8 profile_len)
+{
+ if (cfg80211_find_ie(WLAN_EID_RSN, profile, profile_len)) {
+ arvif->rsnie_present = true;
+ } else if (tx_arvif_rsnie_present) {
+ int i;
+ u8 nie_len;
+ const u8 *nie = cfg80211_find_ext_ie(WLAN_EID_EXT_NON_INHERITANCE,
+ profile, profile_len);
+ if (!nie)
+ return;
+
+ nie_len = nie[1];
+ nie += 2;
+ for (i = 0; i < nie_len; i++) {
+ if (nie[i] == WLAN_EID_RSN) {
+ arvif->rsnie_present = false;
+ break;
+ }
+ }
+ }
+}
+
+static bool ath11k_mac_set_nontx_vif_params(struct ath11k_vif *tx_arvif,
+ struct ath11k_vif *arvif,
+ struct sk_buff *bcn)
+{
+ struct ieee80211_mgmt *mgmt;
+ const u8 *ies, *profile, *next_profile;
+ int ies_len;
+
+ ies = bcn->data + ieee80211_get_hdrlen_from_skb(bcn);
+ mgmt = (struct ieee80211_mgmt *)bcn->data;
+ ies += sizeof(mgmt->u.beacon);
+ ies_len = skb_tail_pointer(bcn) - ies;
+
+ ies = cfg80211_find_ie(WLAN_EID_MULTIPLE_BSSID, ies, ies_len);
+ arvif->rsnie_present = tx_arvif->rsnie_present;
+
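+ /* Walk every Multiple BSSID element and each nontransmitted BSSID
+ * profile within it until the profile whose BSSID index matches
+ * this vif is found.
+ */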
+ while (ies) {
+ u8 mbssid_len;
+
+ ies_len -= (2 + ies[1]);
+ mbssid_len = ies[1] - 1;
+ profile = &ies[3];
+
+ while (mbssid_len) {
+ u8 profile_len;
+
+ profile_len = profile[1];
+ next_profile = profile + (2 + profile_len);
+ mbssid_len -= (2 + profile_len);
+
+ profile += 2;
+ profile_len -= (2 + profile[1]);
+ profile += (2 + profile[1]); /* nontx capabilities */
+ profile_len -= (2 + profile[1]);
+ profile += (2 + profile[1]); /* SSID */
+ if (profile[2] == arvif->vif->bss_conf.bssid_index) {
+ profile_len -= 5;
+ profile = profile + 5;
+ ath11k_mac_setup_nontx_vif_rsnie(arvif,
+ tx_arvif->rsnie_present,
+ profile,
+ profile_len);
+ return true;
+ }
+ profile = next_profile;
+ }
+ ies = cfg80211_find_ie(WLAN_EID_MULTIPLE_BSSID, profile,
+ ies_len);
+ }
+
+ return false;
+}
+
+static int ath11k_mac_setup_bcn_p2p_ie(struct ath11k_vif *arvif,
+ struct sk_buff *bcn)
+{
+ struct ath11k *ar = arvif->ar;
+ struct ieee80211_mgmt *mgmt;
+ const u8 *p2p_ie;
+ int ret;
+
+ mgmt = (void *)bcn->data;
+ p2p_ie = cfg80211_find_vendor_ie(WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P,
+ mgmt->u.beacon.variable,
+ bcn->len - (mgmt->u.beacon.variable -
+ bcn->data));
+ if (!p2p_ie)
+ return -ENOENT;
+
+ ret = ath11k_wmi_p2p_go_bcn_ie(ar, arvif->vdev_id, p2p_ie);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to submit P2P GO bcn ie for vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+static int ath11k_mac_remove_vendor_ie(struct sk_buff *skb, unsigned int oui,
+ u8 oui_type, size_t ie_offset)
+{
+ size_t len;
+ const u8 *next, *end;
+ u8 *ie;
+
+ if (WARN_ON(skb->len < ie_offset))
+ return -EINVAL;
+
+ ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
+ skb->data + ie_offset,
+ skb->len - ie_offset);
+ if (!ie)
+ return -ENOENT;
+
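+ /* ie[1] holds the IE body length; add 2 for the element ID and
+ * length octets themselves.
+ */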
+ len = ie[1] + 2;
+ end = skb->data + skb->len;
+ next = ie + len;
+
+ if (WARN_ON(next > end))
+ return -EINVAL;
+
+ memmove(ie, next, end - next);
+ skb_trim(skb, skb->len - len);
+
+ return 0;
+}
+
+static int ath11k_mac_set_vif_params(struct ath11k_vif *arvif,
+ struct sk_buff *bcn)
+{
+ struct ath11k_base *ab = arvif->ar->ab;
+ struct ieee80211_mgmt *mgmt;
+ int ret = 0;
+ u8 *ies;
+
+ ies = bcn->data + ieee80211_get_hdrlen_from_skb(bcn);
+ mgmt = (struct ieee80211_mgmt *)bcn->data;
+ ies += sizeof(mgmt->u.beacon);
+
+ if (cfg80211_find_ie(WLAN_EID_RSN, ies, (skb_tail_pointer(bcn) - ies)))
+ arvif->rsnie_present = true;
+ else
+ arvif->rsnie_present = false;
+
+ if (cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
+ WLAN_OUI_TYPE_MICROSOFT_WPA,
+ ies, (skb_tail_pointer(bcn) - ies)))
+ arvif->wpaie_present = true;
+ else
+ arvif->wpaie_present = false;
+
+ if (arvif->vdev_subtype != WMI_VDEV_SUBTYPE_P2P_GO)
+ return ret;
+
+ ret = ath11k_mac_setup_bcn_p2p_ie(arvif, bcn);
+ if (ret) {
+ ath11k_warn(ab, "failed to setup P2P GO bcn ie: %d\n",
+ ret);
+ return ret;
+ }
+
+ /* P2P IE is inserted by firmware automatically (as
+ * configured above) so remove it from the base beacon
+ * template to avoid duplicate P2P IEs in beacon frames.
+ */
+ ret = ath11k_mac_remove_vendor_ie(bcn, WLAN_OUI_WFA,
+ WLAN_OUI_TYPE_WFA_P2P,
+ offsetof(struct ieee80211_mgmt,
+ u.beacon.variable));
+ if (ret) {
+ ath11k_warn(ab, "failed to remove P2P vendor ie: %d\n",
+ ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+static struct ath11k_vif *ath11k_mac_get_tx_arvif(struct ath11k_vif *arvif)
+{
+ struct ieee80211_bss_conf *link_conf, *tx_bss_conf;
+
+ lockdep_assert_wiphy(arvif->ar->hw->wiphy);
+
+ link_conf = &arvif->vif->bss_conf;
+ tx_bss_conf = wiphy_dereference(arvif->ar->hw->wiphy, link_conf->tx_bss_conf);
+ if (tx_bss_conf)
+ return ath11k_vif_to_arvif(tx_bss_conf->vif);
+
+ return NULL;
+}
+
+static int ath11k_mac_setup_bcn_tmpl_ema(struct ath11k_vif *arvif,
+ struct ath11k_vif *tx_arvif)
+{
+ struct ieee80211_ema_beacons *beacons;
+ int ret = 0;
+ bool nontx_vif_params_set = false;
+ u32 params = 0;
+ u8 i = 0;
+
+ beacons = ieee80211_beacon_get_template_ema_list(tx_arvif->ar->hw,
+ tx_arvif->vif, 0);
+ if (!beacons || !beacons->cnt) {
+ ath11k_warn(arvif->ar->ab,
+ "failed to get ema beacon templates from mac80211\n");
+ return -EPERM;
+ }
+
+ if (tx_arvif == arvif) {
+ if (ath11k_mac_set_vif_params(tx_arvif, beacons->bcn[0].skb))
+ return -EINVAL;
+ } else {
+ arvif->wpaie_present = tx_arvif->wpaie_present;
+ }
+
+ for (i = 0; i < beacons->cnt; i++) {
+ if (tx_arvif != arvif && !nontx_vif_params_set)
+ nontx_vif_params_set =
+ ath11k_mac_set_nontx_vif_params(tx_arvif, arvif,
+ beacons->bcn[i].skb);
+
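+ /* Pack the total template count, this template's index, and
+ * first/last markers into the single WMI EMA params word.
+ */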
+ params = beacons->cnt;
+ params |= (i << WMI_EMA_TMPL_IDX_SHIFT);
+ params |= ((!i ? 1 : 0) << WMI_EMA_FIRST_TMPL_SHIFT);
+ params |= ((i + 1 == beacons->cnt ? 1 : 0) << WMI_EMA_LAST_TMPL_SHIFT);
+
+ ret = ath11k_wmi_bcn_tmpl(tx_arvif->ar, tx_arvif->vdev_id,
+ &beacons->bcn[i].offs,
+ beacons->bcn[i].skb, params);
+ if (ret) {
+ ath11k_warn(tx_arvif->ar->ab,
+ "failed to set ema beacon template id %i error %d\n",
+ i, ret);
+ break;
+ }
+ }
+
+ ieee80211_beacon_free_ema_list(beacons);
+
+ if (tx_arvif != arvif && !nontx_vif_params_set)
+ return -EINVAL; /* Profile not found in the beacons */
+
+ return ret;
+}
+
+static int ath11k_mac_setup_bcn_tmpl_mbssid(struct ath11k_vif *arvif,
+ struct ath11k_vif *tx_arvif)
+{
+ struct ath11k *ar = arvif->ar;
+ struct ath11k_base *ab = ar->ab;
+ struct ieee80211_hw *hw = ar->hw;
+ struct ieee80211_vif *vif = arvif->vif;
+ struct ieee80211_mutable_offsets offs = {};
+ struct sk_buff *bcn;
+ int ret;
+
+ if (tx_arvif != arvif) {
+ ar = tx_arvif->ar;
+ ab = ar->ab;
+ hw = ar->hw;
+ vif = tx_arvif->vif;
+ }
+
+ bcn = ieee80211_beacon_get_template(hw, vif, &offs, 0);
+ if (!bcn) {
+ ath11k_warn(ab, "failed to get beacon template from mac80211\n");
+ return -EPERM;
+ }
+
+ if (tx_arvif == arvif) {
+ if (ath11k_mac_set_vif_params(tx_arvif, bcn))
+ return -EINVAL;
+ } else if (!ath11k_mac_set_nontx_vif_params(tx_arvif, arvif, bcn)) {
+ return -EINVAL;
+ }
+
+ ret = ath11k_wmi_bcn_tmpl(ar, arvif->vdev_id, &offs, bcn, 0);
+ kfree_skb(bcn);
+
+ if (ret)
+ ath11k_warn(ab, "failed to submit beacon template command: %d\n",
+ ret);
+
+ return ret;
+}
+
+static int ath11k_mac_setup_bcn_tmpl(struct ath11k_vif *arvif)
+{
+ struct ieee80211_vif *vif = arvif->vif;
+ struct ath11k_vif *tx_arvif;
+
+ if (arvif->vdev_type != WMI_VDEV_TYPE_AP)
+ return 0;
+
+ /* The target does not expect beacon templates for non-transmitting
+ * interfaces that are already up, and crashes if one is sent.
+ */
+ tx_arvif = ath11k_mac_get_tx_arvif(arvif);
+ if (tx_arvif) {
+ if (arvif != tx_arvif && arvif->is_up)
+ return 0;
+
+ if (vif->bss_conf.ema_ap)
+ return ath11k_mac_setup_bcn_tmpl_ema(arvif, tx_arvif);
+ } else {
+ tx_arvif = arvif;
+ }
+
+ return ath11k_mac_setup_bcn_tmpl_mbssid(arvif, tx_arvif);
+}
+
+void ath11k_mac_bcn_tx_event(struct ath11k_vif *arvif)
+{
+ struct ieee80211_vif *vif = arvif->vif;
+
+ if (!vif->bss_conf.color_change_active && !arvif->bcca_zero_sent)
+ return;
+
+ if (vif->bss_conf.color_change_active &&
+ ieee80211_beacon_cntdwn_is_complete(vif, 0)) {
+ arvif->bcca_zero_sent = true;
+ ieee80211_color_change_finish(vif, 0);
+ return;
+ }
+
+ arvif->bcca_zero_sent = false;
+
+ if (vif->bss_conf.color_change_active)
+ ieee80211_beacon_update_cntdwn(vif, 0);
+ ath11k_mac_setup_bcn_tmpl(arvif);
+}
+
+static void ath11k_control_beaconing(struct ath11k_vif *arvif,
+ struct ieee80211_bss_conf *info)
+{
+ struct ath11k *ar = arvif->ar;
+ struct ath11k_vif *tx_arvif;
+ int ret = 0;
+
+ lockdep_assert_held(&arvif->ar->conf_mutex);
+
+ if (!info->enable_beacon) {
+ ret = ath11k_wmi_vdev_down(ar, arvif->vdev_id);
+ if (ret)
+ ath11k_warn(ar->ab, "failed to down vdev_id %i: %d\n",
+ arvif->vdev_id, ret);
+
+ arvif->is_up = false;
+ return;
+ }
+
+ /* Install the beacon template to the FW */
+ ret = ath11k_mac_setup_bcn_tmpl(arvif);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to update bcn tmpl during vdev up: %d\n",
+ ret);
+ return;
+ }
+
+ arvif->aid = 0;
+
+ ether_addr_copy(arvif->bssid, info->bssid);
+
+ tx_arvif = ath11k_mac_get_tx_arvif(arvif);
+ ret = ath11k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
+ arvif->bssid,
+ tx_arvif ? tx_arvif->bssid : NULL,
+ info->bssid_index,
+ 1 << info->bssid_indicator);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to bring up vdev %d: %i\n",
+ arvif->vdev_id, ret);
+ return;
+ }
+
+ arvif->is_up = true;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "vdev %d up\n", arvif->vdev_id);
+}
+
+static void ath11k_mac_handle_beacon_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct sk_buff *skb = data;
+ struct ieee80211_mgmt *mgmt = (void *)skb->data;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+
+ if (vif->type != NL80211_IFTYPE_STATION)
+ return;
+
+ if (!ether_addr_equal(mgmt->bssid, vif->bss_conf.bssid))
+ return;
+
+ cancel_delayed_work(&arvif->connection_loss_work);
+}
+
+void ath11k_mac_handle_beacon(struct ath11k *ar, struct sk_buff *skb)
+{
+ ieee80211_iterate_active_interfaces_atomic(ar->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ ath11k_mac_handle_beacon_iter,
+ skb);
+}
+
+static void ath11k_mac_handle_beacon_miss_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ u32 *vdev_id = data;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct ath11k *ar = arvif->ar;
+ struct ieee80211_hw *hw = ar->hw;
+
+ if (arvif->vdev_id != *vdev_id)
+ return;
+
+ if (!arvif->is_up)
+ return;
+
+ ieee80211_beacon_loss(vif);
+
+ /* Firmware doesn't report beacon loss events repeatedly. If AP probe
+ * (done by mac80211) succeeds but beacons do not resume then it
+ * doesn't make sense to continue operation. Queue connection loss work
+ * which can be cancelled when beacon is received.
+ */
+ ieee80211_queue_delayed_work(hw, &arvif->connection_loss_work,
+ ATH11K_CONNECTION_LOSS_HZ);
+}
+
+void ath11k_mac_handle_beacon_miss(struct ath11k *ar, u32 vdev_id)
+{
+ ieee80211_iterate_active_interfaces_atomic(ar->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ ath11k_mac_handle_beacon_miss_iter,
+ &vdev_id);
+}
+
+static void ath11k_mac_vif_sta_connection_loss_work(struct work_struct *work)
+{
+ struct ath11k_vif *arvif = container_of(work, struct ath11k_vif,
+ connection_loss_work.work);
+ struct ieee80211_vif *vif = arvif->vif;
+
+ if (!arvif->is_up)
+ return;
+
+ ieee80211_connection_loss(vif);
+}
+
+static void ath11k_peer_assoc_h_basic(struct ath11k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct peer_assoc_params *arg)
+{
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ u32 aid;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (vif->type == NL80211_IFTYPE_STATION)
+ aid = vif->cfg.aid;
+ else
+ aid = sta->aid;
+
+ ether_addr_copy(arg->peer_mac, sta->addr);
+ arg->vdev_id = arvif->vdev_id;
+ arg->peer_associd = aid;
+ arg->auth_flag = true;
+ /* TODO: STA WAR in ath10k for listen interval required? */
+ arg->peer_listen_intval = ar->hw->conf.listen_interval;
+ arg->peer_nss = 1;
+ arg->peer_caps = vif->bss_conf.assoc_capability;
+}
+
+static void ath11k_peer_assoc_h_crypto(struct ath11k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct peer_assoc_params *arg)
+{
+ struct ieee80211_bss_conf *info = &vif->bss_conf;
+ struct cfg80211_chan_def def;
+ struct cfg80211_bss *bss;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ const u8 *rsnie = NULL;
+ const u8 *wpaie = NULL;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (WARN_ON(ath11k_mac_vif_chan(vif, &def)))
+ return;
+
+ bss = cfg80211_get_bss(ar->hw->wiphy, def.chan, info->bssid, NULL, 0,
+ IEEE80211_BSS_TYPE_ANY, IEEE80211_PRIVACY_ANY);
+
+ if (arvif->rsnie_present || arvif->wpaie_present) {
+ arg->need_ptk_4_way = true;
+ if (arvif->wpaie_present)
+ arg->need_gtk_2_way = true;
+ } else if (bss) {
+ const struct cfg80211_bss_ies *ies;
+
+ rcu_read_lock();
+ rsnie = ieee80211_bss_get_ie(bss, WLAN_EID_RSN);
+
+ ies = rcu_dereference(bss->ies);
+
+ wpaie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
+ WLAN_OUI_TYPE_MICROSOFT_WPA,
+ ies->data,
+ ies->len);
+ rcu_read_unlock();
+ cfg80211_put_bss(ar->hw->wiphy, bss);
+ }
+
+ /* FIXME: is basing this on the RSN IE/WPA IE the correct approach? */
+ if (rsnie || wpaie) {
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "%s: rsn ie found\n", __func__);
+ arg->need_ptk_4_way = true;
+ }
+
+ if (wpaie) {
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "%s: wpa ie found\n", __func__);
+ arg->need_gtk_2_way = true;
+ }
+
+ if (sta->mfp) {
+ /* TODO: Need to check if FW supports PMF? */
+ arg->is_pmf_enabled = true;
+ }
+
+ /* TODO: safe_mode_enabled (bypass 4-way handshake) flag req? */
+}
+
+static void ath11k_peer_assoc_h_rates(struct ath11k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct peer_assoc_params *arg)
+{
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct wmi_rate_set_arg *rateset = &arg->peer_legacy_rates;
+ struct cfg80211_chan_def def;
+ const struct ieee80211_supported_band *sband;
+ const struct ieee80211_rate *rates;
+ enum nl80211_band band;
+ u32 ratemask;
+ u8 rate;
+ int i;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (WARN_ON(ath11k_mac_vif_chan(vif, &def)))
+ return;
+
+ band = def.chan->band;
+ sband = ar->hw->wiphy->bands[band];
+ ratemask = sta->deflink.supp_rates[band];
+ ratemask &= arvif->bitrate_mask.control[band].legacy;
+ rates = sband->bitrates;
+
+ rateset->num_rates = 0;
+
+ for (i = 0; i < 32; i++, ratemask >>= 1, rates++) {
+ if (!(ratemask & 1))
+ continue;
+
+ rate = ath11k_mac_bitrate_to_rate(rates->bitrate);
+ rateset->rates[rateset->num_rates] = rate;
+ rateset->num_rates++;
+ }
+}
+
+static bool
+ath11k_peer_assoc_h_ht_masked(const u8 *ht_mcs_mask)
+{
+ int nss;
+
+ for (nss = 0; nss < IEEE80211_HT_MCS_MASK_LEN; nss++)
+ if (ht_mcs_mask[nss])
+ return false;
+
+ return true;
+}
+
+static bool
+ath11k_peer_assoc_h_vht_masked(const u16 *vht_mcs_mask)
+{
+ int nss;
+
+ for (nss = 0; nss < NL80211_VHT_NSS_MAX; nss++)
+ if (vht_mcs_mask[nss])
+ return false;
+
+ return true;
+}
+
+static void ath11k_peer_assoc_h_ht(struct ath11k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct peer_assoc_params *arg)
+{
+ const struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct cfg80211_chan_def def;
+ enum nl80211_band band;
+ const u8 *ht_mcs_mask;
+ int i, n;
+ u8 max_nss;
+ u32 stbc;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (WARN_ON(ath11k_mac_vif_chan(vif, &def)))
+ return;
+
+ if (!ht_cap->ht_supported)
+ return;
+
+ band = def.chan->band;
+ ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
+
+ if (ath11k_peer_assoc_h_ht_masked(ht_mcs_mask))
+ return;
+
+ arg->ht_flag = true;
+
+ arg->peer_max_mpdu = (1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
+ ht_cap->ampdu_factor)) - 1;
+
+ arg->peer_mpdu_density =
+ ath11k_parse_mpdudensity(ht_cap->ampdu_density);
+
+ arg->peer_ht_caps = ht_cap->cap;
+ arg->peer_rate_caps |= WMI_HOST_RC_HT_FLAG;
+
+ if (ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING)
+ arg->ldpc_flag = true;
+
+ if (sta->deflink.bandwidth >= IEEE80211_STA_RX_BW_40) {
+ arg->bw_40 = true;
+ arg->peer_rate_caps |= WMI_HOST_RC_CW40_FLAG;
+ }
+
+ /* As firmware handles these two flags (IEEE80211_HT_CAP_SGI_20
+ * and IEEE80211_HT_CAP_SGI_40) for enabling SGI, reset both
+ * flags if the guard interval is the default GI.
+ */
+ if (arvif->bitrate_mask.control[band].gi == NL80211_TXRATE_DEFAULT_GI)
+ arg->peer_ht_caps &= ~(IEEE80211_HT_CAP_SGI_20 |
+ IEEE80211_HT_CAP_SGI_40);
+
+ if (arvif->bitrate_mask.control[band].gi != NL80211_TXRATE_FORCE_LGI) {
+ if (ht_cap->cap & (IEEE80211_HT_CAP_SGI_20 |
+ IEEE80211_HT_CAP_SGI_40))
+ arg->peer_rate_caps |= WMI_HOST_RC_SGI_FLAG;
+ }
+
+ if (ht_cap->cap & IEEE80211_HT_CAP_TX_STBC) {
+ arg->peer_rate_caps |= WMI_HOST_RC_TX_STBC_FLAG;
+ arg->stbc_flag = true;
+ }
+
+ if (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC) {
+ stbc = ht_cap->cap & IEEE80211_HT_CAP_RX_STBC;
+ stbc = stbc >> IEEE80211_HT_CAP_RX_STBC_SHIFT;
+ stbc = stbc << WMI_HOST_RC_RX_STBC_FLAG_S;
+ arg->peer_rate_caps |= stbc;
+ arg->stbc_flag = true;
+ }
+
+ if (ht_cap->mcs.rx_mask[1] && ht_cap->mcs.rx_mask[2])
+ arg->peer_rate_caps |= WMI_HOST_RC_TS_FLAG;
+ else if (ht_cap->mcs.rx_mask[1])
+ arg->peer_rate_caps |= WMI_HOST_RC_DS_FLAG;
+
+ for (i = 0, n = 0, max_nss = 0; i < IEEE80211_HT_MCS_MASK_LEN * 8; i++)
+ if ((ht_cap->mcs.rx_mask[i / 8] & BIT(i % 8)) &&
+ (ht_mcs_mask[i / 8] & BIT(i % 8))) {
+ max_nss = (i / 8) + 1;
+ arg->peer_ht_rates.rates[n++] = i;
+ }
+
+ /* This is a workaround for HT-enabled STAs which break the spec
+ * and have no HT capabilities RX mask (no HT RX MCS map).
+ *
+ * As per spec, in section 20.3.5 Modulation and coding scheme (MCS),
+ * MCS 0 through 7 are mandatory in 20MHz with 800 ns GI at all STAs.
+ *
+ * Firmware asserts if such situation occurs.
+ */
+ if (n == 0) {
+ arg->peer_ht_rates.num_rates = 8;
+ for (i = 0; i < arg->peer_ht_rates.num_rates; i++)
+ arg->peer_ht_rates.rates[i] = i;
+ } else {
+ arg->peer_ht_rates.num_rates = n;
+ arg->peer_nss = min(sta->deflink.rx_nss, max_nss);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "ht peer %pM mcs cnt %d nss %d\n",
+ arg->peer_mac,
+ arg->peer_ht_rates.num_rates,
+ arg->peer_nss);
+}
+
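+/* Expand the 2-bit per-NSS entry of a VHT MCS map into a bitmask of the
+ * supported MCS indexes, e.g. "MCS 0-9 supported" becomes BIT(10) - 1.
+ */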
+static int ath11k_mac_get_max_vht_mcs_map(u16 mcs_map, int nss)
+{
+ switch ((mcs_map >> (2 * nss)) & 0x3) {
+ case IEEE80211_VHT_MCS_SUPPORT_0_7: return BIT(8) - 1;
+ case IEEE80211_VHT_MCS_SUPPORT_0_8: return BIT(9) - 1;
+ case IEEE80211_VHT_MCS_SUPPORT_0_9: return BIT(10) - 1;
+ }
+ return 0;
+}
+
+static u16
+ath11k_peer_assoc_h_vht_limit(u16 tx_mcs_set,
+ const u16 vht_mcs_limit[NL80211_VHT_NSS_MAX])
+{
+ int idx_limit;
+ int nss;
+ u16 mcs_map;
+ u16 mcs;
+
+ for (nss = 0; nss < NL80211_VHT_NSS_MAX; nss++) {
+ mcs_map = ath11k_mac_get_max_vht_mcs_map(tx_mcs_set, nss) &
+ vht_mcs_limit[nss];
+
+ if (mcs_map)
+ idx_limit = fls(mcs_map) - 1;
+ else
+ idx_limit = -1;
+
+ switch (idx_limit) {
+ case 0:
+ case 1:
+ case 2:
+ case 3:
+ case 4:
+ case 5:
+ case 6:
+ case 7:
+ mcs = IEEE80211_VHT_MCS_SUPPORT_0_7;
+ break;
+ case 8:
+ mcs = IEEE80211_VHT_MCS_SUPPORT_0_8;
+ break;
+ case 9:
+ mcs = IEEE80211_VHT_MCS_SUPPORT_0_9;
+ break;
+ default:
+ WARN_ON(1);
+ fallthrough;
+ case -1:
+ mcs = IEEE80211_VHT_MCS_NOT_SUPPORTED;
+ break;
+ }
+
+ tx_mcs_set &= ~(0x3 << (nss * 2));
+ tx_mcs_set |= mcs << (nss * 2);
+ }
+
+ return tx_mcs_set;
+}
+
+static u8 ath11k_get_nss_160mhz(struct ath11k *ar,
+ u8 max_nss)
+{
+ u8 nss_ratio_info = ar->pdev->cap.nss_ratio_info;
+ u8 max_sup_nss = 0;
+
+ switch (nss_ratio_info) {
+ case WMI_NSS_RATIO_1BY2_NSS:
+ max_sup_nss = max_nss >> 1;
+ break;
+ case WMI_NSS_RATIO_3BY4_NSS:
+ ath11k_warn(ar->ab, "WMI_NSS_RATIO_3BY4_NSS not supported\n");
+ break;
+ case WMI_NSS_RATIO_1_NSS:
+ max_sup_nss = max_nss;
+ break;
+ case WMI_NSS_RATIO_2_NSS:
+ ath11k_warn(ar->ab, "WMI_NSS_RATIO_2_NSS not supported\n");
+ break;
+ default:
+ ath11k_warn(ar->ab, "invalid nss ratio received from firmware: %d\n",
+ nss_ratio_info);
+ break;
+ }
+
+ return max_sup_nss;
+}
+
+static void ath11k_peer_assoc_h_vht(struct ath11k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct peer_assoc_params *arg)
+{
+ const struct ieee80211_sta_vht_cap *vht_cap = &sta->deflink.vht_cap;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct cfg80211_chan_def def;
+ enum nl80211_band band;
+ u16 *vht_mcs_mask;
+ u8 ampdu_factor;
+ u8 max_nss, vht_mcs;
+ int i, vht_nss, nss_idx;
+ bool user_rate_valid = true;
+ u32 rx_nss, tx_nss, nss_160;
+
+ if (WARN_ON(ath11k_mac_vif_chan(vif, &def)))
+ return;
+
+ if (!vht_cap->vht_supported)
+ return;
+
+ band = def.chan->band;
+ vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
+
+ if (ath11k_peer_assoc_h_vht_masked(vht_mcs_mask))
+ return;
+
+ arg->vht_flag = true;
+
+ /* TODO: similar flags required? */
+ arg->vht_capable = true;
+
+ if (def.chan->band == NL80211_BAND_2GHZ)
+ arg->vht_ng_flag = true;
+
+ arg->peer_vht_caps = vht_cap->cap;
+
+ ampdu_factor = (vht_cap->cap &
+ IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK) >>
+ IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
+
+ /* Workaround: Some Netgear/Linksys 11ac APs set Rx A-MPDU factor to
+ * zero in VHT IE. Using it would result in degraded throughput.
+ * arg->peer_max_mpdu at this point contains HT max_mpdu so keep
+ * it if VHT max_mpdu is smaller.
+ */
+ arg->peer_max_mpdu = max(arg->peer_max_mpdu,
+ (1U << (IEEE80211_HT_MAX_AMPDU_FACTOR +
+ ampdu_factor)) - 1);
+
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80)
+ arg->bw_80 = true;
+
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160)
+ arg->bw_160 = true;
+
+ vht_nss = ath11k_mac_max_vht_nss(vht_mcs_mask);
+
+ if (vht_nss > sta->deflink.rx_nss) {
+ user_rate_valid = false;
+ for (nss_idx = sta->deflink.rx_nss - 1; nss_idx >= 0; nss_idx--) {
+ if (vht_mcs_mask[nss_idx]) {
+ user_rate_valid = true;
+ break;
+ }
+ }
+ }
+
+ if (!user_rate_valid) {
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "setting vht range mcs value to peer supported nss %d for peer %pM\n",
+ sta->deflink.rx_nss, sta->addr);
+ vht_mcs_mask[sta->deflink.rx_nss - 1] = vht_mcs_mask[vht_nss - 1];
+ }
+
+ /* Calculate peer NSS capability from VHT capabilities if STA
+ * supports VHT.
+ */
+ for (i = 0, max_nss = 0; i < NL80211_VHT_NSS_MAX; i++) {
+ vht_mcs = __le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map) >>
+ (2 * i) & 3;
+
+ if (vht_mcs != IEEE80211_VHT_MCS_NOT_SUPPORTED &&
+ vht_mcs_mask[i])
+ max_nss = i + 1;
+ }
+ arg->peer_nss = min(sta->deflink.rx_nss, max_nss);
+ arg->rx_max_rate = __le16_to_cpu(vht_cap->vht_mcs.rx_highest);
+ arg->rx_mcs_set = __le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map);
+ arg->rx_mcs_set = ath11k_peer_assoc_h_vht_limit(arg->rx_mcs_set, vht_mcs_mask);
+ arg->tx_max_rate = __le16_to_cpu(vht_cap->vht_mcs.tx_highest);
+ arg->tx_mcs_set = __le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map);
+
+ /* On the IPQ8074 platform, VHT MCS rates 10 and 11 are enabled by
+ * default, but they are not part of the 11ac standard, so explicitly
+ * disable them in 11ac mode.
+ */
+ arg->tx_mcs_set &= ~IEEE80211_VHT_MCS_SUPPORT_0_11_MASK;
+ arg->tx_mcs_set |= IEEE80211_DISABLE_VHT_MCS_SUPPORT_0_11;
+
+ if ((arg->tx_mcs_set & IEEE80211_VHT_MCS_NOT_SUPPORTED) ==
+ IEEE80211_VHT_MCS_NOT_SUPPORTED)
+ arg->peer_vht_caps &= ~IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE;
+
+ /* TODO: Check */
+ arg->tx_max_mcs_nss = 0xFF;
+
+ if (arg->peer_phymode == MODE_11AC_VHT160 ||
+ arg->peer_phymode == MODE_11AC_VHT80_80) {
+ tx_nss = ath11k_get_nss_160mhz(ar, max_nss);
+ rx_nss = min(arg->peer_nss, tx_nss);
+ arg->peer_bw_rxnss_override = ATH11K_BW_NSS_MAP_ENABLE;
+
+ if (!rx_nss) {
+ ath11k_warn(ar->ab, "invalid max_nss\n");
+ return;
+ }
+
+ if (arg->peer_phymode == MODE_11AC_VHT160)
+ nss_160 = FIELD_PREP(ATH11K_PEER_RX_NSS_160MHZ, rx_nss - 1);
+ else
+ nss_160 = FIELD_PREP(ATH11K_PEER_RX_NSS_80_80MHZ, rx_nss - 1);
+
+ arg->peer_bw_rxnss_override |= nss_160;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "vht peer %pM max_mpdu %d flags 0x%x nss_override 0x%x\n",
+ sta->addr, arg->peer_max_mpdu, arg->peer_flags,
+ arg->peer_bw_rxnss_override);
+}
+
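+/* Expand the 2-bit per-NSS entry of an HE MCS map into a bitmask of the
+ * supported MCS indexes (0-7, 0-9 or 0-11).
+ */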
+static int ath11k_mac_get_max_he_mcs_map(u16 mcs_map, int nss)
+{
+ switch ((mcs_map >> (2 * nss)) & 0x3) {
+ case IEEE80211_HE_MCS_SUPPORT_0_7: return BIT(8) - 1;
+ case IEEE80211_HE_MCS_SUPPORT_0_9: return BIT(10) - 1;
+ case IEEE80211_HE_MCS_SUPPORT_0_11: return BIT(12) - 1;
+ }
+ return 0;
+}
+
+static u16 ath11k_peer_assoc_h_he_limit(u16 tx_mcs_set,
+ const u16 he_mcs_limit[NL80211_HE_NSS_MAX])
+{
+ int idx_limit;
+ int nss;
+ u16 mcs_map;
+ u16 mcs;
+
+ for (nss = 0; nss < NL80211_HE_NSS_MAX; nss++) {
+ mcs_map = ath11k_mac_get_max_he_mcs_map(tx_mcs_set, nss) &
+ he_mcs_limit[nss];
+
+ if (mcs_map)
+ idx_limit = fls(mcs_map) - 1;
+ else
+ idx_limit = -1;
+
+ switch (idx_limit) {
+ case 0 ... 7:
+ mcs = IEEE80211_HE_MCS_SUPPORT_0_7;
+ break;
+ case 8:
+ case 9:
+ mcs = IEEE80211_HE_MCS_SUPPORT_0_9;
+ break;
+ case 10:
+ case 11:
+ mcs = IEEE80211_HE_MCS_SUPPORT_0_11;
+ break;
+ default:
+ WARN_ON(1);
+ fallthrough;
+ case -1:
+ mcs = IEEE80211_HE_MCS_NOT_SUPPORTED;
+ break;
+ }
+
+ tx_mcs_set &= ~(0x3 << (nss * 2));
+ tx_mcs_set |= mcs << (nss * 2);
+ }
+
+ return tx_mcs_set;
+}
+
+static bool
+ath11k_peer_assoc_h_he_masked(const u16 *he_mcs_mask)
+{
+ int nss;
+
+ for (nss = 0; nss < NL80211_HE_NSS_MAX; nss++)
+ if (he_mcs_mask[nss])
+ return false;
+
+ return true;
+}
+
+static void ath11k_peer_assoc_h_he(struct ath11k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct peer_assoc_params *arg)
+{
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct cfg80211_chan_def def;
+ const struct ieee80211_sta_he_cap *he_cap = &sta->deflink.he_cap;
+ enum nl80211_band band;
+ u16 he_mcs_mask[NL80211_HE_NSS_MAX];
+ u8 max_nss, he_mcs;
+ u16 he_tx_mcs = 0, v = 0;
+ int i, he_nss, nss_idx;
+ bool user_rate_valid = true;
+ u32 rx_nss, tx_nss, nss_160;
+ u8 ampdu_factor, rx_mcs_80, rx_mcs_160;
+ u16 mcs_160_map, mcs_80_map;
+ bool support_160;
+
+ if (WARN_ON(ath11k_mac_vif_chan(vif, &def)))
+ return;
+
+ if (!he_cap->has_he)
+ return;
+
+ band = def.chan->band;
+ memcpy(he_mcs_mask, arvif->bitrate_mask.control[band].he_mcs,
+ sizeof(he_mcs_mask));
+
+ if (ath11k_peer_assoc_h_he_masked(he_mcs_mask))
+ return;
+
+ arg->he_flag = true;
+ support_160 = !!(he_cap->he_cap_elem.phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G);
+
+ /* The supported HE-MCS and NSS set of the peer he_cap is the
+ * intersection with our own he_cap
+ */
+ mcs_160_map = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_160);
+ mcs_80_map = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_80);
+
+ /* Initialize rx_mcs_160 to 9, an invalid value (valid NSS counts are 1-8) */
+ rx_mcs_160 = 9;
+ if (support_160) {
+ for (i = 7; i >= 0; i--) {
+ u8 mcs_160 = (mcs_160_map >> (2 * i)) & 3;
+
+ if (mcs_160 != IEEE80211_VHT_MCS_NOT_SUPPORTED) {
+ rx_mcs_160 = i + 1;
+ break;
+ }
+ }
+ }
+
+ /* Initialize rx_mcs_80 to 9, an invalid value (valid NSS counts are 1-8) */
+ rx_mcs_80 = 9;
+ for (i = 7; i >= 0; i--) {
+ u8 mcs_80 = (mcs_80_map >> (2 * i)) & 3;
+
+ if (mcs_80 != IEEE80211_VHT_MCS_NOT_SUPPORTED) {
+ rx_mcs_80 = i + 1;
+ break;
+ }
+ }
+
+ if (support_160)
+ max_nss = min(rx_mcs_80, rx_mcs_160);
+ else
+ max_nss = rx_mcs_80;
+
+ arg->peer_nss = min(sta->deflink.rx_nss, max_nss);
+
+ memcpy_and_pad(&arg->peer_he_cap_macinfo,
+ sizeof(arg->peer_he_cap_macinfo),
+ he_cap->he_cap_elem.mac_cap_info,
+ sizeof(he_cap->he_cap_elem.mac_cap_info),
+ 0);
+ memcpy_and_pad(&arg->peer_he_cap_phyinfo,
+ sizeof(arg->peer_he_cap_phyinfo),
+ he_cap->he_cap_elem.phy_cap_info,
+ sizeof(he_cap->he_cap_elem.phy_cap_info),
+ 0);
+ arg->peer_he_ops = vif->bss_conf.he_oper.params;
+
+ /* the topmost byte carries the BSS color info, so mask it off here */
+ arg->peer_he_ops &= 0xffffff;
+
+ /* As per section 26.6.1 of 11ax Draft 5.0, if the Max AMPDU Exponent
+ * Extension in the HE caps is zero, use arg->peer_max_mpdu as calculated
+ * while parsing the VHT caps (if present) or the HT caps (if VHT caps
+ * are absent).
+ *
+ * For a non-zero Max AMPDU Exponent Extension in the HE MAC caps,
+ * if an HE STA sends both VHT and HE cap IEs in the assoc request, use
+ * MAX_AMPDU_LEN_FACTOR 20 to calculate the max_ampdu length.
+ * If an HE STA sends HT and HE cap IEs but no VHT cap IE in the assoc
+ * request, use MAX_AMPDU_LEN_FACTOR 16 instead.
+ */
+ ampdu_factor = u8_get_bits(he_cap->he_cap_elem.mac_cap_info[3],
+ IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK);
+
+ if (ampdu_factor) {
+ if (sta->deflink.vht_cap.vht_supported)
+ arg->peer_max_mpdu = (1 << (IEEE80211_HE_VHT_MAX_AMPDU_FACTOR +
+ ampdu_factor)) - 1;
+ else if (sta->deflink.ht_cap.ht_supported)
+ arg->peer_max_mpdu = (1 << (IEEE80211_HE_HT_MAX_AMPDU_FACTOR +
+ ampdu_factor)) - 1;
+ }
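+
+ /* Worked example (editorial): with ampdu_factor = 1 and VHT supported,
+ * peer_max_mpdu = (1 << (20 + 1)) - 1 = 2097151 bytes, assuming
+ * IEEE80211_HE_VHT_MAX_AMPDU_FACTOR is 20.
+ */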
+
+ if (he_cap->he_cap_elem.phy_cap_info[6] &
+ IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) {
+ int bit = 7;
+ int nss, ru;
+
+ arg->peer_ppet.numss_m1 = he_cap->ppe_thres[0] &
+ IEEE80211_PPE_THRES_NSS_MASK;
+ arg->peer_ppet.ru_bit_mask =
+ (he_cap->ppe_thres[0] &
+ IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK) >>
+ IEEE80211_PPE_THRES_RU_INDEX_BITMASK_POS;
+
+ for (nss = 0; nss <= arg->peer_ppet.numss_m1; nss++) {
+ for (ru = 0; ru < 4; ru++) {
+ u32 val = 0;
+ int i;
+
+ if ((arg->peer_ppet.ru_bit_mask & BIT(ru)) == 0)
+ continue;
+ for (i = 0; i < 6; i++) {
+ val >>= 1;
+ val |= ((he_cap->ppe_thres[bit / 8] >>
+ (bit % 8)) & 0x1) << 5;
+ bit++;
+ }
+ arg->peer_ppet.ppet16_ppet8_ru3_ru0[nss] |=
+ val << (ru * 6);
+ }
+ }
+ }
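+
+ /* Layout note (editorial): the PPE Thresholds field starts with a 3-bit
+ * NSS count and a 4-bit RU bitmask, followed by one 6-bit PPET16/PPET8
+ * pair per active RU per NSS; the loop above reads those 6-bit fields
+ * starting at bit 7 and packs each at (ru * 6) within
+ * ppet16_ppet8_ru3_ru0[nss].
+ */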
+
+ if (he_cap->he_cap_elem.mac_cap_info[0] & IEEE80211_HE_MAC_CAP0_TWT_RES)
+ arg->twt_responder = true;
+ if (he_cap->he_cap_elem.mac_cap_info[0] & IEEE80211_HE_MAC_CAP0_TWT_REQ)
+ arg->twt_requester = true;
+
+ he_nss = ath11k_mac_max_he_nss(he_mcs_mask);
+
+ if (he_nss > sta->deflink.rx_nss) {
+ user_rate_valid = false;
+ for (nss_idx = sta->deflink.rx_nss - 1; nss_idx >= 0; nss_idx--) {
+ if (he_mcs_mask[nss_idx]) {
+ user_rate_valid = true;
+ break;
+ }
+ }
+ }
+
+ if (!user_rate_valid) {
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "setting he range mcs value to peer supported nss %d for peer %pM\n",
+ sta->deflink.rx_nss, sta->addr);
+ he_mcs_mask[sta->deflink.rx_nss - 1] = he_mcs_mask[he_nss - 1];
+ }
+
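+ /* Note (editorial): the 160 MHz case below intentionally falls through
+ * to the default case so the 80 MHz MCS set is always programmed in
+ * addition to the 160 / 80+80 MHz sets.
+ */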
+ switch (sta->deflink.bandwidth) {
+ case IEEE80211_STA_RX_BW_160:
+ if (he_cap->he_cap_elem.phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G) {
+ v = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_80p80);
+ v = ath11k_peer_assoc_h_he_limit(v, he_mcs_mask);
+ arg->peer_he_rx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_80_80] = v;
+
+ v = le16_to_cpu(he_cap->he_mcs_nss_supp.tx_mcs_80p80);
+ arg->peer_he_tx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_80_80] = v;
+
+ arg->peer_he_mcs_count++;
+ he_tx_mcs = v;
+ }
+ v = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_160);
+ v = ath11k_peer_assoc_h_he_limit(v, he_mcs_mask);
+ arg->peer_he_rx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_160] = v;
+
+ v = le16_to_cpu(he_cap->he_mcs_nss_supp.tx_mcs_160);
+ arg->peer_he_tx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_160] = v;
+
+ arg->peer_he_mcs_count++;
+ if (!he_tx_mcs)
+ he_tx_mcs = v;
+ fallthrough;
+
+ default:
+ v = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_80);
+ v = ath11k_peer_assoc_h_he_limit(v, he_mcs_mask);
+ arg->peer_he_rx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_80] = v;
+
+ v = le16_to_cpu(he_cap->he_mcs_nss_supp.tx_mcs_80);
+ arg->peer_he_tx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_80] = v;
+
+ arg->peer_he_mcs_count++;
+ if (!he_tx_mcs)
+ he_tx_mcs = v;
+ break;
+ }
+
+ /* Calculate peer NSS capability from HE capabilities if STA
+ * supports HE.
+ */
+ for (i = 0, max_nss = 0; i < NL80211_HE_NSS_MAX; i++) {
+ he_mcs = (he_tx_mcs >> (2 * i)) & 3;
+
+ /* When fixed rates are configured, the MCS range in he_tx_mcs may be
+ * marked unsupported while he_mcs_mask is still set, so check either of
+ * them to find the NSS.
+ */
+ if (he_mcs != IEEE80211_HE_MCS_NOT_SUPPORTED ||
+ he_mcs_mask[i])
+ max_nss = i + 1;
+ }
+ arg->peer_nss = min(sta->deflink.rx_nss, max_nss);
+
+ if (arg->peer_phymode == MODE_11AX_HE160 ||
+ arg->peer_phymode == MODE_11AX_HE80_80) {
+ tx_nss = ath11k_get_nss_160mhz(ar, max_nss);
+ rx_nss = min(arg->peer_nss, tx_nss);
+ arg->peer_bw_rxnss_override = ATH11K_BW_NSS_MAP_ENABLE;
+
+ if (!rx_nss) {
+ ath11k_warn(ar->ab, "invalid max_nss\n");
+ return;
+ }
+
+ if (arg->peer_phymode == MODE_11AX_HE160)
+ nss_160 = FIELD_PREP(ATH11K_PEER_RX_NSS_160MHZ, rx_nss - 1);
+ else
+ nss_160 = FIELD_PREP(ATH11K_PEER_RX_NSS_80_80MHZ, rx_nss - 1);
+
+ arg->peer_bw_rxnss_override |= nss_160;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "he peer %pM nss %d mcs cnt %d nss_override 0x%x\n",
+ sta->addr, arg->peer_nss,
+ arg->peer_he_mcs_count,
+ arg->peer_bw_rxnss_override);
+}
+
+static void ath11k_peer_assoc_h_he_6ghz(struct ath11k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct peer_assoc_params *arg)
+{
+ const struct ieee80211_sta_he_cap *he_cap = &sta->deflink.he_cap;
+ struct cfg80211_chan_def def;
+ enum nl80211_band band;
+ u8 ampdu_factor;
+
+ if (WARN_ON(ath11k_mac_vif_chan(vif, &def)))
+ return;
+
+ band = def.chan->band;
+
+ if (!arg->he_flag || band != NL80211_BAND_6GHZ || !sta->deflink.he_6ghz_capa.capa)
+ return;
+
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
+ arg->bw_40 = true;
+
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80)
+ arg->bw_80 = true;
+
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160)
+ arg->bw_160 = true;
+
+ arg->peer_he_caps_6ghz = le16_to_cpu(sta->deflink.he_6ghz_capa.capa);
+ arg->peer_mpdu_density =
+ ath11k_parse_mpdudensity(FIELD_GET(IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START,
+ arg->peer_he_caps_6ghz));
+
+ /* From IEEE Std 802.11ax-2021 - Section 10.12.2: An HE STA shall be capable of
+ * receiving A-MPDU where the A-MPDU pre-EOF padding length is up to the value
+ * indicated by the Maximum A-MPDU Length Exponent Extension field in the HE
+ * Capabilities element and the Maximum A-MPDU Length Exponent field in HE 6 GHz
+ * Band Capabilities element in the 6 GHz band.
+ *
+ * Here we extract the Max A-MPDU Exponent Extension from the HE caps and
+ * add the Maximum A-MPDU Length Exponent from the HE 6 GHz Band
+ * Capabilities element.
+ */
+ ampdu_factor = FIELD_GET(IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK,
+ he_cap->he_cap_elem.mac_cap_info[3]) +
+ FIELD_GET(IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP,
+ arg->peer_he_caps_6ghz);
+
+ arg->peer_max_mpdu = (1u << (IEEE80211_HE_6GHZ_MAX_AMPDU_FACTOR +
+ ampdu_factor)) - 1;
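+
+ /* Worked example (editorial): an HE-caps extension of 1 plus a 6 GHz
+ * exponent of 2 gives ampdu_factor = 3, so peer_max_mpdu =
+ * (1 << (13 + 3)) - 1 = 65535 bytes, assuming
+ * IEEE80211_HE_6GHZ_MAX_AMPDU_FACTOR is 13.
+ */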
+}
+
+static void ath11k_peer_assoc_h_smps(struct ieee80211_sta *sta,
+ struct peer_assoc_params *arg)
+{
+ const struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap;
+ int smps;
+
+ if (!ht_cap->ht_supported && !sta->deflink.he_6ghz_capa.capa)
+ return;
+
+ if (ht_cap->ht_supported) {
+ smps = ht_cap->cap & IEEE80211_HT_CAP_SM_PS;
+ smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT;
+ } else {
+ smps = le16_get_bits(sta->deflink.he_6ghz_capa.capa,
+ IEEE80211_HE_6GHZ_CAP_SM_PS);
+ }
+
+ switch (smps) {
+ case WLAN_HT_CAP_SM_PS_STATIC:
+ arg->static_mimops_flag = true;
+ break;
+ case WLAN_HT_CAP_SM_PS_DYNAMIC:
+ arg->dynamic_mimops_flag = true;
+ break;
+ case WLAN_HT_CAP_SM_PS_DISABLED:
+ arg->spatial_mux_flag = true;
+ break;
+ default:
+ break;
+ }
+}
+
+static void ath11k_peer_assoc_h_qos(struct ath11k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct peer_assoc_params *arg)
+{
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+
+ switch (arvif->vdev_type) {
+ case WMI_VDEV_TYPE_AP:
+ if (sta->wme) {
+ /* TODO: Check WME vs QoS */
+ arg->is_wme_set = true;
+ arg->qos_flag = true;
+ }
+
+ if (sta->wme && sta->uapsd_queues) {
+ /* TODO: Check WME vs QoS */
+ arg->is_wme_set = true;
+ arg->apsd_flag = true;
+ arg->peer_rate_caps |= WMI_HOST_RC_UAPSD_FLAG;
+ }
+ break;
+ case WMI_VDEV_TYPE_STA:
+ if (sta->wme) {
+ arg->is_wme_set = true;
+ arg->qos_flag = true;
+ }
+ break;
+ default:
+ break;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "peer %pM qos %d\n",
+ sta->addr, arg->qos_flag);
+}
+
+static int ath11k_peer_assoc_qos_ap(struct ath11k *ar,
+ struct ath11k_vif *arvif,
+ struct ieee80211_sta *sta)
+{
+ struct ap_ps_params params;
+ u32 max_sp;
+ u32 uapsd;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ params.vdev_id = arvif->vdev_id;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "uapsd_queues 0x%x max_sp %d\n",
+ sta->uapsd_queues, sta->max_sp);
+
+ uapsd = 0;
+ if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
+ uapsd |= WMI_AP_PS_UAPSD_AC3_DELIVERY_EN |
+ WMI_AP_PS_UAPSD_AC3_TRIGGER_EN;
+ if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
+ uapsd |= WMI_AP_PS_UAPSD_AC2_DELIVERY_EN |
+ WMI_AP_PS_UAPSD_AC2_TRIGGER_EN;
+ if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
+ uapsd |= WMI_AP_PS_UAPSD_AC1_DELIVERY_EN |
+ WMI_AP_PS_UAPSD_AC1_TRIGGER_EN;
+ if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
+ uapsd |= WMI_AP_PS_UAPSD_AC0_DELIVERY_EN |
+ WMI_AP_PS_UAPSD_AC0_TRIGGER_EN;
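+
+ /* Example (editorial): a peer with U-APSD enabled for VO and BE only
+ * sets the AC3 and AC0 delivery/trigger bits above and leaves the
+ * AC1/AC2 bits cleared.
+ */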
+
+ max_sp = 0;
+ if (sta->max_sp < MAX_WMI_AP_PS_PEER_PARAM_MAX_SP)
+ max_sp = sta->max_sp;
+
+ params.param = WMI_AP_PS_PEER_PARAM_UAPSD;
+ params.value = uapsd;
+ ret = ath11k_wmi_send_set_ap_ps_param_cmd(ar, sta->addr, &params);
+ if (ret)
+ goto err;
+
+ params.param = WMI_AP_PS_PEER_PARAM_MAX_SP;
+ params.value = max_sp;
+ ret = ath11k_wmi_send_set_ap_ps_param_cmd(ar, sta->addr, &params);
+ if (ret)
+ goto err;
+
+ /* TODO revisit during testing */
+ params.param = WMI_AP_PS_PEER_PARAM_SIFS_RESP_FRMTYPE;
+ params.value = DISABLE_SIFS_RESPONSE_TRIGGER;
+ ret = ath11k_wmi_send_set_ap_ps_param_cmd(ar, sta->addr, &params);
+ if (ret)
+ goto err;
+
+ params.param = WMI_AP_PS_PEER_PARAM_SIFS_RESP_UAPSD;
+ params.value = DISABLE_SIFS_RESPONSE_TRIGGER;
+ ret = ath11k_wmi_send_set_ap_ps_param_cmd(ar, sta->addr, &params);
+ if (ret)
+ goto err;
+
+ return 0;
+
+err:
+ ath11k_warn(ar->ab, "failed to set ap ps peer param %d for vdev %i: %d\n",
+ params.param, arvif->vdev_id, ret);
+ return ret;
+}
+
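+/* Editorial note: ath11k_legacy_rates lists the 2.4 GHz CCK rates first, so
+ * shifting supp_rates right by ATH11K_MAC_FIRST_OFDM_RATE_IDX (assumed to
+ * be 4 here) leaves a non-zero value exactly when the peer advertises at
+ * least one OFDM rate.
+ */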
+static bool ath11k_mac_sta_has_ofdm_only(struct ieee80211_sta *sta)
+{
+ return sta->deflink.supp_rates[NL80211_BAND_2GHZ] >>
+ ATH11K_MAC_FIRST_OFDM_RATE_IDX;
+}
+
+static enum wmi_phy_mode ath11k_mac_get_phymode_vht(struct ath11k *ar,
+ struct ieee80211_sta *sta)
+{
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160) {
+ switch (sta->deflink.vht_cap.cap &
+ IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) {
+ case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ:
+ return MODE_11AC_VHT160;
+ case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ:
+ return MODE_11AC_VHT80_80;
+ default:
+ /* unclear whether this is a valid case; default to 160 MHz */
+ return MODE_11AC_VHT160;
+ }
+ }
+
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80)
+ return MODE_11AC_VHT80;
+
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
+ return MODE_11AC_VHT40;
+
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_20)
+ return MODE_11AC_VHT20;
+
+ return MODE_UNKNOWN;
+}
+
+static enum wmi_phy_mode ath11k_mac_get_phymode_he(struct ath11k *ar,
+ struct ieee80211_sta *sta)
+{
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160) {
+ if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G)
+ return MODE_11AX_HE160;
+ else if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)
+ return MODE_11AX_HE80_80;
+ /* unclear whether this is a valid case; default to 160 MHz */
+ return MODE_11AX_HE160;
+ }
+
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80)
+ return MODE_11AX_HE80;
+
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
+ return MODE_11AX_HE40;
+
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_20)
+ return MODE_11AX_HE20;
+
+ return MODE_UNKNOWN;
+}
+
+static void ath11k_peer_assoc_h_phymode(struct ath11k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct peer_assoc_params *arg)
+{
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct cfg80211_chan_def def;
+ enum nl80211_band band;
+ const u8 *ht_mcs_mask;
+ const u16 *vht_mcs_mask;
+ const u16 *he_mcs_mask;
+ enum wmi_phy_mode phymode = MODE_UNKNOWN;
+
+ if (WARN_ON(ath11k_mac_vif_chan(vif, &def)))
+ return;
+
+ band = def.chan->band;
+ ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
+ vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
+ he_mcs_mask = arvif->bitrate_mask.control[band].he_mcs;
+
+ switch (band) {
+ case NL80211_BAND_2GHZ:
+ if (sta->deflink.he_cap.has_he &&
+ !ath11k_peer_assoc_h_he_masked(he_mcs_mask)) {
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80)
+ phymode = MODE_11AX_HE80_2G;
+ else if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
+ phymode = MODE_11AX_HE40_2G;
+ else
+ phymode = MODE_11AX_HE20_2G;
+ } else if (sta->deflink.vht_cap.vht_supported &&
+ !ath11k_peer_assoc_h_vht_masked(vht_mcs_mask)) {
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
+ phymode = MODE_11AC_VHT40;
+ else
+ phymode = MODE_11AC_VHT20;
+ } else if (sta->deflink.ht_cap.ht_supported &&
+ !ath11k_peer_assoc_h_ht_masked(ht_mcs_mask)) {
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
+ phymode = MODE_11NG_HT40;
+ else
+ phymode = MODE_11NG_HT20;
+ } else if (ath11k_mac_sta_has_ofdm_only(sta)) {
+ phymode = MODE_11G;
+ } else {
+ phymode = MODE_11B;
+ }
+ break;
+ case NL80211_BAND_5GHZ:
+ case NL80211_BAND_6GHZ:
+ /* Check HE first */
+ if (sta->deflink.he_cap.has_he &&
+ !ath11k_peer_assoc_h_he_masked(he_mcs_mask)) {
+ phymode = ath11k_mac_get_phymode_he(ar, sta);
+ } else if (sta->deflink.vht_cap.vht_supported &&
+ !ath11k_peer_assoc_h_vht_masked(vht_mcs_mask)) {
+ phymode = ath11k_mac_get_phymode_vht(ar, sta);
+ } else if (sta->deflink.ht_cap.ht_supported &&
+ !ath11k_peer_assoc_h_ht_masked(ht_mcs_mask)) {
+ if (sta->deflink.bandwidth >= IEEE80211_STA_RX_BW_40)
+ phymode = MODE_11NA_HT40;
+ else
+ phymode = MODE_11NA_HT20;
+ } else {
+ phymode = MODE_11A;
+ }
+ break;
+ default:
+ break;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "peer %pM phymode %s\n",
+ sta->addr, ath11k_wmi_phymode_str(phymode));
+
+ arg->peer_phymode = phymode;
+ WARN_ON(phymode == MODE_UNKNOWN);
+}
+
+static void ath11k_peer_assoc_prepare(struct ath11k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct peer_assoc_params *arg,
+ bool reassoc)
+{
+ struct ath11k_sta *arsta;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ arsta = ath11k_sta_to_arsta(sta);
+
+ memset(arg, 0, sizeof(*arg));
+
+ reinit_completion(&ar->peer_assoc_done);
+
+ arg->peer_new_assoc = !reassoc;
+ ath11k_peer_assoc_h_basic(ar, vif, sta, arg);
+ ath11k_peer_assoc_h_crypto(ar, vif, sta, arg);
+ ath11k_peer_assoc_h_rates(ar, vif, sta, arg);
+ ath11k_peer_assoc_h_phymode(ar, vif, sta, arg);
+ ath11k_peer_assoc_h_ht(ar, vif, sta, arg);
+ ath11k_peer_assoc_h_vht(ar, vif, sta, arg);
+ ath11k_peer_assoc_h_he(ar, vif, sta, arg);
+ ath11k_peer_assoc_h_he_6ghz(ar, vif, sta, arg);
+ ath11k_peer_assoc_h_qos(ar, vif, sta, arg);
+ ath11k_peer_assoc_h_smps(sta, arg);
+
+ arsta->peer_nss = arg->peer_nss;
+
+ /* TODO: amsdu_disable req? */
+}
+
+static int ath11k_setup_peer_smps(struct ath11k *ar, struct ath11k_vif *arvif,
+ const u8 *addr,
+ const struct ieee80211_sta_ht_cap *ht_cap,
+ u16 he_6ghz_capa)
+{
+ int smps;
+
+ if (!ht_cap->ht_supported && !he_6ghz_capa)
+ return 0;
+
+ if (ht_cap->ht_supported) {
+ smps = ht_cap->cap & IEEE80211_HT_CAP_SM_PS;
+ smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT;
+ } else {
+ smps = FIELD_GET(IEEE80211_HE_6GHZ_CAP_SM_PS, he_6ghz_capa);
+ }
+
+ if (smps >= ARRAY_SIZE(ath11k_smps_map))
+ return -EINVAL;
+
+ return ath11k_wmi_set_peer_param(ar, addr, arvif->vdev_id,
+ WMI_PEER_MIMO_PS_STATE,
+ ath11k_smps_map[smps]);
+}
+
+static bool ath11k_mac_set_he_txbf_conf(struct ath11k_vif *arvif)
+{
+ struct ath11k *ar = arvif->ar;
+ u32 param, value;
+ int ret;
+
+ if (!arvif->vif->bss_conf.he_support)
+ return true;
+
+ param = WMI_VDEV_PARAM_SET_HEMU_MODE;
+ value = 0;
+ if (arvif->vif->bss_conf.he_su_beamformer) {
+ value |= FIELD_PREP(HE_MODE_SU_TX_BFER, HE_SU_BFER_ENABLE);
+ if (arvif->vif->bss_conf.he_mu_beamformer &&
+ arvif->vdev_type == WMI_VDEV_TYPE_AP)
+ value |= FIELD_PREP(HE_MODE_MU_TX_BFER, HE_MU_BFER_ENABLE);
+ }
+
+ if (arvif->vif->type != NL80211_IFTYPE_MESH_POINT) {
+ value |= FIELD_PREP(HE_MODE_DL_OFDMA, HE_DL_MUOFDMA_ENABLE) |
+ FIELD_PREP(HE_MODE_UL_OFDMA, HE_UL_MUOFDMA_ENABLE);
+
+ if (arvif->vif->bss_conf.he_full_ul_mumimo)
+ value |= FIELD_PREP(HE_MODE_UL_MUMIMO, HE_UL_MUMIMO_ENABLE);
+
+ if (arvif->vif->bss_conf.he_su_beamformee)
+ value |= FIELD_PREP(HE_MODE_SU_TX_BFEE, HE_SU_BFEE_ENABLE);
+ }
+
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, param, value);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set vdev %d HE MU mode: %d\n",
+ arvif->vdev_id, ret);
+ return false;
+ }
+
+ param = WMI_VDEV_PARAM_SET_HE_SOUNDING_MODE;
+ value = FIELD_PREP(HE_VHT_SOUNDING_MODE, HE_VHT_SOUNDING_MODE_ENABLE) |
+ FIELD_PREP(HE_TRIG_NONTRIG_SOUNDING_MODE,
+ HE_TRIG_NONTRIG_SOUNDING_MODE_ENABLE);
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ param, value);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set vdev %d sounding mode: %d\n",
+ arvif->vdev_id, ret);
+ return false;
+ }
+ return true;
+}
+
+static bool ath11k_mac_vif_recalc_sta_he_txbf(struct ath11k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta_he_cap *he_cap)
+{
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct ieee80211_he_cap_elem he_cap_elem = {};
+ struct ieee80211_sta_he_cap *cap_band = NULL;
+ struct cfg80211_chan_def def;
+ u32 param = WMI_VDEV_PARAM_SET_HEMU_MODE;
+ u32 hemode = 0;
+ int ret;
+
+ if (!vif->bss_conf.he_support)
+ return true;
+
+ if (vif->type != NL80211_IFTYPE_STATION)
+ return false;
+
+ if (WARN_ON(ath11k_mac_vif_chan(vif, &def)))
+ return false;
+
+ if (def.chan->band == NL80211_BAND_2GHZ)
+ cap_band = &ar->mac.iftype[NL80211_BAND_2GHZ][vif->type].he_cap;
+ else
+ cap_band = &ar->mac.iftype[NL80211_BAND_5GHZ][vif->type].he_cap;
+
+ memcpy(&he_cap_elem, &cap_band->he_cap_elem, sizeof(he_cap_elem));
+
+ if (HECAP_PHY_SUBFME_GET(he_cap_elem.phy_cap_info)) {
+ if (HECAP_PHY_SUBFMR_GET(he_cap->he_cap_elem.phy_cap_info))
+ hemode |= FIELD_PREP(HE_MODE_SU_TX_BFEE, HE_SU_BFEE_ENABLE);
+ if (HECAP_PHY_MUBFMR_GET(he_cap->he_cap_elem.phy_cap_info))
+ hemode |= FIELD_PREP(HE_MODE_MU_TX_BFEE, HE_MU_BFEE_ENABLE);
+ }
+
+ if (vif->type != NL80211_IFTYPE_MESH_POINT) {
+ hemode |= FIELD_PREP(HE_MODE_DL_OFDMA, HE_DL_MUOFDMA_ENABLE) |
+ FIELD_PREP(HE_MODE_UL_OFDMA, HE_UL_MUOFDMA_ENABLE);
+
+ if (HECAP_PHY_ULMUMIMO_GET(he_cap_elem.phy_cap_info))
+ if (HECAP_PHY_ULMUMIMO_GET(he_cap->he_cap_elem.phy_cap_info))
+ hemode |= FIELD_PREP(HE_MODE_UL_MUMIMO,
+ HE_UL_MUMIMO_ENABLE);
+
+ if (FIELD_GET(HE_MODE_MU_TX_BFEE, hemode))
+ hemode |= FIELD_PREP(HE_MODE_SU_TX_BFEE, HE_SU_BFEE_ENABLE);
+
+ if (FIELD_GET(HE_MODE_MU_TX_BFER, hemode))
+ hemode |= FIELD_PREP(HE_MODE_SU_TX_BFER, HE_SU_BFER_ENABLE);
+ }
+
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, param, hemode);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to submit vdev param txbf 0x%x: %d\n",
+ hemode, ret);
+ return false;
+ }
+
+ return true;
+}
+
+static void ath11k_bss_assoc(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct peer_assoc_params peer_arg;
+ struct ieee80211_sta *ap_sta;
+ struct ath11k_peer *peer;
+ bool is_auth = false;
+ struct ieee80211_sta_he_cap he_cap;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "vdev %i assoc bssid %pM aid %d\n",
+ arvif->vdev_id, arvif->bssid, arvif->aid);
+
+ rcu_read_lock();
+
+ ap_sta = ieee80211_find_sta(vif, bss_conf->bssid);
+ if (!ap_sta) {
+ ath11k_warn(ar->ab, "failed to find station entry for bss %pM vdev %i\n",
+ bss_conf->bssid, arvif->vdev_id);
+ rcu_read_unlock();
+ return;
+ }
+
+ /* he_cap here is updated at assoc success for sta mode only */
+ he_cap = ap_sta->deflink.he_cap;
+
+ ath11k_peer_assoc_prepare(ar, vif, ap_sta, &peer_arg, false);
+
+ rcu_read_unlock();
+
+ if (!ath11k_mac_vif_recalc_sta_he_txbf(ar, vif, &he_cap)) {
+ ath11k_warn(ar->ab, "failed to recalc he txbf for vdev %i on bss %pM\n",
+ arvif->vdev_id, bss_conf->bssid);
+ return;
+ }
+
+ peer_arg.is_assoc = true;
+
+ ret = ath11k_wmi_send_peer_assoc_cmd(ar, &peer_arg);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to run peer assoc for %pM vdev %i: %d\n",
+ bss_conf->bssid, arvif->vdev_id, ret);
+ return;
+ }
+
+ if (!wait_for_completion_timeout(&ar->peer_assoc_done, 1 * HZ)) {
+ ath11k_warn(ar->ab, "failed to get peer assoc conf event for %pM vdev %i\n",
+ bss_conf->bssid, arvif->vdev_id);
+ return;
+ }
+
+ ret = ath11k_setup_peer_smps(ar, arvif, bss_conf->bssid,
+ &ap_sta->deflink.ht_cap,
+ le16_to_cpu(ap_sta->deflink.he_6ghz_capa.capa));
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to setup peer SMPS for vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ return;
+ }
+
+ WARN_ON(arvif->is_up);
+
+ arvif->aid = vif->cfg.aid;
+ ether_addr_copy(arvif->bssid, bss_conf->bssid);
+
+ ret = ath11k_wmi_vdev_up(ar, arvif->vdev_id, arvif->aid, arvif->bssid,
+ NULL, 0, 0);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set vdev %d up: %d\n",
+ arvif->vdev_id, ret);
+ return;
+ }
+
+ arvif->is_up = true;
+ arvif->rekey_data.enable_offload = false;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "vdev %d up (associated) bssid %pM aid %d\n",
+ arvif->vdev_id, bss_conf->bssid, vif->cfg.aid);
+
+ spin_lock_bh(&ar->ab->base_lock);
+
+ peer = ath11k_peer_find(ar->ab, arvif->vdev_id, arvif->bssid);
+ if (peer && peer->is_authorized)
+ is_auth = true;
+
+ spin_unlock_bh(&ar->ab->base_lock);
+
+ if (is_auth) {
+ ret = ath11k_wmi_set_peer_param(ar, arvif->bssid,
+ arvif->vdev_id,
+ WMI_PEER_AUTHORIZE,
+ 1);
+ if (ret)
+ ath11k_warn(ar->ab, "Unable to authorize BSS peer: %d\n", ret);
+ }
+
+ ret = ath11k_wmi_send_obss_spr_cmd(ar, arvif->vdev_id,
+ &bss_conf->he_obss_pd);
+ if (ret)
+ ath11k_warn(ar->ab, "failed to set vdev %i OBSS PD parameters: %d\n",
+ arvif->vdev_id, ret);
+
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ WMI_VDEV_PARAM_DTIM_POLICY,
+ WMI_DTIM_POLICY_STICK);
+ if (ret)
+ ath11k_warn(ar->ab, "failed to set vdev %d dtim policy: %d\n",
+ arvif->vdev_id, ret);
+
+ ath11k_mac_11d_scan_stop_all(ar->ab);
+}
+
+static void ath11k_bss_disassoc(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "vdev %i disassoc bssid %pM\n",
+ arvif->vdev_id, arvif->bssid);
+
+ ret = ath11k_wmi_vdev_down(ar, arvif->vdev_id);
+ if (ret)
+ ath11k_warn(ar->ab, "failed to down vdev %i: %d\n",
+ arvif->vdev_id, ret);
+
+ arvif->is_up = false;
+
+ memset(&arvif->rekey_data, 0, sizeof(arvif->rekey_data));
+
+ cancel_delayed_work_sync(&arvif->connection_loss_work);
+}
+
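+/* Worked example (editorial): bitrate 60 (6 Mbps in 100 kbps units) is an
+ * OFDM rate; the matching ath11k_legacy_rates entry supplies hw_value, and
+ * ATH11K_HW_RATE_CODE() packs (hw_value, nss = 0, preamble) into a single
+ * rate code.
+ */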
+static int ath11k_mac_get_rate_hw_value(int bitrate)
+{
+ u32 preamble;
+ u16 hw_value;
+ int rate;
+ size_t i;
+
+ if (ath11k_mac_bitrate_is_cck(bitrate))
+ preamble = WMI_RATE_PREAMBLE_CCK;
+ else
+ preamble = WMI_RATE_PREAMBLE_OFDM;
+
+ for (i = 0; i < ARRAY_SIZE(ath11k_legacy_rates); i++) {
+ if (ath11k_legacy_rates[i].bitrate != bitrate)
+ continue;
+
+ hw_value = ath11k_legacy_rates[i].hw_value;
+ rate = ATH11K_HW_RATE_CODE(hw_value, 0, preamble);
+
+ return rate;
+ }
+
+ return -EINVAL;
+}
+
+static void ath11k_recalculate_mgmt_rate(struct ath11k *ar,
+ struct ieee80211_vif *vif,
+ struct cfg80211_chan_def *def)
+{
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ const struct ieee80211_supported_band *sband;
+ u8 basic_rate_idx;
+ int hw_rate_code;
+ u32 vdev_param;
+ u16 bitrate;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ sband = ar->hw->wiphy->bands[def->chan->band];
+ basic_rate_idx = ffs(vif->bss_conf.basic_rates) - 1;
+ bitrate = sband->bitrates[basic_rate_idx].bitrate;
+
+ hw_rate_code = ath11k_mac_get_rate_hw_value(bitrate);
+ if (hw_rate_code < 0) {
+ ath11k_warn(ar->ab, "bitrate not supported %d\n", bitrate);
+ return;
+ }
+
+ vdev_param = WMI_VDEV_PARAM_MGMT_RATE;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, vdev_param,
+ hw_rate_code);
+ if (ret)
+ ath11k_warn(ar->ab, "failed to set mgmt tx rate %d\n", ret);
+
+ /* For WCN6855, firmware will clear this param when vdev starts, hence
+ * cache it here so that it can be reconfigured once the vdev starts.
+ */
+ ar->hw_rate_code = hw_rate_code;
+
+ vdev_param = WMI_VDEV_PARAM_BEACON_RATE;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, vdev_param,
+ hw_rate_code);
+ if (ret)
+ ath11k_warn(ar->ab, "failed to set beacon tx rate %d\n", ret);
+}
+
+static int ath11k_mac_fils_discovery(struct ath11k_vif *arvif,
+ struct ieee80211_bss_conf *info)
+{
+ struct ath11k *ar = arvif->ar;
+ struct sk_buff *tmpl;
+ int ret;
+ u32 interval;
+ bool unsol_bcast_probe_resp_enabled = false;
+
+ if (info->fils_discovery.max_interval) {
+ interval = info->fils_discovery.max_interval;
+
+ tmpl = ieee80211_get_fils_discovery_tmpl(ar->hw, arvif->vif);
+ if (tmpl)
+ ret = ath11k_wmi_fils_discovery_tmpl(ar, arvif->vdev_id,
+ tmpl);
+ } else if (info->unsol_bcast_probe_resp_interval) {
+ unsol_bcast_probe_resp_enabled = true;
+ interval = info->unsol_bcast_probe_resp_interval;
+
+ tmpl = ieee80211_get_unsol_bcast_probe_resp_tmpl(ar->hw,
+ arvif->vif);
+ if (tmpl)
+ ret = ath11k_wmi_probe_resp_tmpl(ar, arvif->vdev_id,
+ tmpl);
+ } else { /* Disable */
+ return ath11k_wmi_fils_discovery(ar, arvif->vdev_id, 0, false);
+ }
+
+ if (!tmpl) {
+ ath11k_warn(ar->ab,
+ "mac vdev %i failed to retrieve %s template\n",
+ arvif->vdev_id, (unsol_bcast_probe_resp_enabled ?
+ "unsolicited broadcast probe response" :
+ "FILS discovery"));
+ return -EPERM;
+ }
+ kfree_skb(tmpl);
+
+ if (!ret)
+ ret = ath11k_wmi_fils_discovery(ar, arvif->vdev_id, interval,
+ unsol_bcast_probe_resp_enabled);
+
+ return ret;
+}
+
+static int ath11k_mac_config_obss_pd(struct ath11k *ar,
+ struct ieee80211_he_obss_pd *he_obss_pd)
+{
+ u32 bitmap[2], param_id, param_val, pdev_id;
+ int ret;
+ s8 non_srg_th = 0, srg_th = 0;
+
+ pdev_id = ar->pdev->pdev_id;
+
+ /* Set and enable SRG/non-SRG OBSS PD Threshold */
+ param_id = WMI_PDEV_PARAM_SET_CMD_OBSS_PD_THRESHOLD;
+ if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags)) {
+ ret = ath11k_wmi_pdev_set_param(ar, param_id, 0, pdev_id);
+ if (ret)
+ ath11k_warn(ar->ab,
+ "failed to set obss_pd_threshold for pdev: %u\n",
+ pdev_id);
+ return ret;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "obss pd sr_ctrl %x non_srg_thres %u srg_max %u\n",
+ he_obss_pd->sr_ctrl, he_obss_pd->non_srg_max_offset,
+ he_obss_pd->max_offset);
+
+ param_val = 0;
+
+ if (he_obss_pd->sr_ctrl &
+ IEEE80211_HE_SPR_NON_SRG_OBSS_PD_SR_DISALLOWED) {
+ non_srg_th = ATH11K_OBSS_PD_MAX_THRESHOLD;
+ } else {
+ if (he_obss_pd->sr_ctrl & IEEE80211_HE_SPR_NON_SRG_OFFSET_PRESENT)
+ non_srg_th = (ATH11K_OBSS_PD_MAX_THRESHOLD +
+ he_obss_pd->non_srg_max_offset);
+ else
+ non_srg_th = ATH11K_OBSS_PD_NON_SRG_MAX_THRESHOLD;
+
+ param_val |= ATH11K_OBSS_PD_NON_SRG_EN;
+ }
+
+ if (he_obss_pd->sr_ctrl & IEEE80211_HE_SPR_SRG_INFORMATION_PRESENT) {
+ srg_th = ATH11K_OBSS_PD_MAX_THRESHOLD + he_obss_pd->max_offset;
+ param_val |= ATH11K_OBSS_PD_SRG_EN;
+ }
+
+ if (test_bit(WMI_TLV_SERVICE_SRG_SRP_SPATIAL_REUSE_SUPPORT,
+ ar->ab->wmi_ab.svc_map)) {
+ param_val |= ATH11K_OBSS_PD_THRESHOLD_IN_DBM;
+ param_val |= FIELD_PREP(GENMASK(15, 8), srg_th);
+ } else {
+ non_srg_th -= ATH11K_DEFAULT_NOISE_FLOOR;
+ /* SRG not supported and threshold in dB */
+ param_val &= ~(ATH11K_OBSS_PD_SRG_EN |
+ ATH11K_OBSS_PD_THRESHOLD_IN_DBM);
+ }
+
+ param_val |= (non_srg_th & GENMASK(7, 0));
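+
+ /* Worked example (editorial, assuming ATH11K_OBSS_PD_MAX_THRESHOLD is
+ * -82 dBm and ATH11K_DEFAULT_NOISE_FLOOR is -95 dBm): a non-SRG offset
+ * of 20 gives non_srg_th = -62 dBm; without SRG/SRP service support the
+ * threshold is converted to dB above the noise floor: -62 - (-95) = 33,
+ * and only bits 7:0 of param_val carry it.
+ */
+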
+ ret = ath11k_wmi_pdev_set_param(ar, param_id, param_val, pdev_id);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to set obss_pd_threshold for pdev: %u\n",
+ pdev_id);
+ return ret;
+ }
+
+ /* Enable OBSS PD for all access categories */
+ param_id = WMI_PDEV_PARAM_SET_CMD_OBSS_PD_PER_AC;
+ param_val = 0xf;
+ ret = ath11k_wmi_pdev_set_param(ar, param_id, param_val, pdev_id);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to set obss_pd_per_ac for pdev: %u\n",
+ pdev_id);
+ return ret;
+ }
+
+ /* Set SR Prohibit */
+ param_id = WMI_PDEV_PARAM_ENABLE_SR_PROHIBIT;
+ param_val = !!(he_obss_pd->sr_ctrl &
+ IEEE80211_HE_SPR_HESIGA_SR_VAL15_ALLOWED);
+ ret = ath11k_wmi_pdev_set_param(ar, param_id, param_val, pdev_id);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set sr_prohibit for pdev: %u\n",
+ pdev_id);
+ return ret;
+ }
+
+ if (!test_bit(WMI_TLV_SERVICE_SRG_SRP_SPATIAL_REUSE_SUPPORT,
+ ar->ab->wmi_ab.svc_map))
+ return 0;
+
+ /* Set SRG BSS Color Bitmap */
+ memcpy(bitmap, he_obss_pd->bss_color_bitmap, sizeof(bitmap));
+ ret = ath11k_wmi_pdev_set_srg_bss_color_bitmap(ar, bitmap);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to set bss_color_bitmap for pdev: %u\n",
+ pdev_id);
+ return ret;
+ }
+
+ /* Set SRG Partial BSSID Bitmap */
+ memcpy(bitmap, he_obss_pd->partial_bssid_bitmap, sizeof(bitmap));
+ ret = ath11k_wmi_pdev_set_srg_patial_bssid_bitmap(ar, bitmap);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to set partial_bssid_bitmap for pdev: %u\n",
+ pdev_id);
+ return ret;
+ }
+
+ memset(bitmap, 0xff, sizeof(bitmap));
+
+ /* Enable all BSS Colors for SRG */
+ ret = ath11k_wmi_pdev_srg_obss_color_enable_bitmap(ar, bitmap);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to set srg_color_en_bitmap pdev: %u\n",
+ pdev_id);
+ return ret;
+ }
+
+ /* Enable all partial BSSID mask for SRG */
+ ret = ath11k_wmi_pdev_srg_obss_bssid_enable_bitmap(ar, bitmap);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to set srg_bssid_en_bitmap pdev: %u\n",
+ pdev_id);
+ return ret;
+ }
+
+ /* Enable all BSS Colors for non-SRG */
+ ret = ath11k_wmi_pdev_non_srg_obss_color_enable_bitmap(ar, bitmap);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to set non_srg_color_en_bitmap pdev: %u\n",
+ pdev_id);
+ return ret;
+ }
+
+ /* Enable all partial BSSID mask for non-SRG */
+ ret = ath11k_wmi_pdev_non_srg_obss_bssid_enable_bitmap(ar, bitmap);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to set non_srg_bssid_en_bitmap pdev: %u\n",
+ pdev_id);
+ return ret;
+ }
+
+ return 0;
+}
+
+static bool ath11k_mac_supports_station_tpc(struct ath11k *ar,
+ struct ath11k_vif *arvif,
+ const struct cfg80211_chan_def *chandef)
+{
+ return ath11k_wmi_supports_6ghz_cc_ext(ar) &&
+ test_bit(WMI_TLV_SERVICE_EXT_TPC_REG_SUPPORT, ar->ab->wmi_ab.svc_map) &&
+ arvif->vdev_type == WMI_VDEV_TYPE_STA &&
+ arvif->vdev_subtype == WMI_VDEV_SUBTYPE_NONE &&
+ chandef->chan &&
+ chandef->chan->band == NL80211_BAND_6GHZ;
+}
+
+static void ath11k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *info,
+ u64 changed)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct cfg80211_chan_def def;
+ u32 param_id, param_value;
+ enum nl80211_band band;
+ u32 vdev_param;
+ int mcast_rate;
+ u32 preamble;
+ u16 hw_value;
+ u16 bitrate;
+ int ret = 0;
+ u8 rateidx;
+ u32 rate, param;
+ u32 ipv4_cnt;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (changed & BSS_CHANGED_BEACON_INT) {
+ arvif->beacon_interval = info->beacon_int;
+
+ param_id = WMI_VDEV_PARAM_BEACON_INTERVAL;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ param_id,
+ arvif->beacon_interval);
+ if (ret)
+ ath11k_warn(ar->ab, "Failed to set beacon interval for VDEV: %d\n",
+ arvif->vdev_id);
+ else
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "Beacon interval: %d set for VDEV: %d\n",
+ arvif->beacon_interval, arvif->vdev_id);
+ }
+
+ if (changed & BSS_CHANGED_BEACON) {
+ param_id = WMI_PDEV_PARAM_BEACON_TX_MODE;
+ param_value = WMI_BEACON_STAGGERED_MODE;
+ ret = ath11k_wmi_pdev_set_param(ar, param_id,
+ param_value, ar->pdev->pdev_id);
+ if (ret)
+ ath11k_warn(ar->ab, "Failed to set beacon mode for VDEV: %d\n",
+ arvif->vdev_id);
+ else
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "Set staggered beacon mode for VDEV: %d\n",
+ arvif->vdev_id);
+
+ if (!arvif->do_not_send_tmpl || !arvif->bcca_zero_sent) {
+ ret = ath11k_mac_setup_bcn_tmpl(arvif);
+ if (ret)
+ ath11k_warn(ar->ab, "failed to update bcn template: %d\n",
+ ret);
+ }
+
+ if (arvif->bcca_zero_sent)
+ arvif->do_not_send_tmpl = true;
+ else
+ arvif->do_not_send_tmpl = false;
+
+ if (vif->bss_conf.he_support) {
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ WMI_VDEV_PARAM_BA_MODE,
+ WMI_BA_MODE_BUFFER_SIZE_256);
+ if (ret)
+ ath11k_warn(ar->ab,
+ "failed to set BA BUFFER SIZE 256 for vdev: %d\n",
+ arvif->vdev_id);
+ else
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "Set BA BUFFER SIZE 256 for VDEV: %d\n",
+ arvif->vdev_id);
+ }
+ }
+
+ if (changed & (BSS_CHANGED_BEACON_INFO | BSS_CHANGED_BEACON)) {
+ arvif->dtim_period = info->dtim_period;
+
+ param_id = WMI_VDEV_PARAM_DTIM_PERIOD;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ param_id,
+ arvif->dtim_period);
+
+ if (ret)
+ ath11k_warn(ar->ab, "Failed to set dtim period for VDEV %d: %i\n",
+ arvif->vdev_id, ret);
+ else
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "DTIM period: %d set for VDEV: %d\n",
+ arvif->dtim_period, arvif->vdev_id);
+ }
+
+ if (changed & BSS_CHANGED_SSID &&
+ vif->type == NL80211_IFTYPE_AP) {
+ arvif->u.ap.ssid_len = vif->cfg.ssid_len;
+ if (vif->cfg.ssid_len)
+ memcpy(arvif->u.ap.ssid, vif->cfg.ssid,
+ vif->cfg.ssid_len);
+ arvif->u.ap.hidden_ssid = info->hidden_ssid;
+ }
+
+ if (changed & BSS_CHANGED_BSSID && !is_zero_ether_addr(info->bssid))
+ ether_addr_copy(arvif->bssid, info->bssid);
+
+ if (changed & BSS_CHANGED_BEACON_ENABLED) {
+ if (info->enable_beacon)
+ ath11k_mac_set_he_txbf_conf(arvif);
+ ath11k_control_beaconing(arvif, info);
+
+ if (arvif->is_up && vif->bss_conf.he_support &&
+ vif->bss_conf.he_oper.params) {
+ param_id = WMI_VDEV_PARAM_HEOPS_0_31;
+ param_value = vif->bss_conf.he_oper.params;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ param_id, param_value);
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "he oper param: %x set for VDEV: %d\n",
+ param_value, arvif->vdev_id);
+
+ if (ret)
+ ath11k_warn(ar->ab, "Failed to set he oper params %x for VDEV %d: %i\n",
+ param_value, arvif->vdev_id, ret);
+ }
+ }
+
+ if (changed & BSS_CHANGED_ERP_CTS_PROT) {
+ u32 cts_prot;
+
+ cts_prot = !!(info->use_cts_prot);
+ param_id = WMI_VDEV_PARAM_PROTECTION_MODE;
+
+ if (arvif->is_started) {
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ param_id, cts_prot);
+ if (ret)
+ ath11k_warn(ar->ab, "Failed to set CTS prot for VDEV: %d\n",
+ arvif->vdev_id);
+ else
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "Set CTS prot: %d for VDEV: %d\n",
+ cts_prot, arvif->vdev_id);
+ } else {
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "defer protection mode setup, vdev is not ready yet\n");
+ }
+ }
+
+ if (changed & BSS_CHANGED_ERP_SLOT) {
+ u32 slottime;
+
+ if (info->use_short_slot)
+ slottime = WMI_VDEV_SLOT_TIME_SHORT; /* 9us */
+
+ else
+ slottime = WMI_VDEV_SLOT_TIME_LONG; /* 20us */
+
+ param_id = WMI_VDEV_PARAM_SLOT_TIME;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ param_id, slottime);
+ if (ret)
+ ath11k_warn(ar->ab, "Failed to set erp slot for VDEV: %d\n",
+ arvif->vdev_id);
+ else
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "Set slottime: %d for VDEV: %d\n",
+ slottime, arvif->vdev_id);
+ }
+
+ if (changed & BSS_CHANGED_ERP_PREAMBLE) {
+ u32 preamble;
+
+ if (info->use_short_preamble)
+ preamble = WMI_VDEV_PREAMBLE_SHORT;
+ else
+ preamble = WMI_VDEV_PREAMBLE_LONG;
+
+ param_id = WMI_VDEV_PARAM_PREAMBLE;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ param_id, preamble);
+ if (ret)
+ ath11k_warn(ar->ab, "Failed to set preamble for VDEV: %d\n",
+ arvif->vdev_id);
+ else
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "Set preamble: %d for VDEV: %d\n",
+ preamble, arvif->vdev_id);
+ }
+
+ if (changed & BSS_CHANGED_ASSOC) {
+ if (vif->cfg.assoc)
+ ath11k_bss_assoc(hw, vif, info);
+ else
+ ath11k_bss_disassoc(hw, vif);
+ }
+
+ if (changed & BSS_CHANGED_TXPOWER) {
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "vdev_id %i txpower %d\n",
+ arvif->vdev_id, info->txpower);
+ arvif->txpower = info->txpower;
+ ath11k_mac_txpower_recalc(ar);
+ }
+
+ if (changed & BSS_CHANGED_PS &&
+ ar->ab->hw_params.supports_sta_ps) {
+ arvif->ps = vif->cfg.ps;
+
+ ret = ath11k_mac_config_ps(ar);
+ if (ret)
+ ath11k_warn(ar->ab, "failed to setup ps on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ }
+
+ if (changed & BSS_CHANGED_MCAST_RATE &&
+ !ath11k_mac_vif_chan(arvif->vif, &def)) {
+ band = def.chan->band;
+ mcast_rate = vif->bss_conf.mcast_rate[band];
+
+ if (mcast_rate > 0)
+ rateidx = mcast_rate - 1;
+ else
+ rateidx = ffs(vif->bss_conf.basic_rates) - 1;
+
+ if (ar->pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP)
+ rateidx += ATH11K_MAC_FIRST_OFDM_RATE_IDX;
+
+ bitrate = ath11k_legacy_rates[rateidx].bitrate;
+ hw_value = ath11k_legacy_rates[rateidx].hw_value;
+
+ if (ath11k_mac_bitrate_is_cck(bitrate))
+ preamble = WMI_RATE_PREAMBLE_CCK;
+ else
+ preamble = WMI_RATE_PREAMBLE_OFDM;
+
+ rate = ATH11K_HW_RATE_CODE(hw_value, 0, preamble);
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "vdev %d mcast_rate %x\n",
+ arvif->vdev_id, rate);
+
+ vdev_param = WMI_VDEV_PARAM_MCAST_DATA_RATE;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ vdev_param, rate);
+ if (ret)
+ ath11k_warn(ar->ab,
+ "failed to set mcast rate on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+
+ vdev_param = WMI_VDEV_PARAM_BCAST_DATA_RATE;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ vdev_param, rate);
+ if (ret)
+ ath11k_warn(ar->ab,
+ "failed to set bcast rate on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ }
+
+ if (changed & BSS_CHANGED_BASIC_RATES &&
+ !ath11k_mac_vif_chan(arvif->vif, &def))
+ ath11k_recalculate_mgmt_rate(ar, vif, &def);
+
+ if (changed & BSS_CHANGED_TWT) {
+ struct wmi_twt_enable_params twt_params = {};
+
+ if (info->twt_requester || info->twt_responder) {
+ ath11k_wmi_fill_default_twt_params(&twt_params);
+ ath11k_wmi_send_twt_enable_cmd(ar, ar->pdev->pdev_id,
+ &twt_params);
+ } else {
+ ath11k_wmi_send_twt_disable_cmd(ar, ar->pdev->pdev_id);
+ }
+ }
+
+ if (changed & BSS_CHANGED_HE_OBSS_PD)
+ ath11k_mac_config_obss_pd(ar, &info->he_obss_pd);
+
+ if (changed & BSS_CHANGED_HE_BSS_COLOR) {
+ if (vif->type == NL80211_IFTYPE_AP) {
+ ret = ath11k_wmi_send_obss_color_collision_cfg_cmd(
+ ar, arvif->vdev_id, info->he_bss_color.color,
+ ATH11K_BSS_COLOR_COLLISION_DETECTION_AP_PERIOD_MS,
+ info->he_bss_color.enabled);
+ if (ret)
+ ath11k_warn(ar->ab, "failed to set bss color collision on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+
+ param_id = WMI_VDEV_PARAM_BSS_COLOR;
+ if (info->he_bss_color.enabled)
+ param_value = info->he_bss_color.color <<
+ IEEE80211_HE_OPERATION_BSS_COLOR_OFFSET;
+ else
+ param_value = IEEE80211_HE_OPERATION_BSS_COLOR_DISABLED;
+
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ param_id,
+ param_value);
+ if (ret)
+ ath11k_warn(ar->ab,
+ "failed to set bss color param on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "bss color param 0x%x set on vdev %i\n",
+ param_value, arvif->vdev_id);
+ } else if (vif->type == NL80211_IFTYPE_STATION) {
+ ret = ath11k_wmi_send_bss_color_change_enable_cmd(ar,
+ arvif->vdev_id,
+ 1);
+ if (ret)
+ ath11k_warn(ar->ab, "failed to enable bss color change on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ ret = ath11k_wmi_send_obss_color_collision_cfg_cmd(
+ ar, arvif->vdev_id, 0,
+ ATH11K_BSS_COLOR_COLLISION_DETECTION_STA_PERIOD_MS, 1);
+ if (ret)
+ ath11k_warn(ar->ab, "failed to set bss color collision on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ }
+ }
+
+ if (changed & BSS_CHANGED_FTM_RESPONDER &&
+ arvif->ftm_responder != info->ftm_responder &&
+ test_bit(WMI_TLV_SERVICE_RTT, ar->ab->wmi_ab.svc_map) &&
+ (vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_MESH_POINT)) {
+ arvif->ftm_responder = info->ftm_responder;
+ param = WMI_VDEV_PARAM_ENABLE_DISABLE_RTT_RESPONDER_ROLE;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, param,
+ arvif->ftm_responder);
+ if (ret)
+ ath11k_warn(ar->ab, "Failed to set ftm responder %i: %d\n",
+ arvif->vdev_id, ret);
+ }
+
+ if (changed & BSS_CHANGED_FILS_DISCOVERY ||
+ changed & BSS_CHANGED_UNSOL_BCAST_PROBE_RESP)
+ ath11k_mac_fils_discovery(arvif, info);
+
+ if (changed & BSS_CHANGED_ARP_FILTER) {
+ ipv4_cnt = min(vif->cfg.arp_addr_cnt, ATH11K_IPV4_MAX_COUNT);
+ memcpy(arvif->arp_ns_offload.ipv4_addr,
+ vif->cfg.arp_addr_list,
+ ipv4_cnt * sizeof(u32));
+ memcpy(arvif->arp_ns_offload.mac_addr, vif->addr, ETH_ALEN);
+ arvif->arp_ns_offload.ipv4_count = ipv4_cnt;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "arp_addr_cnt %d vif->addr %pM, offload_addr %pI4\n",
+ vif->cfg.arp_addr_cnt,
+ vif->addr, arvif->arp_ns_offload.ipv4_addr);
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+}
+
+void __ath11k_mac_scan_finish(struct ath11k *ar)
+{
+ lockdep_assert_held(&ar->data_lock);
+
+ switch (ar->scan.state) {
+ case ATH11K_SCAN_IDLE:
+ break;
+ case ATH11K_SCAN_RUNNING:
+ case ATH11K_SCAN_ABORTING:
+ if (ar->scan.is_roc && ar->scan.roc_notify)
+ ieee80211_remain_on_channel_expired(ar->hw);
+ fallthrough;
+ case ATH11K_SCAN_STARTING:
+ if (!ar->scan.is_roc) {
+ struct cfg80211_scan_info info = {
+ .aborted = ((ar->scan.state ==
+ ATH11K_SCAN_ABORTING) ||
+ (ar->scan.state ==
+ ATH11K_SCAN_STARTING)),
+ };
+
+ ieee80211_scan_completed(ar->hw, &info);
+ }
+
+ ar->scan.state = ATH11K_SCAN_IDLE;
+ ar->scan_channel = NULL;
+ ar->scan.roc_freq = 0;
+ cancel_delayed_work(&ar->scan.timeout);
+ complete_all(&ar->scan.completed);
+ break;
+ }
+}
+
+void ath11k_mac_scan_finish(struct ath11k *ar)
+{
+ spin_lock_bh(&ar->data_lock);
+ __ath11k_mac_scan_finish(ar);
+ spin_unlock_bh(&ar->data_lock);
+}
+
+static int ath11k_scan_stop(struct ath11k *ar)
+{
+ struct scan_cancel_param arg = {
+ .req_type = WLAN_SCAN_CANCEL_SINGLE,
+ .scan_id = ATH11K_SCAN_ID,
+ };
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ /* TODO: Fill other STOP Params */
+ arg.pdev_id = ar->pdev->pdev_id;
+
+ ret = ath11k_wmi_send_scan_stop_cmd(ar, &arg);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to stop wmi scan: %d\n", ret);
+ goto out;
+ }
+
+ ret = wait_for_completion_timeout(&ar->scan.completed, 3 * HZ);
+ if (ret == 0) {
+ ath11k_warn(ar->ab,
+ "failed to receive scan abort comple: timed out\n");
+ ret = -ETIMEDOUT;
+ } else if (ret > 0) {
+ ret = 0;
+ }
+
+out:
+ /* The scan state should be updated upon scan completion, but in case
+ * firmware fails to deliver the event (for whatever reason) it is
+ * desirable to clean up the scan state anyway. Firmware may have just
+ * dropped the scan completion event delivery because the transport pipe
+ * overflowed with data, and/or it may recover on its own before the
+ * next scan request is submitted.
+ */
+ spin_lock_bh(&ar->data_lock);
+ if (ar->scan.state != ATH11K_SCAN_IDLE)
+ __ath11k_mac_scan_finish(ar);
+ spin_unlock_bh(&ar->data_lock);
+
+ return ret;
+}
+
+static void ath11k_scan_abort(struct ath11k *ar)
+{
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ spin_lock_bh(&ar->data_lock);
+
+ switch (ar->scan.state) {
+ case ATH11K_SCAN_IDLE:
+ /* This can happen if the timeout worker kicked in and requested
+ * an abort while the scan completion was being processed.
+ */
+ break;
+ case ATH11K_SCAN_STARTING:
+ case ATH11K_SCAN_ABORTING:
+ ath11k_warn(ar->ab, "refusing scan abortion due to invalid scan state: %d\n",
+ ar->scan.state);
+ break;
+ case ATH11K_SCAN_RUNNING:
+ ar->scan.state = ATH11K_SCAN_ABORTING;
+ spin_unlock_bh(&ar->data_lock);
+
+ ret = ath11k_scan_stop(ar);
+ if (ret)
+ ath11k_warn(ar->ab, "failed to abort scan: %d\n", ret);
+
+ spin_lock_bh(&ar->data_lock);
+ break;
+ }
+
+ spin_unlock_bh(&ar->data_lock);
+}
+
+static void ath11k_scan_timeout_work(struct work_struct *work)
+{
+ struct ath11k *ar = container_of(work, struct ath11k,
+ scan.timeout.work);
+
+ mutex_lock(&ar->conf_mutex);
+ ath11k_scan_abort(ar);
+ mutex_unlock(&ar->conf_mutex);
+}
+
+static int ath11k_start_scan(struct ath11k *ar,
+ struct scan_req_params *arg)
+{
+ int ret;
+ unsigned long timeout = 1 * HZ;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (ath11k_spectral_get_mode(ar) == ATH11K_SPECTRAL_BACKGROUND)
+ ath11k_spectral_reset_buffer(ar);
+
+ ret = ath11k_wmi_send_scan_start_cmd(ar, arg);
+ if (ret)
+ return ret;
+
+ if (test_bit(WMI_TLV_SERVICE_11D_OFFLOAD, ar->ab->wmi_ab.svc_map)) {
+ timeout = 5 * HZ;
+
+ if (ar->supports_6ghz)
+ timeout += 5 * HZ;
+ }
+
+ ret = wait_for_completion_timeout(&ar->scan.started, timeout);
+ if (ret == 0) {
+ ret = ath11k_scan_stop(ar);
+ if (ret)
+ ath11k_warn(ar->ab, "failed to stop scan: %d\n", ret);
+
+ return -ETIMEDOUT;
+ }
+
+ /* If the scan failed to start, return an error code at this point.
+ * This is probably due to some issue in the firmware, but there is
+ * no need to wedge the driver because of that...
+ */
+ spin_lock_bh(&ar->data_lock);
+ if (ar->scan.state == ATH11K_SCAN_IDLE) {
+ spin_unlock_bh(&ar->data_lock);
+ return -EINVAL;
+ }
+ spin_unlock_bh(&ar->data_lock);
+
+ return 0;
+}
+
+static void ath11k_mac_fw_stats_reset(struct ath11k *ar)
+{
+ spin_lock_bh(&ar->data_lock);
+ ath11k_fw_stats_pdevs_free(&ar->fw_stats.pdevs);
+ ath11k_fw_stats_vdevs_free(&ar->fw_stats.vdevs);
+ ar->fw_stats.num_vdev_recvd = 0;
+ ar->fw_stats.num_bcn_recvd = 0;
+ spin_unlock_bh(&ar->data_lock);
+}
+
+int ath11k_mac_fw_stats_request(struct ath11k *ar,
+ struct stats_request_params *req_param)
+{
+ struct ath11k_base *ab = ar->ab;
+ unsigned long time_left;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ath11k_mac_fw_stats_reset(ar);
+
+ reinit_completion(&ar->fw_stats_complete);
+ reinit_completion(&ar->fw_stats_done);
+
+ ret = ath11k_wmi_send_stats_request_cmd(ar, req_param);
+
+ if (ret) {
+ ath11k_warn(ab, "could not request fw stats (%d)\n",
+ ret);
+ return ret;
+ }
+
+ time_left = wait_for_completion_timeout(&ar->fw_stats_complete, 1 * HZ);
+ if (!time_left)
+ return -ETIMEDOUT;
+
+ /* FW stats can get split when exceeding the stats data buffer limit.
+ * Since there is no end marker for the back-to-back 'update stats'
+ * events in that case, keep a 3 second timeout in case fw_stats_done
+ * has not been marked yet.
+ */
+ time_left = wait_for_completion_timeout(&ar->fw_stats_done, 3 * HZ);
+ if (!time_left)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int ath11k_mac_get_fw_stats(struct ath11k *ar, u32 pdev_id,
+ u32 vdev_id, u32 stats_id)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct stats_request_params req_param;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (ar->state != ATH11K_STATE_ON)
+ return -ENETDOWN;
+
+ req_param.pdev_id = pdev_id;
+ req_param.vdev_id = vdev_id;
+ req_param.stats_id = stats_id;
+
+ ret = ath11k_mac_fw_stats_request(ar, &req_param);
+ if (ret)
+ ath11k_warn(ab, "failed to request fw stats: %d\n", ret);
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "debug get fw stat pdev id %d vdev id %d stats id 0x%x\n",
+ pdev_id, vdev_id, stats_id);
+
+ return ret;
+}
+
+static int ath11k_mac_handle_get_txpower(struct ath11k *ar,
+ struct ieee80211_vif *vif,
+ int *dbm)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_fw_stats_pdev *pdev;
+ int ret;
+
+ /* The final Tx power is the minimum of the target power, CTL power,
+ * regulatory power and PSD EIRP power. The host only knows the
+ * regulatory power from the regulatory rules obtained, while the
+ * firmware knows all of these and applies the minimum. Hence, request
+ * the FW pdev stats, in which the firmware reports the minimum of all
+ * vdevs' channel Tx power.
+ */
+ lockdep_assert_held(&ar->conf_mutex);
+
+ /* Firmware doesn't provide Tx power during CAC, hence there is no need
+ * to fetch the stats.
+ */
+ if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags))
+ return -EAGAIN;
+
+ ret = ath11k_mac_get_fw_stats(ar, ar->pdev->pdev_id, 0,
+ WMI_REQUEST_PDEV_STAT);
+ if (ret) {
+ ath11k_warn(ab, "failed to request fw pdev stats: %d\n", ret);
+ goto err_fallback;
+ }
+
+ spin_lock_bh(&ar->data_lock);
+ pdev = list_first_entry_or_null(&ar->fw_stats.pdevs,
+ struct ath11k_fw_stats_pdev, list);
+ if (!pdev) {
+ spin_unlock_bh(&ar->data_lock);
+ goto err_fallback;
+ }
+
+ /* Tx power is reported by the FW as 2 units per dBm, e.g. 46 means 23 dBm. */
+ *dbm = pdev->chan_tx_power / 2;
+
+ spin_unlock_bh(&ar->data_lock);
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "txpower from firmware %d, reported %d dBm\n",
+ pdev->chan_tx_power, *dbm);
+ return 0;
+
+err_fallback:
+ /* We didn't get txpower from FW. Hence, relying on vif->bss_conf.txpower */
+ *dbm = vif->bss_conf.txpower;
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "txpower from firmware NaN, reported %d dBm\n",
+ *dbm);
+ return 0;
+}
+
+static int ath11k_mac_op_get_txpower(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ unsigned int link_id,
+ int *dbm)
+{
+ struct ath11k *ar = hw->priv;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+ ret = ath11k_mac_handle_get_txpower(ar, vif, dbm);
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static int ath11k_mac_op_hw_scan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_scan_request *hw_req)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct cfg80211_scan_request *req = &hw_req->req;
+ struct scan_req_params *arg = NULL;
+ int ret = 0;
+ int i;
+ u32 scan_timeout;
+
+ /* Firmware that advertises support for triggering the 11D algorithm on
+ * the results of a regular scan expects the driver to send
+ * WMI_11D_SCAN_START_CMDID before WMI_START_SCAN_CMDID.
+ * With this feature, a separate 11D scan can be avoided since the
+ * regdomain can be determined from the results of the regular scan.
+ */
+ if (ar->state_11d == ATH11K_11D_PREPARING &&
+ test_bit(WMI_TLV_SERVICE_SUPPORT_11D_FOR_HOST_SCAN,
+ ar->ab->wmi_ab.svc_map))
+ ath11k_mac_11d_scan_start(ar, arvif->vdev_id);
+
+ mutex_lock(&ar->conf_mutex);
+
+ spin_lock_bh(&ar->data_lock);
+ switch (ar->scan.state) {
+ case ATH11K_SCAN_IDLE:
+ reinit_completion(&ar->scan.started);
+ reinit_completion(&ar->scan.completed);
+ ar->scan.state = ATH11K_SCAN_STARTING;
+ ar->scan.is_roc = false;
+ ar->scan.vdev_id = arvif->vdev_id;
+ ret = 0;
+ break;
+ case ATH11K_SCAN_STARTING:
+ case ATH11K_SCAN_RUNNING:
+ case ATH11K_SCAN_ABORTING:
+ ret = -EBUSY;
+ break;
+ }
+ spin_unlock_bh(&ar->data_lock);
+
+ if (ret)
+ goto exit;
+
+ arg = kzalloc(sizeof(*arg), GFP_KERNEL);
+
+ if (!arg) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ ath11k_wmi_start_scan_init(ar, arg);
+ arg->vdev_id = arvif->vdev_id;
+ arg->scan_id = ATH11K_SCAN_ID;
+
+ if (ar->ab->hw_params.single_pdev_only)
+ arg->scan_f_filter_prb_req = 1;
+
+ if (req->ie_len) {
+ arg->extraie.ptr = kmemdup(req->ie, req->ie_len, GFP_KERNEL);
+ if (!arg->extraie.ptr) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+ arg->extraie.len = req->ie_len;
+ }
+
+ if (req->n_ssids) {
+ arg->num_ssids = req->n_ssids;
+ for (i = 0; i < arg->num_ssids; i++) {
+ arg->ssid[i].length = req->ssids[i].ssid_len;
+ memcpy(&arg->ssid[i].ssid, req->ssids[i].ssid,
+ req->ssids[i].ssid_len);
+ }
+ } else {
+ arg->scan_f_passive = 1;
+ }
+
+ if (req->n_channels) {
+ arg->num_chan = req->n_channels;
+ arg->chan_list = kcalloc(arg->num_chan, sizeof(*arg->chan_list),
+ GFP_KERNEL);
+
+ if (!arg->chan_list) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ for (i = 0; i < arg->num_chan; i++) {
+ if (test_bit(WMI_TLV_SERVICE_SCAN_CONFIG_PER_CHANNEL,
+ ar->ab->wmi_ab.svc_map)) {
+ arg->chan_list[i] =
+ u32_encode_bits(req->channels[i]->center_freq,
+ WMI_SCAN_CONFIG_PER_CHANNEL_MASK);
+
+ /* If NL80211_SCAN_FLAG_COLOCATED_6GHZ is set in scan
+ * flags, then scan all PSC channels in 6 GHz band and
+ * those non-PSC channels where RNR IE is found during
+ * the legacy 2.4/5 GHz scan.
+ * If NL80211_SCAN_FLAG_COLOCATED_6GHZ is not set,
+ * then all channels in 6 GHz will be scanned.
+ */
+ if (req->channels[i]->band == NL80211_BAND_6GHZ &&
+ req->flags & NL80211_SCAN_FLAG_COLOCATED_6GHZ &&
+ !cfg80211_channel_is_psc(req->channels[i]))
+ arg->chan_list[i] |=
+ WMI_SCAN_CH_FLAG_SCAN_ONLY_IF_RNR_FOUND;
+ } else {
+ arg->chan_list[i] = req->channels[i]->center_freq;
+ }
+ }
+ }
+
+ if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
+ arg->scan_f_add_spoofed_mac_in_probe = 1;
+ ether_addr_copy(arg->mac_addr.addr, req->mac_addr);
+ ether_addr_copy(arg->mac_mask.addr, req->mac_addr_mask);
+ }
+
+ /* if duration is set, default dwell times will be overwritten */
+ if (req->duration) {
+ arg->dwell_time_active = req->duration;
+ arg->dwell_time_active_2g = req->duration;
+ arg->dwell_time_active_6g = req->duration;
+ arg->dwell_time_passive = req->duration;
+ arg->dwell_time_passive_6g = req->duration;
+ arg->burst_duration = req->duration;
+
+ scan_timeout = min_t(u32, arg->max_rest_time *
+ (arg->num_chan - 1) + (req->duration +
+ ATH11K_SCAN_CHANNEL_SWITCH_WMI_EVT_OVERHEAD) *
+ arg->num_chan, arg->max_scan_time);
+ } else {
+ scan_timeout = arg->max_scan_time;
+ }
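+
+ /* Worked example (editorial): with num_chan = 3, duration d and
+ * max_rest_time r, scan_timeout = min(2 * r + 3 * (d +
+ * ATH11K_SCAN_CHANNEL_SWITCH_WMI_EVT_OVERHEAD), max_scan_time).
+ */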
+
+ /* Add a margin to account for event/command processing */
+ scan_timeout += ATH11K_MAC_SCAN_CMD_EVT_OVERHEAD;
+
+ ret = ath11k_start_scan(ar, arg);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to start hw scan: %d\n", ret);
+ spin_lock_bh(&ar->data_lock);
+ ar->scan.state = ATH11K_SCAN_IDLE;
+ spin_unlock_bh(&ar->data_lock);
+ }
+
+ ieee80211_queue_delayed_work(ar->hw, &ar->scan.timeout,
+ msecs_to_jiffies(scan_timeout));
+
+exit:
+ if (arg) {
+ kfree(arg->chan_list);
+ kfree(arg->extraie.ptr);
+ kfree(arg);
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+
+ if (ar->state_11d == ATH11K_11D_PREPARING)
+ ath11k_mac_11d_scan_start(ar, arvif->vdev_id);
+
+ return ret;
+}
+
+static void ath11k_mac_op_cancel_hw_scan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath11k *ar = hw->priv;
+
+ mutex_lock(&ar->conf_mutex);
+ ath11k_scan_abort(ar);
+ mutex_unlock(&ar->conf_mutex);
+
+ cancel_delayed_work_sync(&ar->scan.timeout);
+}
+
+static int ath11k_install_key(struct ath11k_vif *arvif,
+ struct ieee80211_key_conf *key,
+ enum set_key_cmd cmd,
+ const u8 *macaddr, u32 flags)
+{
+ int ret;
+ struct ath11k *ar = arvif->ar;
+ struct wmi_vdev_install_key_arg arg = {
+ .vdev_id = arvif->vdev_id,
+ .key_idx = key->keyidx,
+ .key_len = key->keylen,
+ .key_data = key->key,
+ .key_flags = flags,
+ .macaddr = macaddr,
+ };
+
+ lockdep_assert_held(&arvif->ar->conf_mutex);
+
+ reinit_completion(&ar->install_key_done);
+
+ if (test_bit(ATH11K_FLAG_HW_CRYPTO_DISABLED, &ar->ab->dev_flags))
+ return 0;
+
+ if (cmd == DISABLE_KEY) {
+ arg.key_cipher = WMI_CIPHER_NONE;
+ arg.key_data = NULL;
+ goto install;
+ }
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_CCMP_256:
+ arg.key_cipher = WMI_CIPHER_AES_CCM;
+ /* TODO: Re-check if flag is valid */
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT;
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ arg.key_cipher = WMI_CIPHER_TKIP;
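+ /* TKIP uses an 8-byte Michael MIC in each direction */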
+ arg.key_txmic_len = 8;
+ arg.key_rxmic_len = 8;
+ break;
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ arg.key_cipher = WMI_CIPHER_AES_GCM;
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT;
+ break;
+ default:
+ ath11k_warn(ar->ab, "cipher %d is not supported\n", key->cipher);
+ return -EOPNOTSUPP;
+ }
+
+ if (test_bit(ATH11K_FLAG_RAW_MODE, &ar->ab->dev_flags))
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV |
+ IEEE80211_KEY_FLAG_RESERVE_TAILROOM;
+
+install:
+ ret = ath11k_wmi_vdev_install_key(arvif->ar, &arg);
+
+ if (ret)
+ return ret;
+
+ if (!wait_for_completion_timeout(&ar->install_key_done, 1 * HZ))
+ return -ETIMEDOUT;
+
+ return ar->install_key_status ? -EINVAL : 0;
+}
+
+static int ath11k_clear_peer_keys(struct ath11k_vif *arvif,
+ const u8 *addr)
+{
+ struct ath11k *ar = arvif->ar;
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_peer *peer;
+ int first_errno = 0;
+ int ret;
+ int i;
+ u32 flags = 0;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ spin_lock_bh(&ab->base_lock);
+ peer = ath11k_peer_find(ab, arvif->vdev_id, addr);
+ spin_unlock_bh(&ab->base_lock);
+
+ if (!peer)
+ return -ENOENT;
+
+ for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
+ if (!peer->keys[i])
+ continue;
+
+ /* key flags are not required to delete the key */
+ ret = ath11k_install_key(arvif, peer->keys[i],
+ DISABLE_KEY, addr, flags);
+ if (ret < 0 && first_errno == 0)
+ first_errno = ret;
+
+ if (ret < 0)
+ ath11k_warn(ab, "failed to remove peer key %d: %d\n",
+ i, ret);
+
+ spin_lock_bh(&ab->base_lock);
+ peer->keys[i] = NULL;
+ spin_unlock_bh(&ab->base_lock);
+ }
+
+ return first_errno;
+}
+
+static int ath11k_set_group_keys(struct ath11k_vif *arvif)
+{
+ struct ath11k *ar = arvif->ar;
+ struct ath11k_base *ab = ar->ab;
+ const u8 *addr = arvif->bssid;
+ int i, ret, first_errno = 0;
+ struct ath11k_peer *peer;
+
+ spin_lock_bh(&ab->base_lock);
+ peer = ath11k_peer_find(ab, arvif->vdev_id, addr);
+ spin_unlock_bh(&ab->base_lock);
+
+ if (!peer)
+ return -ENOENT;
+
+ for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
+ struct ieee80211_key_conf *key = peer->keys[i];
+
+ if (!key || (key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+ continue;
+
+ ret = ath11k_install_key(arvif, key, SET_KEY, addr,
+ WMI_KEY_GROUP);
+ if (ret < 0 && first_errno == 0)
+ first_errno = ret;
+
+ if (ret < 0)
+ ath11k_warn(ab, "failed to set group key of idx %d for vdev %d: %d\n",
+ i, arvif->vdev_id, ret);
+ }
+
+ return first_errno;
+}
+
+static int ath11k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct ath11k_peer *peer;
+ struct ath11k_sta *arsta;
+ bool is_ap_with_no_sta;
+ const u8 *peer_addr;
+ int ret = 0;
+ u32 flags = 0;
+
+ /* BIP needs to be done in software */
+ if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
+ key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
+ key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256 ||
+ key->cipher == WLAN_CIPHER_SUITE_BIP_CMAC_256)
+ return 1;
+
+ if (test_bit(ATH11K_FLAG_HW_CRYPTO_DISABLED, &ar->ab->dev_flags))
+ return 1;
+
+ if (key->keyidx > WMI_MAX_KEY_INDEX)
+ return -ENOSPC;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (sta)
+ peer_addr = sta->addr;
+ else if (arvif->vdev_type == WMI_VDEV_TYPE_STA)
+ peer_addr = vif->bss_conf.bssid;
+ else
+ peer_addr = vif->addr;
+
+ key->hw_key_idx = key->keyidx;
+
+ /* The peer should not disappear midway (unless FW goes awry) since
+ * we already hold conf_mutex. We just make sure it's there now.
+ */
+ spin_lock_bh(&ab->base_lock);
+ peer = ath11k_peer_find(ab, arvif->vdev_id, peer_addr);
+
+ /* flush the fragments cache during key (re)install to
+ * ensure all frags in the new frag list belong to the same key.
+ */
+ if (peer && sta && cmd == SET_KEY)
+ ath11k_peer_frags_flush(ar, peer);
+ spin_unlock_bh(&ab->base_lock);
+
+ if (!peer) {
+ if (cmd == SET_KEY) {
+ ath11k_warn(ab, "cannot install key for non-existent peer %pM\n",
+ peer_addr);
+ ret = -EOPNOTSUPP;
+ goto exit;
+ } else {
+ /* if the peer doesn't exist there is no key to disable
+ * anymore
+ */
+ goto exit;
+ }
+ }
+
+ if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
+ flags = WMI_KEY_PAIRWISE;
+ else
+ flags = WMI_KEY_GROUP;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "%s for peer %pM on vdev %d flags 0x%X, type = %d, num_sta %d\n",
+ cmd == SET_KEY ? "SET_KEY" : "DEL_KEY", peer_addr, arvif->vdev_id,
+ flags, arvif->vdev_type, arvif->num_stations);
+
+ /* Allow group key clearing only in AP mode when no stations are
+ * associated. There is a known race condition in firmware where
+ * group addressed packets may be dropped if the key is cleared
+ * and immediately set again during rekey.
+ *
+ * During GTK rekey, mac80211 issues a clear key (if the old key
+ * exists) followed by an install key operation for same key
+ * index. This causes ath11k to send two WMI commands in quick
+ * succession: one to clear the old key and another to install the
+ * new key in the same slot.
+ *
+ * Under certain conditions, especially under high load or in
+ * time-sensitive scenarios, firmware may process these commands
+ * asynchronously in a way that firmware assumes the key is
+ * cleared whereas hardware still has a valid key. This inconsistency
+ * between hardware and firmware leads to group addressed packet
+ * drops after rekey.
+ * Only setting the same key again can restore a valid key in
+ * firmware and allow packets to be transmitted.
+ *
+ * There is a use case where an AP can transition from secure mode
+ * to open mode without a vdev restart by just deleting all
+ * associated peers and clearing keys. Hence allow key clearing for
+ * that case alone. Mark arvif->reinstall_group_keys in such cases
+ * and reinstall the same key when the first peer is added,
+ * allowing firmware to recover from the race if it had occurred.
+ */
+
+ is_ap_with_no_sta = (vif->type == NL80211_IFTYPE_AP &&
+ !arvif->num_stations);
+ if (flags == WMI_KEY_PAIRWISE || cmd == SET_KEY || is_ap_with_no_sta) {
+ ret = ath11k_install_key(arvif, key, cmd, peer_addr, flags);
+ if (ret) {
+ ath11k_warn(ab, "ath11k_install_key failed (%d)\n", ret);
+ goto exit;
+ }
+
+ ret = ath11k_dp_peer_rx_pn_replay_config(arvif, peer_addr, cmd, key);
+ if (ret) {
+ ath11k_warn(ab, "failed to offload PN replay detection %d\n",
+ ret);
+ goto exit;
+ }
+
+ if (flags == WMI_KEY_GROUP && cmd == SET_KEY && is_ap_with_no_sta)
+ arvif->reinstall_group_keys = true;
+ }
+
+ spin_lock_bh(&ab->base_lock);
+ peer = ath11k_peer_find(ab, arvif->vdev_id, peer_addr);
+ if (peer && cmd == SET_KEY) {
+ peer->keys[key->keyidx] = key;
+ if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
+ peer->ucast_keyidx = key->keyidx;
+ peer->sec_type = ath11k_dp_tx_get_encrypt_type(key->cipher);
+ } else {
+ peer->mcast_keyidx = key->keyidx;
+ peer->sec_type_grp = ath11k_dp_tx_get_encrypt_type(key->cipher);
+ }
+ } else if (peer && cmd == DISABLE_KEY) {
+ peer->keys[key->keyidx] = NULL;
+ if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
+ peer->ucast_keyidx = 0;
+ else
+ peer->mcast_keyidx = 0;
+ } else if (!peer)
+ /* impossible unless FW goes crazy */
+ ath11k_warn(ab, "peer %pM disappeared!\n", peer_addr);
+
+ if (sta) {
+ arsta = ath11k_sta_to_arsta(sta);
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_TKIP:
+ case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_CCMP_256:
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ if (cmd == SET_KEY)
+ arsta->pn_type = HAL_PN_TYPE_WPA;
+ else
+ arsta->pn_type = HAL_PN_TYPE_NONE;
+ break;
+ default:
+ arsta->pn_type = HAL_PN_TYPE_NONE;
+ break;
+ }
+ }
+
+ spin_unlock_bh(&ab->base_lock);
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static int
+ath11k_mac_bitrate_mask_num_ht_rates(struct ath11k *ar,
+ enum nl80211_band band,
+ const struct cfg80211_bitrate_mask *mask)
+{
+ int num_rates = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++)
+ num_rates += hweight8(mask->control[band].ht_mcs[i]);
+
+ return num_rates;
+}
+
+static int
+ath11k_mac_bitrate_mask_num_vht_rates(struct ath11k *ar,
+ enum nl80211_band band,
+ const struct cfg80211_bitrate_mask *mask)
+{
+ int num_rates = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++)
+ num_rates += hweight16(mask->control[band].vht_mcs[i]);
+
+ return num_rates;
+}
+
+static int
+ath11k_mac_bitrate_mask_num_he_rates(struct ath11k *ar,
+ enum nl80211_band band,
+ const struct cfg80211_bitrate_mask *mask)
+{
+ int num_rates = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mask->control[band].he_mcs); i++)
+ num_rates += hweight16(mask->control[band].he_mcs[i]);
+
+ return num_rates;
+}
+
+static int
+ath11k_mac_set_peer_vht_fixed_rate(struct ath11k_vif *arvif,
+ struct ieee80211_sta *sta,
+ const struct cfg80211_bitrate_mask *mask,
+ enum nl80211_band band)
+{
+ struct ath11k *ar = arvif->ar;
+ u8 vht_rate, nss;
+ u32 rate_code;
+ int ret, i;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ nss = 0;
+
+ for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) {
+ if (hweight16(mask->control[band].vht_mcs[i]) == 1) {
+ nss = i + 1;
+ vht_rate = ffs(mask->control[band].vht_mcs[i]) - 1;
+ }
+ }
+
+ if (!nss) {
+ ath11k_warn(ar->ab, "No single VHT Fixed rate found to set for %pM",
+ sta->addr);
+ return -EINVAL;
+ }
+
+ /* Avoid updating invalid nss as fixed rate */
+ if (nss > sta->deflink.rx_nss)
+ return -EINVAL;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "Setting Fixed VHT Rate for peer %pM. Device will not switch to any other selected rates",
+ sta->addr);
+
+ rate_code = ATH11K_HW_RATE_CODE(vht_rate, nss - 1,
+ WMI_RATE_PREAMBLE_VHT);
+ ret = ath11k_wmi_set_peer_param(ar, sta->addr,
+ arvif->vdev_id,
+ WMI_PEER_PARAM_FIXED_RATE,
+ rate_code);
+ if (ret)
+ ath11k_warn(ar->ab,
+ "failed to update STA %pM Fixed Rate %d: %d\n",
+ sta->addr, rate_code, ret);
+
+ return ret;
+}
+
+static int
+ath11k_mac_set_peer_he_fixed_rate(struct ath11k_vif *arvif,
+ struct ieee80211_sta *sta,
+ const struct cfg80211_bitrate_mask *mask,
+ enum nl80211_band band)
+{
+ struct ath11k *ar = arvif->ar;
+ u8 he_rate, nss;
+ u32 rate_code;
+ int ret, i;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ nss = 0;
+
+ for (i = 0; i < ARRAY_SIZE(mask->control[band].he_mcs); i++) {
+ if (hweight16(mask->control[band].he_mcs[i]) == 1) {
+ nss = i + 1;
+ he_rate = ffs(mask->control[band].he_mcs[i]) - 1;
+ }
+ }
+
+ if (!nss) {
+ ath11k_warn(ar->ab, "No single he fixed rate found to set for %pM",
+ sta->addr);
+ return -EINVAL;
+ }
+
+ /* Avoid updating invalid nss as fixed rate */
+ if (nss > sta->deflink.rx_nss)
+ return -EINVAL;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "setting fixed he rate for peer %pM, device will not switch to any other selected rates",
+ sta->addr);
+
+ rate_code = ATH11K_HW_RATE_CODE(he_rate, nss - 1,
+ WMI_RATE_PREAMBLE_HE);
+
+ ret = ath11k_wmi_set_peer_param(ar, sta->addr,
+ arvif->vdev_id,
+ WMI_PEER_PARAM_FIXED_RATE,
+ rate_code);
+ if (ret)
+ ath11k_warn(ar->ab,
+ "failed to update sta %pM fixed rate %d: %d\n",
+ sta->addr, rate_code, ret);
+
+ return ret;
+}
+
+static int
+ath11k_mac_set_peer_ht_fixed_rate(struct ath11k_vif *arvif,
+ struct ieee80211_sta *sta,
+ const struct cfg80211_bitrate_mask *mask,
+ enum nl80211_band band)
+{
+ struct ath11k *ar = arvif->ar;
+ u8 ht_rate, nss = 0;
+ u32 rate_code;
+ int ret, i;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++) {
+ if (hweight8(mask->control[band].ht_mcs[i]) == 1) {
+ nss = i + 1;
+ ht_rate = ffs(mask->control[band].ht_mcs[i]) - 1;
+ }
+ }
+
+ if (!nss) {
+ ath11k_warn(ar->ab, "No single HT Fixed rate found to set for %pM",
+ sta->addr);
+ return -EINVAL;
+ }
+
+ /* Avoid updating invalid nss as fixed rate */
+ if (nss > sta->deflink.rx_nss)
+ return -EINVAL;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "Setting Fixed HT Rate for peer %pM. Device will not switch to any other selected rates",
+ sta->addr);
+
+ rate_code = ATH11K_HW_RATE_CODE(ht_rate, nss - 1,
+ WMI_RATE_PREAMBLE_HT);
+ ret = ath11k_wmi_set_peer_param(ar, sta->addr,
+ arvif->vdev_id,
+ WMI_PEER_PARAM_FIXED_RATE,
+ rate_code);
+ if (ret)
+ ath11k_warn(ar->ab,
+ "failed to update STA %pM HT Fixed Rate %d: %d\n",
+ sta->addr, rate_code, ret);
+
+ return ret;
+}
+
+static int ath11k_station_assoc(struct ath11k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ bool reassoc)
+{
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct peer_assoc_params peer_arg;
+ int ret = 0;
+ struct cfg80211_chan_def def;
+ enum nl80211_band band;
+ struct cfg80211_bitrate_mask *mask;
+ u8 num_ht_rates, num_vht_rates, num_he_rates;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (WARN_ON(ath11k_mac_vif_chan(vif, &def)))
+ return -EPERM;
+
+ band = def.chan->band;
+ mask = &arvif->bitrate_mask;
+
+ ath11k_peer_assoc_prepare(ar, vif, sta, &peer_arg, reassoc);
+
+ peer_arg.is_assoc = true;
+ ret = ath11k_wmi_send_peer_assoc_cmd(ar, &peer_arg);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to run peer assoc for STA %pM vdev %i: %d\n",
+ sta->addr, arvif->vdev_id, ret);
+ return ret;
+ }
+
+ if (!wait_for_completion_timeout(&ar->peer_assoc_done, 1 * HZ)) {
+ ath11k_warn(ar->ab, "failed to get peer assoc conf event for %pM vdev %i\n",
+ sta->addr, arvif->vdev_id);
+ return -ETIMEDOUT;
+ }
+
+ num_vht_rates = ath11k_mac_bitrate_mask_num_vht_rates(ar, band, mask);
+ num_he_rates = ath11k_mac_bitrate_mask_num_he_rates(ar, band, mask);
+ num_ht_rates = ath11k_mac_bitrate_mask_num_ht_rates(ar, band, mask);
+
+ /* If a single VHT/HE rate is configured (by set_bitrate_mask()),
+ * peer_assoc will disable VHT/HE. It is then re-enabled by a
+ * peer-specific fixed rate param.
+ * Note that all other rates and NSS will be disabled for this peer.
+ */
+ if (sta->deflink.vht_cap.vht_supported && num_vht_rates == 1) {
+ ret = ath11k_mac_set_peer_vht_fixed_rate(arvif, sta, mask,
+ band);
+ if (ret)
+ return ret;
+ } else if (sta->deflink.he_cap.has_he && num_he_rates == 1) {
+ ret = ath11k_mac_set_peer_he_fixed_rate(arvif, sta, mask,
+ band);
+ if (ret)
+ return ret;
+ } else if (sta->deflink.ht_cap.ht_supported && num_ht_rates == 1) {
+ ret = ath11k_mac_set_peer_ht_fixed_rate(arvif, sta, mask,
+ band);
+ if (ret)
+ return ret;
+ }
+
+ /* Re-assoc is run only to update supported rates for a given
+ * station. It doesn't make much sense to reconfigure the peer
+ * completely.
+ */
+ if (reassoc)
+ return 0;
+
+ ret = ath11k_setup_peer_smps(ar, arvif, sta->addr,
+ &sta->deflink.ht_cap,
+ le16_to_cpu(sta->deflink.he_6ghz_capa.capa));
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to setup peer SMPS for vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ if (!sta->wme) {
+ arvif->num_legacy_stations++;
+ ret = ath11k_recalc_rtscts_prot(arvif);
+ if (ret)
+ return ret;
+ }
+
+ if (sta->wme && sta->uapsd_queues) {
+ ret = ath11k_peer_assoc_qos_ap(ar, arvif, sta);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set qos params for STA %pM for vdev %i: %d\n",
+ sta->addr, arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ath11k_station_disassoc(struct ath11k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ int ret = 0;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (!sta->wme) {
+ arvif->num_legacy_stations--;
+ ret = ath11k_recalc_rtscts_prot(arvif);
+ if (ret)
+ return ret;
+ }
+
+ ret = ath11k_clear_peer_keys(arvif, sta->addr);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to clear all peer keys for vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ return 0;
+}
+
+static u32 ath11k_mac_max_nss(const u8 *ht_mcs_mask, const u16 *vht_mcs_mask,
+ const u16 *he_mcs_mask)
+{
+ return max3(ath11k_mac_max_ht_nss(ht_mcs_mask),
+ ath11k_mac_max_vht_nss(vht_mcs_mask),
+ ath11k_mac_max_he_nss(he_mcs_mask));
+}
+
+static void ath11k_sta_rc_update_wk(struct work_struct *wk)
+{
+ struct ath11k *ar;
+ struct ath11k_vif *arvif;
+ struct ath11k_sta *arsta;
+ struct ieee80211_sta *sta;
+ struct cfg80211_chan_def def;
+ enum nl80211_band band;
+ const u8 *ht_mcs_mask;
+ const u16 *vht_mcs_mask;
+ const u16 *he_mcs_mask;
+ u32 changed, bw, nss, smps, bw_prev;
+ int err, num_ht_rates, num_vht_rates, num_he_rates;
+ const struct cfg80211_bitrate_mask *mask;
+ struct peer_assoc_params peer_arg;
+ enum wmi_phy_mode peer_phymode;
+
+ arsta = container_of(wk, struct ath11k_sta, update_wk);
+ sta = container_of((void *)arsta, struct ieee80211_sta, drv_priv);
+ arvif = arsta->arvif;
+ ar = arvif->ar;
+
+ if (WARN_ON(ath11k_mac_vif_chan(arvif->vif, &def)))
+ return;
+
+ band = def.chan->band;
+ ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
+ vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
+ he_mcs_mask = arvif->bitrate_mask.control[band].he_mcs;
+
+ spin_lock_bh(&ar->data_lock);
+
+ changed = arsta->changed;
+ arsta->changed = 0;
+
+ bw = arsta->bw;
+ bw_prev = arsta->bw_prev;
+ nss = arsta->nss;
+ smps = arsta->smps;
+
+ spin_unlock_bh(&ar->data_lock);
+
+ mutex_lock(&ar->conf_mutex);
+
+ nss = max_t(u32, 1, nss);
+ nss = min(nss, ath11k_mac_max_nss(ht_mcs_mask, vht_mcs_mask, he_mcs_mask));
+
+ if (changed & IEEE80211_RC_BW_CHANGED) {
+ /* Get the peer phymode */
+ ath11k_peer_assoc_h_phymode(ar, arvif->vif, sta, &peer_arg);
+ peer_phymode = peer_arg.peer_phymode;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "update sta %pM peer bw %d phymode %d\n",
+ sta->addr, bw, peer_phymode);
+
+ if (bw > bw_prev) {
+ /* BW is upgraded. In this case we send WMI_PEER_PHYMODE
+ * followed by WMI_PEER_CHWIDTH
+ */
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "BW upgrade for sta %pM new BW %d, old BW %d\n",
+ sta->addr, bw, bw_prev);
+
+ err = ath11k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id,
+ WMI_PEER_PHYMODE, peer_phymode);
+
+ if (err) {
+ ath11k_warn(ar->ab, "failed to update STA %pM peer phymode %d: %d\n",
+ sta->addr, peer_phymode, err);
+ goto err_rc_bw_changed;
+ }
+
+ err = ath11k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id,
+ WMI_PEER_CHWIDTH, bw);
+
+ if (err)
+ ath11k_warn(ar->ab, "failed to update STA %pM peer bw %d: %d\n",
+ sta->addr, bw, err);
+ } else {
+ /* BW is downgraded. In this case we send WMI_PEER_CHWIDTH
+ * followed by WMI_PEER_PHYMODE
+ */
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "BW downgrade for sta %pM new BW %d,old BW %d\n",
+ sta->addr, bw, bw_prev);
+
+ err = ath11k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id,
+ WMI_PEER_CHWIDTH, bw);
+
+ if (err) {
+ ath11k_warn(ar->ab, "failed to update STA %pM peer bw %d: %d\n",
+ sta->addr, bw, err);
+ goto err_rc_bw_changed;
+ }
+
+ err = ath11k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id,
+ WMI_PEER_PHYMODE, peer_phymode);
+
+ if (err)
+ ath11k_warn(ar->ab, "failed to update STA %pM peer phymode %d: %d\n",
+ sta->addr, peer_phymode, err);
+ }
+ }
+
+ if (changed & IEEE80211_RC_NSS_CHANGED) {
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "update sta %pM nss %d\n",
+ sta->addr, nss);
+
+ err = ath11k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id,
+ WMI_PEER_NSS, nss);
+ if (err)
+ ath11k_warn(ar->ab, "failed to update STA %pM nss %d: %d\n",
+ sta->addr, nss, err);
+ }
+
+ if (changed & IEEE80211_RC_SMPS_CHANGED) {
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "update sta %pM smps %d\n",
+ sta->addr, smps);
+
+ err = ath11k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id,
+ WMI_PEER_MIMO_PS_STATE, smps);
+ if (err)
+ ath11k_warn(ar->ab, "failed to update STA %pM smps %d: %d\n",
+ sta->addr, smps, err);
+ }
+
+ if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) {
+ mask = &arvif->bitrate_mask;
+ num_ht_rates = ath11k_mac_bitrate_mask_num_ht_rates(ar, band,
+ mask);
+ num_vht_rates = ath11k_mac_bitrate_mask_num_vht_rates(ar, band,
+ mask);
+ num_he_rates = ath11k_mac_bitrate_mask_num_he_rates(ar, band,
+ mask);
+
+ /* ath11k_peer_assoc_prepare() will reject VHT rates in the
+ * bitrate_mask if they are not available in range format and
+ * sets the VHT tx_rateset as unsupported. So setting multiple
+ * VHT MCS values (e.g. MCS 4,5,6) per peer is not supported here.
+ * However, a single rate in the VHT mask can be set as a per-peer
+ * fixed rate. Even if any HT rates are configured in the bitrate
+ * mask, the device will not switch to those rates when a per-peer
+ * fixed rate is set.
+ * TODO: Check RATEMASK_CMDID to support auto rates selection
+ * across HT/VHT and for multiple VHT MCS support.
+ */
+ if (sta->deflink.vht_cap.vht_supported && num_vht_rates == 1) {
+ ath11k_mac_set_peer_vht_fixed_rate(arvif, sta, mask,
+ band);
+ } else if (sta->deflink.he_cap.has_he && num_he_rates == 1) {
+ ath11k_mac_set_peer_he_fixed_rate(arvif, sta, mask,
+ band);
+ } else if (sta->deflink.ht_cap.ht_supported && num_ht_rates == 1) {
+ ath11k_mac_set_peer_ht_fixed_rate(arvif, sta, mask,
+ band);
+ } else {
+ /* If the peer is non-VHT/HE or no fixed VHT/HE rate
+ * is provided in the new bitrate mask we set the
+ * other rates using peer_assoc command. Also clear
+ * the peer fixed rate settings as they have higher priority
+ * than peer assoc.
+ */
+ err = ath11k_wmi_set_peer_param(ar, sta->addr,
+ arvif->vdev_id,
+ WMI_PEER_PARAM_FIXED_RATE,
+ WMI_FIXED_RATE_NONE);
+ if (err)
+ ath11k_warn(ar->ab,
+ "failed to disable peer fixed rate for sta %pM: %d\n",
+ sta->addr, err);
+
+ ath11k_peer_assoc_prepare(ar, arvif->vif, sta,
+ &peer_arg, true);
+
+ peer_arg.is_assoc = false;
+ err = ath11k_wmi_send_peer_assoc_cmd(ar, &peer_arg);
+ if (err)
+ ath11k_warn(ar->ab, "failed to run peer assoc for STA %pM vdev %i: %d\n",
+ sta->addr, arvif->vdev_id, err);
+
+ if (!wait_for_completion_timeout(&ar->peer_assoc_done, 1 * HZ))
+ ath11k_warn(ar->ab, "failed to get peer assoc conf event for %pM vdev %i\n",
+ sta->addr, arvif->vdev_id);
+ }
+ }
+
+err_rc_bw_changed:
+ mutex_unlock(&ar->conf_mutex);
+}
+
+static void ath11k_sta_set_4addr_wk(struct work_struct *wk)
+{
+ struct ath11k *ar;
+ struct ath11k_vif *arvif;
+ struct ath11k_sta *arsta;
+ struct ieee80211_sta *sta;
+ int ret = 0;
+
+ arsta = container_of(wk, struct ath11k_sta, set_4addr_wk);
+ sta = container_of((void *)arsta, struct ieee80211_sta, drv_priv);
+ arvif = arsta->arvif;
+ ar = arvif->ar;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "setting USE_4ADDR for peer %pM\n", sta->addr);
+
+ ret = ath11k_wmi_set_peer_param(ar, sta->addr,
+ arvif->vdev_id,
+ WMI_PEER_USE_4ADDR, 1);
+
+ if (ret)
+ ath11k_warn(ar->ab, "failed to set peer %pM 4addr capability: %d\n",
+ sta->addr, ret);
+}
+
+static int ath11k_mac_inc_num_stations(struct ath11k_vif *arvif,
+ struct ieee80211_sta *sta)
+{
+ struct ath11k *ar = arvif->ar;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (arvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls)
+ return 0;
+
+ if (ar->num_stations >= ar->max_num_stations)
+ return -ENOBUFS;
+
+ ar->num_stations++;
+ arvif->num_stations++;
+
+ return 0;
+}
+
+static void ath11k_mac_dec_num_stations(struct ath11k_vif *arvif,
+ struct ieee80211_sta *sta)
+{
+ struct ath11k *ar = arvif->ar;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (arvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls)
+ return;
+
+ ar->num_stations--;
+ arvif->num_stations--;
+}
+
+static u32 ath11k_mac_ieee80211_sta_bw_to_wmi(struct ath11k *ar,
+ struct ieee80211_sta *sta)
+{
+ u32 bw = WMI_PEER_CHWIDTH_20MHZ;
+
+ switch (sta->deflink.bandwidth) {
+ case IEEE80211_STA_RX_BW_20:
+ bw = WMI_PEER_CHWIDTH_20MHZ;
+ break;
+ case IEEE80211_STA_RX_BW_40:
+ bw = WMI_PEER_CHWIDTH_40MHZ;
+ break;
+ case IEEE80211_STA_RX_BW_80:
+ bw = WMI_PEER_CHWIDTH_80MHZ;
+ break;
+ case IEEE80211_STA_RX_BW_160:
+ bw = WMI_PEER_CHWIDTH_160MHZ;
+ break;
+ default:
+ ath11k_warn(ar->ab, "Invalid bandwidth %d for %pM\n",
+ sta->deflink.bandwidth, sta->addr);
+ bw = WMI_PEER_CHWIDTH_20MHZ;
+ break;
+ }
+
+ return bw;
+}
+
+static int ath11k_mac_op_sta_set_txpwr(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ int ret = 0;
+ s16 txpwr;
+
+ if (sta->deflink.txpwr.type == NL80211_TX_POWER_AUTOMATIC) {
+ txpwr = 0;
+ } else {
+ txpwr = sta->deflink.txpwr.power;
+ if (!txpwr)
+ return -EINVAL;
+ }
+
+ if (txpwr > ATH11K_TX_POWER_MAX_VAL || txpwr < ATH11K_TX_POWER_MIN_VAL)
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+
+ ret = ath11k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id,
+ WMI_PEER_USE_FIXED_PWR, txpwr);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set tx power for station ret: %d\n",
+ ret);
+ goto out;
+ }
+
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static void ath11k_mac_op_sta_set_4addr(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, bool enabled)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta);
+
+ if (enabled && !arsta->use_4addr_set) {
+ ieee80211_queue_work(ar->hw, &arsta->set_4addr_wk);
+ arsta->use_4addr_set = true;
+ }
+}
+
+static void ath11k_mac_op_sta_rc_update(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_link_sta *link_sta,
+ u32 changed)
+{
+ struct ieee80211_sta *sta = link_sta->sta;
+ struct ath11k *ar = hw->priv;
+ struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta);
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct ath11k_peer *peer;
+ u32 bw, smps;
+
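+ /* This callback must be atomic, so only record the new values under
+ * data_lock here; the WMI updates, which may sleep, are deferred to
+ * update_wk.
+ */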
+ spin_lock_bh(&ar->ab->base_lock);
+
+ peer = ath11k_peer_find(ar->ab, arvif->vdev_id, sta->addr);
+ if (!peer) {
+ spin_unlock_bh(&ar->ab->base_lock);
+ ath11k_warn(ar->ab, "mac sta rc update failed to find peer %pM on vdev %i\n",
+ sta->addr, arvif->vdev_id);
+ return;
+ }
+
+ spin_unlock_bh(&ar->ab->base_lock);
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "sta rc update for %pM changed %08x bw %d nss %d smps %d\n",
+ sta->addr, changed, sta->deflink.bandwidth,
+ sta->deflink.rx_nss,
+ sta->deflink.smps_mode);
+
+ spin_lock_bh(&ar->data_lock);
+
+ if (changed & IEEE80211_RC_BW_CHANGED) {
+ bw = ath11k_mac_ieee80211_sta_bw_to_wmi(ar, sta);
+ arsta->bw_prev = arsta->bw;
+ arsta->bw = bw;
+ }
+
+ if (changed & IEEE80211_RC_NSS_CHANGED)
+ arsta->nss = sta->deflink.rx_nss;
+
+ if (changed & IEEE80211_RC_SMPS_CHANGED) {
+ smps = WMI_PEER_SMPS_PS_NONE;
+
+ switch (sta->deflink.smps_mode) {
+ case IEEE80211_SMPS_AUTOMATIC:
+ case IEEE80211_SMPS_OFF:
+ smps = WMI_PEER_SMPS_PS_NONE;
+ break;
+ case IEEE80211_SMPS_STATIC:
+ smps = WMI_PEER_SMPS_STATIC;
+ break;
+ case IEEE80211_SMPS_DYNAMIC:
+ smps = WMI_PEER_SMPS_DYNAMIC;
+ break;
+ default:
+ ath11k_warn(ar->ab, "Invalid smps %d in sta rc update for %pM\n",
+ sta->deflink.smps_mode, sta->addr);
+ smps = WMI_PEER_SMPS_PS_NONE;
+ break;
+ }
+
+ arsta->smps = smps;
+ }
+
+ arsta->changed |= changed;
+
+ spin_unlock_bh(&ar->data_lock);
+
+ ieee80211_queue_work(hw, &arsta->update_wk);
+}
+
+static int ath11k_conf_tx_uapsd(struct ath11k *ar, struct ieee80211_vif *vif,
+ u16 ac, bool enable)
+{
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ u32 value = 0;
+ int ret = 0;
+
+ if (arvif->vdev_type != WMI_VDEV_TYPE_STA)
+ return 0;
+
+ switch (ac) {
+ case IEEE80211_AC_VO:
+ value = WMI_STA_PS_UAPSD_AC3_DELIVERY_EN |
+ WMI_STA_PS_UAPSD_AC3_TRIGGER_EN;
+ break;
+ case IEEE80211_AC_VI:
+ value = WMI_STA_PS_UAPSD_AC2_DELIVERY_EN |
+ WMI_STA_PS_UAPSD_AC2_TRIGGER_EN;
+ break;
+ case IEEE80211_AC_BE:
+ value = WMI_STA_PS_UAPSD_AC1_DELIVERY_EN |
+ WMI_STA_PS_UAPSD_AC1_TRIGGER_EN;
+ break;
+ case IEEE80211_AC_BK:
+ value = WMI_STA_PS_UAPSD_AC0_DELIVERY_EN |
+ WMI_STA_PS_UAPSD_AC0_TRIGGER_EN;
+ break;
+ }
+
+ if (enable)
+ arvif->u.sta.uapsd |= value;
+ else
+ arvif->u.sta.uapsd &= ~value;
+
+ ret = ath11k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
+ WMI_STA_PS_PARAM_UAPSD,
+ arvif->u.sta.uapsd);
+ if (ret) {
+ ath11k_warn(ar->ab, "could not set uapsd params %d\n", ret);
+ goto exit;
+ }
+
+ if (arvif->u.sta.uapsd)
+ value = WMI_STA_PS_RX_WAKE_POLICY_POLL_UAPSD;
+ else
+ value = WMI_STA_PS_RX_WAKE_POLICY_WAKE;
+
+ ret = ath11k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
+ WMI_STA_PS_PARAM_RX_WAKE_POLICY,
+ value);
+ if (ret)
+ ath11k_warn(ar->ab, "could not set rx wake param %d\n", ret);
+
+exit:
+ return ret;
+}
+
+static int ath11k_mac_op_conf_tx_mu_edca(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ unsigned int link_id, u16 ac,
+ const struct ieee80211_tx_queue_params *params)
+{
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct ath11k *ar = hw->priv;
+ struct wmi_wmm_params_arg *p;
+ int ret;
+
+ switch (ac) {
+ case IEEE80211_AC_VO:
+ p = &arvif->muedca_params.ac_vo;
+ break;
+ case IEEE80211_AC_VI:
+ p = &arvif->muedca_params.ac_vi;
+ break;
+ case IEEE80211_AC_BE:
+ p = &arvif->muedca_params.ac_be;
+ break;
+ case IEEE80211_AC_BK:
+ p = &arvif->muedca_params.ac_bk;
+ break;
+ default:
+ ath11k_warn(ar->ab, "error ac: %d", ac);
+ return -EINVAL;
+ }
+
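+ /* The MU EDCA Parameter Record packs ECWmin in the low nibble and
+ * ECWmax in the high nibble of ecw_min_max
+ */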
+ p->cwmin = u8_get_bits(params->mu_edca_param_rec.ecw_min_max, GENMASK(3, 0));
+ p->cwmax = u8_get_bits(params->mu_edca_param_rec.ecw_min_max, GENMASK(7, 4));
+ p->aifs = u8_get_bits(params->mu_edca_param_rec.aifsn, GENMASK(3, 0));
+ p->txop = params->mu_edca_param_rec.mu_edca_timer;
+
+ ret = ath11k_wmi_send_wmm_update_cmd_tlv(ar, arvif->vdev_id,
+ &arvif->muedca_params,
+ WMI_WMM_PARAM_TYPE_11AX_MU_EDCA);
+ return ret;
+}
+
+static int ath11k_mac_op_conf_tx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ unsigned int link_id, u16 ac,
+ const struct ieee80211_tx_queue_params *params)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct wmi_wmm_params_arg *p = NULL;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ switch (ac) {
+ case IEEE80211_AC_VO:
+ p = &arvif->wmm_params.ac_vo;
+ break;
+ case IEEE80211_AC_VI:
+ p = &arvif->wmm_params.ac_vi;
+ break;
+ case IEEE80211_AC_BE:
+ p = &arvif->wmm_params.ac_be;
+ break;
+ case IEEE80211_AC_BK:
+ p = &arvif->wmm_params.ac_bk;
+ break;
+ }
+
+ if (WARN_ON(!p)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ p->cwmin = params->cw_min;
+ p->cwmax = params->cw_max;
+ p->aifs = params->aifs;
+ p->txop = params->txop;
+
+ ret = ath11k_wmi_send_wmm_update_cmd_tlv(ar, arvif->vdev_id,
+ &arvif->wmm_params,
+ WMI_WMM_PARAM_TYPE_LEGACY);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set wmm params: %d\n", ret);
+ goto exit;
+ }
+
+ if (params->mu_edca) {
+ ret = ath11k_mac_op_conf_tx_mu_edca(hw, vif, link_id, ac,
+ params);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set mu_edca params: %d\n", ret);
+ goto exit;
+ }
+ }
+
+ ret = ath11k_conf_tx_uapsd(ar, vif, ac, params->uapsd);
+
+ if (ret)
+ ath11k_warn(ar->ab, "failed to set sta uapsd: %d\n", ret);
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static struct ieee80211_sta_ht_cap
+ath11k_create_ht_cap(struct ath11k *ar, u32 ar_ht_cap, u32 rate_cap_rx_chainmask)
+{
+ int i;
+ struct ieee80211_sta_ht_cap ht_cap = {};
+ u32 ar_vht_cap = ar->pdev->cap.vht_cap;
+
+ if (!(ar_ht_cap & WMI_HT_CAP_ENABLED))
+ return ht_cap;
+
+ ht_cap.ht_supported = 1;
+ ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
+ ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_NONE;
+ ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+ ht_cap.cap |= IEEE80211_HT_CAP_DSSSCCK40;
+ ht_cap.cap |= WLAN_HT_CAP_SM_PS_STATIC << IEEE80211_HT_CAP_SM_PS_SHIFT;
+
+ if (ar_ht_cap & WMI_HT_CAP_HT20_SGI)
+ ht_cap.cap |= IEEE80211_HT_CAP_SGI_20;
+
+ if (ar_ht_cap & WMI_HT_CAP_HT40_SGI)
+ ht_cap.cap |= IEEE80211_HT_CAP_SGI_40;
+
+ if (ar_ht_cap & WMI_HT_CAP_DYNAMIC_SMPS) {
+ u32 smps;
+
+ smps = WLAN_HT_CAP_SM_PS_DYNAMIC;
+ smps <<= IEEE80211_HT_CAP_SM_PS_SHIFT;
+
+ ht_cap.cap |= smps;
+ }
+
+ if (ar_ht_cap & WMI_HT_CAP_TX_STBC)
+ ht_cap.cap |= IEEE80211_HT_CAP_TX_STBC;
+
+ if (ar_ht_cap & WMI_HT_CAP_RX_STBC) {
+ u32 stbc;
+
+ stbc = ar_ht_cap;
+ stbc &= WMI_HT_CAP_RX_STBC;
+ stbc >>= WMI_HT_CAP_RX_STBC_MASK_SHIFT;
+ stbc <<= IEEE80211_HT_CAP_RX_STBC_SHIFT;
+ stbc &= IEEE80211_HT_CAP_RX_STBC;
+
+ ht_cap.cap |= stbc;
+ }
+
+ if (ar_ht_cap & WMI_HT_CAP_RX_LDPC)
+ ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
+
+ if (ar_ht_cap & WMI_HT_CAP_L_SIG_TXOP_PROT)
+ ht_cap.cap |= IEEE80211_HT_CAP_LSIG_TXOP_PROT;
+
+ if (ar_vht_cap & WMI_VHT_CAP_MAX_MPDU_LEN_MASK)
+ ht_cap.cap |= IEEE80211_HT_CAP_MAX_AMSDU;
+
+ for (i = 0; i < ar->num_rx_chains; i++) {
+ if (rate_cap_rx_chainmask & BIT(i))
+ ht_cap.mcs.rx_mask[i] = 0xFF;
+ }
+
+ ht_cap.mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
+
+ return ht_cap;
+}
+
+static int ath11k_mac_set_txbf_conf(struct ath11k_vif *arvif)
+{
+ u32 value = 0;
+ struct ath11k *ar = arvif->ar;
+ int nsts;
+ int sound_dim;
+ u32 vht_cap = ar->pdev->cap.vht_cap;
+ u32 vdev_param = WMI_VDEV_PARAM_TXBF;
+
+ if (vht_cap & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE)) {
+ nsts = vht_cap & IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK;
+ nsts >>= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT;
+ value |= SM(nsts, WMI_TXBF_STS_CAP_OFFSET);
+ }
+
+ if (vht_cap & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)) {
+ sound_dim = vht_cap &
+ IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK;
+ sound_dim >>= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT;
+ if (sound_dim > (ar->num_tx_chains - 1))
+ sound_dim = ar->num_tx_chains - 1;
+ value |= SM(sound_dim, WMI_BF_SOUND_DIM_OFFSET);
+ }
+
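+ /* Neither beamformee STS nor sounding dimensions are advertised,
+ * so there is nothing to program into the vdev param
+ */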
+ if (!value)
+ return 0;
+
+ if (vht_cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE) {
+ value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER;
+
+ if ((vht_cap & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE) &&
+ arvif->vdev_type == WMI_VDEV_TYPE_AP)
+ value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFER;
+ }
+
+ /* TODO: SUBFEE not validated in HK, disable here until validated? */
+
+ if (vht_cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE) {
+ value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE;
+
+ if ((vht_cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE) &&
+ arvif->vdev_type == WMI_VDEV_TYPE_STA)
+ value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFEE;
+ }
+
+ return ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ vdev_param, value);
+}
+
+static void ath11k_set_vht_txbf_cap(struct ath11k *ar, u32 *vht_cap)
+{
+ bool subfer, subfee;
+ int sound_dim = 0, nsts = 0;
+
+ subfer = !!(*vht_cap & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE));
+ subfee = !!(*vht_cap & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE));
+
+ if (ar->num_tx_chains < 2) {
+ *vht_cap &= ~(IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE);
+ subfer = false;
+ }
+
+ if (ar->num_rx_chains < 2) {
+ *vht_cap &= ~(IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE);
+ subfee = false;
+ }
+
+ /* If SU Beamformer is not set, then disable MU Beamformer Capability */
+ if (!subfer)
+ *vht_cap &= ~(IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE);
+
+ /* If SU Beamformee is not set, then disable MU Beamformee Capability */
+ if (!subfee)
+ *vht_cap &= ~(IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE);
+
+ sound_dim = (*vht_cap & IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK);
+ sound_dim >>= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT;
+ *vht_cap &= ~IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK;
+
+ nsts = (*vht_cap & IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK);
+ nsts >>= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT;
+ *vht_cap &= ~IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK;
+
+ /* Enable Sounding Dimension Field only if SU BF is enabled */
+ if (subfer) {
+ if (sound_dim > (ar->num_tx_chains - 1))
+ sound_dim = ar->num_tx_chains - 1;
+
+ sound_dim <<= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT;
+ sound_dim &= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK;
+ *vht_cap |= sound_dim;
+ }
+
+ /* Enable Beamformee STS Field only if SU BF is enabled */
+ if (subfee) {
+ nsts <<= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT;
+ nsts &= IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK;
+ *vht_cap |= nsts;
+ }
+}
+
+static struct ieee80211_sta_vht_cap
+ath11k_create_vht_cap(struct ath11k *ar, u32 rate_cap_tx_chainmask,
+ u32 rate_cap_rx_chainmask)
+{
+ struct ieee80211_sta_vht_cap vht_cap = {};
+ u16 txmcs_map, rxmcs_map;
+ int i;
+
+ vht_cap.vht_supported = 1;
+ vht_cap.cap = ar->pdev->cap.vht_cap;
+
+ if (ar->pdev->cap.nss_ratio_enabled)
+ vht_cap.vht_mcs.tx_highest |=
+ cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE);
+
+ ath11k_set_vht_txbf_cap(ar, &vht_cap.cap);
+
+ rxmcs_map = 0;
+ txmcs_map = 0;
+ for (i = 0; i < 8; i++) {
+ if (i < ar->num_tx_chains && rate_cap_tx_chainmask & BIT(i))
+ txmcs_map |= IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2);
+ else
+ txmcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2);
+
+ if (i < ar->num_rx_chains && rate_cap_rx_chainmask & BIT(i))
+ rxmcs_map |= IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2);
+ else
+ rxmcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2);
+ }
+
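+ /* TX STBC needs at least two TX chains; clear it for a
+ * single-chain configuration
+ */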
+ if (rate_cap_tx_chainmask <= 1)
+ vht_cap.cap &= ~IEEE80211_VHT_CAP_TXSTBC;
+
+ vht_cap.vht_mcs.rx_mcs_map = cpu_to_le16(rxmcs_map);
+ vht_cap.vht_mcs.tx_mcs_map = cpu_to_le16(txmcs_map);
+
+ return vht_cap;
+}
+
+static void ath11k_mac_setup_ht_vht_cap(struct ath11k *ar,
+ struct ath11k_pdev_cap *cap,
+ u32 *ht_cap_info)
+{
+ struct ieee80211_supported_band *band;
+ u32 rate_cap_tx_chainmask;
+ u32 rate_cap_rx_chainmask;
+ u32 ht_cap;
+
+ rate_cap_tx_chainmask = ar->cfg_tx_chainmask >> cap->tx_chain_mask_shift;
+ rate_cap_rx_chainmask = ar->cfg_rx_chainmask >> cap->rx_chain_mask_shift;
+
+ if (cap->supported_bands & WMI_HOST_WLAN_2G_CAP) {
+ band = &ar->mac.sbands[NL80211_BAND_2GHZ];
+ ht_cap = cap->band[NL80211_BAND_2GHZ].ht_cap_info;
+ if (ht_cap_info)
+ *ht_cap_info = ht_cap;
+ band->ht_cap = ath11k_create_ht_cap(ar, ht_cap,
+ rate_cap_rx_chainmask);
+ }
+
+ if (cap->supported_bands & WMI_HOST_WLAN_5G_CAP &&
+ (ar->ab->hw_params.single_pdev_only ||
+ !ar->supports_6ghz)) {
+ band = &ar->mac.sbands[NL80211_BAND_5GHZ];
+ ht_cap = cap->band[NL80211_BAND_5GHZ].ht_cap_info;
+ if (ht_cap_info)
+ *ht_cap_info = ht_cap;
+ band->ht_cap = ath11k_create_ht_cap(ar, ht_cap,
+ rate_cap_rx_chainmask);
+ band->vht_cap = ath11k_create_vht_cap(ar, rate_cap_tx_chainmask,
+ rate_cap_rx_chainmask);
+ }
+}
+
+static int ath11k_check_chain_mask(struct ath11k *ar, u32 ant, bool is_tx_ant)
+{
+ /* TODO: Check the requested chainmask against the supported
+ * chainmask table which is advertised in the extended_service_ready event
+ */
+
+ return 0;
+}
+
+static void ath11k_gen_ppe_thresh(struct ath11k_ppe_threshold *fw_ppet,
+ u8 *he_ppet)
+{
+ int nss, ru;
+ u8 bit = 7;
+
+ he_ppet[0] = fw_ppet->numss_m1 & IEEE80211_PPE_THRES_NSS_MASK;
+ he_ppet[0] |= (fw_ppet->ru_bit_mask <<
+ IEEE80211_PPE_THRES_RU_INDEX_BITMASK_POS) &
+ IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK;
+ for (nss = 0; nss <= fw_ppet->numss_m1; nss++) {
+ for (ru = 0; ru < 4; ru++) {
+ u8 val;
+ int i;
+
+ if ((fw_ppet->ru_bit_mask & BIT(ru)) == 0)
+ continue;
+ val = (fw_ppet->ppet16_ppet8_ru3_ru0[nss] >> (ru * 6)) &
+ 0x3f;
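+ /* Swap the two 3-bit PPET subfields: firmware packs PPET16/PPET8
+ * in the opposite order from the HE PPE Thresholds field
+ */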
+ val = ((val >> 3) & 0x7) | ((val & 0x7) << 3);
+ for (i = 5; i >= 0; i--) {
+ he_ppet[bit / 8] |=
+ ((val >> i) & 0x1) << ((bit % 8));
+ bit++;
+ }
+ }
+ }
+}
+
+static void
+ath11k_mac_filter_he_cap_mesh(struct ieee80211_he_cap_elem *he_cap_elem)
+{
+ u8 m;
+
+ m = IEEE80211_HE_MAC_CAP0_TWT_RES |
+ IEEE80211_HE_MAC_CAP0_TWT_REQ;
+ he_cap_elem->mac_cap_info[0] &= ~m;
+
+ m = IEEE80211_HE_MAC_CAP2_TRS |
+ IEEE80211_HE_MAC_CAP2_BCAST_TWT |
+ IEEE80211_HE_MAC_CAP2_MU_CASCADING;
+ he_cap_elem->mac_cap_info[2] &= ~m;
+
+ m = IEEE80211_HE_MAC_CAP3_FLEX_TWT_SCHED |
+ IEEE80211_HE_MAC_CAP2_BCAST_TWT |
+ IEEE80211_HE_MAC_CAP2_MU_CASCADING;
+ he_cap_elem->mac_cap_info[3] &= ~m;
+
+ m = IEEE80211_HE_MAC_CAP4_BSRP_BQRP_A_MPDU_AGG |
+ IEEE80211_HE_MAC_CAP4_BQR;
+ he_cap_elem->mac_cap_info[4] &= ~m;
+
+ m = IEEE80211_HE_MAC_CAP5_SUBCHAN_SELECTIVE_TRANSMISSION |
+ IEEE80211_HE_MAC_CAP5_UL_2x996_TONE_RU |
+ IEEE80211_HE_MAC_CAP5_PUNCTURED_SOUNDING |
+ IEEE80211_HE_MAC_CAP5_HT_VHT_TRIG_FRAME_RX;
+ he_cap_elem->mac_cap_info[5] &= ~m;
+
+ m = IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO |
+ IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO;
+ he_cap_elem->phy_cap_info[2] &= ~m;
+
+ m = IEEE80211_HE_PHY_CAP3_RX_PARTIAL_BW_SU_IN_20MHZ_MU |
+ IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_MASK |
+ IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_MASK;
+ he_cap_elem->phy_cap_info[3] &= ~m;
+
+ m = IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER;
+ he_cap_elem->phy_cap_info[4] &= ~m;
+
+ m = IEEE80211_HE_PHY_CAP5_NG16_MU_FEEDBACK;
+ he_cap_elem->phy_cap_info[5] &= ~m;
+
+ m = IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_75_MU |
+ IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMING_PARTIAL_BW_FB |
+ IEEE80211_HE_PHY_CAP6_TRIG_CQI_FB |
+ IEEE80211_HE_PHY_CAP6_PARTIAL_BANDWIDTH_DL_MUMIMO;
+ he_cap_elem->phy_cap_info[6] &= ~m;
+
+ m = IEEE80211_HE_PHY_CAP7_PSR_BASED_SR |
+ IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_SUPP |
+ IEEE80211_HE_PHY_CAP7_STBC_TX_ABOVE_80MHZ |
+ IEEE80211_HE_PHY_CAP7_STBC_RX_ABOVE_80MHZ;
+ he_cap_elem->phy_cap_info[7] &= ~m;
+
+ m = IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI |
+ IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G |
+ IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU |
+ IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU;
+ he_cap_elem->phy_cap_info[8] &= ~m;
+
+ m = IEEE80211_HE_PHY_CAP9_LONGER_THAN_16_SIGB_OFDM_SYM |
+ IEEE80211_HE_PHY_CAP9_NON_TRIGGERED_CQI_FEEDBACK |
+ IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU |
+ IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU |
+ IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB |
+ IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB;
+ he_cap_elem->phy_cap_info[9] &= ~m;
+}
+
+static __le16 ath11k_mac_setup_he_6ghz_cap(struct ath11k_pdev_cap *pcap,
+ struct ath11k_band_cap *bcap)
+{
+ u8 val;
+
+ bcap->he_6ghz_capa = IEEE80211_HT_MPDU_DENSITY_NONE;
+ if (bcap->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS)
+ bcap->he_6ghz_capa |=
+ FIELD_PREP(IEEE80211_HE_6GHZ_CAP_SM_PS,
+ WLAN_HT_CAP_SM_PS_DYNAMIC);
+ else
+ bcap->he_6ghz_capa |=
+ FIELD_PREP(IEEE80211_HE_6GHZ_CAP_SM_PS,
+ WLAN_HT_CAP_SM_PS_DISABLED);
+ val = FIELD_GET(IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK,
+ pcap->vht_cap);
+ bcap->he_6ghz_capa |=
+ FIELD_PREP(IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP, val);
+ val = FIELD_GET(IEEE80211_VHT_CAP_MAX_MPDU_MASK, pcap->vht_cap);
+ bcap->he_6ghz_capa |=
+ FIELD_PREP(IEEE80211_HE_6GHZ_CAP_MAX_MPDU_LEN, val);
+ if (pcap->vht_cap & IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN)
+ bcap->he_6ghz_capa |= IEEE80211_HE_6GHZ_CAP_RX_ANTPAT_CONS;
+ if (pcap->vht_cap & IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN)
+ bcap->he_6ghz_capa |= IEEE80211_HE_6GHZ_CAP_TX_ANTPAT_CONS;
+
+ return cpu_to_le16(bcap->he_6ghz_capa);
+}
+
+static void ath11k_mac_set_hemcsmap(struct ath11k *ar,
+ struct ath11k_pdev_cap *cap,
+ struct ieee80211_sta_he_cap *he_cap,
+ int band)
+{
+ u16 txmcs_map, rxmcs_map;
+ u32 i;
+
+ rxmcs_map = 0;
+ txmcs_map = 0;
+ for (i = 0; i < 8; i++) {
+ if (i < ar->num_tx_chains &&
+ (ar->cfg_tx_chainmask >> cap->tx_chain_mask_shift) & BIT(i))
+ txmcs_map |= IEEE80211_HE_MCS_SUPPORT_0_11 << (i * 2);
+ else
+ txmcs_map |= IEEE80211_HE_MCS_NOT_SUPPORTED << (i * 2);
+
+ if (i < ar->num_rx_chains &&
+ (ar->cfg_rx_chainmask >> cap->rx_chain_mask_shift) & BIT(i))
+ rxmcs_map |= IEEE80211_HE_MCS_SUPPORT_0_11 << (i * 2);
+ else
+ rxmcs_map |= IEEE80211_HE_MCS_NOT_SUPPORTED << (i * 2);
+ }
+ he_cap->he_mcs_nss_supp.rx_mcs_80 =
+ cpu_to_le16(rxmcs_map & 0xffff);
+ he_cap->he_mcs_nss_supp.tx_mcs_80 =
+ cpu_to_le16(txmcs_map & 0xffff);
+ he_cap->he_mcs_nss_supp.rx_mcs_160 =
+ cpu_to_le16(rxmcs_map & 0xffff);
+ he_cap->he_mcs_nss_supp.tx_mcs_160 =
+ cpu_to_le16(txmcs_map & 0xffff);
+ he_cap->he_mcs_nss_supp.rx_mcs_80p80 =
+ cpu_to_le16(rxmcs_map & 0xffff);
+ he_cap->he_mcs_nss_supp.tx_mcs_80p80 =
+ cpu_to_le16(txmcs_map & 0xffff);
+}
+
+static int ath11k_mac_copy_he_cap(struct ath11k *ar,
+ struct ath11k_pdev_cap *cap,
+ struct ieee80211_sband_iftype_data *data,
+ int band)
+{
+ int i, idx = 0;
+
+ for (i = 0; i < NUM_NL80211_IFTYPES; i++) {
+ struct ieee80211_sta_he_cap *he_cap = &data[idx].he_cap;
+ struct ath11k_band_cap *band_cap = &cap->band[band];
+ struct ieee80211_he_cap_elem *he_cap_elem =
+ &he_cap->he_cap_elem;
+
+ switch (i) {
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_MESH_POINT:
+ break;
+
+ default:
+ continue;
+ }
+
+ data[idx].types_mask = BIT(i);
+ he_cap->has_he = true;
+ memcpy(he_cap_elem->mac_cap_info, band_cap->he_cap_info,
+ sizeof(he_cap_elem->mac_cap_info));
+ memcpy(he_cap_elem->phy_cap_info, band_cap->he_cap_phy_info,
+ sizeof(he_cap_elem->phy_cap_info));
+
+ he_cap_elem->mac_cap_info[1] &=
+ IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_MASK;
+
+ he_cap_elem->phy_cap_info[5] &=
+ ~IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK;
+ he_cap_elem->phy_cap_info[5] |= ar->num_tx_chains - 1;
+
+ switch (i) {
+ case NL80211_IFTYPE_AP:
+ he_cap_elem->phy_cap_info[3] &=
+ ~IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_MASK;
+ he_cap_elem->phy_cap_info[9] |=
+ IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU;
+ break;
+ case NL80211_IFTYPE_STATION:
+ he_cap_elem->mac_cap_info[0] &=
+ ~IEEE80211_HE_MAC_CAP0_TWT_RES;
+ he_cap_elem->mac_cap_info[0] |=
+ IEEE80211_HE_MAC_CAP0_TWT_REQ;
+ he_cap_elem->phy_cap_info[9] |=
+ IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU;
+ break;
+ case NL80211_IFTYPE_MESH_POINT:
+ ath11k_mac_filter_he_cap_mesh(he_cap_elem);
+ break;
+ }
+
+ ath11k_mac_set_hemcsmap(ar, cap, he_cap, band);
+
+ memset(he_cap->ppe_thres, 0, sizeof(he_cap->ppe_thres));
+ if (he_cap_elem->phy_cap_info[6] &
+ IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT)
+ ath11k_gen_ppe_thresh(&band_cap->he_ppet,
+ he_cap->ppe_thres);
+
+ if (band == NL80211_BAND_6GHZ) {
+ data[idx].he_6ghz_capa.capa =
+ ath11k_mac_setup_he_6ghz_cap(cap, band_cap);
+ }
+ idx++;
+ }
+
+ return idx;
+}
+
+static void ath11k_mac_setup_he_cap(struct ath11k *ar,
+ struct ath11k_pdev_cap *cap)
+{
+ struct ieee80211_supported_band *band;
+ int count;
+
+ if (cap->supported_bands & WMI_HOST_WLAN_2G_CAP) {
+ count = ath11k_mac_copy_he_cap(ar, cap,
+ ar->mac.iftype[NL80211_BAND_2GHZ],
+ NL80211_BAND_2GHZ);
+ band = &ar->mac.sbands[NL80211_BAND_2GHZ];
+ _ieee80211_set_sband_iftype_data(band,
+ ar->mac.iftype[NL80211_BAND_2GHZ],
+ count);
+ }
+
+ if (cap->supported_bands & WMI_HOST_WLAN_5G_CAP) {
+ count = ath11k_mac_copy_he_cap(ar, cap,
+ ar->mac.iftype[NL80211_BAND_5GHZ],
+ NL80211_BAND_5GHZ);
+ band = &ar->mac.sbands[NL80211_BAND_5GHZ];
+ _ieee80211_set_sband_iftype_data(band,
+ ar->mac.iftype[NL80211_BAND_5GHZ],
+ count);
+ }
+
+ if (cap->supported_bands & WMI_HOST_WLAN_5G_CAP &&
+ ar->supports_6ghz) {
+ count = ath11k_mac_copy_he_cap(ar, cap,
+ ar->mac.iftype[NL80211_BAND_6GHZ],
+ NL80211_BAND_6GHZ);
+ band = &ar->mac.sbands[NL80211_BAND_6GHZ];
+ _ieee80211_set_sband_iftype_data(band,
+ ar->mac.iftype[NL80211_BAND_6GHZ],
+ count);
+ }
+}
+
+static int __ath11k_set_antenna(struct ath11k *ar, u32 tx_ant, u32 rx_ant)
+{
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (ath11k_check_chain_mask(ar, tx_ant, true))
+ return -EINVAL;
+
+ if (ath11k_check_chain_mask(ar, rx_ant, false))
+ return -EINVAL;
+
+ ar->cfg_tx_chainmask = tx_ant;
+ ar->cfg_rx_chainmask = rx_ant;
+
+ if (ar->state != ATH11K_STATE_ON &&
+ ar->state != ATH11K_STATE_RESTARTED)
+ return 0;
+
+ ret = ath11k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_TX_CHAIN_MASK,
+ tx_ant, ar->pdev->pdev_id);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set tx-chainmask: %d, req 0x%x\n",
+ ret, tx_ant);
+ return ret;
+ }
+
+ ar->num_tx_chains = get_num_chains(tx_ant);
+
+ ret = ath11k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_RX_CHAIN_MASK,
+ rx_ant, ar->pdev->pdev_id);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set rx-chainmask: %d, req 0x%x\n",
+ ret, rx_ant);
+ return ret;
+ }
+
+ ar->num_rx_chains = get_num_chains(rx_ant);
+
+ /* Reload HT/VHT/HE capability */
+ ath11k_mac_setup_ht_vht_cap(ar, &ar->pdev->cap, NULL);
+ ath11k_mac_setup_he_cap(ar, &ar->pdev->cap);
+
+ return 0;
+}
+
+static void ath11k_mgmt_over_wmi_tx_drop(struct ath11k *ar, struct sk_buff *skb)
+{
+ int num_mgmt;
+
+ ieee80211_free_txskb(ar->hw, skb);
+
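+ /* atomic_dec_if_positive() returns the decremented value, or a
+ * negative value if the counter was already zero
+ */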
+ num_mgmt = atomic_dec_if_positive(&ar->num_pending_mgmt_tx);
+
+ if (num_mgmt < 0)
+ WARN_ON_ONCE(1);
+
+ if (!num_mgmt)
+ wake_up(&ar->txmgmt_empty_waitq);
+}
+
+static void ath11k_mac_tx_mgmt_free(struct ath11k *ar, int buf_id)
+{
+ struct sk_buff *msdu;
+ struct ieee80211_tx_info *info;
+
+ spin_lock_bh(&ar->txmgmt_idr_lock);
+ msdu = idr_remove(&ar->txmgmt_idr, buf_id);
+ spin_unlock_bh(&ar->txmgmt_idr_lock);
+
+ if (!msdu)
+ return;
+
+ dma_unmap_single(ar->ab->dev, ATH11K_SKB_CB(msdu)->paddr, msdu->len,
+ DMA_TO_DEVICE);
+
+ info = IEEE80211_SKB_CB(msdu);
+ memset(&info->status, 0, sizeof(info->status));
+
+ ath11k_mgmt_over_wmi_tx_drop(ar, msdu);
+}
+
+int ath11k_mac_tx_mgmt_pending_free(int buf_id, void *skb, void *ctx)
+{
+ struct ath11k *ar = ctx;
+
+ ath11k_mac_tx_mgmt_free(ar, buf_id);
+
+ return 0;
+}
+
+static int ath11k_mac_vif_txmgmt_idr_remove(int buf_id, void *skb, void *ctx)
+{
+ struct ieee80211_vif *vif = ctx;
+ struct ath11k_skb_cb *skb_cb = ATH11K_SKB_CB((struct sk_buff *)skb);
+ struct ath11k *ar = skb_cb->ar;
+
+ if (skb_cb->vif == vif)
+ ath11k_mac_tx_mgmt_free(ar, buf_id);
+
+ return 0;
+}
+
+static int ath11k_mac_mgmt_tx_wmi(struct ath11k *ar, struct ath11k_vif *arvif,
+ struct sk_buff *skb)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ath11k_skb_cb *skb_cb = ATH11K_SKB_CB(skb);
+ struct ieee80211_tx_info *info;
+ enum hal_encrypt_type enctype;
+ unsigned int mic_len;
+ dma_addr_t paddr;
+ int buf_id;
+ int ret;
+
+ ATH11K_SKB_CB(skb)->ar = ar;
+
+ spin_lock_bh(&ar->txmgmt_idr_lock);
+ buf_id = idr_alloc(&ar->txmgmt_idr, skb, 0,
+ ATH11K_TX_MGMT_NUM_PENDING_MAX, GFP_ATOMIC);
+ spin_unlock_bh(&ar->txmgmt_idr_lock);
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "tx mgmt frame, buf id %d\n", buf_id);
+
+ if (buf_id < 0)
+ return -ENOSPC;
+
+ info = IEEE80211_SKB_CB(skb);
+ if (!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP)) {
+ if ((ieee80211_is_action(hdr->frame_control) ||
+ ieee80211_is_deauth(hdr->frame_control) ||
+ ieee80211_is_disassoc(hdr->frame_control)) &&
+ ieee80211_has_protected(hdr->frame_control)) {
+ if (!(skb_cb->flags & ATH11K_SKB_CIPHER_SET))
+ ath11k_warn(ab, "WMI management tx frame without ATH11K_SKB_CIPHER_SET");
+
+ enctype = ath11k_dp_tx_get_encrypt_type(skb_cb->cipher);
+ mic_len = ath11k_dp_rx_crypto_mic_len(ar, enctype);
+ skb_put(skb, mic_len);
+ }
+ }
+
+ paddr = dma_map_single(ab->dev, skb->data, skb->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(ab->dev, paddr)) {
+ ath11k_warn(ab, "failed to DMA map mgmt Tx buffer\n");
+ ret = -EIO;
+ goto err_free_idr;
+ }
+
+ ATH11K_SKB_CB(skb)->paddr = paddr;
+
+ ret = ath11k_wmi_mgmt_send(ar, arvif->vdev_id, buf_id, skb);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send mgmt frame: %d\n", ret);
+ goto err_unmap_buf;
+ }
+
+ return 0;
+
+err_unmap_buf:
+ dma_unmap_single(ab->dev, ATH11K_SKB_CB(skb)->paddr,
+ skb->len, DMA_TO_DEVICE);
+err_free_idr:
+ spin_lock_bh(&ar->txmgmt_idr_lock);
+ idr_remove(&ar->txmgmt_idr, buf_id);
+ spin_unlock_bh(&ar->txmgmt_idr_lock);
+
+ return ret;
+}
+
+static void ath11k_mgmt_over_wmi_tx_purge(struct ath11k *ar)
+{
+ struct sk_buff *skb;
+
+ while ((skb = skb_dequeue(&ar->wmi_mgmt_tx_queue)) != NULL)
+ ath11k_mgmt_over_wmi_tx_drop(ar, skb);
+}
+
+static int ath11k_mac_mgmt_action_frame_fill_elem_data(struct ath11k_vif *arvif,
+ struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ u8 category, *buf, iv_len, action_code, dialog_token;
+ int cur_tx_power, max_tx_power;
+ struct ath11k *ar = arvif->ar;
+ struct cfg80211_chan_def def;
+ struct ath11k_skb_cb *skb_cb;
+ struct ieee80211_mgmt *mgmt;
+ unsigned int remaining_len;
+ bool has_protected;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ /* make sure category field is present */
+ if (skb->len < IEEE80211_MIN_ACTION_SIZE)
+ return -EINVAL;
+
+ remaining_len = skb->len - IEEE80211_MIN_ACTION_SIZE;
+ has_protected = ieee80211_has_protected(hdr->frame_control);
+
+ /* In the case of SW crypto with a protected header (PMF), the packet
+ * will already be encrypted, so we cannot fill in data here.
+ */
+ if (test_bit(ATH11K_FLAG_HW_CRYPTO_DISABLED, &ar->ab->dev_flags) &&
+ has_protected)
+ return 0;
+
+ mgmt = (struct ieee80211_mgmt *)hdr;
+ buf = (u8 *)&mgmt->u.action;
+
+ /* An FCTL_PROTECTED frame might have extra space added for the cipher
+ * IV header. Offset by that many bytes if it is there.
+ */
+ if (has_protected) {
+ skb_cb = ATH11K_SKB_CB(skb);
+
+ switch (skb_cb->cipher) {
+ /* Cipher suites having the flag %IEEE80211_KEY_FLAG_GENERATE_IV_MGMT
+ * set in the key need to be processed. See ath11k_install_key().
+ */
+ case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_CCMP_256:
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ iv_len = IEEE80211_CCMP_HDR_LEN;
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ iv_len = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (remaining_len < iv_len)
+ return -EINVAL;
+
+ buf += iv_len;
+ remaining_len -= iv_len;
+ }
+
+ category = *buf++;
+ /* The category code is already accounted for in %IEEE80211_MIN_ACTION_SIZE,
+ * hence there is no need to adjust remaining_len.
+ */
+
+ switch (category) {
+ case WLAN_CATEGORY_RADIO_MEASUREMENT:
+ /* need action code and dialog token */
+ if (remaining_len < 2)
+ return -EINVAL;
+
+ /* Packet Format:
+ * Action Code | Dialog Token | Variable Len (based on Action Code)
+ */
+ action_code = *buf++;
+ dialog_token = *buf++;
+ remaining_len -= 2;
+
+ if (ath11k_mac_vif_chan(arvif->vif, &def))
+ return -ENOENT;
+
+ cur_tx_power = arvif->vif->bss_conf.txpower;
+ max_tx_power = min(def.chan->max_reg_power, (int)ar->max_tx_power / 2);
+ ath11k_mac_handle_get_txpower(ar, arvif->vif, &cur_tx_power);
+
+ switch (action_code) {
+ case WLAN_RM_ACTION_LINK_MEASUREMENT_REQUEST:
+ /* need variable fields to be present in len */
+ if (remaining_len < 2)
+ return -EINVAL;
+
+ /* Variable length format as defined in IEEE 802.11-2024,
+ * Figure 9-1187-Link Measurement Request frame Action field
+ * format.
+ * Transmit Power | Max Tx Power
+ * We fill both of these.
+ */
+ *buf++ = cur_tx_power;
+ *buf = max_tx_power;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "RRM: Link Measurement Req dialog_token %u cur_tx_power %d max_tx_power %d\n",
+ dialog_token, cur_tx_power, max_tx_power);
+ break;
+ case WLAN_RM_ACTION_LINK_MEASUREMENT_REPORT:
+ /* need variable fields to be present in len */
+ if (remaining_len < 3)
+ return -EINVAL;
+
+ /* Variable length format as defined in IEEE 802.11-2024,
+ * Figure 9-1188-Link Measurement Report frame Action field format
+ * TPC Report | Variable Fields
+ *
+ * TPC Report Format:
+ * Element ID | Len | Tx Power | Link Margin
+ *
+ * We fill Tx power in the TPC Report (2nd index)
+ */
+ buf[2] = cur_tx_power;
+
+ /* TODO: At present, Link margin data is not present so can't
+ * really fill it now. Once it is available, it can be added
+ * here
+ */
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "RRM: Link Measurement Report dialog_token %u cur_tx_power %d\n",
+ dialog_token, cur_tx_power);
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ default:
+ /* nothing to fill */
+ return 0;
+ }
+
+ return 0;
+}
+
+static int ath11k_mac_mgmt_frame_fill_elem_data(struct ath11k_vif *arvif,
+ struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+
+ if (!ieee80211_is_action(hdr->frame_control))
+ return 0;
+
+ return ath11k_mac_mgmt_action_frame_fill_elem_data(arvif, skb);
+}
+
+static void ath11k_mgmt_over_wmi_tx_work(struct work_struct *work)
+{
+ struct ath11k *ar = container_of(work, struct ath11k, wmi_mgmt_tx_work);
+ struct ath11k_skb_cb *skb_cb;
+ struct ath11k_vif *arvif;
+ struct sk_buff *skb;
+ int ret;
+
+ while ((skb = skb_dequeue(&ar->wmi_mgmt_tx_queue)) != NULL) {
+ skb_cb = ATH11K_SKB_CB(skb);
+ if (!skb_cb->vif) {
+ ath11k_warn(ar->ab, "no vif found for mgmt frame\n");
+ ath11k_mgmt_over_wmi_tx_drop(ar, skb);
+ continue;
+ }
+
+ arvif = ath11k_vif_to_arvif(skb_cb->vif);
+ mutex_lock(&ar->conf_mutex);
+ if (ar->allocated_vdev_map & (1LL << arvif->vdev_id)) {
+ /* Fill in the data which is required to be filled by the driver,
+ * for example the max Tx power in a Link Measurement
+ * Request/Report.
+ */
+ ret = ath11k_mac_mgmt_frame_fill_elem_data(arvif, skb);
+ if (ret) {
+ /* If we could not fill in the data for any reason, do
+ * not drop the packet; transmit it anyway.
+ */
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "Failed to fill the required data for the mgmt packet err %d\n",
+ ret);
+ }
+
+ ret = ath11k_mac_mgmt_tx_wmi(ar, arvif, skb);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to tx mgmt frame, vdev_id %d :%d\n",
+ arvif->vdev_id, ret);
+ ath11k_mgmt_over_wmi_tx_drop(ar, skb);
+ } else {
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "tx mgmt frame, vdev_id %d\n",
+ arvif->vdev_id);
+ }
+ } else {
+ ath11k_warn(ar->ab,
+ "dropping mgmt frame for vdev %d, is_started %d\n",
+ arvif->vdev_id,
+ arvif->is_started);
+ ath11k_mgmt_over_wmi_tx_drop(ar, skb);
+ }
+ mutex_unlock(&ar->conf_mutex);
+ }
+}
+
+static int ath11k_mac_mgmt_tx(struct ath11k *ar, struct sk_buff *skb,
+ bool is_prb_rsp)
+{
+ struct sk_buff_head *q = &ar->wmi_mgmt_tx_queue;
+
+ if (test_bit(ATH11K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags))
+ return -ESHUTDOWN;
+
+ /* Drop probe response packets when the pending management tx
+ * count has reached a certain threshold, so as to prioritize
+ * other mgmt packets like auth and assoc to be sent on time
+ * for establishing successful connections.
+ */
+ if (is_prb_rsp &&
+ atomic_read(&ar->num_pending_mgmt_tx) > ATH11K_PRB_RSP_DROP_THRESHOLD) {
+ ath11k_warn(ar->ab,
+ "dropping probe response as pending queue is almost full\n");
+ return -ENOSPC;
+ }
+
+ if (skb_queue_len_lockless(q) >= ATH11K_TX_MGMT_NUM_PENDING_MAX) {
+ ath11k_warn(ar->ab, "mgmt tx queue is full\n");
+ return -ENOSPC;
+ }
+
+ skb_queue_tail(q, skb);
+ atomic_inc(&ar->num_pending_mgmt_tx);
+ queue_work(ar->ab->workqueue_aux, &ar->wmi_mgmt_tx_work);
+
+ return 0;
+}
+
+static void ath11k_mac_op_tx(struct ieee80211_hw *hw,
+ struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
+{
+ struct ath11k_skb_cb *skb_cb = ATH11K_SKB_CB(skb);
+ struct ath11k *ar = hw->priv;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_vif *vif = info->control.vif;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ieee80211_key_conf *key = info->control.hw_key;
+ struct ath11k_sta *arsta = NULL;
+ u32 info_flags = info->flags;
+ bool is_prb_rsp;
+ int ret;
+
+ memset(skb_cb, 0, sizeof(*skb_cb));
+ skb_cb->vif = vif;
+
+ if (key) {
+ skb_cb->cipher = key->cipher;
+ skb_cb->flags |= ATH11K_SKB_CIPHER_SET;
+ }
+
+ if (info_flags & IEEE80211_TX_CTL_HW_80211_ENCAP) {
+ skb_cb->flags |= ATH11K_SKB_HW_80211_ENCAP;
+ } else if (ieee80211_is_mgmt(hdr->frame_control)) {
+ is_prb_rsp = ieee80211_is_probe_resp(hdr->frame_control);
+ ret = ath11k_mac_mgmt_tx(ar, skb, is_prb_rsp);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to queue management frame %d\n",
+ ret);
+ ieee80211_free_txskb(ar->hw, skb);
+ }
+ return;
+ }
+
+ if (control->sta)
+ arsta = ath11k_sta_to_arsta(control->sta);
+
+ ret = ath11k_dp_tx(ar, arvif, arsta, skb);
+ if (unlikely(ret)) {
+ ath11k_warn(ar->ab, "failed to transmit frame %d\n", ret);
+ ieee80211_free_txskb(ar->hw, skb);
+ }
+}
+
+void ath11k_mac_drain_tx(struct ath11k *ar)
+{
+ /* make sure rcu-protected mac80211 tx path itself is drained */
+ synchronize_net();
+
+ cancel_work_sync(&ar->wmi_mgmt_tx_work);
+ ath11k_mgmt_over_wmi_tx_purge(ar);
+}
+
+static int ath11k_mac_config_mon_status_default(struct ath11k *ar, bool enable)
+{
+ struct htt_rx_ring_tlv_filter tlv_filter = {};
+ struct ath11k_base *ab = ar->ab;
+ int i, ret = 0;
+ u32 ring_id;
+
+ if (enable) {
+ tlv_filter = ath11k_mac_mon_status_filter_default;
+ if (ath11k_debugfs_rx_filter(ar))
+ tlv_filter.rx_filter = ath11k_debugfs_rx_filter(ar);
+ }
+
+ for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
+ ring_id = ar->dp.rx_mon_status_refill_ring[i].refill_buf_ring.ring_id;
+ ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id,
+ ar->dp.mac_id + i,
+ HAL_RXDMA_MONITOR_STATUS,
+ DP_RX_BUFFER_SIZE,
+ &tlv_filter);
+ }
+
+ if (enable && !ar->ab->hw_params.rxdma1_enable)
+ mod_timer(&ar->ab->mon_reap_timer, jiffies +
+ msecs_to_jiffies(ATH11K_MON_TIMER_INTERVAL));
+
+ return ret;
+}
+
+static void ath11k_mac_wait_reconfigure(struct ath11k_base *ab)
+{
+ int recovery_start_count;
+
+ if (!ab->is_reset)
+ return;
+
+ recovery_start_count = atomic_inc_return(&ab->recovery_start_count);
+ ath11k_dbg(ab, ATH11K_DBG_MAC, "recovery start count %d\n", recovery_start_count);
+
+ if (recovery_start_count == ab->num_radios) {
+ complete(&ab->recovery_start);
+ ath11k_dbg(ab, ATH11K_DBG_MAC, "recovery started success\n");
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_MAC, "waiting reconfigure...\n");
+
+ wait_for_completion_timeout(&ab->reconfigure_complete,
+ ATH11K_RECONFIGURE_TIMEOUT_HZ);
+}
+
+static int ath11k_mac_op_start(struct ieee80211_hw *hw)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_pdev *pdev = ar->pdev;
+ int ret;
+
+ if (ath11k_ftm_mode) {
+ ath11k_warn(ab, "mac operations not supported in factory test mode\n");
+ return -EOPNOTSUPP;
+ }
+
+ ath11k_mac_drain_tx(ar);
+ mutex_lock(&ar->conf_mutex);
+
+ switch (ar->state) {
+ case ATH11K_STATE_OFF:
+ ar->state = ATH11K_STATE_ON;
+ break;
+ case ATH11K_STATE_RESTARTING:
+ ar->state = ATH11K_STATE_RESTARTED;
+ ath11k_mac_wait_reconfigure(ab);
+ break;
+ case ATH11K_STATE_RESTARTED:
+ case ATH11K_STATE_WEDGED:
+ case ATH11K_STATE_ON:
+ case ATH11K_STATE_FTM:
+ WARN_ON(1);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ ret = ath11k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_PMF_QOS,
+ 1, pdev->pdev_id);
+
+ if (ret) {
+ ath11k_err(ar->ab, "failed to enable PMF QOS: (%d\n", ret);
+ goto err;
+ }
+
+ ret = ath11k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_DYNAMIC_BW, 1,
+ pdev->pdev_id);
+ if (ret) {
+ ath11k_err(ar->ab, "failed to enable dynamic bw: %d\n", ret);
+ goto err;
+ }
+
+ if (test_bit(WMI_TLV_SERVICE_SPOOF_MAC_SUPPORT, ar->wmi->wmi_ab->svc_map)) {
+ ret = ath11k_wmi_scan_prob_req_oui(ar, ar->mac_addr);
+ if (ret) {
+ ath11k_err(ab, "failed to set prob req oui: %i\n", ret);
+ goto err;
+ }
+ }
+
+ ret = ath11k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_ARP_AC_OVERRIDE,
+ 0, pdev->pdev_id);
+ if (ret) {
+ ath11k_err(ab, "failed to set ac override for ARP: %d\n",
+ ret);
+ goto err;
+ }
+
+ ret = ath11k_wmi_send_dfs_phyerr_offload_enable_cmd(ar, pdev->pdev_id);
+ if (ret) {
+ ath11k_err(ab, "failed to offload radar detection: %d\n",
+ ret);
+ goto err;
+ }
+
+ ret = ath11k_dp_tx_htt_h2t_ppdu_stats_req(ar,
+ HTT_PPDU_STATS_TAG_DEFAULT);
+ if (ret) {
+ ath11k_err(ab, "failed to req ppdu stats: %d\n", ret);
+ goto err;
+ }
+
+ ret = ath11k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_MESH_MCAST_ENABLE,
+ 1, pdev->pdev_id);
+
+ if (ret) {
+ ath11k_err(ar->ab, "failed to enable MESH MCAST ENABLE: (%d\n", ret);
+ goto err;
+ }
+
+ __ath11k_set_antenna(ar, ar->cfg_tx_chainmask, ar->cfg_rx_chainmask);
+
+ /* TODO: Do we need to enable ANI? */
+
+ ath11k_reg_update_chan_list(ar, false);
+
+ ar->num_started_vdevs = 0;
+ ar->num_created_vdevs = 0;
+ ar->num_peers = 0;
+ ar->allocated_vdev_map = 0;
+
+ /* Configure monitor status ring with default rx_filter to get rx status
+ * such as rssi, rx_duration.
+ */
+ ret = ath11k_mac_config_mon_status_default(ar, true);
+ if (ret) {
+ ath11k_err(ab, "failed to configure monitor status ring with default rx_filter: (%d)\n",
+ ret);
+ goto err;
+ }
+
+ /* Configure the hash seed for hash based reo dest ring selection */
+ ath11k_wmi_pdev_lro_cfg(ar, ar->pdev->pdev_id);
+
+ /* allow device to enter IMPS */
+ if (ab->hw_params.idle_ps) {
+ ret = ath11k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_IDLE_PS_CONFIG,
+ 1, pdev->pdev_id);
+ if (ret) {
+ ath11k_err(ab, "failed to enable idle ps: %d\n", ret);
+ goto err;
+ }
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+
+ rcu_assign_pointer(ab->pdevs_active[ar->pdev_idx],
+ &ab->pdevs[ar->pdev_idx]);
+
+ return 0;
+
+err:
+ ar->state = ATH11K_STATE_OFF;
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static void ath11k_mac_op_stop(struct ieee80211_hw *hw, bool suspend)
+{
+ struct ath11k *ar = hw->priv;
+ struct htt_ppdu_stats_info *ppdu_stats, *tmp;
+ struct scan_chan_list_params *params;
+ int ret;
+
+ ath11k_mac_drain_tx(ar);
+
+ mutex_lock(&ar->conf_mutex);
+ ret = ath11k_mac_config_mon_status_default(ar, false);
+ if (ret)
+ ath11k_err(ar->ab, "failed to clear rx_filter for monitor status ring: (%d)\n",
+ ret);
+
+ clear_bit(ATH11K_CAC_RUNNING, &ar->dev_flags);
+ ar->state = ATH11K_STATE_OFF;
+ mutex_unlock(&ar->conf_mutex);
+
+ cancel_delayed_work_sync(&ar->scan.timeout);
+ cancel_work_sync(&ar->channel_update_work);
+ cancel_work_sync(&ar->regd_update_work);
+ cancel_work_sync(&ar->ab->update_11d_work);
+
+ if (ar->state_11d == ATH11K_11D_PREPARING) {
+ ar->state_11d = ATH11K_11D_IDLE;
+ complete(&ar->completed_11d_scan);
+ }
+
+ spin_lock_bh(&ar->data_lock);
+
+ list_for_each_entry_safe(ppdu_stats, tmp, &ar->ppdu_stats_info, list) {
+ list_del(&ppdu_stats->list);
+ kfree(ppdu_stats);
+ }
+
+ while ((params = list_first_entry_or_null(&ar->channel_update_queue,
+ struct scan_chan_list_params,
+ list))) {
+ list_del(&params->list);
+ kfree(params);
+ }
+
+ spin_unlock_bh(&ar->data_lock);
+
+ rcu_assign_pointer(ar->ab->pdevs_active[ar->pdev_idx], NULL);
+
+ synchronize_rcu();
+
+ atomic_set(&ar->num_pending_mgmt_tx, 0);
+}
+
+static int ath11k_mac_setup_vdev_params_mbssid(struct ath11k_vif *arvif,
+ u32 *flags, u32 *tx_vdev_id)
+{
+ struct ath11k *ar = arvif->ar;
+ struct ath11k_vif *tx_arvif;
+
+ *tx_vdev_id = 0;
+ tx_arvif = ath11k_mac_get_tx_arvif(arvif);
+ if (!tx_arvif) {
+ *flags = WMI_HOST_VDEV_FLAGS_NON_MBSSID_AP;
+ return 0;
+ }
+
+ if (arvif->vif->bss_conf.nontransmitted) {
+ if (ar->hw->wiphy != tx_arvif->ar->hw->wiphy)
+ return -EINVAL;
+
+ *flags = WMI_HOST_VDEV_FLAGS_NON_TRANSMIT_AP;
+ *tx_vdev_id = tx_arvif->vdev_id;
+ } else if (tx_arvif == arvif) {
+ *flags = WMI_HOST_VDEV_FLAGS_TRANSMIT_AP;
+ } else {
+ return -EINVAL;
+ }
+
+ if (arvif->vif->bss_conf.ema_ap)
+ *flags |= WMI_HOST_VDEV_FLAGS_EMA_MODE;
+
+ return 0;
+}
+
+static int ath11k_mac_setup_vdev_create_params(struct ath11k_vif *arvif,
+ struct vdev_create_params *params)
+{
+ struct ath11k *ar = arvif->ar;
+ struct ath11k_pdev *pdev = ar->pdev;
+ int ret;
+
+ params->if_id = arvif->vdev_id;
+ params->type = arvif->vdev_type;
+ params->subtype = arvif->vdev_subtype;
+ params->pdev_id = pdev->pdev_id;
+ params->mbssid_flags = 0;
+ params->mbssid_tx_vdev_id = 0;
+
+ if (!test_bit(WMI_TLV_SERVICE_MBSS_PARAM_IN_VDEV_START_SUPPORT,
+ ar->ab->wmi_ab.svc_map)) {
+ ret = ath11k_mac_setup_vdev_params_mbssid(arvif,
+ &params->mbssid_flags,
+ &params->mbssid_tx_vdev_id);
+ if (ret)
+ return ret;
+ }
+
+ if (pdev->cap.supported_bands & WMI_HOST_WLAN_2G_CAP) {
+ params->chains[NL80211_BAND_2GHZ].tx = ar->num_tx_chains;
+ params->chains[NL80211_BAND_2GHZ].rx = ar->num_rx_chains;
+ }
+ if (pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP) {
+ params->chains[NL80211_BAND_5GHZ].tx = ar->num_tx_chains;
+ params->chains[NL80211_BAND_5GHZ].rx = ar->num_rx_chains;
+ }
+ if (pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP &&
+ ar->supports_6ghz) {
+ params->chains[NL80211_BAND_6GHZ].tx = ar->num_tx_chains;
+ params->chains[NL80211_BAND_6GHZ].rx = ar->num_rx_chains;
+ }
+ return 0;
+}
+
+static void ath11k_mac_op_update_vif_offload(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ u32 param_id, param_value;
+ int ret;
+
+ param_id = WMI_VDEV_PARAM_TX_ENCAP_TYPE;
+ if (ath11k_frame_mode != ATH11K_HW_TXRX_ETHERNET ||
+ (vif->type != NL80211_IFTYPE_STATION &&
+ vif->type != NL80211_IFTYPE_AP))
+ vif->offload_flags &= ~(IEEE80211_OFFLOAD_ENCAP_ENABLED |
+ IEEE80211_OFFLOAD_DECAP_ENABLED);
+
+ if (vif->offload_flags & IEEE80211_OFFLOAD_ENCAP_ENABLED)
+ param_value = ATH11K_HW_TXRX_ETHERNET;
+ else if (test_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags))
+ param_value = ATH11K_HW_TXRX_RAW;
+ else
+ param_value = ATH11K_HW_TXRX_NATIVE_WIFI;
+
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ param_id, param_value);
+ if (ret) {
+ ath11k_warn(ab, "failed to set vdev %d tx encap mode: %d\n",
+ arvif->vdev_id, ret);
+ vif->offload_flags &= ~IEEE80211_OFFLOAD_ENCAP_ENABLED;
+ }
+
+ param_id = WMI_VDEV_PARAM_RX_DECAP_TYPE;
+ if (vif->offload_flags & IEEE80211_OFFLOAD_DECAP_ENABLED)
+ param_value = ATH11K_HW_TXRX_ETHERNET;
+ else if (test_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags))
+ param_value = ATH11K_HW_TXRX_RAW;
+ else
+ param_value = ATH11K_HW_TXRX_NATIVE_WIFI;
+
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ param_id, param_value);
+ if (ret) {
+ ath11k_warn(ab, "failed to set vdev %d rx decap mode: %d\n",
+ arvif->vdev_id, ret);
+ vif->offload_flags &= ~IEEE80211_OFFLOAD_DECAP_ENABLED;
+ }
+}
+
+static bool ath11k_mac_vif_ap_active_any(struct ath11k_base *ab)
+{
+ struct ath11k *ar;
+ struct ath11k_pdev *pdev;
+ struct ath11k_vif *arvif;
+ int i;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = &ab->pdevs[i];
+ ar = pdev->ar;
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ if (arvif->is_up && arvif->vdev_type == WMI_VDEV_TYPE_AP)
+ return true;
+ }
+ }
+ return false;
+}
+
+void ath11k_mac_11d_scan_start(struct ath11k *ar, u32 vdev_id)
+{
+ struct wmi_11d_scan_start_params param;
+ int ret;
+
+ mutex_lock(&ar->ab->vdev_id_11d_lock);
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "vdev id for 11d scan %d\n",
+ ar->vdev_id_11d_scan);
+
+ if (ar->regdom_set_by_user)
+ goto fin;
+
+ if (ar->vdev_id_11d_scan != ATH11K_11D_INVALID_VDEV_ID)
+ goto fin;
+
+ if (!test_bit(WMI_TLV_SERVICE_11D_OFFLOAD, ar->ab->wmi_ab.svc_map))
+ goto fin;
+
+ if (ath11k_mac_vif_ap_active_any(ar->ab))
+ goto fin;
+
+ param.vdev_id = vdev_id;
+ param.start_interval_msec = 0;
+ param.scan_period_msec = ATH11K_SCAN_11D_INTERVAL;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "start 11d scan\n");
+
+ ret = ath11k_wmi_send_11d_scan_start_cmd(ar, &param);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to start 11d scan vdev %d ret: %d\n",
+ vdev_id, ret);
+ } else {
+ ar->vdev_id_11d_scan = vdev_id;
+ if (ar->state_11d == ATH11K_11D_PREPARING)
+ ar->state_11d = ATH11K_11D_RUNNING;
+ }
+
+fin:
+ if (ar->state_11d == ATH11K_11D_PREPARING) {
+ ar->state_11d = ATH11K_11D_IDLE;
+ complete(&ar->completed_11d_scan);
+ }
+
+ mutex_unlock(&ar->ab->vdev_id_11d_lock);
+}
+
+void ath11k_mac_11d_scan_stop(struct ath11k *ar)
+{
+ int ret;
+ u32 vdev_id;
+
+ if (!test_bit(WMI_TLV_SERVICE_11D_OFFLOAD, ar->ab->wmi_ab.svc_map))
+ return;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "stop 11d scan\n");
+
+ mutex_lock(&ar->ab->vdev_id_11d_lock);
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "stop 11d vdev id %d\n",
+ ar->vdev_id_11d_scan);
+
+ if (ar->state_11d == ATH11K_11D_PREPARING) {
+ ar->state_11d = ATH11K_11D_IDLE;
+ complete(&ar->completed_11d_scan);
+ }
+
+ if (ar->vdev_id_11d_scan != ATH11K_11D_INVALID_VDEV_ID) {
+ vdev_id = ar->vdev_id_11d_scan;
+
+ ret = ath11k_wmi_send_11d_scan_stop_cmd(ar, vdev_id);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to stopt 11d scan vdev %d ret: %d\n",
+ vdev_id, ret);
+ } else {
+ ar->vdev_id_11d_scan = ATH11K_11D_INVALID_VDEV_ID;
+ ar->state_11d = ATH11K_11D_IDLE;
+ complete(&ar->completed_11d_scan);
+ }
+ }
+ mutex_unlock(&ar->ab->vdev_id_11d_lock);
+}
+
+void ath11k_mac_11d_scan_stop_all(struct ath11k_base *ab)
+{
+ struct ath11k *ar;
+ struct ath11k_pdev *pdev;
+ int i;
+
+ ath11k_dbg(ab, ATH11K_DBG_MAC, "stop soc 11d scan\n");
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = &ab->pdevs[i];
+ ar = pdev->ar;
+
+ ath11k_mac_11d_scan_stop(ar);
+ }
+}
+
+static int ath11k_mac_vdev_delete(struct ath11k *ar, struct ath11k_vif *arvif)
+{
+ unsigned long time_left;
+ struct ieee80211_vif *vif = arvif->vif;
+ int ret = 0;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ reinit_completion(&ar->vdev_delete_done);
+
+ ret = ath11k_wmi_vdev_delete(ar, arvif->vdev_id);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to delete WMI vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ time_left = wait_for_completion_timeout(&ar->vdev_delete_done,
+ ATH11K_VDEV_DELETE_TIMEOUT_HZ);
+ if (time_left == 0) {
+ ath11k_warn(ar->ab, "Timeout in receiving vdev delete response\n");
+ return -ETIMEDOUT;
+ }
+
+ ar->ab->free_vdev_map |= 1LL << (arvif->vdev_id);
+ ar->allocated_vdev_map &= ~(1LL << arvif->vdev_id);
+ ar->num_created_vdevs--;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "vdev %pM deleted, vdev_id %d\n",
+ vif->addr, arvif->vdev_id);
+
+ return ret;
+}
+
+static void ath11k_mac_bcn_tx_work(struct work_struct *work)
+{
+ struct ath11k_vif *arvif = container_of(work, struct ath11k_vif,
+ bcn_tx_work);
+
+ mutex_lock(&arvif->ar->conf_mutex);
+ ath11k_mac_bcn_tx_event(arvif);
+ mutex_unlock(&arvif->ar->conf_mutex);
+}
+
+static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct vdev_create_params vdev_param = {};
+ struct peer_create_params peer_param;
+ u32 param_id, param_value;
+ u16 nss;
+ int i;
+ int ret, fbret;
+ int bit;
+
+ vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (vif->type == NL80211_IFTYPE_AP &&
+ ar->num_peers > (ar->max_num_peers - 1)) {
+ ath11k_warn(ab, "failed to create vdev due to insufficient peer entry resource in firmware\n");
+ ret = -ENOBUFS;
+ goto err;
+ }
+
+ if (ar->num_created_vdevs > (TARGET_NUM_VDEVS(ab) - 1)) {
+ ath11k_warn(ab, "failed to create vdev %u, reached max vdev limit %d\n",
+ ar->num_created_vdevs, TARGET_NUM_VDEVS(ab));
+ ret = -EBUSY;
+ goto err;
+ }
+
+ memset(arvif, 0, sizeof(*arvif));
+
+ arvif->ar = ar;
+ arvif->vif = vif;
+
+ INIT_LIST_HEAD(&arvif->list);
+ INIT_WORK(&arvif->bcn_tx_work, ath11k_mac_bcn_tx_work);
+ INIT_DELAYED_WORK(&arvif->connection_loss_work,
+ ath11k_mac_vif_sta_connection_loss_work);
+
+ for (i = 0; i < ARRAY_SIZE(arvif->bitrate_mask.control); i++) {
+ arvif->bitrate_mask.control[i].legacy = 0xffffffff;
+ arvif->bitrate_mask.control[i].gi = NL80211_TXRATE_FORCE_SGI;
+ memset(arvif->bitrate_mask.control[i].ht_mcs, 0xff,
+ sizeof(arvif->bitrate_mask.control[i].ht_mcs));
+ memset(arvif->bitrate_mask.control[i].vht_mcs, 0xff,
+ sizeof(arvif->bitrate_mask.control[i].vht_mcs));
+ memset(arvif->bitrate_mask.control[i].he_mcs, 0xff,
+ sizeof(arvif->bitrate_mask.control[i].he_mcs));
+ }
+
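+ /* Claim the lowest free vdev id from the free_vdev_map bitmap */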
+ bit = __ffs64(ab->free_vdev_map);
+
+ arvif->vdev_id = bit;
+ arvif->vdev_subtype = WMI_VDEV_SUBTYPE_NONE;
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_UNSPECIFIED:
+ case NL80211_IFTYPE_STATION:
+ arvif->vdev_type = WMI_VDEV_TYPE_STA;
+ if (vif->p2p)
+ arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_CLIENT;
+ break;
+ case NL80211_IFTYPE_MESH_POINT:
+ arvif->vdev_subtype = WMI_VDEV_SUBTYPE_MESH_11S;
+ fallthrough;
+ case NL80211_IFTYPE_AP:
+ arvif->vdev_type = WMI_VDEV_TYPE_AP;
+ if (vif->p2p)
+ arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_GO;
+ break;
+ case NL80211_IFTYPE_MONITOR:
+ arvif->vdev_type = WMI_VDEV_TYPE_MONITOR;
+ ar->monitor_vdev_id = bit;
+ break;
+ case NL80211_IFTYPE_P2P_DEVICE:
+ arvif->vdev_type = WMI_VDEV_TYPE_STA;
+ arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_DEVICE;
+ break;
+
+ default:
+ WARN_ON(1);
+ break;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "add interface id %d type %d subtype %d map %llx\n",
+ arvif->vdev_id, arvif->vdev_type, arvif->vdev_subtype,
+ ab->free_vdev_map);
+
+ vif->cab_queue = arvif->vdev_id % (ATH11K_HW_MAX_QUEUES - 1);
+ for (i = 0; i < ARRAY_SIZE(vif->hw_queue); i++)
+ vif->hw_queue[i] = i % (ATH11K_HW_MAX_QUEUES - 1);
+
+ ret = ath11k_mac_setup_vdev_create_params(arvif, &vdev_param);
+ if (ret) {
+ ath11k_warn(ab, "failed to create vdev parameters %d: %d\n",
+ arvif->vdev_id, ret);
+ goto err;
+ }
+
+ ret = ath11k_wmi_vdev_create(ar, vif->addr, &vdev_param);
+ if (ret) {
+ ath11k_warn(ab, "failed to create WMI vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ goto err;
+ }
+
+ ar->num_created_vdevs++;
+ ath11k_dbg(ab, ATH11K_DBG_MAC, "vdev %pM created, vdev_id %d\n",
+ vif->addr, arvif->vdev_id);
+ ar->allocated_vdev_map |= 1LL << arvif->vdev_id;
+ ab->free_vdev_map &= ~(1LL << arvif->vdev_id);
+
+ spin_lock_bh(&ar->data_lock);
+ list_add(&arvif->list, &ar->arvifs);
+ spin_unlock_bh(&ar->data_lock);
+
+ ath11k_mac_op_update_vif_offload(hw, vif);
+
+ nss = get_num_chains(ar->cfg_tx_chainmask) ? : 1;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ WMI_VDEV_PARAM_NSS, nss);
+ if (ret) {
+ ath11k_warn(ab, "failed to set vdev %d chainmask 0x%x, nss %d :%d\n",
+ arvif->vdev_id, ar->cfg_tx_chainmask, nss, ret);
+ goto err_vdev_del;
+ }
+
+ switch (arvif->vdev_type) {
+ case WMI_VDEV_TYPE_AP:
+ peer_param.vdev_id = arvif->vdev_id;
+ peer_param.peer_addr = vif->addr;
+ peer_param.peer_type = WMI_PEER_TYPE_DEFAULT;
+ ret = ath11k_peer_create(ar, arvif, NULL, &peer_param);
+ if (ret) {
+ ath11k_warn(ab, "failed to vdev %d create peer for AP: %d\n",
+ arvif->vdev_id, ret);
+ goto err_vdev_del;
+ }
+
+ ret = ath11k_mac_set_kickout(arvif);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set vdev %i kickout parameters: %d\n",
+ arvif->vdev_id, ret);
+ goto err_peer_del;
+ }
+
+ ath11k_mac_11d_scan_stop_all(ar->ab);
+ break;
+ case WMI_VDEV_TYPE_STA:
+ param_id = WMI_STA_PS_PARAM_RX_WAKE_POLICY;
+ param_value = WMI_STA_PS_RX_WAKE_POLICY_WAKE;
+ ret = ath11k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
+ param_id, param_value);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set vdev %d RX wake policy: %d\n",
+ arvif->vdev_id, ret);
+ goto err_peer_del;
+ }
+
+ param_id = WMI_STA_PS_PARAM_TX_WAKE_THRESHOLD;
+ param_value = WMI_STA_PS_TX_WAKE_THRESHOLD_ALWAYS;
+ ret = ath11k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
+ param_id, param_value);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set vdev %d TX wake threshold: %d\n",
+ arvif->vdev_id, ret);
+ goto err_peer_del;
+ }
+
+ param_id = WMI_STA_PS_PARAM_PSPOLL_COUNT;
+ param_value = WMI_STA_PS_PSPOLL_COUNT_NO_MAX;
+ ret = ath11k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
+ param_id, param_value);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set vdev %d pspoll count: %d\n",
+ arvif->vdev_id, ret);
+ goto err_peer_del;
+ }
+
+ ret = ath11k_wmi_pdev_set_ps_mode(ar, arvif->vdev_id,
+ WMI_STA_PS_MODE_DISABLED);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to disable vdev %d ps mode: %d\n",
+ arvif->vdev_id, ret);
+ goto err_peer_del;
+ }
+
+ if (test_bit(WMI_TLV_SERVICE_11D_OFFLOAD, ab->wmi_ab.svc_map)) {
+ reinit_completion(&ar->completed_11d_scan);
+ ar->state_11d = ATH11K_11D_PREPARING;
+ }
+ break;
+ case WMI_VDEV_TYPE_MONITOR:
+ set_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags);
+ break;
+ default:
+ break;
+ }
+
+ arvif->txpower = vif->bss_conf.txpower;
+ ret = ath11k_mac_txpower_recalc(ar);
+ if (ret)
+ goto err_peer_del;
+
+ param_id = WMI_VDEV_PARAM_RTS_THRESHOLD;
+ param_value = ar->hw->wiphy->rts_threshold;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ param_id, param_value);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set rts threshold for vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ }
+
+ ath11k_dp_vdev_tx_attach(ar, arvif);
+
+ if (vif->type != NL80211_IFTYPE_MONITOR &&
+ test_bit(ATH11K_FLAG_MONITOR_CONF_ENABLED, &ar->monitor_flags)) {
+ ret = ath11k_mac_monitor_vdev_create(ar);
+ if (ret)
+ ath11k_warn(ar->ab, "failed to create monitor vdev during add interface: %d",
+ ret);
+ }
+
+ if (ath11k_wmi_supports_6ghz_cc_ext(ar)) {
+ struct cur_regulatory_info *reg_info;
+
+ reg_info = &ab->reg_info_store[ar->pdev_idx];
+ ath11k_dbg(ab, ATH11K_DBG_MAC, "interface added to change reg rules\n");
+ ath11k_reg_handle_chan_list(ab, reg_info, IEEE80211_REG_LPI_AP);
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+
+ return 0;
+
+err_peer_del:
+ if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
+ fbret = ath11k_peer_delete(ar, arvif->vdev_id, vif->addr);
+ if (fbret) {
+ ath11k_warn(ar->ab, "fallback fail to delete peer addr %pM vdev_id %d ret %d\n",
+ vif->addr, arvif->vdev_id, fbret);
+ goto err;
+ }
+ }
+
+err_vdev_del:
+ ath11k_mac_vdev_delete(ar, arvif);
+ spin_lock_bh(&ar->data_lock);
+ list_del(&arvif->list);
+ spin_unlock_bh(&ar->data_lock);
+
+err:
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static int ath11k_mac_vif_unref(int buf_id, void *skb, void *ctx)
+{
+ struct ieee80211_vif *vif = ctx;
+ struct ath11k_skb_cb *skb_cb = ATH11K_SKB_CB(skb);
+
+ if (skb_cb->vif == vif)
+ skb_cb->vif = NULL;
+
+ return 0;
+}
+
+static void ath11k_mac_op_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct ath11k_base *ab = ar->ab;
+ int ret;
+ int i;
+
+ cancel_delayed_work_sync(&arvif->connection_loss_work);
+ cancel_work_sync(&arvif->bcn_tx_work);
+
+ mutex_lock(&ar->conf_mutex);
+
+ ath11k_dbg(ab, ATH11K_DBG_MAC, "remove interface (vdev %d)\n",
+ arvif->vdev_id);
+
+ ret = ath11k_spectral_vif_stop(arvif);
+ if (ret)
+ ath11k_warn(ab, "failed to stop spectral for vdev %i: %d\n",
+ arvif->vdev_id, ret);
+
+ if (arvif->vdev_type == WMI_VDEV_TYPE_STA)
+ ath11k_mac_11d_scan_stop(ar);
+
+ if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
+ ret = ath11k_peer_delete(ar, arvif->vdev_id, vif->addr);
+ if (ret)
+ ath11k_warn(ab, "failed to submit AP self-peer removal on vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ }
+
+ ret = ath11k_mac_vdev_delete(ar, arvif);
+ if (ret) {
+ ath11k_warn(ab, "failed to delete vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ goto err_vdev_del;
+ }
+
+ if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
+ clear_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags);
+ ar->monitor_vdev_id = -1;
+ } else if (test_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags) &&
+ !test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags)) {
+ ret = ath11k_mac_monitor_vdev_delete(ar);
+ if (ret)
+ /* continue even if there's an error */
+ ath11k_warn(ar->ab, "failed to delete vdev monitor during remove interface: %d",
+ ret);
+ }
+
+err_vdev_del:
+ spin_lock_bh(&ar->data_lock);
+ list_del(&arvif->list);
+ spin_unlock_bh(&ar->data_lock);
+
+ ath11k_peer_cleanup(ar, arvif->vdev_id);
+
+ idr_for_each(&ar->txmgmt_idr,
+ ath11k_mac_vif_txmgmt_idr_remove, vif);
+
+ for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
+ spin_lock_bh(&ab->dp.tx_ring[i].tx_idr_lock);
+ idr_for_each(&ab->dp.tx_ring[i].txbuf_idr,
+ ath11k_mac_vif_unref, vif);
+ spin_unlock_bh(&ab->dp.tx_ring[i].tx_idr_lock);
+ }
+
+ /* Recalc txpower for remaining vdev */
+ ath11k_mac_txpower_recalc(ar);
+
+ /* TODO: recalc traffic pause state based on the available vdevs */
+
+ mutex_unlock(&ar->conf_mutex);
+}
+
+/* FIXME: Has to be verified. */
+#define SUPPORTED_FILTERS \
+ (FIF_ALLMULTI | \
+ FIF_CONTROL | \
+ FIF_PSPOLL | \
+ FIF_OTHER_BSS | \
+ FIF_BCN_PRBRESP_PROMISC | \
+ FIF_PROBE_REQ | \
+ FIF_FCSFAIL)
+
+static void ath11k_mac_op_configure_filter(struct ieee80211_hw *hw,
+ unsigned int changed_flags,
+ unsigned int *total_flags,
+ u64 multicast)
+{
+ struct ath11k *ar = hw->priv;
+
+ mutex_lock(&ar->conf_mutex);
+
+ *total_flags &= SUPPORTED_FILTERS;
+ ar->filter_flags = *total_flags;
+
+ mutex_unlock(&ar->conf_mutex);
+}
+
+static int ath11k_mac_op_get_antenna(struct ieee80211_hw *hw, int radio_idx,
+ u32 *tx_ant, u32 *rx_ant)
+{
+ struct ath11k *ar = hw->priv;
+
+ mutex_lock(&ar->conf_mutex);
+
+ *tx_ant = ar->cfg_tx_chainmask;
+ *rx_ant = ar->cfg_rx_chainmask;
+
+ mutex_unlock(&ar->conf_mutex);
+
+ return 0;
+}
+
+static int ath11k_mac_op_set_antenna(struct ieee80211_hw *hw, int radio_idx,
+ u32 tx_ant, u32 rx_ant)
+{
+ struct ath11k *ar = hw->priv;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+ ret = __ath11k_set_antenna(ar, tx_ant, rx_ant);
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static int ath11k_mac_op_ampdu_action(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_ampdu_params *params)
+{
+ struct ath11k *ar = hw->priv;
+ int ret = -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+
+ switch (params->action) {
+ case IEEE80211_AMPDU_RX_START:
+ ret = ath11k_dp_rx_ampdu_start(ar, params);
+ break;
+ case IEEE80211_AMPDU_RX_STOP:
+ ret = ath11k_dp_rx_ampdu_stop(ar, params);
+ break;
+ case IEEE80211_AMPDU_TX_START:
+ case IEEE80211_AMPDU_TX_STOP_CONT:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
+ case IEEE80211_AMPDU_TX_OPERATIONAL:
+ /* Tx A-MPDU aggregation offloaded to hw/fw so deny mac80211
+ * Tx aggregation requests.
+ */
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static int ath11k_mac_op_add_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_base *ab = ar->ab;
+
+ ath11k_dbg(ab, ATH11K_DBG_MAC,
+ "chanctx add freq %u width %d ptr %p\n",
+ ctx->def.chan->center_freq, ctx->def.width, ctx);
+
+ mutex_lock(&ar->conf_mutex);
+
+ spin_lock_bh(&ar->data_lock);
+ /* TODO: In case of multiple channel context, populate rx_channel from
+ * Rx PPDU desc information.
+ */
+ ar->rx_channel = ctx->def.chan;
+ spin_unlock_bh(&ar->data_lock);
+
+ mutex_unlock(&ar->conf_mutex);
+
+ return 0;
+}
+
+static void ath11k_mac_op_remove_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_base *ab = ar->ab;
+
+ ath11k_dbg(ab, ATH11K_DBG_MAC,
+ "chanctx remove freq %u width %d ptr %p\n",
+ ctx->def.chan->center_freq, ctx->def.width, ctx);
+
+ mutex_lock(&ar->conf_mutex);
+
+ spin_lock_bh(&ar->data_lock);
+ /* TODO: If there is one more channel context left, populate
+ * rx_channel with the channel of that remaining channel context.
+ */
+ ar->rx_channel = NULL;
+ spin_unlock_bh(&ar->data_lock);
+
+ mutex_unlock(&ar->conf_mutex);
+}
+
+static int
+ath11k_mac_vdev_start_restart(struct ath11k_vif *arvif,
+ struct ieee80211_chanctx_conf *ctx,
+ bool restart)
+{
+ struct ath11k *ar = arvif->ar;
+ struct ath11k_base *ab = ar->ab;
+ struct wmi_vdev_start_req_arg arg = {};
+ const struct cfg80211_chan_def *chandef = &ctx->def;
+ int ret = 0;
+ unsigned int dfs_cac_time;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ reinit_completion(&ar->vdev_setup_done);
+
+ arg.vdev_id = arvif->vdev_id;
+ arg.dtim_period = arvif->dtim_period;
+ arg.bcn_intval = arvif->beacon_interval;
+
+ arg.channel.freq = chandef->chan->center_freq;
+ arg.channel.band_center_freq1 = chandef->center_freq1;
+ arg.channel.band_center_freq2 = chandef->center_freq2;
+ arg.channel.mode =
+ ath11k_phymodes[chandef->chan->band][chandef->width];
+
+ arg.channel.min_power = 0;
+ arg.channel.max_power = chandef->chan->max_power;
+ arg.channel.max_reg_power = chandef->chan->max_reg_power;
+ arg.channel.max_antenna_gain = chandef->chan->max_antenna_gain;
+
+ arg.pref_tx_streams = ar->num_tx_chains;
+ arg.pref_rx_streams = ar->num_rx_chains;
+
+ arg.mbssid_flags = 0;
+ arg.mbssid_tx_vdev_id = 0;
+ if (test_bit(WMI_TLV_SERVICE_MBSS_PARAM_IN_VDEV_START_SUPPORT,
+ ar->ab->wmi_ab.svc_map)) {
+ ret = ath11k_mac_setup_vdev_params_mbssid(arvif,
+ &arg.mbssid_flags,
+ &arg.mbssid_tx_vdev_id);
+ if (ret)
+ return ret;
+ }
+
+ if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
+ arg.ssid = arvif->u.ap.ssid;
+ arg.ssid_len = arvif->u.ap.ssid_len;
+ arg.hidden_ssid = arvif->u.ap.hidden_ssid;
+
+ /* For now allow DFS for AP mode */
+ arg.channel.chan_radar =
+ !!(chandef->chan->flags & IEEE80211_CHAN_RADAR);
+
+ arg.channel.freq2_radar = ctx->radar_enabled;
+
+ arg.channel.passive = arg.channel.chan_radar;
+
+ spin_lock_bh(&ab->base_lock);
+ arg.regdomain = ar->ab->dfs_region;
+ spin_unlock_bh(&ab->base_lock);
+ }
+
+ arg.channel.passive |= !!(chandef->chan->flags & IEEE80211_CHAN_NO_IR);
+
+ ath11k_dbg(ab, ATH11K_DBG_MAC,
+ "vdev %d start center_freq %d phymode %s\n",
+ arg.vdev_id, arg.channel.freq,
+ ath11k_wmi_phymode_str(arg.channel.mode));
+
+ ret = ath11k_wmi_vdev_start(ar, &arg, restart);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to %s WMI vdev %i\n",
+ restart ? "restart" : "start", arg.vdev_id);
+ return ret;
+ }
+
+ ret = ath11k_mac_vdev_setup_sync(ar);
+ if (ret) {
+ ath11k_warn(ab, "failed to synchronize setup for vdev %i %s: %d\n",
+ arg.vdev_id, restart ? "restart" : "start", ret);
+ return ret;
+ }
+
+ /* TODO: For now we only set TPC power here. However when
+ * channel changes, say CSA, it should be updated again.
+ */
+ if (ath11k_mac_supports_station_tpc(ar, arvif, chandef)) {
+ ath11k_mac_fill_reg_tpc_info(ar, arvif->vif, &arvif->chanctx);
+ ath11k_wmi_send_vdev_set_tpc_power(ar, arvif->vdev_id,
+ &arvif->reg_tpc_info);
+ }
+
+ if (!restart)
+ ar->num_started_vdevs++;
+
+ ath11k_dbg(ab, ATH11K_DBG_MAC, "vdev %pM started, vdev_id %d\n",
+ arvif->vif->addr, arvif->vdev_id);
+
+ /* Enable the CAC flag in the driver when all sub-channels' DFS
+ * state is NL80211_DFS_USABLE, which indicates CAC needs to be
+ * done before channel usage. This flag is used to drop rx packets
+ * during CAC.
+ */
+ */
+ /* TODO Set the flag for other interface types as required */
+ if (arvif->vdev_type == WMI_VDEV_TYPE_AP && ctx->radar_enabled &&
+ cfg80211_chandef_dfs_usable(ar->hw->wiphy, chandef)) {
+ set_bit(ATH11K_CAC_RUNNING, &ar->dev_flags);
+ dfs_cac_time = cfg80211_chandef_dfs_cac_time(ar->hw->wiphy,
+ chandef);
+ ath11k_dbg(ab, ATH11K_DBG_MAC,
+ "cac started dfs_cac_time %u center_freq %d center_freq1 %d for vdev %d\n",
+ dfs_cac_time, arg.channel.freq, chandef->center_freq1,
+ arg.vdev_id);
+ }
+
+ ret = ath11k_mac_set_txbf_conf(arvif);
+ if (ret)
+ ath11k_warn(ab, "failed to set txbf conf for vdev %d: %d\n",
+ arvif->vdev_id, ret);
+
+ return 0;
+}
+
+static int ath11k_mac_vdev_stop(struct ath11k_vif *arvif)
+{
+ struct ath11k *ar = arvif->ar;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ reinit_completion(&ar->vdev_setup_done);
+
+ ret = ath11k_wmi_vdev_stop(ar, arvif->vdev_id);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to stop WMI vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ goto err;
+ }
+
+ ret = ath11k_mac_vdev_setup_sync(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to synchronize setup for vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ goto err;
+ }
+
+ WARN_ON(ar->num_started_vdevs == 0);
+
+ ar->num_started_vdevs--;
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "vdev %pM stopped, vdev_id %d\n",
+ arvif->vif->addr, arvif->vdev_id);
+
+ if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) {
+ clear_bit(ATH11K_CAC_RUNNING, &ar->dev_flags);
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "CAC Stopped for vdev %d\n",
+ arvif->vdev_id);
+ }
+
+ return 0;
+err:
+ return ret;
+}
+
+static int ath11k_mac_vdev_start(struct ath11k_vif *arvif,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ return ath11k_mac_vdev_start_restart(arvif, ctx, false);
+}
+
+static int ath11k_mac_vdev_restart(struct ath11k_vif *arvif,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ return ath11k_mac_vdev_start_restart(arvif, ctx, true);
+}
+
+struct ath11k_mac_change_chanctx_arg {
+ struct ieee80211_chanctx_conf *ctx;
+ struct ieee80211_vif_chanctx_switch *vifs;
+ int n_vifs;
+ int next_vif;
+};
+
+static void
+ath11k_mac_change_chanctx_cnt_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct ath11k_mac_change_chanctx_arg *arg = data;
+
+ if (rcu_access_pointer(vif->bss_conf.chanctx_conf) != arg->ctx)
+ return;
+
+ arg->n_vifs++;
+}
+
+static void
+ath11k_mac_change_chanctx_fill_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct ath11k_mac_change_chanctx_arg *arg = data;
+ struct ieee80211_chanctx_conf *ctx;
+
+ ctx = rcu_access_pointer(vif->bss_conf.chanctx_conf);
+ if (ctx != arg->ctx)
+ return;
+
+ if (WARN_ON(arg->next_vif == arg->n_vifs))
+ return;
+
+ arg->vifs[arg->next_vif].vif = vif;
+ arg->vifs[arg->next_vif].old_ctx = ctx;
+ arg->vifs[arg->next_vif].new_ctx = ctx;
+ arg->next_vif++;
+}
+
+static void
+ath11k_mac_update_vif_chan(struct ath11k *ar,
+ struct ieee80211_vif_chanctx_switch *vifs,
+ int n_vifs)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_vif *arvif, *tx_arvif;
+ int ret;
+ int i;
+ bool monitor_vif = false;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ /* Associated channel resources of all relevant vdevs
+ * should be available for the channel switch now.
+ */
+
+ /* TODO: Update ar->rx_channel */
+
+ for (i = 0; i < n_vifs; i++) {
+ arvif = ath11k_vif_to_arvif(vifs[i].vif);
+
+ /* note any monitor vif so the internal monitor vdev is
+ * not restarted separately below
+ */
+ if (vifs[i].vif->type == NL80211_IFTYPE_MONITOR)
+ monitor_vif = true;
+
+ if (WARN_ON(!arvif->is_started))
+ continue;
+
+ /* change_chanctx can be called even before vdev_up from
+ * ieee80211_start_ap->ieee80211_vif_use_channel->
+ * ieee80211_recalc_radar_chanctx.
+ *
+ * Firmware expects vdev_restart only if the vdev is up.
+ * If the vdev is down then it expects vdev_stop->vdev_start.
+ */
+ if (arvif->is_up) {
+ ret = ath11k_mac_vdev_restart(arvif, vifs[i].new_ctx);
+ if (ret) {
+ ath11k_warn(ab, "failed to restart vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ continue;
+ }
+ } else {
+ ret = ath11k_mac_vdev_stop(arvif);
+ if (ret) {
+ ath11k_warn(ab, "failed to stop vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ continue;
+ }
+
+ ret = ath11k_mac_vdev_start(arvif, vifs[i].new_ctx);
+ if (ret)
+ ath11k_warn(ab, "failed to start vdev %d: %d\n",
+ arvif->vdev_id, ret);
+
+ continue;
+ }
+
+ ret = ath11k_mac_setup_bcn_tmpl(arvif);
+ if (ret)
+ ath11k_warn(ab, "failed to update bcn tmpl during csa: %d\n",
+ ret);
+
+ tx_arvif = ath11k_mac_get_tx_arvif(arvif);
+ ret = ath11k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
+ arvif->bssid,
+ tx_arvif ? tx_arvif->bssid : NULL,
+ arvif->vif->bss_conf.bssid_index,
+ 1 << arvif->vif->bss_conf.bssid_indicator);
+ if (ret) {
+ ath11k_warn(ab, "failed to bring vdev up %d: %d\n",
+ arvif->vdev_id, ret);
+ continue;
+ }
+ }
+
+ /* Restart the internal monitor vdev on new channel */
+ if (!monitor_vif &&
+ test_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags)) {
+ ret = ath11k_mac_monitor_stop(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to stop monitor during vif channel update: %d",
+ ret);
+ return;
+ }
+
+ ret = ath11k_mac_monitor_start(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to start monitor during vif channel update: %d",
+ ret);
+ return;
+ }
+ }
+}
+
+static void
+ath11k_mac_update_active_vif_chan(struct ath11k *ar,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct ath11k_mac_change_chanctx_arg arg = { .ctx = ctx };
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ieee80211_iterate_active_interfaces_atomic(ar->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ ath11k_mac_change_chanctx_cnt_iter,
+ &arg);
+ if (arg.n_vifs == 0)
+ return;
+
+ arg.vifs = kcalloc(arg.n_vifs, sizeof(arg.vifs[0]), GFP_KERNEL);
+ if (!arg.vifs)
+ return;
+
+ ieee80211_iterate_active_interfaces_atomic(ar->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ ath11k_mac_change_chanctx_fill_iter,
+ &arg);
+
+ ath11k_mac_update_vif_chan(ar, arg.vifs, arg.n_vifs);
+
+ kfree(arg.vifs);
+}
+
+static void ath11k_mac_op_change_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx,
+ u32 changed)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_base *ab = ar->ab;
+
+ mutex_lock(&ar->conf_mutex);
+
+ ath11k_dbg(ab, ATH11K_DBG_MAC,
+ "chanctx change freq %u width %d ptr %p changed %x\n",
+ ctx->def.chan->center_freq, ctx->def.width, ctx, changed);
+
+ /* This shouldn't really happen because channel switching should use
+ * switch_vif_chanctx().
+ */
+ if (WARN_ON(changed & IEEE80211_CHANCTX_CHANGE_CHANNEL))
+ goto unlock;
+
+ if (changed & IEEE80211_CHANCTX_CHANGE_WIDTH ||
+ changed & IEEE80211_CHANCTX_CHANGE_RADAR)
+ ath11k_mac_update_active_vif_chan(ar, ctx);
+
+ /* TODO: Recalc radar detection */
+
+unlock:
+ mutex_unlock(&ar->conf_mutex);
+}
+
+static int ath11k_mac_start_vdev_delay(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ int ret;
+
+ if (WARN_ON(arvif->is_started))
+ return -EBUSY;
+
+ ret = ath11k_mac_vdev_start(arvif, &arvif->chanctx);
+ if (ret) {
+ ath11k_warn(ab, "failed to start vdev %i addr %pM on freq %d: %d\n",
+ arvif->vdev_id, vif->addr,
+ arvif->chanctx.def.chan->center_freq, ret);
+ return ret;
+ }
+
+ /* Reconfigure hardware rate code since it is cleared by firmware */
+ if (ar->hw_rate_code > 0) {
+ u32 vdev_param = WMI_VDEV_PARAM_MGMT_RATE;
+
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, vdev_param,
+ ar->hw_rate_code);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set mgmt tx rate %d\n", ret);
+ return ret;
+ }
+ }
+
+ if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
+ ret = ath11k_wmi_vdev_up(ar, arvif->vdev_id, 0, ar->mac_addr,
+ NULL, 0, 0);
+ if (ret) {
+ ath11k_warn(ab, "failed put monitor up: %d\n", ret);
+ return ret;
+ }
+ }
+
+ arvif->is_started = true;
+
+ /* TODO: Setup ps and cts/rts protection */
+ return 0;
+}
+
+static int ath11k_mac_stop_vdev_early(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ int ret;
+
+ if (WARN_ON(!arvif->is_started))
+ return -EBUSY;
+
+ ret = ath11k_mac_vdev_stop(arvif);
+ if (ret) {
+ ath11k_warn(ab, "failed to stop vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ arvif->is_started = false;
+
+ /* TODO: Setup ps and cts/rts protection */
+ return 0;
+}
+
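+/* On PSD channels one power level is needed per 20 MHz subchannel
+ * (1/2/4/8 for 20/40/80/160 & 80+80 MHz); otherwise one level per
+ * bandwidth step (1/2/3/4 for 20/40/80/160 & 80+80 MHz).
+ */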
+static u8 ath11k_mac_get_num_pwr_levels(struct cfg80211_chan_def *chan_def)
+{
+ if (chan_def->chan->flags & IEEE80211_CHAN_PSD) {
+ switch (chan_def->width) {
+ case NL80211_CHAN_WIDTH_20:
+ return 1;
+ case NL80211_CHAN_WIDTH_40:
+ return 2;
+ case NL80211_CHAN_WIDTH_80:
+ return 4;
+ case NL80211_CHAN_WIDTH_80P80:
+ case NL80211_CHAN_WIDTH_160:
+ return 8;
+ default:
+ return 1;
+ }
+ } else {
+ switch (chan_def->width) {
+ case NL80211_CHAN_WIDTH_20:
+ return 1;
+ case NL80211_CHAN_WIDTH_40:
+ return 2;
+ case NL80211_CHAN_WIDTH_80:
+ return 3;
+ case NL80211_CHAN_WIDTH_80P80:
+ case NL80211_CHAN_WIDTH_160:
+ return 4;
+ default:
+ return 1;
+ }
+ }
+}
+
+static u16 ath11k_mac_get_6ghz_start_frequency(struct cfg80211_chan_def *chan_def)
+{
+ u16 diff_seq;
+
+ /* Get the center frequency of the chan's lowest channel number.
+ * For example,
+ * bandwidth=40 MHz, center frequency is 5965, lowest channel is 1
+ * with center frequency 5955, its diff is 5965 - 5955 = 10.
+ * bandwidth=80 MHz, center frequency is 5985, lowest channel is 1
+ * with center frequency 5955, its diff is 5985 - 5955 = 30.
+ * bandwidth=160 MHz, center frequency is 6025, lowest channel is 1
+ * with center frequency 5955, its diff is 6025 - 5955 = 70.
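+ * In general, diff_seq = (bandwidth - 20) / 2 MHz.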
+ */
+ switch (chan_def->width) {
+ case NL80211_CHAN_WIDTH_160:
+ diff_seq = 70;
+ break;
+ case NL80211_CHAN_WIDTH_80:
+ case NL80211_CHAN_WIDTH_80P80:
+ diff_seq = 30;
+ break;
+ case NL80211_CHAN_WIDTH_40:
+ diff_seq = 10;
+ break;
+ default:
+ diff_seq = 0;
+ }
+
+ return chan_def->center_freq1 - diff_seq;
+}
+
+static u16 ath11k_mac_get_seg_freq(struct cfg80211_chan_def *chan_def,
+ u16 start_seq, u8 seq)
+{
+ u16 seg_seq;
+
+ /* Get the center frequency of the segment for a given bandwidth.
+ * start_seq is the center frequency of the lowest channel number.
+ * seq 0/1/2/3 selects 20 MHz/40 MHz/80 MHz/160 MHz&80P80.
+ * For example,
+ * lowest channel is 1, its center frequency 5955,
+ * center frequency is 5955 when bandwidth=20 MHz, its diff is 5955 - 5955 = 0.
+ * lowest channel is 1, its center frequency 5955,
+ * center frequency is 5965 when bandwidth=40 MHz, its diff is 5965 - 5955 = 10.
+ * lowest channel is 1, its center frequency 5955,
+ * center frequency is 5985 when bandwidth=80 MHz, its diff is 5985 - 5955 = 30.
+ * lowest channel is 1, its center frequency 5955,
+ * center frequency is 6025 when bandwidth=160 MHz, its diff is 6025 - 5955 = 70.
+ */
+ if (chan_def->width == NL80211_CHAN_WIDTH_80P80 && seq == 3)
+ return chan_def->center_freq2;
+
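+ /* Offset of the segment center from the lowest channel's center
+ * frequency is 10 * (2^seq - 1) MHz, i.e. 0/10/30/70 for seq 0/1/2/3.
+ */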
+ seg_seq = 10 * (BIT(seq) - 1);
+ return seg_seq + start_seq;
+}
+
+static void ath11k_mac_get_psd_channel(struct ath11k *ar,
+ u16 step_freq,
+ u16 *start_freq,
+ u16 *center_freq,
+ u8 i,
+ struct ieee80211_channel **temp_chan,
+ s8 *tx_power)
+{
+ /* Get the center frequency of each 20 MHz subchannel.
+ * For example, a 160 MHz chan with center frequency 6025 includes
+ * 8 channels: 1/5/9/13/17/21/25/29. Channel number 1's center
+ * frequency is 5955; that is the parameter start_freq. Parameter i
+ * is the subchannel index, 0~7 for the 8 channels, so channels
+ * 1/5/9/13/17/21/25/29 map to i = 0/1/2/3/4/5/6/7 with center
+ * frequencies 5955/5975/5995/6015/6035/6055/6075/6095. The gap
+ * between adjacent channels is 20 MHz; parameter step_freq is that gap.
+ * From the center frequency of each channel it is easy to find its
+ * struct ieee80211_channel and get the max_reg_power.
+ */
+ *center_freq = *start_freq + i * step_freq;
+ *temp_chan = ieee80211_get_channel(ar->hw->wiphy, *center_freq);
+ *tx_power = (*temp_chan)->max_reg_power;
+}
+
+static void ath11k_mac_get_eirp_power(struct ath11k *ar,
+ u16 *start_freq,
+ u16 *center_freq,
+ u8 i,
+ struct ieee80211_channel **temp_chan,
+ struct cfg80211_chan_def *def,
+ s8 *tx_power)
+{
+ /* Get the segment center frequency for the 20 MHz/40 MHz/80 MHz/
+ * 160 MHz&80P80 bandwidth, then add 10 MHz to obtain the center
+ * frequency of a channel number.
+ * For example, when the configured channel number is 1:
+ * the center frequency is 5965 for bandwidth=40 MHz; plus 10 it is
+ * 5975, which is channel number 5.
+ * the center frequency is 5985 for bandwidth=80 MHz; plus 10 it is
+ * 5995, which is channel number 9.
+ * the center frequency is 6025 for bandwidth=160 MHz; plus 10 it is
+ * 6035, which is channel number 17.
+ * From the center frequency of each channel it is easy to find its
+ * struct ieee80211_channel and get the max_reg_power.
+ */
+ *center_freq = ath11k_mac_get_seg_freq(def, *start_freq, i);
+
+ /* For 20 MHz the segment center frequency is already the channel's center frequency */
+ if (i != 0)
+ *center_freq += 10;
+
+ *temp_chan = ieee80211_get_channel(ar->hw->wiphy, *center_freq);
+ *tx_power = (*temp_chan)->max_reg_power;
+}
+
+void ath11k_mac_fill_reg_tpc_info(struct ath11k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
+ struct ath11k_reg_tpc_power_info *reg_tpc_info = &arvif->reg_tpc_info;
+ struct ieee80211_channel *chan, *temp_chan;
+ u8 pwr_lvl_idx, num_pwr_levels, pwr_reduction;
+ bool is_psd_power = false, is_tpe_present = false;
+ s8 max_tx_power[ATH11K_NUM_PWR_LEVELS],
+ psd_power, tx_power;
+ s8 eirp_power = 0;
+ u16 start_freq, center_freq;
+
+ chan = ctx->def.chan;
+ start_freq = ath11k_mac_get_6ghz_start_frequency(&ctx->def);
+ pwr_reduction = bss_conf->pwr_reduction;
+
+ if (arvif->reg_tpc_info.num_pwr_levels) {
+ is_tpe_present = true;
+ num_pwr_levels = arvif->reg_tpc_info.num_pwr_levels;
+ } else {
+ num_pwr_levels =
+ ath11k_mac_get_num_pwr_levels(&bss_conf->chanreq.oper);
+ }
+
+ for (pwr_lvl_idx = 0; pwr_lvl_idx < num_pwr_levels; pwr_lvl_idx++) {
+ /* STA received TPE IE */
+ if (is_tpe_present) {
+ /* local power is PSD power */
+ if (chan->flags & IEEE80211_CHAN_PSD) {
+ /* Connecting AP is psd power */
+ if (reg_tpc_info->is_psd_power) {
+ is_psd_power = true;
+ ath11k_mac_get_psd_channel(ar, 20,
+ &start_freq,
+ &center_freq,
+ pwr_lvl_idx,
+ &temp_chan,
+ &tx_power);
+ psd_power = temp_chan->psd;
+ eirp_power = tx_power;
+ max_tx_power[pwr_lvl_idx] =
+ min_t(s8,
+ psd_power,
+ reg_tpc_info->tpe[pwr_lvl_idx]);
+ /* Connecting AP is not psd power */
+ } else {
+ ath11k_mac_get_eirp_power(ar,
+ &start_freq,
+ &center_freq,
+ pwr_lvl_idx,
+ &temp_chan,
+ &ctx->def,
+ &tx_power);
+ psd_power = temp_chan->psd;
+ /* convert psd power to EIRP power based
+ * on channel width
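+ * (PSD power is in dBm/MHz: 13 dB ~= 10*log10(20 MHz)
+ * for a 20 MHz channel, plus 3 dB per doubling of
+ * bandwidth, one doubling per pwr_lvl_idx step)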
+ */
+ tx_power =
+ min_t(s8, tx_power,
+ psd_power + 13 + pwr_lvl_idx * 3);
+ max_tx_power[pwr_lvl_idx] =
+ min_t(s8,
+ tx_power,
+ reg_tpc_info->tpe[pwr_lvl_idx]);
+ }
+ /* local power is not PSD power */
+ } else {
+ /* Connecting AP is psd power */
+ if (reg_tpc_info->is_psd_power) {
+ is_psd_power = true;
+ ath11k_mac_get_psd_channel(ar, 20,
+ &start_freq,
+ &center_freq,
+ pwr_lvl_idx,
+ &temp_chan,
+ &tx_power);
+ eirp_power = tx_power;
+ max_tx_power[pwr_lvl_idx] =
+ reg_tpc_info->tpe[pwr_lvl_idx];
+ /* Connecting AP is not psd power */
+ } else {
+ ath11k_mac_get_eirp_power(ar,
+ &start_freq,
+ &center_freq,
+ pwr_lvl_idx,
+ &temp_chan,
+ &ctx->def,
+ &tx_power);
+ max_tx_power[pwr_lvl_idx] =
+ min_t(s8,
+ tx_power,
+ reg_tpc_info->tpe[pwr_lvl_idx]);
+ }
+ }
+ /* STA not received TPE IE */
+ } else {
+ /* local power is PSD power */
+ if (chan->flags & IEEE80211_CHAN_PSD) {
+ is_psd_power = true;
+ ath11k_mac_get_psd_channel(ar, 20,
+ &start_freq,
+ &center_freq,
+ pwr_lvl_idx,
+ &temp_chan,
+ &tx_power);
+ psd_power = temp_chan->psd;
+ eirp_power = tx_power;
+ max_tx_power[pwr_lvl_idx] = psd_power;
+ } else {
+ ath11k_mac_get_eirp_power(ar,
+ &start_freq,
+ &center_freq,
+ pwr_lvl_idx,
+ &temp_chan,
+ &ctx->def,
+ &tx_power);
+ max_tx_power[pwr_lvl_idx] = tx_power;
+ }
+ }
+
+ if (is_psd_power) {
+ /* If AP local power constraint is present */
+ if (pwr_reduction)
+ eirp_power = eirp_power - pwr_reduction;
+
+ /* If the firmware-reported max tx power is non-zero, take
+ * the min of the firmware-reported ap tx power and the max
+ * power derived from the parameters above.
+ */
+ ath11k_dbg(ab, ATH11K_DBG_MAC,
+ "eirp power : %d firmware report power : %d\n",
+ eirp_power, ar->max_allowed_tx_power);
+ /* Firmware reports lower max_allowed_tx_power during vdev
+ * start response. In case of 6 GHz, firmware is not aware
+ * of EIRP power unless driver sets EIRP power through WMI
+ * TPC command. So radio which does not support idle power
+ * save can set maximum calculated EIRP power directly to
+ * firmware through TPC command without min comparison with
+ * vdev start response's max_allowed_tx_power.
+ */
+ if (ar->max_allowed_tx_power && ab->hw_params.idle_ps)
+ eirp_power = min_t(s8,
+ eirp_power,
+ ar->max_allowed_tx_power);
+ } else {
+ /* If AP local power constraint is present */
+ if (pwr_reduction)
+ max_tx_power[pwr_lvl_idx] =
+ max_tx_power[pwr_lvl_idx] - pwr_reduction;
+ /* If the firmware-reported max tx power is non-zero, take
+ * the min of the firmware-reported ap tx power and the max
+ * power derived from the parameters above.
+ */
+ if (ar->max_allowed_tx_power && ab->hw_params.idle_ps)
+ max_tx_power[pwr_lvl_idx] =
+ min_t(s8,
+ max_tx_power[pwr_lvl_idx],
+ ar->max_allowed_tx_power);
+ }
+ reg_tpc_info->chan_power_info[pwr_lvl_idx].chan_cfreq = center_freq;
+ reg_tpc_info->chan_power_info[pwr_lvl_idx].tx_power =
+ max_tx_power[pwr_lvl_idx];
+ }
+
+ reg_tpc_info->num_pwr_levels = num_pwr_levels;
+ reg_tpc_info->is_psd_power = is_psd_power;
+ reg_tpc_info->eirp_power = eirp_power;
+ reg_tpc_info->ap_power_type =
+ ath11k_reg_ap_pwr_convert(vif->bss_conf.power_type);
+}
+
+static void ath11k_mac_parse_tx_pwr_env(struct ath11k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
+ struct ieee80211_parsed_tpe_eirp *non_psd = NULL;
+ struct ieee80211_parsed_tpe_psd *psd = NULL;
+ enum wmi_reg_6ghz_client_type client_type;
+ struct cur_regulatory_info *reg_info;
+ u8 local_tpe_count, reg_tpe_count;
+ bool use_local_tpe;
+ int i;
+
+ reg_info = &ab->reg_info_store[ar->pdev_idx];
+ client_type = reg_info->client_type;
+
+ local_tpe_count =
+ bss_conf->tpe.max_local[client_type].valid +
+ bss_conf->tpe.psd_local[client_type].valid;
+ reg_tpe_count =
+ bss_conf->tpe.max_reg_client[client_type].valid +
+ bss_conf->tpe.psd_reg_client[client_type].valid;
+
+ if (!reg_tpe_count && !local_tpe_count) {
+ ath11k_warn(ab,
+ "no transmit power envelope match client power type %d\n",
+ client_type);
+ return;
+ } else if (!reg_tpe_count) {
+ use_local_tpe = true;
+ } else {
+ use_local_tpe = false;
+ }
+
+ if (use_local_tpe) {
+ psd = &bss_conf->tpe.psd_local[client_type];
+ if (!psd->valid)
+ psd = NULL;
+ non_psd = &bss_conf->tpe.max_local[client_type];
+ if (!non_psd->valid)
+ non_psd = NULL;
+ } else {
+ psd = &bss_conf->tpe.psd_reg_client[client_type];
+ if (!psd->valid)
+ psd = NULL;
+ non_psd = &bss_conf->tpe.max_reg_client[client_type];
+ if (!non_psd->valid)
+ non_psd = NULL;
+ }
+
+ if (non_psd && !psd) {
+ arvif->reg_tpc_info.is_psd_power = false;
+ arvif->reg_tpc_info.eirp_power = 0;
+
+ arvif->reg_tpc_info.num_pwr_levels = non_psd->count;
+
+ for (i = 0; i < arvif->reg_tpc_info.num_pwr_levels; i++) {
+ ath11k_dbg(ab, ATH11K_DBG_MAC,
+ "non PSD power[%d] : %d\n",
+ i, non_psd->power[i]);
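+ /* TPE power values are encoded in 0.5 dB steps, hence the / 2 */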
+ arvif->reg_tpc_info.tpe[i] = non_psd->power[i] / 2;
+ }
+ }
+
+ if (psd) {
+ arvif->reg_tpc_info.is_psd_power = true;
+ arvif->reg_tpc_info.num_pwr_levels = psd->count;
+
+ for (i = 0; i < arvif->reg_tpc_info.num_pwr_levels; i++) {
+ ath11k_dbg(ab, ATH11K_DBG_MAC,
+ "TPE PSD power[%d] : %d\n",
+ i, psd->power[i]);
+ arvif->reg_tpc_info.tpe[i] = psd->power[i] / 2;
+ }
+ }
+}
+
+static int
+ath11k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ ath11k_dbg(ab, ATH11K_DBG_MAC,
+ "chanctx assign ptr %p vdev_id %i\n",
+ ctx, arvif->vdev_id);
+
+ if (ath11k_wmi_supports_6ghz_cc_ext(ar) &&
+ ctx->def.chan->band == NL80211_BAND_6GHZ &&
+ arvif->vdev_type == WMI_VDEV_TYPE_STA) {
+ arvif->chanctx = *ctx;
+ ath11k_mac_parse_tx_pwr_env(ar, vif, ctx);
+ }
+
+ /* for QCA6390 bss peer must be created before vdev_start */
+ if (ab->hw_params.vdev_start_delay &&
+ arvif->vdev_type != WMI_VDEV_TYPE_AP &&
+ arvif->vdev_type != WMI_VDEV_TYPE_MONITOR &&
+ !ath11k_peer_find_by_vdev_id(ab, arvif->vdev_id)) {
+ memcpy(&arvif->chanctx, ctx, sizeof(*ctx));
+ ret = 0;
+ goto out;
+ }
+
+ if (WARN_ON(arvif->is_started)) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
+ ret = ath11k_mac_monitor_start(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to start monitor during vif channel context assignment: %d",
+ ret);
+ goto out;
+ }
+
+ arvif->is_started = true;
+ goto out;
+ }
+
+ if (!arvif->is_started) {
+ ret = ath11k_mac_vdev_start(arvif, ctx);
+ if (ret) {
+ ath11k_warn(ab, "failed to start vdev %i addr %pM on freq %d: %d\n",
+ arvif->vdev_id, vif->addr,
+ ctx->def.chan->center_freq, ret);
+ goto out;
+ }
+
+ arvif->is_started = true;
+ }
+
+ if (arvif->vdev_type != WMI_VDEV_TYPE_MONITOR &&
+ test_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags)) {
+ ret = ath11k_mac_monitor_start(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to start monitor during vif channel context assignment: %d",
+ ret);
+ goto out;
+ }
+ }
+
+ /* TODO: Setup ps and cts/rts protection */
+
+ ret = 0;
+
+out:
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static void
+ath11k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct ath11k_peer *peer;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ ath11k_dbg(ab, ATH11K_DBG_MAC,
+ "chanctx unassign ptr %p vdev_id %i\n",
+ ctx, arvif->vdev_id);
+
+ if (ab->hw_params.vdev_start_delay &&
+ arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
+ spin_lock_bh(&ab->base_lock);
+ peer = ath11k_peer_find_by_addr(ab, ar->mac_addr);
+ spin_unlock_bh(&ab->base_lock);
+ if (peer)
+ ath11k_peer_delete(ar, arvif->vdev_id, ar->mac_addr);
+ }
+
+ if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
+ ret = ath11k_mac_monitor_stop(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to stop monitor during vif channel context unassignment: %d",
+ ret);
+ mutex_unlock(&ar->conf_mutex);
+ return;
+ }
+
+ arvif->is_started = false;
+ mutex_unlock(&ar->conf_mutex);
+ return;
+ }
+
+ if (arvif->is_started) {
+ ret = ath11k_mac_vdev_stop(arvif);
+ if (ret)
+ ath11k_warn(ab, "failed to stop vdev %i: %d\n",
+ arvif->vdev_id, ret);
+
+ arvif->is_started = false;
+ }
+
+ if (ab->hw_params.vdev_start_delay &&
+ arvif->vdev_type == WMI_VDEV_TYPE_MONITOR)
+ ath11k_wmi_vdev_down(ar, arvif->vdev_id);
+
+ if (arvif->vdev_type != WMI_VDEV_TYPE_MONITOR &&
+ ar->num_started_vdevs == 1 &&
+ test_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags)) {
+ ret = ath11k_mac_monitor_stop(ar);
+ if (ret)
+ /* continue even if there's an error */
+ ath11k_warn(ar->ab, "failed to stop monitor during vif channel context unassignment: %d",
+ ret);
+ }
+
+ if (arvif->vdev_type == WMI_VDEV_TYPE_STA)
+ ath11k_mac_11d_scan_start(ar, arvif->vdev_id);
+
+ mutex_unlock(&ar->conf_mutex);
+}
+
+static int
+ath11k_mac_op_switch_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif_chanctx_switch *vifs,
+ int n_vifs,
+ enum ieee80211_chanctx_switch_mode mode)
+{
+ struct ath11k *ar = hw->priv;
+
+ mutex_lock(&ar->conf_mutex);
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "chanctx switch n_vifs %d mode %d\n",
+ n_vifs, mode);
+ ath11k_mac_update_vif_chan(ar, vifs, n_vifs);
+
+ mutex_unlock(&ar->conf_mutex);
+
+ return 0;
+}
+
+static int
+ath11k_set_vdev_param_to_all_vifs(struct ath11k *ar, int param, u32 value)
+{
+ struct ath11k_vif *arvif;
+ int ret = 0;
+
+ mutex_lock(&ar->conf_mutex);
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "setting mac vdev %d param %d value %d\n",
+ param, arvif->vdev_id, value);
+
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ param, value);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set param %d for vdev %d: %d\n",
+ param, arvif->vdev_id, ret);
+ break;
+ }
+ }
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+/* mac80211 stores a device-specific RTS/fragmentation threshold value, but
+ * the firmware expects it per interface, so the ath11k driver applies it to
+ * every vdev.
+ */
+static int ath11k_mac_op_set_rts_threshold(struct ieee80211_hw *hw,
+ int radio_idx, u32 value)
+{
+ struct ath11k *ar = hw->priv;
+ int param_id = WMI_VDEV_PARAM_RTS_THRESHOLD;
+
+ return ath11k_set_vdev_param_to_all_vifs(ar, param_id, value);
+}
+
+static int ath11k_mac_op_set_frag_threshold(struct ieee80211_hw *hw,
+ int radio_idx, u32 value)
+{
+ /* Even though there's a WMI vdev param for the fragmentation threshold,
+ * no known firmware actually implements it. Moreover, it is not possible
+ * to delegate frame fragmentation to mac80211 because the firmware clears
+ * the "more fragments" bit in the frame control field, which makes it
+ * impossible for remote devices to reassemble frames.
+ *
+ * Hence implement a dummy callback just to say fragmentation isn't
+ * supported. This effectively prevents mac80211 from doing frame
+ * fragmentation in software.
+ */
+ return -EOPNOTSUPP;
+}
+
+static int ath11k_mac_flush_tx_complete(struct ath11k *ar)
+{
+ long time_left;
+ int ret = 0;
+
+ time_left = wait_event_timeout(ar->dp.tx_empty_waitq,
+ (atomic_read(&ar->dp.num_tx_pending) == 0),
+ ATH11K_FLUSH_TIMEOUT);
+ if (time_left == 0) {
+ ath11k_warn(ar->ab, "failed to flush transmit queue, data pkts pending %d\n",
+ atomic_read(&ar->dp.num_tx_pending));
+ ret = -ETIMEDOUT;
+ }
+
+ time_left = wait_event_timeout(ar->txmgmt_empty_waitq,
+ (atomic_read(&ar->num_pending_mgmt_tx) == 0),
+ ATH11K_FLUSH_TIMEOUT);
+ if (time_left == 0) {
+ ath11k_warn(ar->ab, "failed to flush mgmt transmit queue, mgmt pkts pending %d\n",
+ atomic_read(&ar->num_pending_mgmt_tx));
+ ret = -ETIMEDOUT;
+ }
+
+ return ret;
+}
+
+int ath11k_mac_wait_tx_complete(struct ath11k *ar)
+{
+ ath11k_mac_drain_tx(ar);
+ return ath11k_mac_flush_tx_complete(ar);
+}
+
+static void ath11k_mac_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u32 queues, bool drop)
+{
+ struct ath11k *ar = hw->priv;
+
+ if (drop)
+ return;
+
+ ath11k_mac_flush_tx_complete(ar);
+}
+
+static bool
+ath11k_mac_has_single_legacy_rate(struct ath11k *ar,
+ enum nl80211_band band,
+ const struct cfg80211_bitrate_mask *mask)
+{
+ int num_rates = 0;
+
+ num_rates = hweight32(mask->control[band].legacy);
+
+ if (ath11k_mac_bitrate_mask_num_ht_rates(ar, band, mask))
+ return false;
+
+ if (ath11k_mac_bitrate_mask_num_vht_rates(ar, band, mask))
+ return false;
+
+ if (ath11k_mac_bitrate_mask_num_he_rates(ar, band, mask))
+ return false;
+
+ return num_rates == 1;
+}
+
+static __le16
+ath11k_mac_get_tx_mcs_map(const struct ieee80211_sta_he_cap *he_cap)
+{
+ if (he_cap->he_cap_elem.phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)
+ return he_cap->he_mcs_nss_supp.tx_mcs_80p80;
+
+ if (he_cap->he_cap_elem.phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G)
+ return he_cap->he_mcs_nss_supp.tx_mcs_160;
+
+ return he_cap->he_mcs_nss_supp.tx_mcs_80;
+}
+
+static bool
+ath11k_mac_bitrate_mask_get_single_nss(struct ath11k *ar,
+ struct ath11k_vif *arvif,
+ enum nl80211_band band,
+ const struct cfg80211_bitrate_mask *mask,
+ int *nss)
+{
+ struct ieee80211_supported_band *sband = &ar->mac.sbands[band];
+ u16 vht_mcs_map = le16_to_cpu(sband->vht_cap.vht_mcs.tx_mcs_map);
+ const struct ieee80211_sta_he_cap *he_cap;
+ u16 he_mcs_map = 0;
+ u8 ht_nss_mask = 0;
+ u8 vht_nss_mask = 0;
+ u8 he_nss_mask = 0;
+ int i;
+
+ /* No need to consider legacy rates here; basic rates are always
+ * present in the bitrate mask.
+ */
+
+ for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++) {
+ if (mask->control[band].ht_mcs[i] == 0)
+ continue;
+ else if (mask->control[band].ht_mcs[i] ==
+ sband->ht_cap.mcs.rx_mask[i])
+ ht_nss_mask |= BIT(i);
+ else
+ return false;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) {
+ if (mask->control[band].vht_mcs[i] == 0)
+ continue;
+ else if (mask->control[band].vht_mcs[i] ==
+ ath11k_mac_get_max_vht_mcs_map(vht_mcs_map, i))
+ vht_nss_mask |= BIT(i);
+ else
+ return false;
+ }
+
+ he_cap = ieee80211_get_he_iftype_cap_vif(sband, arvif->vif);
+ if (!he_cap)
+ return false;
+
+ he_mcs_map = le16_to_cpu(ath11k_mac_get_tx_mcs_map(he_cap));
+
+ for (i = 0; i < ARRAY_SIZE(mask->control[band].he_mcs); i++) {
+ if (mask->control[band].he_mcs[i] == 0)
+ continue;
+
+ if (mask->control[band].he_mcs[i] ==
+ ath11k_mac_get_max_he_mcs_map(he_mcs_map, i))
+ he_nss_mask |= BIT(i);
+ else
+ return false;
+ }
+
+ if (ht_nss_mask != vht_nss_mask || ht_nss_mask != he_nss_mask)
+ return false;
+
+ if (ht_nss_mask == 0)
+ return false;
+
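+ /* Require a contiguous NSS mask starting at NSS 1: e.g. 0x7 (NSS 1-3)
+ * maps to a single NSS value of 3, while 0x5 (NSS 1 and 3 only) does
+ * not describe a single NSS setting and is rejected.
+ */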
+ if (BIT(fls(ht_nss_mask)) - 1 != ht_nss_mask)
+ return false;
+
+ *nss = fls(ht_nss_mask);
+
+ return true;
+}
+
+static int
+ath11k_mac_get_single_legacy_rate(struct ath11k *ar,
+ enum nl80211_band band,
+ const struct cfg80211_bitrate_mask *mask,
+ u32 *rate, u8 *nss)
+{
+ int rate_idx;
+ u16 bitrate;
+ u8 preamble;
+ u8 hw_rate;
+
+ if (hweight32(mask->control[band].legacy) != 1)
+ return -EINVAL;
+
+ rate_idx = ffs(mask->control[band].legacy) - 1;
+
+ if (band == NL80211_BAND_5GHZ || band == NL80211_BAND_6GHZ)
+ rate_idx += ATH11K_MAC_FIRST_OFDM_RATE_IDX;
+
+ hw_rate = ath11k_legacy_rates[rate_idx].hw_value;
+ bitrate = ath11k_legacy_rates[rate_idx].bitrate;
+
+ if (ath11k_mac_bitrate_is_cck(bitrate))
+ preamble = WMI_RATE_PREAMBLE_CCK;
+ else
+ preamble = WMI_RATE_PREAMBLE_OFDM;
+
+ *nss = 1;
+ *rate = ATH11K_HW_RATE_CODE(hw_rate, 0, preamble);
+
+ return 0;
+}
+
+static int
+ath11k_mac_set_fixed_rate_gi_ltf(struct ath11k_vif *arvif, u8 he_gi, u8 he_ltf)
+{
+ struct ath11k *ar = arvif->ar;
+ int ret;
+
+ /* Map the nl80211 HE GI values (0.8 us = 0, 1.6 us = 1, 3.2 us = 2) to
+ * the WMI encoding (0.8 us = 0, 1.6 us = 2, 3.2 us = 3) by bumping the
+ * non-zero values by one. The 0xFF "unset" marker is passed through.
+ */
+ if (he_gi && he_gi != 0xFF)
+ he_gi += 1;
+
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ WMI_VDEV_PARAM_SGI, he_gi);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set he gi %d: %d\n",
+ he_gi, ret);
+ return ret;
+ }
+ /* WMI LTF values start from 1 while nl80211 values start from 0, so
+ * bump by one unless the value is the 0xFF "unset" marker.
+ */
+ if (he_ltf != 0xFF)
+ he_ltf += 1;
+
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ WMI_VDEV_PARAM_HE_LTF, he_ltf);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set he ltf %d: %d\n",
+ he_ltf, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+ath11k_mac_set_auto_rate_gi_ltf(struct ath11k_vif *arvif, u16 he_gi, u8 he_ltf)
+{
+ struct ath11k *ar = arvif->ar;
+ int ret;
+ u32 he_ar_gi_ltf;
+
+ if (he_gi != 0xFF) {
+ switch (he_gi) {
+ case NL80211_RATE_INFO_HE_GI_0_8:
+ he_gi = WMI_AUTORATE_800NS_GI;
+ break;
+ case NL80211_RATE_INFO_HE_GI_1_6:
+ he_gi = WMI_AUTORATE_1600NS_GI;
+ break;
+ case NL80211_RATE_INFO_HE_GI_3_2:
+ he_gi = WMI_AUTORATE_3200NS_GI;
+ break;
+ default:
+ ath11k_warn(ar->ab, "invalid he gi: %d\n", he_gi);
+ return -EINVAL;
+ }
+ }
+
+ if (he_ltf != 0xFF) {
+ switch (he_ltf) {
+ case NL80211_RATE_INFO_HE_1XLTF:
+ he_ltf = WMI_HE_AUTORATE_LTF_1X;
+ break;
+ case NL80211_RATE_INFO_HE_2XLTF:
+ he_ltf = WMI_HE_AUTORATE_LTF_2X;
+ break;
+ case NL80211_RATE_INFO_HE_4XLTF:
+ he_ltf = WMI_HE_AUTORATE_LTF_4X;
+ break;
+ default:
+ ath11k_warn(ar->ab, "invalid he ltf: %d\n", he_ltf);
+ return -EINVAL;
+ }
+ }
+
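+ /* The autorate GI and LTF selections occupy disjoint bit fields of
+ * WMI_VDEV_PARAM_AUTORATE_MISC_CFG, so the two values can simply be
+ * OR'ed together.
+ */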
+ he_ar_gi_ltf = he_gi | he_ltf;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ WMI_VDEV_PARAM_AUTORATE_MISC_CFG,
+ he_ar_gi_ltf);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to set he autorate gi %u ltf %u: %d\n",
+ he_gi, he_ltf, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath11k_mac_set_rate_params(struct ath11k_vif *arvif,
+ u32 rate, u8 nss, u8 sgi, u8 ldpc,
+ u8 he_gi, u8 he_ltf, bool he_fixed_rate)
+{
+ struct ath11k *ar = arvif->ar;
+ u32 vdev_param;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "set rate params vdev %i rate 0x%02x nss 0x%02x sgi 0x%02x ldpc 0x%02x he_gi 0x%02x he_ltf 0x%02x he_fixed_rate %d\n",
+ arvif->vdev_id, rate, nss, sgi, ldpc, he_gi,
+ he_ltf, he_fixed_rate);
+
+ if (!arvif->vif->bss_conf.he_support) {
+ vdev_param = WMI_VDEV_PARAM_FIXED_RATE;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ vdev_param, rate);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set fixed rate param 0x%02x: %d\n",
+ rate, ret);
+ return ret;
+ }
+ }
+
+ vdev_param = WMI_VDEV_PARAM_NSS;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ vdev_param, nss);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set nss param %d: %d\n",
+ nss, ret);
+ return ret;
+ }
+
+ vdev_param = WMI_VDEV_PARAM_LDPC;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ vdev_param, ldpc);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set ldpc param %d: %d\n",
+ ldpc, ret);
+ return ret;
+ }
+
+ if (arvif->vif->bss_conf.he_support) {
+ if (he_fixed_rate) {
+ ret = ath11k_mac_set_fixed_rate_gi_ltf(arvif, he_gi,
+ he_ltf);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set fixed rate gi ltf: %d\n",
+ ret);
+ return ret;
+ }
+ } else {
+ ret = ath11k_mac_set_auto_rate_gi_ltf(arvif, he_gi,
+ he_ltf);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set auto rate gi ltf: %d\n",
+ ret);
+ return ret;
+ }
+ }
+ } else {
+ vdev_param = WMI_VDEV_PARAM_SGI;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ vdev_param, sgi);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set sgi param %d: %d\n",
+ sgi, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static bool
+ath11k_mac_vht_mcs_range_present(struct ath11k *ar,
+ enum nl80211_band band,
+ const struct cfg80211_bitrate_mask *mask)
+{
+ int i;
+ u16 vht_mcs;
+
+ for (i = 0; i < NL80211_VHT_NSS_MAX; i++) {
+ vht_mcs = mask->control[band].vht_mcs[i];
+
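+ /* Only full VHT MCS ranges are acceptable: 0x00ff (MCS 0-7),
+ * 0x01ff (MCS 0-8) and 0x03ff (MCS 0-9), or 0 for a disabled NSS.
+ */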
+ switch (vht_mcs) {
+ case 0:
+ case BIT(8) - 1:
+ case BIT(9) - 1:
+ case BIT(10) - 1:
+ break;
+ default:
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static bool
+ath11k_mac_he_mcs_range_present(struct ath11k *ar,
+ enum nl80211_band band,
+ const struct cfg80211_bitrate_mask *mask)
+{
+ int i;
+ u16 he_mcs;
+
+ for (i = 0; i < NL80211_HE_NSS_MAX; i++) {
+ he_mcs = mask->control[band].he_mcs[i];
+
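+ /* Only full HE MCS ranges are acceptable: 0x00ff (MCS 0-7),
+ * 0x03ff (MCS 0-9) and 0x0fff (MCS 0-11), or 0 for a disabled NSS.
+ */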
+ switch (he_mcs) {
+ case 0:
+ case BIT(8) - 1:
+ case BIT(10) - 1:
+ case BIT(12) - 1:
+ break;
+ default:
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static void ath11k_mac_set_bitrate_mask_iter(void *data,
+ struct ieee80211_sta *sta)
+{
+ struct ath11k_vif *arvif = data;
+ struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta);
+ struct ath11k *ar = arvif->ar;
+
+ spin_lock_bh(&ar->data_lock);
+ arsta->changed |= IEEE80211_RC_SUPP_RATES_CHANGED;
+ spin_unlock_bh(&ar->data_lock);
+
+ ieee80211_queue_work(ar->hw, &arsta->update_wk);
+}
+
+static void ath11k_mac_disable_peer_fixed_rate(void *data,
+ struct ieee80211_sta *sta)
+{
+ struct ath11k_vif *arvif = data;
+ struct ath11k *ar = arvif->ar;
+ int ret;
+
+ ret = ath11k_wmi_set_peer_param(ar, sta->addr,
+ arvif->vdev_id,
+ WMI_PEER_PARAM_FIXED_RATE,
+ WMI_FIXED_RATE_NONE);
+ if (ret)
+ ath11k_warn(ar->ab,
+ "failed to disable peer fixed rate for STA %pM ret %d\n",
+ sta->addr, ret);
+}
+
+static bool
+ath11k_mac_validate_vht_he_fixed_rate_settings(struct ath11k *ar, enum nl80211_band band,
+ const struct cfg80211_bitrate_mask *mask)
+{
+ bool he_fixed_rate = false, vht_fixed_rate = false;
+ struct ath11k_peer *peer;
+ const u16 *vht_mcs_mask, *he_mcs_mask;
+ struct ieee80211_link_sta *deflink;
+ u8 vht_nss, he_nss;
+ bool ret = true;
+
+ vht_mcs_mask = mask->control[band].vht_mcs;
+ he_mcs_mask = mask->control[band].he_mcs;
+
+ if (ath11k_mac_bitrate_mask_num_vht_rates(ar, band, mask) == 1)
+ vht_fixed_rate = true;
+
+ if (ath11k_mac_bitrate_mask_num_he_rates(ar, band, mask) == 1)
+ he_fixed_rate = true;
+
+ if (!vht_fixed_rate && !he_fixed_rate)
+ return true;
+
+ vht_nss = ath11k_mac_max_vht_nss(vht_mcs_mask);
+ he_nss = ath11k_mac_max_he_nss(he_mcs_mask);
+
+ rcu_read_lock();
+ spin_lock_bh(&ar->ab->base_lock);
+ list_for_each_entry(peer, &ar->ab->peers, list) {
+ if (peer->sta) {
+ deflink = &peer->sta->deflink;
+
+ if (vht_fixed_rate && (!deflink->vht_cap.vht_supported ||
+ deflink->rx_nss < vht_nss)) {
+ ret = false;
+ goto out;
+ }
+
+ if (he_fixed_rate && (!deflink->he_cap.has_he ||
+ deflink->rx_nss < he_nss)) {
+ ret = false;
+ goto out;
+ }
+ }
+ }
+
+out:
+ spin_unlock_bh(&ar->ab->base_lock);
+ rcu_read_unlock();
+ return ret;
+}
+
+static int
+ath11k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ const struct cfg80211_bitrate_mask *mask)
+{
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct cfg80211_chan_def def;
+ struct ath11k_pdev_cap *cap;
+ struct ath11k *ar = arvif->ar;
+ enum nl80211_band band;
+ const u8 *ht_mcs_mask;
+ const u16 *vht_mcs_mask;
+ const u16 *he_mcs_mask;
+ u8 he_ltf = 0;
+ u8 he_gi = 0;
+ u32 rate;
+ u8 nss;
+ u8 sgi;
+ u8 ldpc;
+ int single_nss;
+ int ret;
+ int num_rates;
+ bool he_fixed_rate = false;
+
+ if (ath11k_mac_vif_chan(vif, &def))
+ return -EPERM;
+
+ band = def.chan->band;
+ cap = &ar->pdev->cap;
+ ht_mcs_mask = mask->control[band].ht_mcs;
+ vht_mcs_mask = mask->control[band].vht_mcs;
+ he_mcs_mask = mask->control[band].he_mcs;
+ ldpc = !!(cap->band[band].ht_cap_info & WMI_HT_CAP_TX_LDPC);
+
+ sgi = mask->control[band].gi;
+ if (sgi == NL80211_TXRATE_FORCE_LGI)
+ return -EINVAL;
+
+ he_gi = mask->control[band].he_gi;
+ he_ltf = mask->control[band].he_ltf;
+
+ /* mac80211 doesn't support sending a fixed HT/VHT MCS alone; it requires
+ * at least one of the used basic rates to be passed along with them.
+ * Setting a fixed rate across different preambles (legacy, HT, VHT) is
+ * not supported by the FW, so the FIXED_RATE vdev param is not suitable
+ * for setting single HT/VHT rates.
+ * However, userspace may pass a single basic rate, and that case can be
+ * handled through the FIXED_RATE param.
+ */
+ if (ath11k_mac_has_single_legacy_rate(ar, band, mask)) {
+ ret = ath11k_mac_get_single_legacy_rate(ar, band, mask, &rate,
+ &nss);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to get single legacy rate for vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ ieee80211_iterate_stations_mtx(ar->hw,
+ ath11k_mac_disable_peer_fixed_rate,
+ arvif);
+ } else if (ath11k_mac_bitrate_mask_get_single_nss(ar, arvif, band, mask,
+ &single_nss)) {
+ rate = WMI_FIXED_RATE_NONE;
+ nss = single_nss;
+ mutex_lock(&ar->conf_mutex);
+ arvif->bitrate_mask = *mask;
+ ieee80211_iterate_stations_atomic(ar->hw,
+ ath11k_mac_set_bitrate_mask_iter,
+ arvif);
+ mutex_unlock(&ar->conf_mutex);
+ } else {
+ rate = WMI_FIXED_RATE_NONE;
+
+ if (!ath11k_mac_validate_vht_he_fixed_rate_settings(ar, band, mask))
+ ath11k_warn(ar->ab,
+ "could not update fixed rate settings to all peers due to mcs/nss incompatibility\n");
+ nss = min_t(u32, ar->num_tx_chains,
+ ath11k_mac_max_nss(ht_mcs_mask, vht_mcs_mask, he_mcs_mask));
+
+ /* If multiple rates across different preambles are given,
+ * we can reconfigure this info for all peers using the PEER_ASSOC
+ * command, with the exceptions below.
+ * - Single VHT rate: the peer_assoc command accommodates only MCS
+ * range values, i.e. 0-7, 0-8, 0-9 for VHT. Though mac80211
+ * mandates passing basic rates along with HT/VHT rates, FW
+ * doesn't allow switching from VHT to legacy. Hence instead of
+ * setting legacy and VHT rates via the RATEMASK_CMD vdev cmd,
+ * we could set this VHT rate as the peer fixed rate param, which
+ * overrides the FIXED rate and the FW rate control algorithm.
+ * If a single VHT rate is passed along with HT rates, we select
+ * the VHT rate as the fixed rate for VHT peers.
+ * - Multiple VHT rates: when multiple VHT rates are given, they
+ * can be set using RATEMASK CMD, which uses the FW rate-ctl alg.
+ * TODO: Setting multiple VHT MCS values and replacing peer_assoc with
+ * RATEMASK_CMDID could cover all use cases of setting rates
+ * across multiple preambles and rates within the same type,
+ * but requires more validation of the command at this point.
+ */
+
+ num_rates = ath11k_mac_bitrate_mask_num_vht_rates(ar, band,
+ mask);
+
+ if (!ath11k_mac_vht_mcs_range_present(ar, band, mask) &&
+ num_rates > 1) {
+ /* TODO: Handle multiple VHT MCS values setting using
+ * RATEMASK CMD
+ */
+ ath11k_warn(ar->ab,
+ "setting %d mcs values in bitrate mask not supported\n",
+ num_rates);
+ return -EINVAL;
+ }
+
+ num_rates = ath11k_mac_bitrate_mask_num_he_rates(ar, band,
+ mask);
+ if (num_rates == 1)
+ he_fixed_rate = true;
+
+ if (!ath11k_mac_he_mcs_range_present(ar, band, mask) &&
+ num_rates > 1) {
+ ath11k_warn(ar->ab,
+ "Setting more than one HE MCS Value in bitrate mask not supported\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&ar->conf_mutex);
+ ieee80211_iterate_stations_mtx(ar->hw,
+ ath11k_mac_disable_peer_fixed_rate,
+ arvif);
+
+ arvif->bitrate_mask = *mask;
+ ieee80211_iterate_stations_atomic(ar->hw,
+ ath11k_mac_set_bitrate_mask_iter,
+ arvif);
+
+ mutex_unlock(&ar->conf_mutex);
+ }
+
+ mutex_lock(&ar->conf_mutex);
+
+ ret = ath11k_mac_set_rate_params(arvif, rate, nss, sgi, ldpc, he_gi,
+ he_ltf, he_fixed_rate);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set rate params on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static void
+ath11k_mac_op_reconfig_complete(struct ieee80211_hw *hw,
+ enum ieee80211_reconfig_type reconfig_type)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_base *ab = ar->ab;
+ int recovery_count;
+ struct ath11k_vif *arvif;
+
+ if (reconfig_type != IEEE80211_RECONFIG_TYPE_RESTART)
+ return;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state == ATH11K_STATE_RESTARTED) {
+ ath11k_warn(ar->ab, "pdev %d successfully recovered\n",
+ ar->pdev->pdev_id);
+ ar->state = ATH11K_STATE_ON;
+ ieee80211_wake_queues(ar->hw);
+
+ if (ar->ab->hw_params.current_cc_support &&
+ ar->alpha2[0] != 0 && ar->alpha2[1] != 0)
+ ath11k_reg_set_cc(ar);
+
+ if (ab->is_reset) {
+ recovery_count = atomic_inc_return(&ab->recovery_count);
+ ath11k_dbg(ab, ATH11K_DBG_BOOT,
+ "recovery count %d\n", recovery_count);
+ /* When there are multiple radios in an SOC,
+ * the recovery has to be done for each radio
+ */
+ if (recovery_count == ab->num_radios) {
+ atomic_dec(&ab->reset_count);
+ complete(&ab->reset_complete);
+ ab->is_reset = false;
+ atomic_set(&ab->fail_cont_count, 0);
+ ath11k_dbg(ab, ATH11K_DBG_BOOT, "reset success\n");
+ }
+ }
+ if (ar->ab->hw_params.support_fw_mac_sequence) {
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ if (arvif->is_up && arvif->vdev_type == WMI_VDEV_TYPE_STA)
+ ieee80211_hw_restart_disconnect(arvif->vif);
+ }
+ }
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+}
+
+static void
+ath11k_mac_update_bss_chan_survey(struct ath11k *ar,
+ struct ieee80211_channel *channel)
+{
+ int ret;
+ enum wmi_bss_chan_info_req_type type = WMI_BSS_SURVEY_REQ_TYPE_READ;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (!test_bit(WMI_TLV_SERVICE_BSS_CHANNEL_INFO_64, ar->ab->wmi_ab.svc_map) ||
+ ar->rx_channel != channel)
+ return;
+
+ if (ar->scan.state != ATH11K_SCAN_IDLE) {
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "ignoring bss chan info req while scanning..\n");
+ return;
+ }
+
+ reinit_completion(&ar->bss_survey_done);
+
+ ret = ath11k_wmi_pdev_bss_chan_info_request(ar, type);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send pdev bss chan info request\n");
+ return;
+ }
+
+ ret = wait_for_completion_timeout(&ar->bss_survey_done, 3 * HZ);
+ if (ret == 0)
+ ath11k_warn(ar->ab, "bss channel survey timed out\n");
+}
+
+static int ath11k_mac_op_get_survey(struct ieee80211_hw *hw, int idx,
+ struct survey_info *survey)
+{
+ struct ath11k *ar = hw->priv;
+ struct ieee80211_supported_band *sband;
+ struct survey_info *ar_survey;
+ int ret = 0;
+
+ if (idx >= ATH11K_NUM_CHANS)
+ return -ENOENT;
+
+ ar_survey = &ar->survey[idx];
+
+ mutex_lock(&ar->conf_mutex);
+
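+ /* idx indexes the concatenation of all bands' channel lists: 2 GHz
+ * channels first, then 5 GHz, then 6 GHz.
+ */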
+ sband = hw->wiphy->bands[NL80211_BAND_2GHZ];
+ if (sband && idx >= sband->n_channels) {
+ idx -= sband->n_channels;
+ sband = NULL;
+ }
+
+ if (!sband)
+ sband = hw->wiphy->bands[NL80211_BAND_5GHZ];
+ if (sband && idx >= sband->n_channels) {
+ idx -= sband->n_channels;
+ sband = NULL;
+ }
+
+ if (!sband)
+ sband = hw->wiphy->bands[NL80211_BAND_6GHZ];
+ if (!sband || idx >= sband->n_channels) {
+ ret = -ENOENT;
+ goto exit;
+ }
+
+ ath11k_mac_update_bss_chan_survey(ar, &sband->channels[idx]);
+
+ spin_lock_bh(&ar->data_lock);
+ memcpy(survey, ar_survey, sizeof(*survey));
+ spin_unlock_bh(&ar->data_lock);
+
+ survey->channel = &sband->channels[idx];
+
+ if (ar->rx_channel == survey->channel)
+ survey->filled |= SURVEY_INFO_IN_USE;
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static void ath11k_mac_put_chain_rssi(struct station_info *sinfo,
+ struct ath11k_sta *arsta,
+ char *pre,
+ bool clear)
+{
+ struct ath11k *ar = arsta->arvif->ar;
+ int i;
+ s8 rssi;
+
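+ /* Report only chains whose RSSI looks like a real measurement,
+ * skipping the driver's invalid/empty placeholder values.
+ */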
+ for (i = 0; i < ARRAY_SIZE(sinfo->chain_signal); i++) {
+ sinfo->chains &= ~BIT(i);
+ rssi = arsta->chain_signal[i];
+ if (clear)
+ arsta->chain_signal[i] = ATH11K_INVALID_RSSI_FULL;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "sta statistics %s rssi[%d] %d\n", pre, i, rssi);
+
+ if (rssi != ATH11K_DEFAULT_NOISE_FLOOR &&
+ rssi != ATH11K_INVALID_RSSI_FULL &&
+ rssi != ATH11K_INVALID_RSSI_EMPTY &&
+ rssi != 0) {
+ sinfo->chain_signal[i] = rssi;
+ sinfo->chains |= BIT(i);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL);
+ }
+ }
+}
+
+static void ath11k_mac_op_sta_statistics(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct station_info *sinfo)
+{
+ struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta);
+ struct ath11k *ar = arsta->arvif->ar;
+ s8 signal;
+ bool db2dbm = test_bit(WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT,
+ ar->ab->wmi_ab.svc_map);
+
+ sinfo->rx_duration = arsta->rx_duration;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_DURATION);
+
+ sinfo->tx_duration = arsta->tx_duration;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_DURATION);
+
+ if (arsta->txrate.legacy || arsta->txrate.nss) {
+ if (arsta->txrate.legacy) {
+ sinfo->txrate.legacy = arsta->txrate.legacy;
+ } else {
+ sinfo->txrate.mcs = arsta->txrate.mcs;
+ sinfo->txrate.nss = arsta->txrate.nss;
+ sinfo->txrate.bw = arsta->txrate.bw;
+ sinfo->txrate.he_gi = arsta->txrate.he_gi;
+ sinfo->txrate.he_dcm = arsta->txrate.he_dcm;
+ sinfo->txrate.he_ru_alloc = arsta->txrate.he_ru_alloc;
+ }
+ sinfo->txrate.flags = arsta->txrate.flags;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
+ }
+
+ ath11k_mac_put_chain_rssi(sinfo, arsta, "ppdu", false);
+
+ mutex_lock(&ar->conf_mutex);
+ if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL)) &&
+ arsta->arvif->vdev_type == WMI_VDEV_TYPE_STA &&
+ ar->ab->hw_params.supports_rssi_stats &&
+ !ath11k_mac_get_fw_stats(ar, ar->pdev->pdev_id, 0,
+ WMI_REQUEST_RSSI_PER_CHAIN_STAT)) {
+ ath11k_mac_put_chain_rssi(sinfo, arsta, "fw stats", true);
+ }
+
+ signal = arsta->rssi_comb;
+ if (!signal &&
+ arsta->arvif->vdev_type == WMI_VDEV_TYPE_STA &&
+ ar->ab->hw_params.supports_rssi_stats &&
+ !(ath11k_mac_get_fw_stats(ar, ar->pdev->pdev_id, 0,
+ WMI_REQUEST_VDEV_STAT)))
+ signal = arsta->rssi_beacon;
+ mutex_unlock(&ar->conf_mutex);
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "sta statistics db2dbm %u rssi comb %d rssi beacon %d\n",
+ db2dbm, arsta->rssi_comb, arsta->rssi_beacon);
+
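+ /* Unless the firmware supports hardware dB-to-dBm conversion, RSSI
+ * values are reported relative to the noise floor and need the
+ * default noise floor added to yield dBm.
+ */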
+ if (signal) {
+ sinfo->signal = db2dbm ? signal : signal + ATH11K_DEFAULT_NOISE_FLOOR;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
+ }
+
+ sinfo->signal_avg = ewma_avg_rssi_read(&arsta->avg_rssi);
+
+ if (!db2dbm)
+ sinfo->signal_avg += ATH11K_DEFAULT_NOISE_FLOOR;
+
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG);
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+static void ath11k_generate_ns_mc_addr(struct ath11k *ar,
+ struct ath11k_arp_ns_offload *offload)
+{
+ int i;
+
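+ /* Build the IPv6 solicited-node multicast address for each unicast
+ * address: ff02::1:ffXX:XXXX, where XX:XXXX are the low 24 bits of
+ * the unicast address (RFC 4291).
+ */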
+ for (i = 0; i < offload->ipv6_count; i++) {
+ offload->self_ipv6_addr[i][0] = 0xff;
+ offload->self_ipv6_addr[i][1] = 0x02;
+ offload->self_ipv6_addr[i][11] = 0x01;
+ offload->self_ipv6_addr[i][12] = 0xff;
+ offload->self_ipv6_addr[i][13] =
+ offload->ipv6_addr[i][13];
+ offload->self_ipv6_addr[i][14] =
+ offload->ipv6_addr[i][14];
+ offload->self_ipv6_addr[i][15] =
+ offload->ipv6_addr[i][15];
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "NS solicited addr %pI6\n",
+ offload->self_ipv6_addr[i]);
+ }
+}
+
+static void ath11k_mac_op_ipv6_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct inet6_dev *idev)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_arp_ns_offload *offload;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct inet6_ifaddr *ifa6;
+ struct ifacaddr6 *ifaca6;
+ u32 count, scope;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "op ipv6 changed\n");
+
+ offload = &arvif->arp_ns_offload;
+ count = 0;
+
+ /* The _ipv6_changed() callback is invoked with the RCU lock already
+ * held by atomic_notifier_call_chain(), so we don't need to call
+ * rcu_read_lock() again here. Note that with CONFIG_PREEMPT_RT
+ * enabled, read_lock_bh() also calls rcu_read_lock(). This is fine
+ * because RCU read-side critical sections are allowed to nest.
+ */
+ read_lock_bh(&idev->lock);
+
+ memset(offload->ipv6_addr, 0, sizeof(offload->ipv6_addr));
+ memset(offload->self_ipv6_addr, 0, sizeof(offload->self_ipv6_addr));
+ memcpy(offload->mac_addr, vif->addr, ETH_ALEN);
+
+ /* get unicast address */
+ list_for_each_entry(ifa6, &idev->addr_list, if_list) {
+ if (count >= ATH11K_IPV6_MAX_COUNT)
+ goto generate;
+
+ if (ifa6->flags & IFA_F_DADFAILED)
+ continue;
+ scope = ipv6_addr_src_scope(&ifa6->addr);
+ if (scope == IPV6_ADDR_SCOPE_LINKLOCAL ||
+ scope == IPV6_ADDR_SCOPE_GLOBAL) {
+ memcpy(offload->ipv6_addr[count], &ifa6->addr.s6_addr,
+ sizeof(ifa6->addr.s6_addr));
+ offload->ipv6_type[count] = ATH11K_IPV6_UC_TYPE;
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "count %d ipv6 uc %pI6 scope %d\n",
+ count, offload->ipv6_addr[count],
+ scope);
+ count++;
+ } else {
+ ath11k_warn(ar->ab, "Unsupported ipv6 scope: %d\n", scope);
+ }
+ }
+
+ /* get anycast address */
+ for (ifaca6 = rcu_dereference(idev->ac_list); ifaca6;
+ ifaca6 = rcu_dereference(ifaca6->aca_next)) {
+ if (count >= ATH11K_IPV6_MAX_COUNT)
+ goto generate;
+
+ scope = ipv6_addr_src_scope(&ifaca6->aca_addr);
+ if (scope == IPV6_ADDR_SCOPE_LINKLOCAL ||
+ scope == IPV6_ADDR_SCOPE_GLOBAL) {
+ memcpy(offload->ipv6_addr[count], &ifaca6->aca_addr,
+ sizeof(ifaca6->aca_addr));
+ offload->ipv6_type[count] = ATH11K_IPV6_AC_TYPE;
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "count %d ipv6 ac %pI6 scope %d\n",
+ count, offload->ipv6_addr[count],
+ scope);
+ count++;
+ } else {
+ ath11k_warn(ar->ab, "Unsupported ipv scope: %d\n", scope);
+ }
+ }
+
+generate:
+ offload->ipv6_count = count;
+ read_unlock_bh(&idev->lock);
+
+ /* generate ns multicast address */
+ ath11k_generate_ns_mc_addr(ar, offload);
+}
+#endif
+
+static void ath11k_mac_op_set_rekey_data(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_gtk_rekey_data *data)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct ath11k_rekey_data *rekey_data = &arvif->rekey_data;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "set rekey data vdev %d\n",
+ arvif->vdev_id);
+
+ mutex_lock(&ar->conf_mutex);
+
+ memcpy(rekey_data->kck, data->kck, NL80211_KCK_LEN);
+ memcpy(rekey_data->kek, data->kek, NL80211_KEK_LEN);
+
+ /* The supplicant hands the replay counter over in big endian, while
+ * the firmware expects it in little endian.
+ */
+ rekey_data->replay_ctr = get_unaligned_be64(data->replay_ctr);
+
+ arvif->rekey_data.enable_offload = true;
+
+ ath11k_dbg_dump(ar->ab, ATH11K_DBG_MAC, "kck", NULL,
+ rekey_data->kck, NL80211_KCK_LEN);
+ ath11k_dbg_dump(ar->ab, ATH11K_DBG_MAC, "kek", NULL,
+ rekey_data->kek, NL80211_KEK_LEN);
+ ath11k_dbg_dump(ar->ab, ATH11K_DBG_MAC, "replay ctr", NULL,
+ &rekey_data->replay_ctr, sizeof(rekey_data->replay_ctr));
+
+ mutex_unlock(&ar->conf_mutex);
+}
+
+static int ath11k_mac_op_set_bios_sar_specs(struct ieee80211_hw *hw,
+ const struct cfg80211_sar_specs *sar)
+{
+ struct ath11k *ar = hw->priv;
+ const struct cfg80211_sar_sub_specs *sspec;
+ int ret, index;
+ u8 *sar_tbl;
+ u32 i;
+
+ if (!sar || sar->type != NL80211_SAR_TYPE_POWER ||
+ sar->num_sub_specs == 0)
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (!test_bit(WMI_TLV_SERVICE_BIOS_SAR_SUPPORT, ar->ab->wmi_ab.svc_map) ||
+ !ar->ab->hw_params.bios_sar_capa) {
+ ret = -EOPNOTSUPP;
+ goto exit;
+ }
+
+ ret = ath11k_wmi_pdev_set_bios_geo_table_param(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set geo table: %d\n", ret);
+ goto exit;
+ }
+
+ sar_tbl = kzalloc(BIOS_SAR_TABLE_LEN, GFP_KERNEL);
+ if (!sar_tbl) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ sspec = sar->sub_specs;
+ for (i = 0; i < sar->num_sub_specs; i++) {
+ if (sspec->freq_range_index >= (BIOS_SAR_TABLE_LEN >> 1)) {
+ ath11k_warn(ar->ab, "Ignore bad frequency index %u, max allowed %u\n",
+ sspec->freq_range_index, BIOS_SAR_TABLE_LEN >> 1);
+ continue;
+ }
+
+ /* The SAR table is split into two halves, the first for chain 0 and
+ * the second for chain 1; both chains share the same power setting.
+ */
+ sar_tbl[sspec->freq_range_index] = sspec->power;
+ index = sspec->freq_range_index + (BIOS_SAR_TABLE_LEN >> 1);
+ sar_tbl[index] = sspec->power;
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "sar tbl[%d] = %d\n",
+ sspec->freq_range_index, sar_tbl[sspec->freq_range_index]);
+ sspec++;
+ }
+
+ ret = ath11k_wmi_pdev_set_bios_sar_table_param(ar, sar_tbl);
+ if (ret)
+ ath11k_warn(ar->ab, "failed to set sar power: %d", ret);
+
+ kfree(sar_tbl);
+exit:
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static int ath11k_mac_op_cancel_remain_on_channel(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath11k *ar = hw->priv;
+
+ mutex_lock(&ar->conf_mutex);
+
+ spin_lock_bh(&ar->data_lock);
+ ar->scan.roc_notify = false;
+ spin_unlock_bh(&ar->data_lock);
+
+ ath11k_scan_abort(ar);
+
+ mutex_unlock(&ar->conf_mutex);
+
+ cancel_delayed_work_sync(&ar->scan.timeout);
+
+ return 0;
+}
+
+static int ath11k_mac_op_remain_on_channel(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_channel *chan,
+ int duration,
+ enum ieee80211_roc_type type)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct scan_req_params *arg;
+ int ret;
+ u32 scan_time_msec;
+
+ mutex_lock(&ar->conf_mutex);
+
+ spin_lock_bh(&ar->data_lock);
+ switch (ar->scan.state) {
+ case ATH11K_SCAN_IDLE:
+ reinit_completion(&ar->scan.started);
+ reinit_completion(&ar->scan.completed);
+ reinit_completion(&ar->scan.on_channel);
+ ar->scan.state = ATH11K_SCAN_STARTING;
+ ar->scan.is_roc = true;
+ ar->scan.vdev_id = arvif->vdev_id;
+ ar->scan.roc_freq = chan->center_freq;
+ ar->scan.roc_notify = true;
+ ret = 0;
+ break;
+ case ATH11K_SCAN_STARTING:
+ case ATH11K_SCAN_RUNNING:
+ case ATH11K_SCAN_ABORTING:
+ ret = -EBUSY;
+ break;
+ }
+ spin_unlock_bh(&ar->data_lock);
+
+ if (ret)
+ goto exit;
+
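+ /* Dwell for twice the maximum remain-on-channel duration so the scan
+ * comfortably outlasts any requested RoC period.
+ */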
+ scan_time_msec = ar->hw->wiphy->max_remain_on_channel_duration * 2;
+
+ arg = kzalloc(sizeof(*arg), GFP_KERNEL);
+ if (!arg) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+ ath11k_wmi_start_scan_init(ar, arg);
+ arg->num_chan = 1;
+ arg->chan_list = kcalloc(arg->num_chan, sizeof(*arg->chan_list),
+ GFP_KERNEL);
+ if (!arg->chan_list) {
+ ret = -ENOMEM;
+ goto free_arg;
+ }
+
+ arg->vdev_id = arvif->vdev_id;
+ arg->scan_id = ATH11K_SCAN_ID;
+ arg->chan_list[0] = chan->center_freq;
+ arg->dwell_time_active = scan_time_msec;
+ arg->dwell_time_passive = scan_time_msec;
+ arg->max_scan_time = scan_time_msec;
+ arg->scan_f_passive = 1;
+ arg->burst_duration = duration;
+
+ if (!ar->ab->hw_params.single_pdev_only)
+ arg->scan_f_filter_prb_req = 1;
+
+ ret = ath11k_start_scan(ar, arg);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to start roc scan: %d\n", ret);
+
+ spin_lock_bh(&ar->data_lock);
+ ar->scan.state = ATH11K_SCAN_IDLE;
+ spin_unlock_bh(&ar->data_lock);
+ goto free_chan_list;
+ }
+
+ ret = wait_for_completion_timeout(&ar->scan.on_channel, 3 * HZ);
+ if (ret == 0) {
+ ath11k_warn(ar->ab, "failed to switch to channel for roc scan\n");
+ ret = ath11k_scan_stop(ar);
+ if (ret)
+ ath11k_warn(ar->ab, "failed to stop scan: %d\n", ret);
+ ret = -ETIMEDOUT;
+ goto free_chan_list;
+ }
+
+ ieee80211_queue_delayed_work(ar->hw, &ar->scan.timeout,
+ msecs_to_jiffies(duration));
+
+ ret = 0;
+
+free_chan_list:
+ kfree(arg->chan_list);
+free_arg:
+ kfree(arg);
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static int ath11k_mac_station_add(struct ath11k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta);
+ struct peer_create_params peer_param;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ret = ath11k_mac_inc_num_stations(arvif, sta);
+ if (ret) {
+ ath11k_warn(ab, "refusing to associate station: too many connected already (%d)\n",
+ ar->max_num_stations);
+ goto exit;
+ }
+
+ /* The driver allows the DEL KEY followed by SET KEY sequence for
+ * group keys only when there are no clients associated. If the
+ * firmware entered the race during that window, reinstalling the
+ * same key when the first station connects allows the firmware to
+ * recover from the race.
+ */
+ if (arvif->num_stations == 1 && arvif->reinstall_group_keys) {
+ ath11k_dbg(ab, ATH11K_DBG_MAC, "set group keys on 1st station add for vdev %d\n",
+ arvif->vdev_id);
+ ret = ath11k_set_group_keys(arvif);
+ if (ret)
+ goto dec_num_station;
+ arvif->reinstall_group_keys = false;
+ }
+
+ arsta->rx_stats = kzalloc(sizeof(*arsta->rx_stats), GFP_KERNEL);
+ if (!arsta->rx_stats) {
+ ret = -ENOMEM;
+ goto dec_num_station;
+ }
+
+ peer_param.vdev_id = arvif->vdev_id;
+ peer_param.peer_addr = sta->addr;
+ peer_param.peer_type = WMI_PEER_TYPE_DEFAULT;
+
+ ret = ath11k_peer_create(ar, arvif, sta, &peer_param);
+ if (ret) {
+ ath11k_warn(ab, "Failed to add peer: %pM for VDEV: %d\n",
+ sta->addr, arvif->vdev_id);
+ goto free_rx_stats;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_MAC, "Added peer: %pM for VDEV: %d\n",
+ sta->addr, arvif->vdev_id);
+
+ if (ath11k_debugfs_is_extd_tx_stats_enabled(ar)) {
+ arsta->tx_stats = kzalloc(sizeof(*arsta->tx_stats), GFP_KERNEL);
+ if (!arsta->tx_stats) {
+ ret = -ENOMEM;
+ goto free_peer;
+ }
+ }
+
+ if (ieee80211_vif_is_mesh(vif)) {
+ ath11k_dbg(ab, ATH11K_DBG_MAC,
+ "setting USE_4ADDR for mesh STA %pM\n", sta->addr);
+ ret = ath11k_wmi_set_peer_param(ar, sta->addr,
+ arvif->vdev_id,
+ WMI_PEER_USE_4ADDR, 1);
+ if (ret) {
+ ath11k_warn(ab, "failed to set mesh STA %pM 4addr capability: %d\n",
+ sta->addr, ret);
+ goto free_tx_stats;
+ }
+ }
+
+ ret = ath11k_dp_peer_setup(ar, arvif->vdev_id, sta->addr);
+ if (ret) {
+ ath11k_warn(ab, "failed to setup dp for peer %pM on vdev %i (%d)\n",
+ sta->addr, arvif->vdev_id, ret);
+ goto free_tx_stats;
+ }
+
+ if (ab->hw_params.vdev_start_delay &&
+ !arvif->is_started &&
+ arvif->vdev_type != WMI_VDEV_TYPE_AP) {
+ ret = ath11k_mac_start_vdev_delay(ar->hw, vif);
+ if (ret) {
+ ath11k_warn(ab, "failed to delay vdev start: %d\n", ret);
+ goto free_tx_stats;
+ }
+ }
+
+ ewma_avg_rssi_init(&arsta->avg_rssi);
+ return 0;
+
+free_tx_stats:
+ kfree(arsta->tx_stats);
+ arsta->tx_stats = NULL;
+free_peer:
+ ath11k_peer_delete(ar, arvif->vdev_id, sta->addr);
+free_rx_stats:
+ kfree(arsta->rx_stats);
+ arsta->rx_stats = NULL;
+dec_num_station:
+ ath11k_mac_dec_num_stations(arvif, sta);
+exit:
+ return ret;
+}
+
+static int ath11k_mac_station_remove(struct ath11k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta);
+ int ret;
+
+ if (ab->hw_params.vdev_start_delay &&
+ arvif->is_started &&
+ arvif->vdev_type != WMI_VDEV_TYPE_AP) {
+ ret = ath11k_mac_stop_vdev_early(ar->hw, vif);
+ if (ret) {
+ ath11k_warn(ab, "failed to do early vdev stop: %d\n", ret);
+ return ret;
+ }
+ }
+
+ ath11k_dp_peer_cleanup(ar, arvif->vdev_id, sta->addr);
+
+ ret = ath11k_peer_delete(ar, arvif->vdev_id, sta->addr);
+ if (ret)
+ ath11k_warn(ab, "Failed to delete peer: %pM for VDEV: %d\n",
+ sta->addr, arvif->vdev_id);
+ else
+ ath11k_dbg(ab, ATH11K_DBG_MAC, "Removed peer: %pM for VDEV: %d\n",
+ sta->addr, arvif->vdev_id);
+
+ ath11k_mac_dec_num_stations(arvif, sta);
+
+ kfree(arsta->tx_stats);
+ arsta->tx_stats = NULL;
+
+ kfree(arsta->rx_stats);
+ arsta->rx_stats = NULL;
+
+ return ret;
+}
+
+static int ath11k_mac_op_sta_state(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ enum ieee80211_sta_state old_state,
+ enum ieee80211_sta_state new_state)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta);
+ enum ieee80211_ap_reg_power power_type;
+ struct cur_regulatory_info *reg_info;
+ struct ath11k_peer *peer;
+ int ret = 0;
+
+ /* cancel must be done outside the mutex to avoid deadlock */
+ if ((old_state == IEEE80211_STA_NONE &&
+ new_state == IEEE80211_STA_NOTEXIST)) {
+ cancel_work_sync(&arsta->update_wk);
+ cancel_work_sync(&arsta->set_4addr_wk);
+ }
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (old_state == IEEE80211_STA_NOTEXIST &&
+ new_state == IEEE80211_STA_NONE) {
+ memset(arsta, 0, sizeof(*arsta));
+ arsta->arvif = arvif;
+ arsta->peer_ps_state = WMI_PEER_PS_STATE_DISABLED;
+ INIT_WORK(&arsta->update_wk, ath11k_sta_rc_update_wk);
+ INIT_WORK(&arsta->set_4addr_wk, ath11k_sta_set_4addr_wk);
+
+ ret = ath11k_mac_station_add(ar, vif, sta);
+ if (ret)
+ ath11k_warn(ar->ab, "Failed to add station: %pM for VDEV: %d\n",
+ sta->addr, arvif->vdev_id);
+ } else if ((old_state == IEEE80211_STA_NONE &&
+ new_state == IEEE80211_STA_NOTEXIST)) {
+ ret = ath11k_mac_station_remove(ar, vif, sta);
+ if (ret)
+ ath11k_warn(ar->ab, "Failed to remove station: %pM for VDEV: %d\n",
+ sta->addr, arvif->vdev_id);
+
+ mutex_lock(&ar->ab->tbl_mtx_lock);
+ spin_lock_bh(&ar->ab->base_lock);
+ peer = ath11k_peer_find(ar->ab, arvif->vdev_id, sta->addr);
+ if (peer && peer->sta == sta) {
+ ath11k_warn(ar->ab, "Found peer entry %pM n vdev %i after it was supposedly removed\n",
+ vif->addr, arvif->vdev_id);
+ ath11k_peer_rhash_delete(ar->ab, peer);
+ peer->sta = NULL;
+ list_del(&peer->list);
+ kfree(peer);
+ ar->num_peers--;
+ }
+ spin_unlock_bh(&ar->ab->base_lock);
+ mutex_unlock(&ar->ab->tbl_mtx_lock);
+ } else if (old_state == IEEE80211_STA_AUTH &&
+ new_state == IEEE80211_STA_ASSOC &&
+ (vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_MESH_POINT ||
+ vif->type == NL80211_IFTYPE_ADHOC)) {
+ ret = ath11k_station_assoc(ar, vif, sta, false);
+ if (ret)
+ ath11k_warn(ar->ab, "Failed to associate station: %pM\n",
+ sta->addr);
+
+ spin_lock_bh(&ar->data_lock);
+ /* Set arsta bw and prev bw */
+ arsta->bw = ath11k_mac_ieee80211_sta_bw_to_wmi(ar, sta);
+ arsta->bw_prev = arsta->bw;
+ spin_unlock_bh(&ar->data_lock);
+ } else if (old_state == IEEE80211_STA_ASSOC &&
+ new_state == IEEE80211_STA_AUTHORIZED) {
+ spin_lock_bh(&ar->ab->base_lock);
+
+ peer = ath11k_peer_find(ar->ab, arvif->vdev_id, sta->addr);
+ if (peer)
+ peer->is_authorized = true;
+
+ spin_unlock_bh(&ar->ab->base_lock);
+
+ if (vif->type == NL80211_IFTYPE_STATION && arvif->is_up) {
+ ret = ath11k_wmi_set_peer_param(ar, sta->addr,
+ arvif->vdev_id,
+ WMI_PEER_AUTHORIZE,
+ 1);
+ if (ret)
+ ath11k_warn(ar->ab, "Unable to authorize peer %pM vdev %d: %d\n",
+ sta->addr, arvif->vdev_id, ret);
+ }
+
+ if (!ret &&
+ ath11k_wmi_supports_6ghz_cc_ext(ar) &&
+ arvif->vdev_type == WMI_VDEV_TYPE_STA &&
+ arvif->chanctx.def.chan &&
+ arvif->chanctx.def.chan->band == NL80211_BAND_6GHZ) {
+ reg_info = &ar->ab->reg_info_store[ar->pdev_idx];
+ power_type = vif->bss_conf.power_type;
+
+ if (power_type == IEEE80211_REG_UNSET_AP) {
+ ath11k_warn(ar->ab, "invalid power type %d\n",
+ power_type);
+ ret = -EINVAL;
+ } else {
+ ret = ath11k_reg_handle_chan_list(ar->ab,
+ reg_info,
+ power_type);
+ if (ret)
+ ath11k_warn(ar->ab,
+ "failed to handle chan list with power type %d\n",
+ power_type);
+ }
+ }
+ } else if (old_state == IEEE80211_STA_AUTHORIZED &&
+ new_state == IEEE80211_STA_ASSOC) {
+ spin_lock_bh(&ar->ab->base_lock);
+
+ peer = ath11k_peer_find(ar->ab, arvif->vdev_id, sta->addr);
+ if (peer)
+ peer->is_authorized = false;
+
+ spin_unlock_bh(&ar->ab->base_lock);
+ } else if (old_state == IEEE80211_STA_ASSOC &&
+ new_state == IEEE80211_STA_AUTH &&
+ (vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_MESH_POINT ||
+ vif->type == NL80211_IFTYPE_ADHOC)) {
+ ret = ath11k_station_disassoc(ar, vif, sta);
+ if (ret)
+ ath11k_warn(ar->ab, "Failed to disassociate station: %pM\n",
+ sta->addr);
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static const struct ieee80211_ops ath11k_ops = {
+ .tx = ath11k_mac_op_tx,
+ .wake_tx_queue = ieee80211_handle_wake_tx_queue,
+ .start = ath11k_mac_op_start,
+ .stop = ath11k_mac_op_stop,
+ .reconfig_complete = ath11k_mac_op_reconfig_complete,
+ .add_interface = ath11k_mac_op_add_interface,
+ .remove_interface = ath11k_mac_op_remove_interface,
+ .update_vif_offload = ath11k_mac_op_update_vif_offload,
+ .config = ath11k_mac_op_config,
+ .bss_info_changed = ath11k_mac_op_bss_info_changed,
+ .configure_filter = ath11k_mac_op_configure_filter,
+ .hw_scan = ath11k_mac_op_hw_scan,
+ .cancel_hw_scan = ath11k_mac_op_cancel_hw_scan,
+ .set_key = ath11k_mac_op_set_key,
+ .set_rekey_data = ath11k_mac_op_set_rekey_data,
+ .sta_state = ath11k_mac_op_sta_state,
+ .sta_set_4addr = ath11k_mac_op_sta_set_4addr,
+ .sta_set_txpwr = ath11k_mac_op_sta_set_txpwr,
+ .link_sta_rc_update = ath11k_mac_op_sta_rc_update,
+ .conf_tx = ath11k_mac_op_conf_tx,
+ .set_antenna = ath11k_mac_op_set_antenna,
+ .get_antenna = ath11k_mac_op_get_antenna,
+ .ampdu_action = ath11k_mac_op_ampdu_action,
+ .add_chanctx = ath11k_mac_op_add_chanctx,
+ .remove_chanctx = ath11k_mac_op_remove_chanctx,
+ .change_chanctx = ath11k_mac_op_change_chanctx,
+ .assign_vif_chanctx = ath11k_mac_op_assign_vif_chanctx,
+ .unassign_vif_chanctx = ath11k_mac_op_unassign_vif_chanctx,
+ .switch_vif_chanctx = ath11k_mac_op_switch_vif_chanctx,
+ .set_rts_threshold = ath11k_mac_op_set_rts_threshold,
+ .set_frag_threshold = ath11k_mac_op_set_frag_threshold,
+ .set_bitrate_mask = ath11k_mac_op_set_bitrate_mask,
+ .get_survey = ath11k_mac_op_get_survey,
+ .flush = ath11k_mac_op_flush,
+ .sta_statistics = ath11k_mac_op_sta_statistics,
+ CFG80211_TESTMODE_CMD(ath11k_tm_cmd)
+
+#ifdef CONFIG_PM
+ .suspend = ath11k_wow_op_suspend,
+ .resume = ath11k_wow_op_resume,
+ .set_wakeup = ath11k_wow_op_set_wakeup,
+#endif
+
+#ifdef CONFIG_ATH11K_DEBUGFS
+ .vif_add_debugfs = ath11k_debugfs_op_vif_add,
+ .sta_add_debugfs = ath11k_debugfs_sta_op_add,
+#endif
+
+#if IS_ENABLED(CONFIG_IPV6)
+ .ipv6_addr_change = ath11k_mac_op_ipv6_changed,
+#endif
+ .get_txpower = ath11k_mac_op_get_txpower,
+
+ .set_sar_specs = ath11k_mac_op_set_bios_sar_specs,
+ .remain_on_channel = ath11k_mac_op_remain_on_channel,
+ .cancel_remain_on_channel = ath11k_mac_op_cancel_remain_on_channel,
+};
+
+static void ath11k_mac_update_ch_list(struct ath11k *ar,
+ struct ieee80211_supported_band *band,
+ u32 freq_low, u32 freq_high)
+{
+ int i;
+
+ if (!(freq_low && freq_high))
+ return;
+
+ for (i = 0; i < band->n_channels; i++) {
+ if (band->channels[i].center_freq < freq_low ||
+ band->channels[i].center_freq > freq_high)
+ band->channels[i].flags |= IEEE80211_CHAN_DISABLED;
+ }
+}
+
+static u32 ath11k_get_phy_id(struct ath11k *ar, u32 band)
+{
+ struct ath11k_pdev *pdev = ar->pdev;
+ struct ath11k_pdev_cap *pdev_cap = &pdev->cap;
+
+ if (band == WMI_HOST_WLAN_2G_CAP)
+ return pdev_cap->band[NL80211_BAND_2GHZ].phy_id;
+
+ if (band == WMI_HOST_WLAN_5G_CAP)
+ return pdev_cap->band[NL80211_BAND_5GHZ].phy_id;
+
+ ath11k_warn(ar->ab, "unsupported phy cap:%d\n", band);
+
+ return 0;
+}
+
+static int ath11k_mac_setup_channels_rates(struct ath11k *ar,
+ u32 supported_bands)
+{
+ struct ieee80211_supported_band *band;
+ struct ath11k_hal_reg_capabilities_ext *reg_cap, *temp_reg_cap;
+ void *channels;
+ u32 phy_id;
+
+ BUILD_BUG_ON((ARRAY_SIZE(ath11k_2ghz_channels) +
+ ARRAY_SIZE(ath11k_5ghz_channels) +
+ ARRAY_SIZE(ath11k_6ghz_channels)) !=
+ ATH11K_NUM_CHANS);
+
+ reg_cap = &ar->ab->hal_reg_cap[ar->pdev_idx];
+ temp_reg_cap = reg_cap;
+
+ if (supported_bands & WMI_HOST_WLAN_2G_CAP) {
+ channels = kmemdup(ath11k_2ghz_channels,
+ sizeof(ath11k_2ghz_channels),
+ GFP_KERNEL);
+ if (!channels)
+ return -ENOMEM;
+
+ band = &ar->mac.sbands[NL80211_BAND_2GHZ];
+ band->band = NL80211_BAND_2GHZ;
+ band->n_channels = ARRAY_SIZE(ath11k_2ghz_channels);
+ band->channels = channels;
+ band->n_bitrates = ath11k_g_rates_size;
+ band->bitrates = ath11k_g_rates;
+ ar->hw->wiphy->bands[NL80211_BAND_2GHZ] = band;
+
+ if (ar->ab->hw_params.single_pdev_only) {
+ phy_id = ath11k_get_phy_id(ar, WMI_HOST_WLAN_2G_CAP);
+ temp_reg_cap = &ar->ab->hal_reg_cap[phy_id];
+ }
+ ath11k_mac_update_ch_list(ar, band,
+ temp_reg_cap->low_2ghz_chan,
+ temp_reg_cap->high_2ghz_chan);
+ }
+
+ if (supported_bands & WMI_HOST_WLAN_5G_CAP) {
+ if (reg_cap->high_5ghz_chan >= ATH11K_MIN_6G_FREQ) {
+ channels = kmemdup(ath11k_6ghz_channels,
+ sizeof(ath11k_6ghz_channels), GFP_KERNEL);
+ if (!channels) {
+ kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
+ return -ENOMEM;
+ }
+
+ ar->supports_6ghz = true;
+ band = &ar->mac.sbands[NL80211_BAND_6GHZ];
+ band->band = NL80211_BAND_6GHZ;
+ band->n_channels = ARRAY_SIZE(ath11k_6ghz_channels);
+ band->channels = channels;
+ band->n_bitrates = ath11k_a_rates_size;
+ band->bitrates = ath11k_a_rates;
+ ar->hw->wiphy->bands[NL80211_BAND_6GHZ] = band;
+
+ if (ar->ab->hw_params.single_pdev_only) {
+ phy_id = ath11k_get_phy_id(ar, WMI_HOST_WLAN_5G_CAP);
+ temp_reg_cap = &ar->ab->hal_reg_cap[phy_id];
+ }
+
+ ath11k_mac_update_ch_list(ar, band,
+ temp_reg_cap->low_5ghz_chan,
+ temp_reg_cap->high_5ghz_chan);
+ }
+
+ if (reg_cap->low_5ghz_chan < ATH11K_MIN_6G_FREQ) {
+ channels = kmemdup(ath11k_5ghz_channels,
+ sizeof(ath11k_5ghz_channels),
+ GFP_KERNEL);
+ if (!channels) {
+ kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
+ kfree(ar->mac.sbands[NL80211_BAND_6GHZ].channels);
+ return -ENOMEM;
+ }
+
+ band = &ar->mac.sbands[NL80211_BAND_5GHZ];
+ band->band = NL80211_BAND_5GHZ;
+ band->n_channels = ARRAY_SIZE(ath11k_5ghz_channels);
+ band->channels = channels;
+ band->n_bitrates = ath11k_a_rates_size;
+ band->bitrates = ath11k_a_rates;
+ ar->hw->wiphy->bands[NL80211_BAND_5GHZ] = band;
+
+ if (ar->ab->hw_params.single_pdev_only) {
+ phy_id = ath11k_get_phy_id(ar, WMI_HOST_WLAN_5G_CAP);
+ temp_reg_cap = &ar->ab->hal_reg_cap[phy_id];
+ }
+
+ ath11k_mac_update_ch_list(ar, band,
+ temp_reg_cap->low_5ghz_chan,
+ temp_reg_cap->high_5ghz_chan);
+ }
+ }
+
+ return 0;
+}
+
+static void ath11k_mac_setup_mac_address_list(struct ath11k *ar)
+{
+ struct mac_address *addresses;
+ u16 n_addresses;
+ int i;
+
+ if (!ar->ab->hw_params.support_dual_stations)
+ return;
+
+ n_addresses = ar->ab->hw_params.num_vdevs;
+ addresses = kcalloc(n_addresses, sizeof(*addresses), GFP_KERNEL);
+ if (!addresses)
+ return;
+
+ memcpy(addresses[0].addr, ar->mac_addr, ETH_ALEN);
+ for (i = 1; i < n_addresses; i++) {
+ memcpy(addresses[i].addr, ar->mac_addr, ETH_ALEN);
+ /* set the locally administered address bit */
+ addresses[i].addr[0] |= 0x2;
+
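+ /* Derive extra addresses by stepping the second nibble of the first
+ * octet, e.g. a (hypothetical) base of 00:c0:... yields 02:c0:...,
+ * 12:c0:..., 22:c0:... and so on.
+ */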
+ addresses[i].addr[0] += (i - 1) << 4;
+ }
+
+ ar->hw->wiphy->addresses = addresses;
+ ar->hw->wiphy->n_addresses = n_addresses;
+}
+
+static int ath11k_mac_setup_iface_combinations(struct ath11k *ar)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct ieee80211_iface_combination *combinations;
+ struct ieee80211_iface_limit *limits;
+ int n_limits, n_combos;
+ bool p2p;
+
+ p2p = ab->hw_params.interface_modes & BIT(NL80211_IFTYPE_P2P_DEVICE);
+
+ if (ab->hw_params.support_dual_stations)
+ n_combos = 2;
+ else
+ n_combos = 1;
+
+ combinations = kcalloc(n_combos, sizeof(*combinations), GFP_KERNEL);
+ if (!combinations)
+ return -ENOMEM;
+
+ if (p2p)
+ n_limits = 3;
+ else
+ n_limits = 2;
+
+ limits = kcalloc(n_limits, sizeof(*limits), GFP_KERNEL);
+ if (!limits) {
+ kfree(combinations);
+ return -ENOMEM;
+ }
+
+ limits[0].max = 1;
+ limits[0].types |= BIT(NL80211_IFTYPE_STATION);
+ limits[1].max = 16;
+ limits[1].types |= BIT(NL80211_IFTYPE_AP);
+ if (IS_ENABLED(CONFIG_MAC80211_MESH) &&
+ ab->hw_params.interface_modes & BIT(NL80211_IFTYPE_MESH_POINT))
+ limits[1].types |= BIT(NL80211_IFTYPE_MESH_POINT);
+
+ combinations[0].limits = limits;
+ combinations[0].n_limits = n_limits;
+ combinations[0].beacon_int_infra_match = true;
+ combinations[0].beacon_int_min_gcd = 100;
+ combinations[0].max_interfaces = 16;
+ combinations[0].num_different_channels = 1;
+ combinations[0].radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
+ BIT(NL80211_CHAN_WIDTH_20) |
+ BIT(NL80211_CHAN_WIDTH_40) |
+ BIT(NL80211_CHAN_WIDTH_80) |
+ BIT(NL80211_CHAN_WIDTH_80P80) |
+ BIT(NL80211_CHAN_WIDTH_160);
+
+ if (ab->hw_params.support_dual_stations) {
+ limits[0].max = 2;
+
+ combinations[1].limits = limits;
+ combinations[1].n_limits = n_limits;
+ combinations[1].beacon_int_infra_match = true;
+ combinations[1].beacon_int_min_gcd = 100;
+ combinations[1].max_interfaces = ab->hw_params.num_vdevs;
+ combinations[1].num_different_channels = 2;
+ }
+
+ if (p2p) {
+ limits[1].types |= BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_P2P_GO);
+ limits[2].max = 1;
+ limits[2].types |= BIT(NL80211_IFTYPE_P2P_DEVICE);
+ }
+
+ ar->hw->wiphy->iface_combinations = combinations;
+ ar->hw->wiphy->n_iface_combinations = n_combos;
+
+ return 0;
+}
+
+static const u8 ath11k_if_types_ext_capa[] = {
+ [0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING,
+ [2] = WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT,
+ [7] = WLAN_EXT_CAPA8_OPMODE_NOTIF,
+};
+
+static const u8 ath11k_if_types_ext_capa_sta[] = {
+ [0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING,
+ [2] = WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT,
+ [7] = WLAN_EXT_CAPA8_OPMODE_NOTIF,
+ [9] = WLAN_EXT_CAPA10_TWT_REQUESTER_SUPPORT,
+};
+
+static const u8 ath11k_if_types_ext_capa_ap[] = {
+ [0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING,
+ [2] = WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT,
+ [7] = WLAN_EXT_CAPA8_OPMODE_NOTIF,
+ [9] = WLAN_EXT_CAPA10_TWT_RESPONDER_SUPPORT,
+ [10] = WLAN_EXT_CAPA11_EMA_SUPPORT,
+};
+
+static const struct wiphy_iftype_ext_capab ath11k_iftypes_ext_capa[] = {
+ {
+ .extended_capabilities = ath11k_if_types_ext_capa,
+ .extended_capabilities_mask = ath11k_if_types_ext_capa,
+ .extended_capabilities_len = sizeof(ath11k_if_types_ext_capa),
+ }, {
+ .iftype = NL80211_IFTYPE_STATION,
+ .extended_capabilities = ath11k_if_types_ext_capa_sta,
+ .extended_capabilities_mask = ath11k_if_types_ext_capa_sta,
+ .extended_capabilities_len =
+ sizeof(ath11k_if_types_ext_capa_sta),
+ }, {
+ .iftype = NL80211_IFTYPE_AP,
+ .extended_capabilities = ath11k_if_types_ext_capa_ap,
+ .extended_capabilities_mask = ath11k_if_types_ext_capa_ap,
+ .extended_capabilities_len =
+ sizeof(ath11k_if_types_ext_capa_ap),
+ },
+};
+
+static void __ath11k_mac_unregister(struct ath11k *ar)
+{
+ cancel_work_sync(&ar->channel_update_work);
+ cancel_work_sync(&ar->regd_update_work);
+
+ ieee80211_unregister_hw(ar->hw);
+
+ idr_for_each(&ar->txmgmt_idr, ath11k_mac_tx_mgmt_pending_free, ar);
+ idr_destroy(&ar->txmgmt_idr);
+
+ kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
+ kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels);
+ kfree(ar->mac.sbands[NL80211_BAND_6GHZ].channels);
+
+ kfree(ar->hw->wiphy->iface_combinations[0].limits);
+ kfree(ar->hw->wiphy->iface_combinations);
+
+ kfree(ar->hw->wiphy->addresses);
+
+ SET_IEEE80211_DEV(ar->hw, NULL);
+}
+
+void ath11k_mac_unregister(struct ath11k_base *ab)
+{
+ struct ath11k *ar;
+ struct ath11k_pdev *pdev;
+ int i;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = &ab->pdevs[i];
+ ar = pdev->ar;
+ if (!ar)
+ continue;
+
+ __ath11k_mac_unregister(ar);
+ }
+
+ ath11k_peer_rhash_tbl_destroy(ab);
+}
+
+static int __ath11k_mac_register(struct ath11k *ar)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_pdev_cap *cap = &ar->pdev->cap;
+ static const u32 cipher_suites[] = {
+ WLAN_CIPHER_SUITE_TKIP,
+ WLAN_CIPHER_SUITE_CCMP,
+ WLAN_CIPHER_SUITE_AES_CMAC,
+ WLAN_CIPHER_SUITE_BIP_CMAC_256,
+ WLAN_CIPHER_SUITE_BIP_GMAC_128,
+ WLAN_CIPHER_SUITE_BIP_GMAC_256,
+ WLAN_CIPHER_SUITE_GCMP,
+ WLAN_CIPHER_SUITE_GCMP_256,
+ WLAN_CIPHER_SUITE_CCMP_256,
+ };
+ int ret;
+ u32 ht_cap = 0;
+
+ ath11k_pdev_caps_update(ar);
+
+ SET_IEEE80211_PERM_ADDR(ar->hw, ar->mac_addr);
+ ath11k_mac_setup_mac_address_list(ar);
+
+ SET_IEEE80211_DEV(ar->hw, ab->dev);
+
+ ret = ath11k_mac_setup_channels_rates(ar,
+ cap->supported_bands);
+ if (ret)
+ goto err;
+
+ wiphy_read_of_freq_limits(ar->hw->wiphy);
+ ath11k_mac_setup_ht_vht_cap(ar, cap, &ht_cap);
+ ath11k_mac_setup_he_cap(ar, cap);
+
+ ret = ath11k_mac_setup_iface_combinations(ar);
+ if (ret) {
+ ath11k_err(ar->ab, "failed to setup interface combinations: %d\n", ret);
+ goto err_free_channels;
+ }
+
+ ar->hw->wiphy->available_antennas_rx = cap->rx_chain_mask;
+ ar->hw->wiphy->available_antennas_tx = cap->tx_chain_mask;
+
+ ar->hw->wiphy->interface_modes = ab->hw_params.interface_modes;
+
+ if (ab->hw_params.single_pdev_only && ar->supports_6ghz)
+ ieee80211_hw_set(ar->hw, SINGLE_SCAN_ON_ALL_BANDS);
+
+ if (ab->hw_params.supports_multi_bssid) {
+ ieee80211_hw_set(ar->hw, SUPPORTS_MULTI_BSSID);
+ ieee80211_hw_set(ar->hw, SUPPORTS_ONLY_HE_MULTI_BSSID);
+ }
+
+ ieee80211_hw_set(ar->hw, SIGNAL_DBM);
+ ieee80211_hw_set(ar->hw, SUPPORTS_PS);
+ ieee80211_hw_set(ar->hw, SUPPORTS_DYNAMIC_PS);
+ ieee80211_hw_set(ar->hw, MFP_CAPABLE);
+ ieee80211_hw_set(ar->hw, REPORTS_TX_ACK_STATUS);
+ ieee80211_hw_set(ar->hw, HAS_RATE_CONTROL);
+ ieee80211_hw_set(ar->hw, AP_LINK_PS);
+ ieee80211_hw_set(ar->hw, SPECTRUM_MGMT);
+ ieee80211_hw_set(ar->hw, CONNECTION_MONITOR);
+ ieee80211_hw_set(ar->hw, SUPPORTS_PER_STA_GTK);
+ ieee80211_hw_set(ar->hw, WANT_MONITOR_VIF);
+ ieee80211_hw_set(ar->hw, CHANCTX_STA_CSA);
+ ieee80211_hw_set(ar->hw, QUEUE_CONTROL);
+ ieee80211_hw_set(ar->hw, SUPPORTS_TX_FRAG);
+ ieee80211_hw_set(ar->hw, REPORTS_LOW_ACK);
+
+ if (ath11k_frame_mode == ATH11K_HW_TXRX_ETHERNET) {
+ ieee80211_hw_set(ar->hw, SUPPORTS_TX_ENCAP_OFFLOAD);
+ ieee80211_hw_set(ar->hw, SUPPORTS_RX_DECAP_OFFLOAD);
+ }
+
+ if (cap->nss_ratio_enabled)
+ ieee80211_hw_set(ar->hw, SUPPORTS_VHT_EXT_NSS_BW);
+
+ if ((ht_cap & WMI_HT_CAP_ENABLED) || ar->supports_6ghz) {
+ ieee80211_hw_set(ar->hw, AMPDU_AGGREGATION);
+ ieee80211_hw_set(ar->hw, TX_AMPDU_SETUP_IN_HW);
+ ieee80211_hw_set(ar->hw, SUPPORTS_REORDERING_BUFFER);
+ ieee80211_hw_set(ar->hw, SUPPORTS_AMSDU_IN_AMPDU);
+ ieee80211_hw_set(ar->hw, USES_RSS);
+ }
+
+ ar->hw->wiphy->features |= NL80211_FEATURE_STATIC_SMPS;
+ ar->hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
+
+ /* TODO: Check whether the HT capability advertised by firmware differs
+ * per band on a dual-band capable radio. Handling per-band HT
+ * capabilities will be tricky.
+ */
+ if (ht_cap & WMI_HT_CAP_DYNAMIC_SMPS ||
+ (ar->supports_6ghz && ab->hw_params.supports_dynamic_smps_6ghz))
+ ar->hw->wiphy->features |= NL80211_FEATURE_DYNAMIC_SMPS;
+
+ ar->hw->wiphy->max_scan_ssids = WLAN_SCAN_PARAMS_MAX_SSID;
+ ar->hw->wiphy->max_scan_ie_len = WLAN_SCAN_PARAMS_MAX_IE_LEN;
+
+ ar->hw->max_listen_interval = ATH11K_MAX_HW_LISTEN_INTERVAL;
+
+ ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
+ ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
+ ar->hw->wiphy->max_remain_on_channel_duration = 5000;
+
+ ar->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
+ ar->hw->wiphy->features |= NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE |
+ NL80211_FEATURE_AP_SCAN;
+
+ ar->hw->wiphy->features |= NL80211_FEATURE_TX_POWER_INSERTION;
+
+ ar->max_num_stations = TARGET_NUM_STATIONS(ab);
+ ar->max_num_peers = TARGET_NUM_PEERS_PDEV(ab);
+
+ ar->hw->wiphy->max_ap_assoc_sta = ar->max_num_stations;
+
+ if (test_bit(WMI_TLV_SERVICE_SPOOF_MAC_SUPPORT, ar->wmi->wmi_ab->svc_map)) {
+ ar->hw->wiphy->features |=
+ NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;
+ }
+
+ if (test_bit(WMI_TLV_SERVICE_NLO, ar->wmi->wmi_ab->svc_map)) {
+ ar->hw->wiphy->max_sched_scan_ssids = WMI_PNO_MAX_SUPP_NETWORKS;
+ ar->hw->wiphy->max_match_sets = WMI_PNO_MAX_SUPP_NETWORKS;
+ ar->hw->wiphy->max_sched_scan_ie_len = WMI_PNO_MAX_IE_LENGTH;
+ ar->hw->wiphy->max_sched_scan_plans = WMI_PNO_MAX_SCHED_SCAN_PLANS;
+ ar->hw->wiphy->max_sched_scan_plan_interval =
+ WMI_PNO_MAX_SCHED_SCAN_PLAN_INT;
+ ar->hw->wiphy->max_sched_scan_plan_iterations =
+ WMI_PNO_MAX_SCHED_SCAN_PLAN_ITRNS;
+ ar->hw->wiphy->features |= NL80211_FEATURE_ND_RANDOM_MAC_ADDR;
+ }
+
+ ret = ath11k_wow_init(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to init wow: %d\n", ret);
+ goto err_free_if_combs;
+ }
+
+ if (test_bit(WMI_TLV_SERVICE_TX_DATA_MGMT_ACK_RSSI,
+ ar->ab->wmi_ab.svc_map))
+ wiphy_ext_feature_set(ar->hw->wiphy,
+ NL80211_EXT_FEATURE_ACK_SIGNAL_SUPPORT);
+
+ ar->hw->queues = ATH11K_HW_MAX_QUEUES;
+ ar->hw->wiphy->tx_queue_len = ATH11K_QUEUE_LEN;
+ ar->hw->offchannel_tx_hw_queue = ATH11K_HW_MAX_QUEUES - 1;
+ ar->hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_HE;
+
+ ar->hw->vif_data_size = sizeof(struct ath11k_vif);
+ ar->hw->sta_data_size = sizeof(struct ath11k_sta);
+
+ wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
+ wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_STA_TX_PWR);
+ if (test_bit(WMI_TLV_SERVICE_BSS_COLOR_OFFLOAD,
+ ar->ab->wmi_ab.svc_map)) {
+ wiphy_ext_feature_set(ar->hw->wiphy,
+ NL80211_EXT_FEATURE_BSS_COLOR);
+ ieee80211_hw_set(ar->hw, DETECTS_COLOR_COLLISION);
+ }
+
+ ar->hw->wiphy->cipher_suites = cipher_suites;
+ ar->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
+
+ ar->hw->wiphy->iftype_ext_capab = ath11k_iftypes_ext_capa;
+ ar->hw->wiphy->num_iftype_ext_capab =
+ ARRAY_SIZE(ath11k_iftypes_ext_capa);
+
+ if (ar->supports_6ghz) {
+ wiphy_ext_feature_set(ar->hw->wiphy,
+ NL80211_EXT_FEATURE_FILS_DISCOVERY);
+ wiphy_ext_feature_set(ar->hw->wiphy,
+ NL80211_EXT_FEATURE_UNSOL_BCAST_PROBE_RESP);
+ }
+
+ wiphy_ext_feature_set(ar->hw->wiphy,
+ NL80211_EXT_FEATURE_SET_SCAN_DWELL);
+
+ if (test_bit(WMI_TLV_SERVICE_RTT, ar->ab->wmi_ab.svc_map))
+ wiphy_ext_feature_set(ar->hw->wiphy,
+ NL80211_EXT_FEATURE_ENABLE_FTM_RESPONDER);
+
+ ar->hw->wiphy->mbssid_max_interfaces = TARGET_NUM_VDEVS(ab);
+ ar->hw->wiphy->ema_max_profile_periodicity = TARGET_EMA_MAX_PROFILE_PERIOD;
+
+ ath11k_reg_init(ar);
+
+ if (!test_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags)) {
+ ar->hw->netdev_features = NETIF_F_HW_CSUM;
+ ieee80211_hw_set(ar->hw, SW_CRYPTO_CONTROL);
+ ieee80211_hw_set(ar->hw, SUPPORT_FAST_XMIT);
+ }
+
+ if (test_bit(WMI_TLV_SERVICE_BIOS_SAR_SUPPORT, ar->ab->wmi_ab.svc_map) &&
+ ab->hw_params.bios_sar_capa)
+ ar->hw->wiphy->sar_capa = ab->hw_params.bios_sar_capa;
+
+ ret = ieee80211_register_hw(ar->hw);
+ if (ret) {
+ ath11k_err(ar->ab, "ieee80211 registration failed: %d\n", ret);
+ goto err_free_if_combs;
+ }
+
+ if (!ab->hw_params.supports_monitor)
+ /* There's a race between calling ieee80211_register_hw()
+ * and here where the monitor mode is enabled for a little
+	 * while. But that time is so short that in practice it doesn't
+	 * make a difference in real life.
+ */
+ ar->hw->wiphy->interface_modes &= ~BIT(NL80211_IFTYPE_MONITOR);
+
+ /* Apply the regd received during initialization */
+ ret = ath11k_regd_update(ar);
+ if (ret) {
+ ath11k_err(ar->ab, "ath11k regd update failed: %d\n", ret);
+ goto err_unregister_hw;
+ }
+
+ if (ab->hw_params.current_cc_support && ab->new_alpha2[0]) {
+ memcpy(&ar->alpha2, ab->new_alpha2, 2);
+ ret = ath11k_reg_set_cc(ar);
+ if (ret)
+ ath11k_warn(ar->ab,
+ "failed set cc code for mac register: %d\n", ret);
+ }
+
+ ret = ath11k_debugfs_register(ar);
+ if (ret) {
+ ath11k_err(ar->ab, "debugfs registration failed: %d\n", ret);
+ goto err_unregister_hw;
+ }
+
+ return 0;
+
+err_unregister_hw:
+ ieee80211_unregister_hw(ar->hw);
+
+err_free_if_combs:
+ kfree(ar->hw->wiphy->iface_combinations[0].limits);
+ kfree(ar->hw->wiphy->iface_combinations);
+
+err_free_channels:
+ kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
+ kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels);
+ kfree(ar->mac.sbands[NL80211_BAND_6GHZ].channels);
+
+err:
+ SET_IEEE80211_DEV(ar->hw, NULL);
+ return ret;
+}
+
+int ath11k_mac_register(struct ath11k_base *ab)
+{
+ struct ath11k *ar;
+ struct ath11k_pdev *pdev;
+ int i;
+ int ret;
+ u8 mac_addr[ETH_ALEN] = {};
+
+ if (test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags))
+ return 0;
+
+ /* Initialize channel counters frequency value in hertz */
+ ab->cc_freq_hz = IPQ8074_CC_FREQ_HERTZ;
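+	/* one bit per vdev across all radios, all initially marked free */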
+ ab->free_vdev_map = (1LL << (ab->num_radios * TARGET_NUM_VDEVS(ab))) - 1;
+
+ ret = ath11k_peer_rhash_tbl_init(ab);
+ if (ret)
+ return ret;
+
+ device_get_mac_address(ab->dev, mac_addr);
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = &ab->pdevs[i];
+ ar = pdev->ar;
+ if (ab->pdevs_macaddr_valid) {
+ ether_addr_copy(ar->mac_addr, pdev->mac_addr);
+ } else {
+ if (is_zero_ether_addr(mac_addr))
+ ether_addr_copy(ar->mac_addr, ab->mac_addr);
+ else
+ ether_addr_copy(ar->mac_addr, mac_addr);
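+			/* give each radio a distinct address by offsetting byte 4 */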
+ ar->mac_addr[4] += i;
+ }
+
+ idr_init(&ar->txmgmt_idr);
+ spin_lock_init(&ar->txmgmt_idr_lock);
+
+ ret = __ath11k_mac_register(ar);
+ if (ret)
+ goto err_cleanup;
+
+ init_waitqueue_head(&ar->txmgmt_empty_waitq);
+ }
+
+ return 0;
+
+err_cleanup:
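+	/* unwind only the radios that were registered so far */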
+ for (i = i - 1; i >= 0; i--) {
+ pdev = &ab->pdevs[i];
+ ar = pdev->ar;
+ __ath11k_mac_unregister(ar);
+ }
+
+ ath11k_peer_rhash_tbl_destroy(ab);
+
+ return ret;
+}
+
+int ath11k_mac_allocate(struct ath11k_base *ab)
+{
+ struct ieee80211_hw *hw;
+ struct ath11k *ar;
+ struct ath11k_pdev *pdev;
+ int ret;
+ int i;
+
+ if (test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags))
+ return 0;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = &ab->pdevs[i];
+ hw = ieee80211_alloc_hw(sizeof(struct ath11k), &ath11k_ops);
+ if (!hw) {
+ ath11k_warn(ab, "failed to allocate mac80211 hw device\n");
+ ret = -ENOMEM;
+ goto err_free_mac;
+ }
+
+ ar = hw->priv;
+ ar->hw = hw;
+ ar->ab = ab;
+ ar->pdev = pdev;
+ ar->pdev_idx = i;
+ ar->lmac_id = ath11k_hw_get_mac_from_pdev_id(&ab->hw_params, i);
+
+ ar->wmi = &ab->wmi_ab.wmi[i];
+ /* FIXME wmi[0] is already initialized during attach,
+ * Should we do this again?
+ */
+ ath11k_wmi_pdev_attach(ab, i);
+
+ ar->cfg_tx_chainmask = pdev->cap.tx_chain_mask;
+ ar->cfg_rx_chainmask = pdev->cap.rx_chain_mask;
+ ar->num_tx_chains = get_num_chains(pdev->cap.tx_chain_mask);
+ ar->num_rx_chains = get_num_chains(pdev->cap.rx_chain_mask);
+
+ pdev->ar = ar;
+ spin_lock_init(&ar->data_lock);
+ INIT_LIST_HEAD(&ar->arvifs);
+ INIT_LIST_HEAD(&ar->ppdu_stats_info);
+ mutex_init(&ar->conf_mutex);
+ init_completion(&ar->vdev_setup_done);
+ init_completion(&ar->vdev_delete_done);
+ init_completion(&ar->peer_assoc_done);
+ init_completion(&ar->peer_delete_done);
+ init_completion(&ar->install_key_done);
+ init_completion(&ar->bss_survey_done);
+ init_completion(&ar->scan.started);
+ init_completion(&ar->scan.completed);
+ init_completion(&ar->scan.on_channel);
+ init_completion(&ar->thermal.wmi_sync);
+
+ INIT_DELAYED_WORK(&ar->scan.timeout, ath11k_scan_timeout_work);
+ INIT_WORK(&ar->channel_update_work, ath11k_regd_update_chan_list_work);
+ INIT_LIST_HEAD(&ar->channel_update_queue);
+ INIT_WORK(&ar->regd_update_work, ath11k_regd_update_work);
+
+ INIT_WORK(&ar->wmi_mgmt_tx_work, ath11k_mgmt_over_wmi_tx_work);
+ skb_queue_head_init(&ar->wmi_mgmt_tx_queue);
+
+ clear_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags);
+
+ ar->monitor_vdev_id = -1;
+ clear_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags);
+ ar->vdev_id_11d_scan = ATH11K_11D_INVALID_VDEV_ID;
+ init_completion(&ar->completed_11d_scan);
+
+ ath11k_fw_stats_init(ar);
+ }
+
+ return 0;
+
+err_free_mac:
+ ath11k_mac_destroy(ab);
+
+ return ret;
+}
+
+void ath11k_mac_destroy(struct ath11k_base *ab)
+{
+ struct ath11k *ar;
+ struct ath11k_pdev *pdev;
+ int i;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = &ab->pdevs[i];
+ ar = pdev->ar;
+ if (!ar)
+ continue;
+
+ ath11k_fw_stats_free(&ar->fw_stats);
+ ieee80211_free_hw(ar->hw);
+ pdev->ar = NULL;
+ }
+}
+
+int ath11k_mac_vif_set_keepalive(struct ath11k_vif *arvif,
+ enum wmi_sta_keepalive_method method,
+ u32 interval)
+{
+ struct ath11k *ar = arvif->ar;
+ struct wmi_sta_keepalive_arg arg = {};
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (arvif->vdev_type != WMI_VDEV_TYPE_STA)
+ return 0;
+
+ if (!test_bit(WMI_TLV_SERVICE_STA_KEEP_ALIVE, ar->ab->wmi_ab.svc_map))
+ return 0;
+
+ arg.vdev_id = arvif->vdev_id;
+ arg.enabled = 1;
+ arg.method = method;
+ arg.interval = interval;
+
+ ret = ath11k_wmi_sta_keepalive(ar, &arg);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set keepalive on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/wireless/ath/ath11k/mac.h b/drivers/net/wireless/ath/ath11k/mac.h
new file mode 100644
index 000000000000..5e61eea1bb03
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/mac.h
@@ -0,0 +1,184 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2023, 2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef ATH11K_MAC_H
+#define ATH11K_MAC_H
+
+#include <net/mac80211.h>
+#include <net/cfg80211.h>
+#include "wmi.h"
+
+struct ath11k;
+struct ath11k_base;
+
+struct ath11k_generic_iter {
+ struct ath11k *ar;
+ int ret;
+};
+
+/* number of failed packets (20 packets with 16 sw retries each) */
+#define ATH11K_KICKOUT_THRESHOLD (20 * 16)
+
+/* Use insanely high numbers to make sure that the firmware keepalive
+ * implementation never kicks in, as we already have the same
+ * functionality in hostapd. Unit is seconds.
+ */
+#define ATH11K_KEEPALIVE_MIN_IDLE 3747
+#define ATH11K_KEEPALIVE_MAX_IDLE 3895
+#define ATH11K_KEEPALIVE_MAX_UNRESPONSIVE 3900
+
+#define WMI_HOST_RC_DS_FLAG 0x01
+#define WMI_HOST_RC_CW40_FLAG 0x02
+#define WMI_HOST_RC_SGI_FLAG 0x04
+#define WMI_HOST_RC_HT_FLAG 0x08
+#define WMI_HOST_RC_RTSCTS_FLAG 0x10
+#define WMI_HOST_RC_TX_STBC_FLAG 0x20
+#define WMI_HOST_RC_RX_STBC_FLAG 0xC0
+#define WMI_HOST_RC_RX_STBC_FLAG_S 6
+#define WMI_HOST_RC_WEP_TKIP_FLAG 0x100
+#define WMI_HOST_RC_TS_FLAG 0x200
+#define WMI_HOST_RC_UAPSD_FLAG 0x400
+
+#define WMI_HT_CAP_ENABLED 0x0001
+#define WMI_HT_CAP_HT20_SGI 0x0002
+#define WMI_HT_CAP_DYNAMIC_SMPS 0x0004
+#define WMI_HT_CAP_TX_STBC 0x0008
+#define WMI_HT_CAP_TX_STBC_MASK_SHIFT 3
+#define WMI_HT_CAP_RX_STBC 0x0030
+#define WMI_HT_CAP_RX_STBC_MASK_SHIFT 4
+#define WMI_HT_CAP_LDPC 0x0040
+#define WMI_HT_CAP_L_SIG_TXOP_PROT 0x0080
+#define WMI_HT_CAP_MPDU_DENSITY 0x0700
+#define WMI_HT_CAP_MPDU_DENSITY_MASK_SHIFT 8
+#define WMI_HT_CAP_HT40_SGI 0x0800
+#define WMI_HT_CAP_RX_LDPC 0x1000
+#define WMI_HT_CAP_TX_LDPC 0x2000
+#define WMI_HT_CAP_IBF_BFER 0x4000
+
+/* These macros should be used when we wish to advertise STBC support for
+ * only 1SS or 2SS or 3SS.
+ */
+#define WMI_HT_CAP_RX_STBC_1SS 0x0010
+#define WMI_HT_CAP_RX_STBC_2SS 0x0020
+#define WMI_HT_CAP_RX_STBC_3SS 0x0030
+
+#define WMI_HT_CAP_DEFAULT_ALL (WMI_HT_CAP_ENABLED | \
+ WMI_HT_CAP_HT20_SGI | \
+ WMI_HT_CAP_HT40_SGI | \
+ WMI_HT_CAP_TX_STBC | \
+ WMI_HT_CAP_RX_STBC | \
+ WMI_HT_CAP_LDPC)
+
+#define WMI_VHT_CAP_MAX_MPDU_LEN_MASK 0x00000003
+#define WMI_VHT_CAP_RX_LDPC 0x00000010
+#define WMI_VHT_CAP_SGI_80MHZ 0x00000020
+#define WMI_VHT_CAP_SGI_160MHZ 0x00000040
+#define WMI_VHT_CAP_TX_STBC 0x00000080
+#define WMI_VHT_CAP_RX_STBC_MASK 0x00000300
+#define WMI_VHT_CAP_RX_STBC_MASK_SHIFT 8
+#define WMI_VHT_CAP_SU_BFER 0x00000800
+#define WMI_VHT_CAP_SU_BFEE 0x00001000
+#define WMI_VHT_CAP_MAX_CS_ANT_MASK 0x0000E000
+#define WMI_VHT_CAP_MAX_CS_ANT_MASK_SHIFT 13
+#define WMI_VHT_CAP_MAX_SND_DIM_MASK 0x00070000
+#define WMI_VHT_CAP_MAX_SND_DIM_MASK_SHIFT 16
+#define WMI_VHT_CAP_MU_BFER 0x00080000
+#define WMI_VHT_CAP_MU_BFEE 0x00100000
+#define WMI_VHT_CAP_MAX_AMPDU_LEN_EXP 0x03800000
+#define WMI_VHT_CAP_MAX_AMPDU_LEN_EXP_SHIFT	23
+#define WMI_VHT_CAP_RX_FIXED_ANT 0x10000000
+#define WMI_VHT_CAP_TX_FIXED_ANT 0x20000000
+
+#define WMI_VHT_CAP_MAX_MPDU_LEN_11454 0x00000002
+
+/* These macros should be used when we wish to advertise STBC support for
+ * only 1SS or 2SS or 3SS.
+ */
+#define WMI_VHT_CAP_RX_STBC_1SS 0x00000100
+#define WMI_VHT_CAP_RX_STBC_2SS 0x00000200
+#define WMI_VHT_CAP_RX_STBC_3SS 0x00000300
+
+#define WMI_VHT_CAP_DEFAULT_ALL (WMI_VHT_CAP_MAX_MPDU_LEN_11454 | \
+ WMI_VHT_CAP_SGI_80MHZ | \
+ WMI_VHT_CAP_TX_STBC | \
+ WMI_VHT_CAP_RX_STBC_MASK | \
+ WMI_VHT_CAP_RX_LDPC | \
+ WMI_VHT_CAP_MAX_AMPDU_LEN_EXP | \
+ WMI_VHT_CAP_RX_FIXED_ANT | \
+ WMI_VHT_CAP_TX_FIXED_ANT)
+
+/* FIXME: should these be in ieee80211.h? */
+#define IEEE80211_VHT_MCS_SUPPORT_0_11_MASK GENMASK(23, 16)
+#define IEEE80211_DISABLE_VHT_MCS_SUPPORT_0_11 BIT(24)
+
+#define WMI_MAX_SPATIAL_STREAM 3
+
+#define ATH11K_CHAN_WIDTH_NUM 8
+#define ATH11K_BW_NSS_MAP_ENABLE BIT(31)
+#define ATH11K_PEER_RX_NSS_160MHZ GENMASK(2, 0)
+#define ATH11K_PEER_RX_NSS_80_80MHZ GENMASK(5, 3)
+
+#define ATH11K_OBSS_PD_MAX_THRESHOLD -82
+#define ATH11K_OBSS_PD_NON_SRG_MAX_THRESHOLD -62
+#define ATH11K_OBSS_PD_THRESHOLD_IN_DBM BIT(29)
+#define ATH11K_OBSS_PD_SRG_EN BIT(30)
+#define ATH11K_OBSS_PD_NON_SRG_EN BIT(31)
+
+extern const struct htt_rx_ring_tlv_filter ath11k_mac_mon_status_filter_default;
+
+#define ATH11K_SCAN_11D_INTERVAL 600000
+#define ATH11K_11D_INVALID_VDEV_ID 0xFFFF
+
+void ath11k_mac_11d_scan_start(struct ath11k *ar, u32 vdev_id);
+void ath11k_mac_11d_scan_stop(struct ath11k *ar);
+void ath11k_mac_11d_scan_stop_all(struct ath11k_base *ab);
+
+void ath11k_mac_destroy(struct ath11k_base *ab);
+void ath11k_mac_unregister(struct ath11k_base *ab);
+int ath11k_mac_register(struct ath11k_base *ab);
+int ath11k_mac_allocate(struct ath11k_base *ab);
+int ath11k_mac_hw_ratecode_to_legacy_rate(u8 hw_rc, u8 preamble, u8 *rateidx,
+ u16 *rate);
+u8 ath11k_mac_bitrate_to_idx(const struct ieee80211_supported_band *sband,
+ u32 bitrate);
+u8 ath11k_mac_hw_rate_to_idx(const struct ieee80211_supported_band *sband,
+ u8 hw_rate, bool cck);
+
+void __ath11k_mac_scan_finish(struct ath11k *ar);
+void ath11k_mac_scan_finish(struct ath11k *ar);
+
+struct ath11k_vif *ath11k_mac_get_arvif(struct ath11k *ar, u32 vdev_id);
+struct ath11k_vif *ath11k_mac_get_arvif_by_vdev_id(struct ath11k_base *ab,
+ u32 vdev_id);
+u8 ath11k_mac_get_target_pdev_id(struct ath11k *ar);
+u8 ath11k_mac_get_target_pdev_id_from_vif(struct ath11k_vif *arvif);
+struct ath11k_vif *ath11k_mac_get_vif_up(struct ath11k_base *ab);
+
+struct ath11k *ath11k_mac_get_ar_by_vdev_id(struct ath11k_base *ab, u32 vdev_id);
+struct ath11k *ath11k_mac_get_ar_by_pdev_id(struct ath11k_base *ab, u32 pdev_id);
+
+void ath11k_mac_drain_tx(struct ath11k *ar);
+void ath11k_mac_peer_cleanup_all(struct ath11k *ar);
+int ath11k_mac_tx_mgmt_pending_free(int buf_id, void *skb, void *ctx);
+u8 ath11k_mac_bw_to_mac80211_bw(u8 bw);
+enum nl80211_he_gi ath11k_mac_he_gi_to_nl80211_he_gi(u8 sgi);
+enum nl80211_he_ru_alloc ath11k_mac_phy_he_ru_to_nl80211_he_ru_alloc(u16 ru_phy);
+enum nl80211_he_ru_alloc ath11k_mac_he_ru_tones_to_nl80211_he_ru_alloc(u16 ru_tones);
+enum ath11k_supported_bw ath11k_mac_mac80211_bw_to_ath11k_bw(enum rate_info_bw bw);
+enum hal_encrypt_type ath11k_dp_tx_get_encrypt_type(u32 cipher);
+void ath11k_mac_handle_beacon(struct ath11k *ar, struct sk_buff *skb);
+void ath11k_mac_handle_beacon_miss(struct ath11k *ar, u32 vdev_id);
+void ath11k_mac_bcn_tx_event(struct ath11k_vif *arvif);
+int ath11k_mac_wait_tx_complete(struct ath11k *ar);
+int ath11k_mac_vif_set_keepalive(struct ath11k_vif *arvif,
+ enum wmi_sta_keepalive_method method,
+ u32 interval);
+void ath11k_mac_fill_reg_tpc_info(struct ath11k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_chanctx_conf *ctx);
+int ath11k_mac_fw_stats_request(struct ath11k *ar,
+ struct stats_request_params *req_param);
+#endif
diff --git a/drivers/net/wireless/ath/ath11k/mhi.c b/drivers/net/wireless/ath/ath11k/mhi.c
new file mode 100644
index 000000000000..acd76e9392d3
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/mhi.c
@@ -0,0 +1,506 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2020 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/msi.h>
+#include <linux/pci.h>
+#include <linux/firmware.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/ioport.h>
+
+#include "core.h"
+#include "debug.h"
+#include "mhi.h"
+#include "pci.h"
+#include "pcic.h"
+
+#define MHI_TIMEOUT_DEFAULT_MS 20000
+#define RDDM_DUMP_SIZE 0x420000
+#define MHI_CB_INVALID 0xff
+
+static const struct mhi_channel_config ath11k_mhi_channels_qca6390[] = {
+ {
+ .num = 20,
+ .name = "IPCR",
+ .num_elements = 64,
+ .event_ring = 1,
+ .dir = DMA_TO_DEVICE,
+ .ee_mask = 0x4,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ },
+ {
+ .num = 21,
+ .name = "IPCR",
+ .num_elements = 64,
+ .event_ring = 1,
+ .dir = DMA_FROM_DEVICE,
+ .ee_mask = 0x4,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = true,
+ },
+};
+
+static struct mhi_event_config ath11k_mhi_events_qca6390[] = {
+ {
+ .num_elements = 32,
+ .irq_moderation_ms = 0,
+ .irq = 1,
+ .mode = MHI_DB_BRST_DISABLE,
+ .data_type = MHI_ER_CTRL,
+ .hardware_event = false,
+ .client_managed = false,
+ .offload_channel = false,
+ },
+ {
+ .num_elements = 256,
+ .irq_moderation_ms = 1,
+ .irq = 2,
+ .mode = MHI_DB_BRST_DISABLE,
+ .priority = 1,
+ .hardware_event = false,
+ .client_managed = false,
+ .offload_channel = false,
+ },
+};
+
+static const struct mhi_controller_config ath11k_mhi_config_qca6390 = {
+ .max_channels = 128,
+ .timeout_ms = 2000,
+ .use_bounce_buf = false,
+ .buf_len = 8192,
+ .num_channels = ARRAY_SIZE(ath11k_mhi_channels_qca6390),
+ .ch_cfg = ath11k_mhi_channels_qca6390,
+ .num_events = ARRAY_SIZE(ath11k_mhi_events_qca6390),
+ .event_cfg = ath11k_mhi_events_qca6390,
+};
+
+static const struct mhi_channel_config ath11k_mhi_channels_qcn9074[] = {
+ {
+ .num = 20,
+ .name = "IPCR",
+ .num_elements = 32,
+ .event_ring = 1,
+ .dir = DMA_TO_DEVICE,
+ .ee_mask = 0x14,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ },
+ {
+ .num = 21,
+ .name = "IPCR",
+ .num_elements = 32,
+ .event_ring = 1,
+ .dir = DMA_FROM_DEVICE,
+ .ee_mask = 0x14,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = true,
+ },
+};
+
+static struct mhi_event_config ath11k_mhi_events_qcn9074[] = {
+ {
+ .num_elements = 32,
+ .irq_moderation_ms = 0,
+ .irq = 1,
+ .data_type = MHI_ER_CTRL,
+ .mode = MHI_DB_BRST_DISABLE,
+ .hardware_event = false,
+ .client_managed = false,
+ .offload_channel = false,
+ },
+ {
+ .num_elements = 256,
+ .irq_moderation_ms = 1,
+ .irq = 2,
+ .mode = MHI_DB_BRST_DISABLE,
+ .priority = 1,
+ .hardware_event = false,
+ .client_managed = false,
+ .offload_channel = false,
+ },
+};
+
+static const struct mhi_controller_config ath11k_mhi_config_qcn9074 = {
+ .max_channels = 30,
+ .timeout_ms = 10000,
+ .use_bounce_buf = false,
+ .buf_len = 0,
+ .num_channels = ARRAY_SIZE(ath11k_mhi_channels_qcn9074),
+ .ch_cfg = ath11k_mhi_channels_qcn9074,
+ .num_events = ARRAY_SIZE(ath11k_mhi_events_qcn9074),
+ .event_cfg = ath11k_mhi_events_qcn9074,
+};
+
+void ath11k_mhi_set_mhictrl_reset(struct ath11k_base *ab)
+{
+ u32 val;
+
+ val = ath11k_pcic_read32(ab, MHISTATUS);
+
+ ath11k_dbg(ab, ATH11K_DBG_PCI, "mhistatus 0x%x\n", val);
+
+ /* After SOC_GLOBAL_RESET, MHISTATUS may still have SYSERR bit set
+ * and thus need to set MHICTRL_RESET to clear SYSERR.
+ */
+ ath11k_pcic_write32(ab, MHICTRL, MHICTRL_RESET_MASK);
+
+ mdelay(10);
+}
+
+static void ath11k_mhi_reset_txvecdb(struct ath11k_base *ab)
+{
+ ath11k_pcic_write32(ab, PCIE_TXVECDB, 0);
+}
+
+static void ath11k_mhi_reset_txvecstatus(struct ath11k_base *ab)
+{
+ ath11k_pcic_write32(ab, PCIE_TXVECSTATUS, 0);
+}
+
+static void ath11k_mhi_reset_rxvecdb(struct ath11k_base *ab)
+{
+ ath11k_pcic_write32(ab, PCIE_RXVECDB, 0);
+}
+
+static void ath11k_mhi_reset_rxvecstatus(struct ath11k_base *ab)
+{
+ ath11k_pcic_write32(ab, PCIE_RXVECSTATUS, 0);
+}
+
+void ath11k_mhi_clear_vector(struct ath11k_base *ab)
+{
+ ath11k_mhi_reset_txvecdb(ab);
+ ath11k_mhi_reset_txvecstatus(ab);
+ ath11k_mhi_reset_rxvecdb(ab);
+ ath11k_mhi_reset_rxvecstatus(ab);
+}
+
+static int ath11k_mhi_get_msi(struct ath11k_pci *ab_pci)
+{
+ struct ath11k_base *ab = ab_pci->ab;
+ u32 user_base_data, base_vector;
+ int ret, num_vectors, i;
+ int *irq;
+ unsigned int msi_data;
+
+ ret = ath11k_pcic_get_user_msi_assignment(ab, "MHI", &num_vectors,
+ &user_base_data, &base_vector);
+ if (ret)
+ return ret;
+
+ ath11k_dbg(ab, ATH11K_DBG_PCI, "num_vectors %d base_vector %d\n",
+ num_vectors, base_vector);
+
+ irq = kcalloc(num_vectors, sizeof(int), GFP_KERNEL);
+ if (!irq)
+ return -ENOMEM;
+
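+	/* without multiple MSI vectors every user shares the base vector */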
+ for (i = 0; i < num_vectors; i++) {
+ msi_data = base_vector;
+
+ if (test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
+ msi_data += i;
+
+ irq[i] = ath11k_pci_get_msi_irq(ab, msi_data);
+ }
+
+ ab_pci->mhi_ctrl->irq = irq;
+ ab_pci->mhi_ctrl->nr_irqs = num_vectors;
+
+ return 0;
+}
+
+static int ath11k_mhi_op_runtime_get(struct mhi_controller *mhi_cntrl)
+{
+ return 0;
+}
+
+static void ath11k_mhi_op_runtime_put(struct mhi_controller *mhi_cntrl)
+{
+}
+
+static char *ath11k_mhi_op_callback_to_str(enum mhi_callback reason)
+{
+ switch (reason) {
+ case MHI_CB_IDLE:
+ return "MHI_CB_IDLE";
+ case MHI_CB_PENDING_DATA:
+ return "MHI_CB_PENDING_DATA";
+ case MHI_CB_LPM_ENTER:
+ return "MHI_CB_LPM_ENTER";
+ case MHI_CB_LPM_EXIT:
+ return "MHI_CB_LPM_EXIT";
+ case MHI_CB_EE_RDDM:
+ return "MHI_CB_EE_RDDM";
+ case MHI_CB_EE_MISSION_MODE:
+ return "MHI_CB_EE_MISSION_MODE";
+ case MHI_CB_SYS_ERROR:
+ return "MHI_CB_SYS_ERROR";
+ case MHI_CB_FATAL_ERROR:
+ return "MHI_CB_FATAL_ERROR";
+ case MHI_CB_BW_REQ:
+ return "MHI_CB_BW_REQ";
+ default:
+ return "UNKNOWN";
+ }
+}
+
+static void ath11k_mhi_op_status_cb(struct mhi_controller *mhi_cntrl,
+ enum mhi_callback cb)
+{
+ struct ath11k_base *ab = dev_get_drvdata(mhi_cntrl->cntrl_dev);
+ struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
+
+ ath11k_dbg(ab, ATH11K_DBG_BOOT, "notify status reason %s\n",
+ ath11k_mhi_op_callback_to_str(cb));
+
+ switch (cb) {
+ case MHI_CB_SYS_ERROR:
+ ath11k_warn(ab, "firmware crashed: MHI_CB_SYS_ERROR\n");
+ break;
+ case MHI_CB_EE_RDDM:
+ ath11k_warn(ab, "firmware crashed: MHI_CB_EE_RDDM\n");
+ if (ab_pci->mhi_pre_cb == MHI_CB_EE_RDDM) {
+ ath11k_dbg(ab, ATH11K_DBG_BOOT,
+ "do not queue again for consecutive RDDM event\n");
+ break;
+ }
+
+ if (!(test_bit(ATH11K_FLAG_UNREGISTERING, &ab->dev_flags)))
+ queue_work(ab->workqueue_aux, &ab->reset_work);
+
+ break;
+ default:
+ break;
+ }
+
+ ab_pci->mhi_pre_cb = cb;
+}
+
+static int ath11k_mhi_op_read_reg(struct mhi_controller *mhi_cntrl,
+ void __iomem *addr,
+ u32 *out)
+{
+ *out = readl(addr);
+
+ return 0;
+}
+
+static void ath11k_mhi_op_write_reg(struct mhi_controller *mhi_cntrl,
+ void __iomem *addr,
+ u32 val)
+{
+ writel(val, addr);
+}
+
+static int ath11k_mhi_read_addr_from_dt(struct mhi_controller *mhi_ctrl)
+{
+ struct device_node *np;
+ struct resource res;
+ int ret;
+
+ np = of_find_node_by_type(NULL, "memory");
+ if (!np)
+ return -ENOENT;
+
+ ret = of_address_to_resource(np, 0, &res);
+ of_node_put(np);
+ if (ret)
+ return ret;
+
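+	/* place the MHI IOVA range past the first 16 MiB of the region */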
+ mhi_ctrl->iova_start = res.start + 0x1000000;
+ mhi_ctrl->iova_stop = res.end;
+
+ return 0;
+}
+
+int ath11k_mhi_register(struct ath11k_pci *ab_pci)
+{
+ struct ath11k_base *ab = ab_pci->ab;
+ struct mhi_controller *mhi_ctrl;
+ const struct mhi_controller_config *ath11k_mhi_config;
+ int ret;
+
+ mhi_ctrl = mhi_alloc_controller();
+ if (!mhi_ctrl)
+ return -ENOMEM;
+
+ ab_pci->mhi_ctrl = mhi_ctrl;
+ mhi_ctrl->cntrl_dev = ab->dev;
+ mhi_ctrl->regs = ab->mem;
+ mhi_ctrl->reg_len = ab->mem_len;
+
+ if (ab->fw.amss_data && ab->fw.amss_len > 0) {
+ /* use MHI firmware file from firmware-N.bin */
+ mhi_ctrl->fw_data = ab->fw.amss_data;
+ mhi_ctrl->fw_sz = ab->fw.amss_len;
+ } else {
+ /* use the old separate mhi.bin MHI firmware file */
+ ath11k_core_create_firmware_path(ab, ATH11K_AMSS_FILE,
+ ab_pci->amss_path,
+ sizeof(ab_pci->amss_path));
+ mhi_ctrl->fw_image = ab_pci->amss_path;
+ }
+
+ ret = ath11k_mhi_get_msi(ab_pci);
+ if (ret) {
+ ath11k_err(ab, "failed to get msi for mhi\n");
+ goto free_controller;
+ }
+
+ if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
+ mhi_ctrl->irq_flags = IRQF_SHARED | IRQF_NOBALANCING;
+
+ if (test_bit(ATH11K_FLAG_FIXED_MEM_RGN, &ab->dev_flags)) {
+ ret = ath11k_mhi_read_addr_from_dt(mhi_ctrl);
+ if (ret < 0)
+ goto free_controller;
+ } else {
+ mhi_ctrl->iova_start = 0;
+ mhi_ctrl->iova_stop = ab_pci->dma_mask;
+ }
+
+ mhi_ctrl->rddm_size = RDDM_DUMP_SIZE;
+ mhi_ctrl->sbl_size = SZ_512K;
+ mhi_ctrl->seg_len = SZ_512K;
+ mhi_ctrl->fbc_download = true;
+ mhi_ctrl->runtime_get = ath11k_mhi_op_runtime_get;
+ mhi_ctrl->runtime_put = ath11k_mhi_op_runtime_put;
+ mhi_ctrl->status_cb = ath11k_mhi_op_status_cb;
+ mhi_ctrl->read_reg = ath11k_mhi_op_read_reg;
+ mhi_ctrl->write_reg = ath11k_mhi_op_write_reg;
+
+ switch (ab->hw_rev) {
+ case ATH11K_HW_QCN9074_HW10:
+ ath11k_mhi_config = &ath11k_mhi_config_qcn9074;
+ break;
+ case ATH11K_HW_QCA6390_HW20:
+ case ATH11K_HW_WCN6855_HW20:
+ case ATH11K_HW_WCN6855_HW21:
+ case ATH11K_HW_QCA2066_HW21:
+ case ATH11K_HW_QCA6698AQ_HW21:
+ ath11k_mhi_config = &ath11k_mhi_config_qca6390;
+ break;
+ default:
+ ath11k_err(ab, "failed assign mhi_config for unknown hw rev %d\n",
+ ab->hw_rev);
+ ret = -EINVAL;
+ goto free_controller;
+ }
+
+ ab_pci->mhi_pre_cb = MHI_CB_INVALID;
+ ret = mhi_register_controller(mhi_ctrl, ath11k_mhi_config);
+ if (ret) {
+ ath11k_err(ab, "failed to register to mhi bus, err = %d\n", ret);
+ goto free_controller;
+ }
+
+ return 0;
+
+free_controller:
+ mhi_free_controller(mhi_ctrl);
+ ab_pci->mhi_ctrl = NULL;
+ return ret;
+}
+
+void ath11k_mhi_unregister(struct ath11k_pci *ab_pci)
+{
+ struct mhi_controller *mhi_ctrl = ab_pci->mhi_ctrl;
+
+ mhi_unregister_controller(mhi_ctrl);
+ kfree(mhi_ctrl->irq);
+ mhi_free_controller(mhi_ctrl);
+}
+
+int ath11k_mhi_start(struct ath11k_pci *ab_pci)
+{
+ struct ath11k_base *ab = ab_pci->ab;
+ int ret;
+
+ ab_pci->mhi_ctrl->timeout_ms = MHI_TIMEOUT_DEFAULT_MS;
+
+ ret = mhi_prepare_for_power_up(ab_pci->mhi_ctrl);
+ if (ret) {
+ ath11k_warn(ab, "failed to prepare mhi: %d", ret);
+ return ret;
+ }
+
+ ret = mhi_sync_power_up(ab_pci->mhi_ctrl);
+ if (ret) {
+ ath11k_warn(ab, "failed to power up mhi: %d", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+void ath11k_mhi_stop(struct ath11k_pci *ab_pci, bool is_suspend)
+{
+	/* During suspend we need to use the mhi_power_down_keep_dev()
+	 * workaround, otherwise ath11k_core_resume() will time out
+	 * during resume.
+	 */
+ if (is_suspend)
+ mhi_power_down_keep_dev(ab_pci->mhi_ctrl, true);
+ else
+ mhi_power_down(ab_pci->mhi_ctrl, true);
+
+ mhi_unprepare_after_power_down(ab_pci->mhi_ctrl);
+}
+
+int ath11k_mhi_suspend(struct ath11k_pci *ab_pci)
+{
+ struct ath11k_base *ab = ab_pci->ab;
+ int ret;
+
+ ret = mhi_pm_suspend(ab_pci->mhi_ctrl);
+ if (ret) {
+ ath11k_warn(ab, "failed to suspend mhi: %d", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+int ath11k_mhi_resume(struct ath11k_pci *ab_pci)
+{
+ struct ath11k_base *ab = ab_pci->ab;
+ int ret;
+
+	/* Force the MHI resume, as some devices such as QCA6390 and
+	 * WCN6855 are functional even though they are not in the M3
+	 * state. So just ignore the MHI state while resuming.
+	 */
+ ret = mhi_pm_resume_force(ab_pci->mhi_ctrl);
+ if (ret) {
+ ath11k_warn(ab, "failed to resume mhi: %d", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+void ath11k_mhi_coredump(struct mhi_controller *mhi_ctrl, bool in_panic)
+{
+ mhi_download_rddm_image(mhi_ctrl, in_panic);
+}
diff --git a/drivers/net/wireless/ath/ath11k/mhi.h b/drivers/net/wireless/ath/ath11k/mhi.h
new file mode 100644
index 000000000000..5c5c2b03c81f
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/mhi.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2020 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, 2024-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+#ifndef _ATH11K_MHI_H
+#define _ATH11K_MHI_H
+
+#include "pci.h"
+
+#define PCIE_TXVECDB 0x360
+#define PCIE_TXVECSTATUS 0x368
+#define PCIE_RXVECDB 0x394
+#define PCIE_RXVECSTATUS 0x39C
+
+#define MHISTATUS 0x48
+#define MHICTRL 0x38
+#define MHICTRL_RESET_MASK 0x2
+
+int ath11k_mhi_start(struct ath11k_pci *ar_pci);
+void ath11k_mhi_stop(struct ath11k_pci *ar_pci, bool is_suspend);
+int ath11k_mhi_register(struct ath11k_pci *ar_pci);
+void ath11k_mhi_unregister(struct ath11k_pci *ar_pci);
+void ath11k_mhi_set_mhictrl_reset(struct ath11k_base *ab);
+void ath11k_mhi_clear_vector(struct ath11k_base *ab);
+
+int ath11k_mhi_suspend(struct ath11k_pci *ar_pci);
+int ath11k_mhi_resume(struct ath11k_pci *ar_pci);
+void ath11k_mhi_coredump(struct mhi_controller *mhi_ctrl, bool in_panic);
+
+#endif
diff --git a/drivers/net/wireless/ath/ath11k/p2p.c b/drivers/net/wireless/ath/ath11k/p2p.c
new file mode 100644
index 000000000000..01e14523f1fe
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/p2p.c
@@ -0,0 +1,149 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include "core.h"
+#include "wmi.h"
+#include "mac.h"
+#include "p2p.h"
+
+static void ath11k_p2p_noa_ie_fill(u8 *data, size_t len,
+ const struct ath11k_wmi_p2p_noa_info *noa)
+{
+ struct ieee80211_p2p_noa_attr *noa_attr;
+ u8 noa_descriptors, ctwindow;
+ bool oppps;
+ __le16 *noa_attr_len;
+ u16 attr_len;
+ int i;
+
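+	/* Resulting IE layout: EID(1) len(1) OUI(3) OUI-type(1), then the
+	 * NoA attribute: id(1) len(2) index/ctwindow(2) followed by the
+	 * NoA descriptors (13 bytes each).
+	 */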
+ ctwindow = u32_get_bits(noa->noa_attr, WMI_P2P_NOA_INFO_CTWIN_TU);
+ oppps = u32_get_bits(noa->noa_attr, WMI_P2P_NOA_INFO_OPP_PS);
+ noa_descriptors = u32_get_bits(noa->noa_attr,
+ WMI_P2P_NOA_INFO_DESC_NUM);
+
+ /* P2P IE */
+ data[0] = WLAN_EID_VENDOR_SPECIFIC;
+ data[1] = len - 2;
+ data[2] = (WLAN_OUI_WFA >> 16) & 0xff;
+ data[3] = (WLAN_OUI_WFA >> 8) & 0xff;
+ data[4] = (WLAN_OUI_WFA >> 0) & 0xff;
+ data[5] = WLAN_OUI_TYPE_WFA_P2P;
+
+ /* NOA ATTR */
+ data[6] = IEEE80211_P2P_ATTR_ABSENCE_NOTICE;
+ noa_attr_len = (__le16 *)&data[7]; /* 2 bytes */
+ noa_attr = (struct ieee80211_p2p_noa_attr *)&data[9];
+
+ noa_attr->index = u32_get_bits(noa->noa_attr,
+ WMI_P2P_NOA_INFO_INDEX);
+ noa_attr->oppps_ctwindow = ctwindow;
+ if (oppps)
+ noa_attr->oppps_ctwindow |= IEEE80211_P2P_OPPPS_ENABLE_BIT;
+
+ for (i = 0; i < noa_descriptors; i++) {
+ noa_attr->desc[i].count = noa->descriptors[i].type_count;
+ noa_attr->desc[i].duration =
+ cpu_to_le32(noa->descriptors[i].duration);
+ noa_attr->desc[i].interval =
+ cpu_to_le32(noa->descriptors[i].interval);
+ noa_attr->desc[i].start_time =
+ cpu_to_le32(noa->descriptors[i].start_time);
+ }
+
+ attr_len = 2; /* index + oppps_ctwindow */
+ attr_len += noa_descriptors * sizeof(struct ieee80211_p2p_noa_desc);
+ *noa_attr_len = __cpu_to_le16(attr_len);
+}
+
+static size_t
+ath11k_p2p_noa_ie_len_compute(const struct ath11k_wmi_p2p_noa_info *noa)
+{
+ size_t len = 0;
+ u8 noa_descriptors = u32_get_bits(noa->noa_attr,
+ WMI_P2P_NOA_INFO_DESC_NUM);
+
+	if (!noa_descriptors &&
+	    !u32_get_bits(noa->noa_attr, WMI_P2P_NOA_INFO_OPP_PS))
+ return 0;
+
+ len += 1 + 1 + 4; /* EID + len + OUI */
+ len += 1 + 2; /* noa attr + attr len */
+ len += 1 + 1; /* index + oppps_ctwindow */
+ len += noa_descriptors *
+ sizeof(struct ieee80211_p2p_noa_desc);
+
+ return len;
+}
+
+static void ath11k_p2p_noa_ie_assign(struct ath11k_vif *arvif, void *ie,
+ size_t len)
+{
+ struct ath11k *ar = arvif->ar;
+
+ lockdep_assert_held(&ar->data_lock);
+
+ kfree(arvif->u.ap.noa_data);
+
+ arvif->u.ap.noa_data = ie;
+ arvif->u.ap.noa_len = len;
+}
+
+static void __ath11k_p2p_noa_update(struct ath11k_vif *arvif,
+ const struct ath11k_wmi_p2p_noa_info *noa)
+{
+ struct ath11k *ar = arvif->ar;
+ void *ie;
+ size_t len;
+
+ lockdep_assert_held(&ar->data_lock);
+
+ ath11k_p2p_noa_ie_assign(arvif, NULL, 0);
+
+ len = ath11k_p2p_noa_ie_len_compute(noa);
+ if (!len)
+ return;
+
+ ie = kmalloc(len, GFP_ATOMIC);
+ if (!ie)
+ return;
+
+ ath11k_p2p_noa_ie_fill(ie, len, noa);
+	ath11k_p2p_noa_ie_assign(arvif, ie, len);
+}
+
+void ath11k_p2p_noa_update(struct ath11k_vif *arvif,
+ const struct ath11k_wmi_p2p_noa_info *noa)
+{
+ struct ath11k *ar = arvif->ar;
+
+ spin_lock_bh(&ar->data_lock);
+ __ath11k_p2p_noa_update(arvif, noa);
+ spin_unlock_bh(&ar->data_lock);
+}
+
+static void ath11k_p2p_noa_update_vdev_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct ath11k_p2p_noa_arg *arg = data;
+
+ if (arvif->vdev_id != arg->vdev_id)
+ return;
+
+ ath11k_p2p_noa_update(arvif, arg->noa);
+}
+
+void ath11k_p2p_noa_update_by_vdev_id(struct ath11k *ar, u32 vdev_id,
+ const struct ath11k_wmi_p2p_noa_info *noa)
+{
+ struct ath11k_p2p_noa_arg arg = {
+ .vdev_id = vdev_id,
+ .noa = noa,
+ };
+
+ ieee80211_iterate_active_interfaces_atomic(ar->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ ath11k_p2p_noa_update_vdev_iter,
+ &arg);
+}
diff --git a/drivers/net/wireless/ath/ath11k/p2p.h b/drivers/net/wireless/ath/ath11k/p2p.h
new file mode 100644
index 000000000000..d907940a9b09
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/p2p.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef ATH11K_P2P_H
+#define ATH11K_P2P_H
+
+#include "wmi.h"
+
+struct ath11k_wmi_p2p_noa_info;
+
+struct ath11k_p2p_noa_arg {
+ u32 vdev_id;
+ const struct ath11k_wmi_p2p_noa_info *noa;
+};
+
+void ath11k_p2p_noa_update(struct ath11k_vif *arvif,
+ const struct ath11k_wmi_p2p_noa_info *noa);
+void ath11k_p2p_noa_update_by_vdev_id(struct ath11k *ar, u32 vdev_id,
+ const struct ath11k_wmi_p2p_noa_info *noa);
+#endif
diff --git a/drivers/net/wireless/ath/ath11k/pci.c b/drivers/net/wireless/ath/ath11k/pci.c
new file mode 100644
index 000000000000..7114eca8810d
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/pci.c
@@ -0,0 +1,1322 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/pci.h>
+#include <linux/of.h>
+#include <linux/time.h>
+#include <linux/vmalloc.h>
+
+#include "pci.h"
+#include "core.h"
+#include "hif.h"
+#include "mhi.h"
+#include "debug.h"
+#include "pcic.h"
+#include "qmi.h"
+
+#define ATH11K_PCI_BAR_NUM 0
+#define ATH11K_PCI_DMA_MASK 36
+#define ATH11K_PCI_COHERENT_DMA_MASK 32
+
+#define TCSR_SOC_HW_VERSION 0x0224
+#define TCSR_SOC_HW_VERSION_MAJOR_MASK GENMASK(11, 8)
+#define TCSR_SOC_HW_VERSION_MINOR_MASK GENMASK(7, 0)
+
+#define QCA6390_DEVICE_ID 0x1101
+#define QCN9074_DEVICE_ID 0x1104
+#define WCN6855_DEVICE_ID 0x1103
+
+#define TCSR_SOC_HW_SUB_VER 0x1910010
+
+static const struct pci_device_id ath11k_pci_id_table[] = {
+ { PCI_VDEVICE(QCOM, QCA6390_DEVICE_ID) },
+ { PCI_VDEVICE(QCOM, WCN6855_DEVICE_ID) },
+ { PCI_VDEVICE(QCOM, QCN9074_DEVICE_ID) },
+ {}
+};
+
+MODULE_DEVICE_TABLE(pci, ath11k_pci_id_table);
+
+static int ath11k_pci_bus_wake_up(struct ath11k_base *ab)
+{
+ struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
+
+ return mhi_device_get_sync(ab_pci->mhi_ctrl->mhi_dev);
+}
+
+static void ath11k_pci_bus_release(struct ath11k_base *ab)
+{
+ struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
+
+ mhi_device_put(ab_pci->mhi_ctrl->mhi_dev);
+}
+
+static u32 ath11k_pci_get_window_start(struct ath11k_base *ab, u32 offset)
+{
+ if (!ab->hw_params.static_window_map)
+ return ATH11K_PCI_WINDOW_START;
+
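+	/* (offset ^ base) < range holds exactly when offset falls inside
+	 * base's window, since the bases are aligned to the window size
+	 */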
+ if ((offset ^ HAL_SEQ_WCSS_UMAC_OFFSET) < ATH11K_PCI_WINDOW_RANGE_MASK)
+ /* if offset lies within DP register range, use 3rd window */
+ return 3 * ATH11K_PCI_WINDOW_START;
+ else if ((offset ^ HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab)) <
+ ATH11K_PCI_WINDOW_RANGE_MASK)
+ /* if offset lies within CE register range, use 2nd window */
+ return 2 * ATH11K_PCI_WINDOW_START;
+ else
+ return ATH11K_PCI_WINDOW_START;
+}
+
+static inline void ath11k_pci_select_window(struct ath11k_pci *ab_pci, u32 offset)
+{
+ struct ath11k_base *ab = ab_pci->ab;
+
+ u32 window = FIELD_GET(ATH11K_PCI_WINDOW_VALUE_MASK, offset);
+
+ lockdep_assert_held(&ab_pci->window_lock);
+
+ if (window != ab_pci->register_window) {
+ iowrite32(ATH11K_PCI_WINDOW_ENABLE_BIT | window,
+ ab->mem + ATH11K_PCI_WINDOW_REG_ADDRESS);
+ ioread32(ab->mem + ATH11K_PCI_WINDOW_REG_ADDRESS);
+ ab_pci->register_window = window;
+ }
+}
+
+static void
+ath11k_pci_window_write32(struct ath11k_base *ab, u32 offset, u32 value)
+{
+ struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
+ u32 window_start;
+
+ window_start = ath11k_pci_get_window_start(ab, offset);
+
+ if (window_start == ATH11K_PCI_WINDOW_START) {
+ spin_lock_bh(&ab_pci->window_lock);
+ ath11k_pci_select_window(ab_pci, offset);
+ iowrite32(value, ab->mem + window_start +
+ (offset & ATH11K_PCI_WINDOW_RANGE_MASK));
+ spin_unlock_bh(&ab_pci->window_lock);
+ } else {
+ iowrite32(value, ab->mem + window_start +
+ (offset & ATH11K_PCI_WINDOW_RANGE_MASK));
+ }
+}
+
+static u32 ath11k_pci_window_read32(struct ath11k_base *ab, u32 offset)
+{
+ struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
+ u32 window_start, val;
+
+ window_start = ath11k_pci_get_window_start(ab, offset);
+
+ if (window_start == ATH11K_PCI_WINDOW_START) {
+ spin_lock_bh(&ab_pci->window_lock);
+ ath11k_pci_select_window(ab_pci, offset);
+ val = ioread32(ab->mem + window_start +
+ (offset & ATH11K_PCI_WINDOW_RANGE_MASK));
+ spin_unlock_bh(&ab_pci->window_lock);
+ } else {
+ val = ioread32(ab->mem + window_start +
+ (offset & ATH11K_PCI_WINDOW_RANGE_MASK));
+ }
+
+ return val;
+}
+
+int ath11k_pci_get_msi_irq(struct ath11k_base *ab, unsigned int vector)
+{
+ struct pci_dev *pci_dev = to_pci_dev(ab->dev);
+
+ return pci_irq_vector(pci_dev, vector);
+}
+
+static const struct ath11k_pci_ops ath11k_pci_ops_qca6390 = {
+ .wakeup = ath11k_pci_bus_wake_up,
+ .release = ath11k_pci_bus_release,
+ .get_msi_irq = ath11k_pci_get_msi_irq,
+ .window_write32 = ath11k_pci_window_write32,
+ .window_read32 = ath11k_pci_window_read32,
+};
+
+static const struct ath11k_pci_ops ath11k_pci_ops_qcn9074 = {
+ .wakeup = NULL,
+ .release = NULL,
+ .get_msi_irq = ath11k_pci_get_msi_irq,
+ .window_write32 = ath11k_pci_window_write32,
+ .window_read32 = ath11k_pci_window_read32,
+};
+
+static const struct ath11k_msi_config msi_config_one_msi = {
+ .total_vectors = 1,
+ .total_users = 4,
+ .users = (struct ath11k_msi_user[]) {
+ { .name = "MHI", .num_vectors = 3, .base_vector = 0 },
+ { .name = "CE", .num_vectors = 1, .base_vector = 0 },
+ { .name = "WAKE", .num_vectors = 1, .base_vector = 0 },
+ { .name = "DP", .num_vectors = 1, .base_vector = 0 },
+ },
+};
+
+static inline void ath11k_pci_select_static_window(struct ath11k_pci *ab_pci)
+{
+ u32 umac_window;
+ u32 ce_window;
+ u32 window;
+
+ umac_window = FIELD_GET(ATH11K_PCI_WINDOW_VALUE_MASK, HAL_SEQ_WCSS_UMAC_OFFSET);
+ ce_window = FIELD_GET(ATH11K_PCI_WINDOW_VALUE_MASK, HAL_CE_WFSS_CE_REG_BASE);
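+	/* encode both static window indices into the window register fields */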
+ window = (umac_window << 12) | (ce_window << 6);
+
+ iowrite32(ATH11K_PCI_WINDOW_ENABLE_BIT | window,
+ ab_pci->ab->mem + ATH11K_PCI_WINDOW_REG_ADDRESS);
+}
+
+static void ath11k_pci_restore_window(struct ath11k_base *ab)
+{
+ struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
+
+ spin_lock_bh(&ab_pci->window_lock);
+
+ iowrite32(ATH11K_PCI_WINDOW_ENABLE_BIT | ab_pci->register_window,
+ ab->mem + ATH11K_PCI_WINDOW_REG_ADDRESS);
+ ioread32(ab->mem + ATH11K_PCI_WINDOW_REG_ADDRESS);
+
+ spin_unlock_bh(&ab_pci->window_lock);
+}
+
+static void ath11k_pci_soc_global_reset(struct ath11k_base *ab)
+{
+ u32 val, delay;
+
+ val = ath11k_pcic_read32(ab, PCIE_SOC_GLOBAL_RESET);
+
+ val |= PCIE_SOC_GLOBAL_RESET_V;
+
+ ath11k_pcic_write32(ab, PCIE_SOC_GLOBAL_RESET, val);
+
+ /* TODO: exact time to sleep is uncertain */
+ delay = 10;
+ mdelay(delay);
+
+ /* Need to toggle V bit back otherwise stuck in reset status */
+ val &= ~PCIE_SOC_GLOBAL_RESET_V;
+
+ ath11k_pcic_write32(ab, PCIE_SOC_GLOBAL_RESET, val);
+
+ mdelay(delay);
+
+ val = ath11k_pcic_read32(ab, PCIE_SOC_GLOBAL_RESET);
+ if (val == 0xffffffff)
+ ath11k_warn(ab, "link down error during global reset\n");
+
+	/* Restore the window register, as its content is cleared during
+	 * a hardware global reset, so that it stays in sync with the
+	 * host's cached value.
+	 */
+ ath11k_pci_restore_window(ab);
+}
+
+static void ath11k_pci_clear_dbg_registers(struct ath11k_base *ab)
+{
+ u32 val;
+
+ /* read cookie */
+ val = ath11k_pcic_read32(ab, PCIE_Q6_COOKIE_ADDR);
+ ath11k_dbg(ab, ATH11K_DBG_PCI, "pcie_q6_cookie_addr 0x%x\n", val);
+
+ val = ath11k_pcic_read32(ab, WLAON_WARM_SW_ENTRY);
+ ath11k_dbg(ab, ATH11K_DBG_PCI, "wlaon_warm_sw_entry 0x%x\n", val);
+
+ /* TODO: exact time to sleep is uncertain */
+ mdelay(10);
+
+	/* write 0 to WLAON_WARM_SW_ENTRY to prevent Q6 from continuing
+	 * the warm boot path and entering a dead loop.
+	 */
+ ath11k_pcic_write32(ab, WLAON_WARM_SW_ENTRY, 0);
+ mdelay(10);
+
+ val = ath11k_pcic_read32(ab, WLAON_WARM_SW_ENTRY);
+ ath11k_dbg(ab, ATH11K_DBG_PCI, "wlaon_warm_sw_entry 0x%x\n", val);
+
+	/* This is a read-to-clear register; reading it clears it, which
+	 * prevents Q6 from entering a wrong code path.
+	 */
+ val = ath11k_pcic_read32(ab, WLAON_SOC_RESET_CAUSE_REG);
+ ath11k_dbg(ab, ATH11K_DBG_PCI, "soc reset cause %d\n", val);
+}
+
+static int ath11k_pci_set_link_reg(struct ath11k_base *ab,
+ u32 offset, u32 value, u32 mask)
+{
+ u32 v;
+ int i;
+
+ v = ath11k_pcic_read32(ab, offset);
+ if ((v & mask) == value)
+ return 0;
+
+ for (i = 0; i < 10; i++) {
+ ath11k_pcic_write32(ab, offset, (v & ~mask) | value);
+
+ v = ath11k_pcic_read32(ab, offset);
+ if ((v & mask) == value)
+ return 0;
+
+ mdelay(2);
+ }
+
+ ath11k_warn(ab, "failed to set pcie link register 0x%08x: 0x%08x != 0x%08x\n",
+ offset, v & mask, value);
+
+ return -ETIMEDOUT;
+}
+
+static int ath11k_pci_fix_l1ss(struct ath11k_base *ab)
+{
+ int ret;
+
+ ret = ath11k_pci_set_link_reg(ab,
+ PCIE_QSERDES_COM_SYSCLK_EN_SEL_REG(ab),
+ PCIE_QSERDES_COM_SYSCLK_EN_SEL_VAL,
+ PCIE_QSERDES_COM_SYSCLK_EN_SEL_MSK);
+ if (ret) {
+ ath11k_warn(ab, "failed to set sysclk: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath11k_pci_set_link_reg(ab,
+ PCIE_PCS_OSC_DTCT_CONFIG1_REG(ab),
+ PCIE_PCS_OSC_DTCT_CONFIG1_VAL,
+ PCIE_PCS_OSC_DTCT_CONFIG_MSK);
+ if (ret) {
+ ath11k_warn(ab, "failed to set dtct config1 error: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath11k_pci_set_link_reg(ab,
+ PCIE_PCS_OSC_DTCT_CONFIG2_REG(ab),
+ PCIE_PCS_OSC_DTCT_CONFIG2_VAL,
+ PCIE_PCS_OSC_DTCT_CONFIG_MSK);
+ if (ret) {
+ ath11k_warn(ab, "failed to set dtct config2: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath11k_pci_set_link_reg(ab,
+ PCIE_PCS_OSC_DTCT_CONFIG4_REG(ab),
+ PCIE_PCS_OSC_DTCT_CONFIG4_VAL,
+ PCIE_PCS_OSC_DTCT_CONFIG_MSK);
+ if (ret) {
+ ath11k_warn(ab, "failed to set dtct config4: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void ath11k_pci_enable_ltssm(struct ath11k_base *ab)
+{
+ u32 val;
+ int i;
+
+ val = ath11k_pcic_read32(ab, PCIE_PCIE_PARF_LTSSM);
+
+	/* PCIe link seems very unstable after the Hot Reset */
+ for (i = 0; val != PARM_LTSSM_VALUE && i < 5; i++) {
+ if (val == 0xffffffff)
+ mdelay(5);
+
+ ath11k_pcic_write32(ab, PCIE_PCIE_PARF_LTSSM, PARM_LTSSM_VALUE);
+ val = ath11k_pcic_read32(ab, PCIE_PCIE_PARF_LTSSM);
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_PCI, "ltssm 0x%x\n", val);
+
+ val = ath11k_pcic_read32(ab, GCC_GCC_PCIE_HOT_RST);
+ val |= GCC_GCC_PCIE_HOT_RST_VAL;
+ ath11k_pcic_write32(ab, GCC_GCC_PCIE_HOT_RST, val);
+ val = ath11k_pcic_read32(ab, GCC_GCC_PCIE_HOT_RST);
+
+ ath11k_dbg(ab, ATH11K_DBG_PCI, "pcie_hot_rst 0x%x\n", val);
+
+ mdelay(5);
+}
+
+static void ath11k_pci_clear_all_intrs(struct ath11k_base *ab)
+{
+	/* This is a workaround for the PCIe Hot Reset: the target still
+	 * sets the interrupt when it receives a Hot Reset, so when SBL
+	 * is downloaded again it enables interrupts, receives the stale
+	 * one and crashes immediately.
+	 */
+ ath11k_pcic_write32(ab, PCIE_PCIE_INT_ALL_CLEAR, PCIE_INT_CLEAR_ALL);
+}
+
+static void ath11k_pci_set_wlaon_pwr_ctrl(struct ath11k_base *ab)
+{
+ u32 val;
+
+ val = ath11k_pcic_read32(ab, WLAON_QFPROM_PWR_CTRL_REG);
+ val &= ~QFPROM_PWR_CTRL_VDD4BLOW_MASK;
+ ath11k_pcic_write32(ab, WLAON_QFPROM_PWR_CTRL_REG, val);
+}
+
+static void ath11k_pci_force_wake(struct ath11k_base *ab)
+{
+ ath11k_pcic_write32(ab, PCIE_SOC_WAKE_PCIE_LOCAL_REG, 1);
+ mdelay(5);
+}
+
+static void ath11k_pci_sw_reset(struct ath11k_base *ab, bool power_on)
+{
+ mdelay(100);
+
+ if (power_on) {
+ ath11k_pci_enable_ltssm(ab);
+ ath11k_pci_clear_all_intrs(ab);
+ ath11k_pci_set_wlaon_pwr_ctrl(ab);
+ if (ab->hw_params.fix_l1ss)
+ ath11k_pci_fix_l1ss(ab);
+ }
+
+ ath11k_mhi_clear_vector(ab);
+ ath11k_pci_clear_dbg_registers(ab);
+ ath11k_pci_soc_global_reset(ab);
+ ath11k_mhi_set_mhictrl_reset(ab);
+}
+
+static void ath11k_pci_init_qmi_ce_config(struct ath11k_base *ab)
+{
+ struct ath11k_qmi_ce_cfg *cfg = &ab->qmi.ce_cfg;
+
+ cfg->tgt_ce = ab->hw_params.target_ce_config;
+ cfg->tgt_ce_len = ab->hw_params.target_ce_count;
+
+ cfg->svc_to_ce_map = ab->hw_params.svc_to_ce_map;
+ cfg->svc_to_ce_map_len = ab->hw_params.svc_to_ce_map_len;
+ ab->qmi.service_ins_id = ab->hw_params.qmi_service_ins_id;
+
+ ath11k_ce_get_shadow_config(ab, &cfg->shadow_reg_v2,
+ &cfg->shadow_reg_v2_len);
+}
+
+static void ath11k_pci_msi_config(struct ath11k_pci *ab_pci, bool enable)
+{
+ struct pci_dev *dev = ab_pci->pdev;
+ u16 control;
+
+ pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);
+
+ if (enable)
+ control |= PCI_MSI_FLAGS_ENABLE;
+ else
+ control &= ~PCI_MSI_FLAGS_ENABLE;
+
+ pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control);
+}
+
+static void ath11k_pci_msi_enable(struct ath11k_pci *ab_pci)
+{
+ ath11k_pci_msi_config(ab_pci, true);
+}
+
+static void ath11k_pci_msi_disable(struct ath11k_pci *ab_pci)
+{
+ ath11k_pci_msi_config(ab_pci, false);
+}
+
+static int ath11k_pci_alloc_msi(struct ath11k_pci *ab_pci)
+{
+ struct ath11k_base *ab = ab_pci->ab;
+ const struct ath11k_msi_config *msi_config = ab->pci.msi.config;
+ struct pci_dev *pci_dev = ab_pci->pdev;
+ struct msi_desc *msi_desc;
+ int num_vectors;
+ int ret;
+
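+	/* prefer the full vector set, fall back to a single shared MSI */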
+ num_vectors = pci_alloc_irq_vectors(pci_dev,
+ msi_config->total_vectors,
+ msi_config->total_vectors,
+ PCI_IRQ_MSI);
+ if (num_vectors == msi_config->total_vectors) {
+ set_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags);
+ } else {
+ num_vectors = pci_alloc_irq_vectors(ab_pci->pdev,
+ 1,
+ 1,
+ PCI_IRQ_MSI);
+ if (num_vectors < 0) {
+ ret = -EINVAL;
+ goto reset_msi_config;
+ }
+ clear_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags);
+ ab->pci.msi.config = &msi_config_one_msi;
+ ath11k_dbg(ab, ATH11K_DBG_PCI, "request one msi vector\n");
+ }
+ ath11k_info(ab, "MSI vectors: %d\n", num_vectors);
+
+ ath11k_pci_msi_disable(ab_pci);
+
+ msi_desc = irq_get_msi_desc(ab_pci->pdev->irq);
+ if (!msi_desc) {
+ ath11k_err(ab, "msi_desc is NULL!\n");
+ ret = -EINVAL;
+ goto free_msi_vector;
+ }
+
+ ab->pci.msi.ep_base_data = msi_desc->msg.data;
+
+ pci_read_config_dword(pci_dev, pci_dev->msi_cap + PCI_MSI_ADDRESS_LO,
+ &ab->pci.msi.addr_lo);
+
+ if (msi_desc->pci.msi_attrib.is_64) {
+ pci_read_config_dword(pci_dev, pci_dev->msi_cap + PCI_MSI_ADDRESS_HI,
+ &ab->pci.msi.addr_hi);
+ } else {
+ ab->pci.msi.addr_hi = 0;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_PCI, "msi base data is %d\n", ab->pci.msi.ep_base_data);
+
+ return 0;
+
+free_msi_vector:
+ pci_free_irq_vectors(ab_pci->pdev);
+
+reset_msi_config:
+ return ret;
+}
+
+static void ath11k_pci_free_msi(struct ath11k_pci *ab_pci)
+{
+ pci_free_irq_vectors(ab_pci->pdev);
+}
+
+static int ath11k_pci_config_msi_data(struct ath11k_pci *ab_pci)
+{
+ struct msi_desc *msi_desc;
+
+ msi_desc = irq_get_msi_desc(ab_pci->pdev->irq);
+ if (!msi_desc) {
+ ath11k_err(ab_pci->ab, "msi_desc is NULL!\n");
+ pci_free_irq_vectors(ab_pci->pdev);
+ return -EINVAL;
+ }
+
+ ab_pci->ab->pci.msi.ep_base_data = msi_desc->msg.data;
+
+ ath11k_dbg(ab_pci->ab, ATH11K_DBG_PCI, "after request_irq msi_ep_base_data %d\n",
+ ab_pci->ab->pci.msi.ep_base_data);
+
+ return 0;
+}
+
+static int ath11k_pci_claim(struct ath11k_pci *ab_pci, struct pci_dev *pdev)
+{
+ struct ath11k_base *ab = ab_pci->ab;
+ u16 device_id;
+ int ret = 0;
+
+ pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id);
+ if (device_id != ab_pci->dev_id) {
+ ath11k_err(ab, "pci device id mismatch: 0x%x 0x%x\n",
+ device_id, ab_pci->dev_id);
+ ret = -EIO;
+ goto out;
+ }
+
+ ret = pci_assign_resource(pdev, ATH11K_PCI_BAR_NUM);
+ if (ret) {
+ ath11k_err(ab, "failed to assign pci resource: %d\n", ret);
+ goto out;
+ }
+
+ ret = pci_enable_device(pdev);
+ if (ret) {
+ ath11k_err(ab, "failed to enable pci device: %d\n", ret);
+ goto out;
+ }
+
+ ret = pci_request_region(pdev, ATH11K_PCI_BAR_NUM, "ath11k_pci");
+ if (ret) {
+ ath11k_err(ab, "failed to request pci region: %d\n", ret);
+ goto disable_device;
+ }
+
+ ret = dma_set_mask(&pdev->dev,
+ DMA_BIT_MASK(ATH11K_PCI_DMA_MASK));
+ if (ret) {
+ ath11k_err(ab, "failed to set pci dma mask to %d: %d\n",
+ ATH11K_PCI_DMA_MASK, ret);
+ goto release_region;
+ }
+
+ ab_pci->dma_mask = DMA_BIT_MASK(ATH11K_PCI_DMA_MASK);
+
+ ret = dma_set_coherent_mask(&pdev->dev,
+ DMA_BIT_MASK(ATH11K_PCI_COHERENT_DMA_MASK));
+ if (ret) {
+ ath11k_err(ab, "failed to set pci coherent dma mask to %d: %d\n",
+ ATH11K_PCI_COHERENT_DMA_MASK, ret);
+ goto release_region;
+ }
+
+ pci_set_master(pdev);
+
+ ab->mem_len = pci_resource_len(pdev, ATH11K_PCI_BAR_NUM);
+ ab->mem = pci_iomap(pdev, ATH11K_PCI_BAR_NUM, 0);
+ if (!ab->mem) {
+ ath11k_err(ab, "failed to map pci bar %d\n", ATH11K_PCI_BAR_NUM);
+ ret = -EIO;
+ goto release_region;
+ }
+
+ ab->mem_ce = ab->mem;
+
+ ath11k_dbg(ab, ATH11K_DBG_BOOT, "pci_mem 0x%p\n", ab->mem);
+ return 0;
+
+release_region:
+ pci_release_region(pdev, ATH11K_PCI_BAR_NUM);
+disable_device:
+ pci_disable_device(pdev);
+out:
+ return ret;
+}
+
+static void ath11k_pci_free_region(struct ath11k_pci *ab_pci)
+{
+ struct ath11k_base *ab = ab_pci->ab;
+ struct pci_dev *pci_dev = ab_pci->pdev;
+
+ pci_iounmap(pci_dev, ab->mem);
+ ab->mem = NULL;
+ pci_release_region(pci_dev, ATH11K_PCI_BAR_NUM);
+ if (pci_is_enabled(pci_dev))
+ pci_disable_device(pci_dev);
+}
+
+static void ath11k_pci_aspm_disable(struct ath11k_pci *ab_pci)
+{
+ struct ath11k_base *ab = ab_pci->ab;
+
+ pcie_capability_read_word(ab_pci->pdev, PCI_EXP_LNKCTL,
+ &ab_pci->link_ctl);
+
+ ath11k_dbg(ab, ATH11K_DBG_PCI, "link_ctl 0x%04x L0s %d L1 %d\n",
+ ab_pci->link_ctl,
+ u16_get_bits(ab_pci->link_ctl, PCI_EXP_LNKCTL_ASPM_L0S),
+ u16_get_bits(ab_pci->link_ctl, PCI_EXP_LNKCTL_ASPM_L1));
+
+ /* disable L0s and L1 */
+ pcie_capability_clear_word(ab_pci->pdev, PCI_EXP_LNKCTL,
+ PCI_EXP_LNKCTL_ASPMC);
+
+ set_bit(ATH11K_PCI_ASPM_RESTORE, &ab_pci->flags);
+}
+
+static void ath11k_pci_aspm_restore(struct ath11k_pci *ab_pci)
+{
+ if (test_and_clear_bit(ATH11K_PCI_ASPM_RESTORE, &ab_pci->flags))
+ pcie_capability_clear_and_set_word(ab_pci->pdev, PCI_EXP_LNKCTL,
+ PCI_EXP_LNKCTL_ASPMC,
+ ab_pci->link_ctl &
+ PCI_EXP_LNKCTL_ASPMC);
+}
+
+#ifdef CONFIG_DEV_COREDUMP
+static int ath11k_pci_coredump_calculate_size(struct ath11k_base *ab, u32 *dump_seg_sz)
+{
+ struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
+ struct mhi_controller *mhi_ctrl = ab_pci->mhi_ctrl;
+ struct image_info *rddm_img, *fw_img;
+ struct ath11k_tlv_dump_data *dump_tlv;
+ enum ath11k_fw_crash_dump_type mem_type;
+ u32 len = 0, rddm_tlv_sz = 0, paging_tlv_sz = 0;
+ struct ath11k_dump_file_data *file_data;
+ int i;
+
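+	/* sum the TLV payload sizes per dump type: paging data, RDDM and
+	 * the QMI target memory segments
+	 */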
+ rddm_img = mhi_ctrl->rddm_image;
+ if (!rddm_img) {
+ ath11k_err(ab, "No RDDM dump found\n");
+ return 0;
+ }
+
+ fw_img = mhi_ctrl->fbc_image;
+
+	for (i = 0; i < fw_img->entries; i++) {
+ if (!fw_img->mhi_buf[i].buf)
+ continue;
+
+ paging_tlv_sz += fw_img->mhi_buf[i].len;
+ }
+ dump_seg_sz[FW_CRASH_DUMP_PAGING_DATA] = paging_tlv_sz;
+
+ for (i = 0; i < rddm_img->entries; i++) {
+ if (!rddm_img->mhi_buf[i].buf)
+ continue;
+
+ rddm_tlv_sz += rddm_img->mhi_buf[i].len;
+ }
+ dump_seg_sz[FW_CRASH_DUMP_RDDM_DATA] = rddm_tlv_sz;
+
+ for (i = 0; i < ab->qmi.mem_seg_count; i++) {
+ mem_type = ath11k_coredump_get_dump_type(ab->qmi.target_mem[i].type);
+
+ if (mem_type == FW_CRASH_DUMP_NONE)
+ continue;
+
+ if (mem_type == FW_CRASH_DUMP_TYPE_MAX) {
+ ath11k_dbg(ab, ATH11K_DBG_PCI,
+ "target mem region type %d not supported",
+ ab->qmi.target_mem[i].type);
+ continue;
+ }
+
+ if (!ab->qmi.target_mem[i].anyaddr)
+ continue;
+
+ dump_seg_sz[mem_type] += ab->qmi.target_mem[i].size;
+ }
+
+ for (i = 0; i < FW_CRASH_DUMP_TYPE_MAX; i++) {
+ if (!dump_seg_sz[i])
+ continue;
+
+ len += sizeof(*dump_tlv) + dump_seg_sz[i];
+ }
+
+ if (len)
+ len += sizeof(*file_data);
+
+ return len;
+}
+
+static void ath11k_pci_coredump_download(struct ath11k_base *ab)
+{
+ struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
+ struct mhi_controller *mhi_ctrl = ab_pci->mhi_ctrl;
+ struct image_info *rddm_img, *fw_img;
+ struct timespec64 timestamp;
+ int i, len, mem_idx;
+ enum ath11k_fw_crash_dump_type mem_type;
+ struct ath11k_dump_file_data *file_data;
+ struct ath11k_tlv_dump_data *dump_tlv;
+ size_t hdr_len = sizeof(*file_data);
+ void *buf;
+ u32 dump_seg_sz[FW_CRASH_DUMP_TYPE_MAX] = {};
+
+ ath11k_mhi_coredump(mhi_ctrl, false);
+
+ len = ath11k_pci_coredump_calculate_size(ab, dump_seg_sz);
+ if (!len) {
+ ath11k_warn(ab, "No crash dump data found for devcoredump");
+ return;
+ }
+
+ rddm_img = mhi_ctrl->rddm_image;
+ fw_img = mhi_ctrl->fbc_image;
+
+ /* dev_coredumpv() requires vmalloc data */
+ buf = vzalloc(len);
+ if (!buf)
+ return;
+
+ ab->dump_data = buf;
+ ab->ath11k_coredump_len = len;
+ file_data = ab->dump_data;
+ strscpy(file_data->df_magic, "ATH11K-FW-DUMP", sizeof(file_data->df_magic));
+ file_data->len = cpu_to_le32(len);
+ file_data->version = cpu_to_le32(ATH11K_FW_CRASH_DUMP_V2);
+ file_data->chip_id = cpu_to_le32(ab_pci->dev_id);
+ file_data->qrtr_id = cpu_to_le32(ab_pci->ab->qmi.service_ins_id);
+ file_data->bus_id = cpu_to_le32(pci_domain_nr(ab_pci->pdev->bus));
+ guid_gen(&file_data->guid);
+ ktime_get_real_ts64(&timestamp);
+ file_data->tv_sec = cpu_to_le64(timestamp.tv_sec);
+ file_data->tv_nsec = cpu_to_le64(timestamp.tv_nsec);
+ buf += hdr_len;
+ dump_tlv = buf;
+ dump_tlv->type = cpu_to_le32(FW_CRASH_DUMP_PAGING_DATA);
+ dump_tlv->tlv_len = cpu_to_le32(dump_seg_sz[FW_CRASH_DUMP_PAGING_DATA]);
+ buf += COREDUMP_TLV_HDR_SIZE;
+
+ /* append all segments together as they are all part of a single contiguous
+ * block of memory
+ */
+	for (i = 0; i < fw_img->entries; i++) {
+ if (!fw_img->mhi_buf[i].buf)
+ continue;
+
+ memcpy_fromio(buf, (void const __iomem *)fw_img->mhi_buf[i].buf,
+ fw_img->mhi_buf[i].len);
+ buf += fw_img->mhi_buf[i].len;
+ }
+
+ dump_tlv = buf;
+ dump_tlv->type = cpu_to_le32(FW_CRASH_DUMP_RDDM_DATA);
+ dump_tlv->tlv_len = cpu_to_le32(dump_seg_sz[FW_CRASH_DUMP_RDDM_DATA]);
+ buf += COREDUMP_TLV_HDR_SIZE;
+
+ /* append all segments together as they are all part of a single contiguous
+ * block of memory
+ */
+ for (i = 0; i < rddm_img->entries; i++) {
+ if (!rddm_img->mhi_buf[i].buf)
+ continue;
+
+ memcpy_fromio(buf, (void const __iomem *)rddm_img->mhi_buf[i].buf,
+ rddm_img->mhi_buf[i].len);
+ buf += rddm_img->mhi_buf[i].len;
+ }
+
+ mem_idx = FW_CRASH_DUMP_REMOTE_MEM_DATA;
+ for (; mem_idx < FW_CRASH_DUMP_TYPE_MAX; mem_idx++) {
+ if (mem_idx == FW_CRASH_DUMP_NONE)
+ continue;
+
+ for (i = 0; i < ab->qmi.mem_seg_count; i++) {
+ mem_type = ath11k_coredump_get_dump_type
+ (ab->qmi.target_mem[i].type);
+
+ if (mem_type != mem_idx)
+ continue;
+
+ if (!ab->qmi.target_mem[i].anyaddr) {
+ ath11k_dbg(ab, ATH11K_DBG_PCI,
+ "Skipping mem region type %d",
+ ab->qmi.target_mem[i].type);
+ continue;
+ }
+
+ dump_tlv = buf;
+ dump_tlv->type = cpu_to_le32(mem_idx);
+ dump_tlv->tlv_len = cpu_to_le32(dump_seg_sz[mem_idx]);
+ buf += COREDUMP_TLV_HDR_SIZE;
+
+ memcpy_fromio(buf, ab->qmi.target_mem[i].iaddr,
+ ab->qmi.target_mem[i].size);
+
+ buf += ab->qmi.target_mem[i].size;
+ }
+ }
+
+ queue_work(ab->workqueue, &ab->dump_work);
+}
+#endif
+
+static int ath11k_pci_power_up(struct ath11k_base *ab)
+{
+ struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
+ int ret;
+
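+	/* invalidate the cached window so the first access reselects it */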
+ ab_pci->register_window = 0;
+ clear_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags);
+ ath11k_pci_sw_reset(ab_pci->ab, true);
+
+ /* Disable ASPM during firmware download due to problems switching
+ * to AMSS state.
+ */
+ ath11k_pci_aspm_disable(ab_pci);
+
+ ath11k_pci_msi_enable(ab_pci);
+
+ ret = ath11k_mhi_start(ab_pci);
+ if (ret) {
+ ath11k_err(ab, "failed to start mhi: %d\n", ret);
+ return ret;
+ }
+
+ if (ab->hw_params.static_window_map)
+ ath11k_pci_select_static_window(ab_pci);
+
+ return 0;
+}
+
+static void ath11k_pci_power_down(struct ath11k_base *ab, bool is_suspend)
+{
+ struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
+
+ /* restore aspm in case firmware bootup fails */
+ ath11k_pci_aspm_restore(ab_pci);
+
+ ath11k_pci_force_wake(ab_pci->ab);
+
+ ath11k_pci_msi_disable(ab_pci);
+
+ ath11k_mhi_stop(ab_pci, is_suspend);
+ clear_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags);
+ ath11k_pci_sw_reset(ab_pci->ab, false);
+}
+
+static int ath11k_pci_hif_suspend(struct ath11k_base *ab)
+{
+ struct ath11k_pci *ar_pci = ath11k_pci_priv(ab);
+
+ return ath11k_mhi_suspend(ar_pci);
+}
+
+static int ath11k_pci_hif_resume(struct ath11k_base *ab)
+{
+ struct ath11k_pci *ar_pci = ath11k_pci_priv(ab);
+
+ return ath11k_mhi_resume(ar_pci);
+}
+
+static void ath11k_pci_hif_ce_irq_enable(struct ath11k_base *ab)
+{
+ ath11k_pcic_ce_irqs_enable(ab);
+}
+
+static void ath11k_pci_hif_ce_irq_disable(struct ath11k_base *ab)
+{
+ ath11k_pcic_ce_irq_disable_sync(ab);
+}
+
+static int ath11k_pci_start(struct ath11k_base *ab)
+{
+ struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
+
+ /* TODO: for now don't restore ASPM in case of single MSI
+ * vector as MHI register reading in M2 causes system hang.
+ */
+ if (test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
+ ath11k_pci_aspm_restore(ab_pci);
+ else
+ ath11k_info(ab, "leaving PCI ASPM disabled to avoid MHI M2 problems\n");
+
+ ath11k_pcic_start(ab);
+
+ return 0;
+}
+
+static const struct ath11k_hif_ops ath11k_pci_hif_ops = {
+ .start = ath11k_pci_start,
+ .stop = ath11k_pcic_stop,
+ .read32 = ath11k_pcic_read32,
+ .write32 = ath11k_pcic_write32,
+ .read = ath11k_pcic_read,
+ .power_down = ath11k_pci_power_down,
+ .power_up = ath11k_pci_power_up,
+ .suspend = ath11k_pci_hif_suspend,
+ .resume = ath11k_pci_hif_resume,
+ .irq_enable = ath11k_pcic_ext_irq_enable,
+ .irq_disable = ath11k_pcic_ext_irq_disable,
+ .get_msi_address = ath11k_pcic_get_msi_address,
+ .get_user_msi_vector = ath11k_pcic_get_user_msi_assignment,
+ .map_service_to_pipe = ath11k_pcic_map_service_to_pipe,
+ .ce_irq_enable = ath11k_pci_hif_ce_irq_enable,
+ .ce_irq_disable = ath11k_pci_hif_ce_irq_disable,
+ .get_ce_msi_idx = ath11k_pcic_get_ce_msi_idx,
+#ifdef CONFIG_DEV_COREDUMP
+ .coredump_download = ath11k_pci_coredump_download,
+#endif
+};
+
+static void ath11k_pci_read_hw_version(struct ath11k_base *ab, u32 *major, u32 *minor)
+{
+ u32 soc_hw_version;
+
+ soc_hw_version = ath11k_pcic_read32(ab, TCSR_SOC_HW_VERSION);
+ *major = FIELD_GET(TCSR_SOC_HW_VERSION_MAJOR_MASK,
+ soc_hw_version);
+ *minor = FIELD_GET(TCSR_SOC_HW_VERSION_MINOR_MASK,
+ soc_hw_version);
+
+ ath11k_dbg(ab, ATH11K_DBG_PCI, "tcsr_soc_hw_version major %d minor %d\n",
+ *major, *minor);
+}
+
+static int ath11k_pci_set_irq_affinity_hint(struct ath11k_pci *ab_pci,
+ const struct cpumask *m)
+{
+ if (test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab_pci->ab->dev_flags))
+ return 0;
+
+ return irq_set_affinity_and_hint(ab_pci->pdev->irq, m);
+}
+
+static int ath11k_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *pci_dev)
+{
+ struct ath11k_base *ab;
+ struct ath11k_pci *ab_pci;
+ u32 soc_hw_version_major, soc_hw_version_minor;
+ int ret;
+ u32 sub_version;
+
+ ab = ath11k_core_alloc(&pdev->dev, sizeof(*ab_pci), ATH11K_BUS_PCI);
+
+ if (!ab) {
+ dev_err(&pdev->dev, "failed to allocate ath11k base\n");
+ return -ENOMEM;
+ }
+
+ ab->dev = &pdev->dev;
+ pci_set_drvdata(pdev, ab);
+ ab_pci = ath11k_pci_priv(ab);
+ ab_pci->dev_id = pci_dev->device;
+ ab_pci->ab = ab;
+ ab_pci->pdev = pdev;
+ ab->hif.ops = &ath11k_pci_hif_ops;
+ ab->fw_mode = ATH11K_FIRMWARE_MODE_NORMAL;
+ spin_lock_init(&ab_pci->window_lock);
+
+ /* Set fixed_mem_region to true for platforms that support reserved
+ * memory from DT. If memory is reserved from DT for the firmware, the
+ * ath11k driver need not allocate it.
+ */
+ if (of_property_present(ab->dev->of_node, "memory-region"))
+ set_bit(ATH11K_FLAG_FIXED_MEM_RGN, &ab->dev_flags);
+
+ ret = ath11k_pci_claim(ab_pci, pdev);
+ if (ret) {
+ ath11k_err(ab, "failed to claim device: %d\n", ret);
+ goto err_free_core;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_BOOT, "pci probe %04x:%04x %04x:%04x\n",
+ pdev->vendor, pdev->device,
+ pdev->subsystem_vendor, pdev->subsystem_device);
+
+ ab->id.vendor = pdev->vendor;
+ ab->id.device = pdev->device;
+ ab->id.subsystem_vendor = pdev->subsystem_vendor;
+ ab->id.subsystem_device = pdev->subsystem_device;
+
+ switch (pci_dev->device) {
+ case QCA6390_DEVICE_ID:
+ ret = ath11k_pcic_register_pci_ops(ab, &ath11k_pci_ops_qca6390);
+ if (ret) {
+ ath11k_err(ab, "failed to register PCI ops: %d\n", ret);
+ goto err_pci_free_region;
+ }
+
+ ath11k_pci_read_hw_version(ab, &soc_hw_version_major,
+ &soc_hw_version_minor);
+ switch (soc_hw_version_major) {
+ case 2:
+ ab->hw_rev = ATH11K_HW_QCA6390_HW20;
+ break;
+ default:
+ dev_err(&pdev->dev, "Unsupported QCA6390 SOC hardware version: %d %d\n",
+ soc_hw_version_major, soc_hw_version_minor);
+ ret = -EOPNOTSUPP;
+ goto err_pci_free_region;
+ }
+
+ break;
+ case QCN9074_DEVICE_ID:
+ ret = ath11k_pcic_register_pci_ops(ab, &ath11k_pci_ops_qcn9074);
+ if (ret) {
+ ath11k_err(ab, "failed to register PCI ops: %d\n", ret);
+ goto err_pci_free_region;
+ }
+ ab->hw_rev = ATH11K_HW_QCN9074_HW10;
+ break;
+ case WCN6855_DEVICE_ID:
+ ret = ath11k_pcic_register_pci_ops(ab, &ath11k_pci_ops_qca6390);
+ if (ret) {
+ ath11k_err(ab, "failed to register PCI ops: %d\n", ret);
+ goto err_pci_free_region;
+ }
+ ab->id.bdf_search = ATH11K_BDF_SEARCH_BUS_AND_BOARD;
+ ath11k_pci_read_hw_version(ab, &soc_hw_version_major,
+ &soc_hw_version_minor);
+ switch (soc_hw_version_major) {
+ case 2:
+ switch (soc_hw_version_minor) {
+ case 0x00:
+ case 0x01:
+ ab->hw_rev = ATH11K_HW_WCN6855_HW20;
+ break;
+ case 0x10:
+ case 0x11:
+ sub_version = ath11k_pcic_read32(ab, TCSR_SOC_HW_SUB_VER);
+ ath11k_dbg(ab, ATH11K_DBG_PCI, "sub_version 0x%x\n",
+ sub_version);
+ switch (sub_version) {
+ case 0x1019A0E1:
+ case 0x1019B0E1:
+ case 0x1019C0E1:
+ case 0x1019D0E1:
+ ab->hw_rev = ATH11K_HW_QCA2066_HW21;
+ break;
+ case 0x001e60e1:
+ ab->hw_rev = ATH11K_HW_QCA6698AQ_HW21;
+ break;
+ default:
+ ab->hw_rev = ATH11K_HW_WCN6855_HW21;
+ }
+ break;
+ default:
+ goto unsupported_wcn6855_soc;
+ }
+ break;
+ default:
+unsupported_wcn6855_soc:
+ dev_err(&pdev->dev, "Unsupported WCN6855 SOC hardware version: %d %d\n",
+ soc_hw_version_major, soc_hw_version_minor);
+ ret = -EOPNOTSUPP;
+ goto err_pci_free_region;
+ }
+
+ break;
+ default:
+ dev_err(&pdev->dev, "Unknown PCI device found: 0x%x\n",
+ pci_dev->device);
+ ret = -EOPNOTSUPP;
+ goto err_pci_free_region;
+ }
+
+ ret = ath11k_pcic_init_msi_config(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to init msi config: %d\n", ret);
+ goto err_pci_free_region;
+ }
+
+ ret = ath11k_pci_alloc_msi(ab_pci);
+ if (ret) {
+ ath11k_err(ab, "failed to enable msi: %d\n", ret);
+ goto err_pci_free_region;
+ }
+
+ ret = ath11k_core_pre_init(ab);
+ if (ret)
+ goto err_pci_disable_msi;
+
+ ret = ath11k_pci_set_irq_affinity_hint(ab_pci, cpumask_of(0));
+ if (ret) {
+ ath11k_err(ab, "failed to set irq affinity %d\n", ret);
+ goto err_pci_disable_msi;
+ }
+
+ ret = ath11k_mhi_register(ab_pci);
+ if (ret) {
+ ath11k_err(ab, "failed to register mhi: %d\n", ret);
+ goto err_irq_affinity_cleanup;
+ }
+
+ ret = ath11k_hal_srng_init(ab);
+ if (ret)
+ goto err_mhi_unregister;
+
+ ret = ath11k_ce_alloc_pipes(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to allocate ce pipes: %d\n", ret);
+ goto err_hal_srng_deinit;
+ }
+
+ ath11k_pci_init_qmi_ce_config(ab);
+
+ ret = ath11k_pcic_config_irq(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to config irq: %d\n", ret);
+ goto err_ce_free;
+ }
+
+ /* The kernel may allocate a dummy vector before request_irq and
+ * then allocate a real vector when request_irq is called.
+ * So get msi_data here again to avoid a spurious interrupt,
+ * as msi_data will be configured for the srngs.
+ */
+ ret = ath11k_pci_config_msi_data(ab_pci);
+ if (ret) {
+ ath11k_err(ab, "failed to config msi_data: %d\n", ret);
+ goto err_free_irq;
+ }
+
+ ret = ath11k_core_init(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to init core: %d\n", ret);
+ goto err_free_irq;
+ }
+ ath11k_qmi_fwreset_from_cold_boot(ab);
+ return 0;
+
+err_free_irq:
+ /* __free_irq() expects the caller to have cleared the affinity hint */
+ ath11k_pci_set_irq_affinity_hint(ab_pci, NULL);
+ ath11k_pcic_free_irq(ab);
+
+err_ce_free:
+ ath11k_ce_free_pipes(ab);
+
+err_hal_srng_deinit:
+ ath11k_hal_srng_deinit(ab);
+
+err_mhi_unregister:
+ ath11k_mhi_unregister(ab_pci);
+
+err_irq_affinity_cleanup:
+ ath11k_pci_set_irq_affinity_hint(ab_pci, NULL);
+
+err_pci_disable_msi:
+ ath11k_pci_free_msi(ab_pci);
+
+err_pci_free_region:
+ ath11k_pci_free_region(ab_pci);
+
+err_free_core:
+ ath11k_core_free(ab);
+
+ return ret;
+}
+
+static void ath11k_pci_remove(struct pci_dev *pdev)
+{
+ struct ath11k_base *ab = pci_get_drvdata(pdev);
+ struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
+
+ ath11k_pci_set_irq_affinity_hint(ab_pci, NULL);
+
+ if (test_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags)) {
+ ath11k_pci_power_down(ab, false);
+ ath11k_debugfs_soc_destroy(ab);
+ ath11k_qmi_deinit_service(ab);
+ ath11k_core_pm_notifier_unregister(ab);
+ goto qmi_fail;
+ }
+
+ set_bit(ATH11K_FLAG_UNREGISTERING, &ab->dev_flags);
+
+ cancel_work_sync(&ab->reset_work);
+ cancel_work_sync(&ab->dump_work);
+ ath11k_core_deinit(ab);
+
+qmi_fail:
+ ath11k_fw_destroy(ab);
+ ath11k_mhi_unregister(ab_pci);
+
+ ath11k_pcic_free_irq(ab);
+ ath11k_pci_free_msi(ab_pci);
+ ath11k_pci_free_region(ab_pci);
+
+ ath11k_hal_srng_deinit(ab);
+ ath11k_ce_free_pipes(ab);
+ ath11k_core_free(ab);
+}
+
+static void ath11k_pci_shutdown(struct pci_dev *pdev)
+{
+ struct ath11k_base *ab = pci_get_drvdata(pdev);
+ struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
+
+ ath11k_pci_set_irq_affinity_hint(ab_pci, NULL);
+ ath11k_pci_power_down(ab, false);
+}
+
+static __maybe_unused int ath11k_pci_pm_suspend(struct device *dev)
+{
+ struct ath11k_base *ab = dev_get_drvdata(dev);
+ int ret;
+
+ if (test_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags)) {
+ ath11k_dbg(ab, ATH11K_DBG_BOOT, "boot skipping pci suspend as qmi is not initialised\n");
+ return 0;
+ }
+
+ ret = ath11k_core_suspend(ab);
+ if (ret)
+ ath11k_warn(ab, "failed to suspend core: %d\n", ret);
+
+ return 0;
+}
+
+static __maybe_unused int ath11k_pci_pm_resume(struct device *dev)
+{
+ struct ath11k_base *ab = dev_get_drvdata(dev);
+ int ret;
+
+ if (test_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags)) {
+ ath11k_dbg(ab, ATH11K_DBG_BOOT, "boot skipping pci resume as qmi is not initialised\n");
+ return 0;
+ }
+
+ ret = ath11k_core_resume(ab);
+ if (ret)
+ ath11k_warn(ab, "failed to resume core: %d\n", ret);
+
+ return ret;
+}
+
+static __maybe_unused int ath11k_pci_pm_suspend_late(struct device *dev)
+{
+ struct ath11k_base *ab = dev_get_drvdata(dev);
+ int ret;
+
+ ret = ath11k_core_suspend_late(ab);
+ if (ret)
+ ath11k_warn(ab, "failed to late suspend core: %d\n", ret);
+
+ /* Similar to ath11k_pci_pm_suspend(), we return success here
+ * even if an error happens, to allow system suspend/hibernation
+ * to survive.
+ */
+ return 0;
+}
+
+static __maybe_unused int ath11k_pci_pm_resume_early(struct device *dev)
+{
+ struct ath11k_base *ab = dev_get_drvdata(dev);
+ int ret;
+
+ ret = ath11k_core_resume_early(ab);
+ if (ret)
+ ath11k_warn(ab, "failed to early resume core: %d\n", ret);
+
+ return ret;
+}
+
+static const struct dev_pm_ops __maybe_unused ath11k_pci_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(ath11k_pci_pm_suspend,
+ ath11k_pci_pm_resume)
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(ath11k_pci_pm_suspend_late,
+ ath11k_pci_pm_resume_early)
+};
+
+static struct pci_driver ath11k_pci_driver = {
+ .name = "ath11k_pci",
+ .id_table = ath11k_pci_id_table,
+ .probe = ath11k_pci_probe,
+ .remove = ath11k_pci_remove,
+ .shutdown = ath11k_pci_shutdown,
+#ifdef CONFIG_PM
+ .driver.pm = &ath11k_pci_pm_ops,
+#endif
+};
+
+static int ath11k_pci_init(void)
+{
+ int ret;
+
+ ret = pci_register_driver(&ath11k_pci_driver);
+ if (ret)
+ pr_err("failed to register ath11k pci driver: %d\n",
+ ret);
+
+ return ret;
+}
+module_init(ath11k_pci_init);
+
+static void ath11k_pci_exit(void)
+{
+ pci_unregister_driver(&ath11k_pci_driver);
+}
+
+module_exit(ath11k_pci_exit);
+
+MODULE_DESCRIPTION("Driver support for Qualcomm Technologies PCIe 802.11ax WLAN devices");
+MODULE_LICENSE("Dual BSD/GPL");
+
+/* firmware files */
+MODULE_FIRMWARE(ATH11K_FW_DIR "/QCA6390/hw2.0/*");
+MODULE_FIRMWARE(ATH11K_FW_DIR "/QCN9074/hw1.0/*");
+MODULE_FIRMWARE(ATH11K_FW_DIR "/WCN6855/hw2.0/*");
+MODULE_FIRMWARE(ATH11K_FW_DIR "/WCN6855/hw2.1/*");
diff --git a/drivers/net/wireless/ath/ath11k/pci.h b/drivers/net/wireless/ath/ath11k/pci.h
new file mode 100644
index 000000000000..1e3005a4b64c
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/pci.h
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+#ifndef _ATH11K_PCI_H
+#define _ATH11K_PCI_H
+
+#include <linux/mhi.h>
+
+#include "core.h"
+
+#define PCIE_SOC_GLOBAL_RESET 0x3008
+#define PCIE_SOC_GLOBAL_RESET_V 1
+
+#define WLAON_WARM_SW_ENTRY 0x1f80504
+#define WLAON_SOC_RESET_CAUSE_REG 0x01f8060c
+
+#define PCIE_Q6_COOKIE_ADDR 0x01f80500
+#define PCIE_Q6_COOKIE_DATA 0xc0000000
+
+/* register to wake the UMAC from power collapse */
+#define PCIE_SCRATCH_0_SOC_PCIE_REG 0x4040
+
+/* register used for handshake mechanism to validate UMAC is awake */
+#define PCIE_SOC_WAKE_PCIE_LOCAL_REG 0x3004
+
+#define PCIE_PCIE_PARF_LTSSM 0x1e081b0
+#define PARM_LTSSM_VALUE 0x111
+
+#define GCC_GCC_PCIE_HOT_RST 0x1e402bc
+#define GCC_GCC_PCIE_HOT_RST_VAL 0x10
+
+#define PCIE_PCIE_INT_ALL_CLEAR 0x1e08228
+#define PCIE_SMLH_REQ_RST_LINK_DOWN 0x2
+#define PCIE_INT_CLEAR_ALL 0xffffffff
+
+#define PCIE_QSERDES_COM_SYSCLK_EN_SEL_REG(ab) \
+ ((ab)->hw_params.regs->pcie_qserdes_sysclk_en_sel)
+#define PCIE_QSERDES_COM_SYSCLK_EN_SEL_VAL 0x10
+#define PCIE_QSERDES_COM_SYSCLK_EN_SEL_MSK 0xffffffff
+#define PCIE_PCS_OSC_DTCT_CONFIG1_REG(ab) \
+ ((ab)->hw_params.regs->pcie_pcs_osc_dtct_config_base)
+#define PCIE_PCS_OSC_DTCT_CONFIG1_VAL 0x02
+#define PCIE_PCS_OSC_DTCT_CONFIG2_REG(ab) \
+ ((ab)->hw_params.regs->pcie_pcs_osc_dtct_config_base + 0x4)
+#define PCIE_PCS_OSC_DTCT_CONFIG2_VAL 0x52
+#define PCIE_PCS_OSC_DTCT_CONFIG4_REG(ab) \
+ ((ab)->hw_params.regs->pcie_pcs_osc_dtct_config_base + 0xc)
+#define PCIE_PCS_OSC_DTCT_CONFIG4_VAL 0xff
+#define PCIE_PCS_OSC_DTCT_CONFIG_MSK 0x000000ff
+
+#define WLAON_QFPROM_PWR_CTRL_REG 0x01f8031c
+#define QFPROM_PWR_CTRL_VDD4BLOW_MASK 0x4
+
+enum ath11k_pci_flags {
+ ATH11K_PCI_ASPM_RESTORE,
+};
+
+struct ath11k_pci {
+ struct pci_dev *pdev;
+ struct ath11k_base *ab;
+ u16 dev_id;
+ char amss_path[100];
+ struct mhi_controller *mhi_ctrl;
+ const struct ath11k_msi_config *msi_config;
+ enum mhi_callback mhi_pre_cb;
+ u32 register_window;
+
+ /* protects register_window above */
+ spinlock_t window_lock;
+
+ /* enum ath11k_pci_flags */
+ unsigned long flags;
+ u16 link_ctl;
+ u64 dma_mask;
+};
+
+static inline struct ath11k_pci *ath11k_pci_priv(struct ath11k_base *ab)
+{
+ return (struct ath11k_pci *)ab->drv_priv;
+}
+
+int ath11k_pci_get_msi_irq(struct ath11k_base *ab, unsigned int vector);
+#endif
diff --git a/drivers/net/wireless/ath/ath11k/pcic.c b/drivers/net/wireless/ath/ath11k/pcic.c
new file mode 100644
index 000000000000..fc6e7da05c60
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/pcic.c
@@ -0,0 +1,865 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#include <linux/export.h>
+#include "core.h"
+#include "pcic.h"
+#include "debug.h"
+
+static const char *irq_name[ATH11K_IRQ_NUM_MAX] = {
+ "bhi",
+ "mhi-er0",
+ "mhi-er1",
+ "ce0",
+ "ce1",
+ "ce2",
+ "ce3",
+ "ce4",
+ "ce5",
+ "ce6",
+ "ce7",
+ "ce8",
+ "ce9",
+ "ce10",
+ "ce11",
+ "host2wbm-desc-feed",
+ "host2reo-re-injection",
+ "host2reo-command",
+ "host2rxdma-monitor-ring3",
+ "host2rxdma-monitor-ring2",
+ "host2rxdma-monitor-ring1",
+ "reo2ost-exception",
+ "wbm2host-rx-release",
+ "reo2host-status",
+ "reo2host-destination-ring4",
+ "reo2host-destination-ring3",
+ "reo2host-destination-ring2",
+ "reo2host-destination-ring1",
+ "rxdma2host-monitor-destination-mac3",
+ "rxdma2host-monitor-destination-mac2",
+ "rxdma2host-monitor-destination-mac1",
+ "ppdu-end-interrupts-mac3",
+ "ppdu-end-interrupts-mac2",
+ "ppdu-end-interrupts-mac1",
+ "rxdma2host-monitor-status-ring-mac3",
+ "rxdma2host-monitor-status-ring-mac2",
+ "rxdma2host-monitor-status-ring-mac1",
+ "host2rxdma-host-buf-ring-mac3",
+ "host2rxdma-host-buf-ring-mac2",
+ "host2rxdma-host-buf-ring-mac1",
+ "rxdma2host-destination-ring-mac3",
+ "rxdma2host-destination-ring-mac2",
+ "rxdma2host-destination-ring-mac1",
+ "host2tcl-input-ring4",
+ "host2tcl-input-ring3",
+ "host2tcl-input-ring2",
+ "host2tcl-input-ring1",
+ "wbm2host-tx-completions-ring3",
+ "wbm2host-tx-completions-ring2",
+ "wbm2host-tx-completions-ring1",
+ "tcl2host-status-ring",
+};
+
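+/* Per-chip MSI layout: each entry gives the total number of vectors and
+ * how they are split between users such as MHI, CE, WAKE and DP.
+ */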
+static const struct ath11k_msi_config ath11k_msi_config[] = {
+ {
+ .total_vectors = 32,
+ .total_users = 4,
+ .users = (struct ath11k_msi_user[]) {
+ { .name = "MHI", .num_vectors = 3, .base_vector = 0 },
+ { .name = "CE", .num_vectors = 10, .base_vector = 3 },
+ { .name = "WAKE", .num_vectors = 1, .base_vector = 13 },
+ { .name = "DP", .num_vectors = 18, .base_vector = 14 },
+ },
+ .hw_rev = ATH11K_HW_QCA6390_HW20,
+ },
+ {
+ .total_vectors = 16,
+ .total_users = 3,
+ .users = (struct ath11k_msi_user[]) {
+ { .name = "MHI", .num_vectors = 3, .base_vector = 0 },
+ { .name = "CE", .num_vectors = 5, .base_vector = 3 },
+ { .name = "DP", .num_vectors = 8, .base_vector = 8 },
+ },
+ .hw_rev = ATH11K_HW_QCN9074_HW10,
+ },
+ {
+ .total_vectors = 32,
+ .total_users = 4,
+ .users = (struct ath11k_msi_user[]) {
+ { .name = "MHI", .num_vectors = 3, .base_vector = 0 },
+ { .name = "CE", .num_vectors = 10, .base_vector = 3 },
+ { .name = "WAKE", .num_vectors = 1, .base_vector = 13 },
+ { .name = "DP", .num_vectors = 18, .base_vector = 14 },
+ },
+ .hw_rev = ATH11K_HW_WCN6855_HW20,
+ },
+ {
+ .total_vectors = 32,
+ .total_users = 4,
+ .users = (struct ath11k_msi_user[]) {
+ { .name = "MHI", .num_vectors = 3, .base_vector = 0 },
+ { .name = "CE", .num_vectors = 10, .base_vector = 3 },
+ { .name = "WAKE", .num_vectors = 1, .base_vector = 13 },
+ { .name = "DP", .num_vectors = 18, .base_vector = 14 },
+ },
+ .hw_rev = ATH11K_HW_WCN6855_HW21,
+ },
+ {
+ .total_vectors = 28,
+ .total_users = 2,
+ .users = (struct ath11k_msi_user[]) {
+ { .name = "CE", .num_vectors = 10, .base_vector = 0 },
+ { .name = "DP", .num_vectors = 18, .base_vector = 10 },
+ },
+ .hw_rev = ATH11K_HW_WCN6750_HW10,
+ },
+ {
+ .total_vectors = 32,
+ .total_users = 4,
+ .users = (struct ath11k_msi_user[]) {
+ { .name = "MHI", .num_vectors = 3, .base_vector = 0 },
+ { .name = "CE", .num_vectors = 10, .base_vector = 3 },
+ { .name = "WAKE", .num_vectors = 1, .base_vector = 13 },
+ { .name = "DP", .num_vectors = 18, .base_vector = 14 },
+ },
+ .hw_rev = ATH11K_HW_QCA2066_HW21,
+ },
+ {
+ .total_vectors = 32,
+ .total_users = 4,
+ .users = (struct ath11k_msi_user[]) {
+ { .name = "MHI", .num_vectors = 3, .base_vector = 0 },
+ { .name = "CE", .num_vectors = 10, .base_vector = 3 },
+ { .name = "WAKE", .num_vectors = 1, .base_vector = 13 },
+ { .name = "DP", .num_vectors = 18, .base_vector = 14 },
+ },
+ .hw_rev = ATH11K_HW_QCA6698AQ_HW21,
+ },
+};
+
+int ath11k_pcic_init_msi_config(struct ath11k_base *ab)
+{
+ const struct ath11k_msi_config *msi_config;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ath11k_msi_config); i++) {
+ msi_config = &ath11k_msi_config[i];
+
+ if (msi_config->hw_rev == ab->hw_rev)
+ break;
+ }
+
+ if (i == ARRAY_SIZE(ath11k_msi_config)) {
+ ath11k_err(ab, "failed to fetch msi config, unsupported hw version: 0x%x\n",
+ ab->hw_rev);
+ return -EINVAL;
+ }
+
+ ab->pci.msi.config = msi_config;
+ return 0;
+}
+EXPORT_SYMBOL(ath11k_pcic_init_msi_config);
+
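+/* Offsets below ATH11K_PCI_WINDOW_START are mapped directly in the BAR;
+ * anything above it must go through the chip-specific window ops.
+ */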
+static void __ath11k_pcic_write32(struct ath11k_base *ab, u32 offset, u32 value)
+{
+ if (offset < ATH11K_PCI_WINDOW_START)
+ iowrite32(value, ab->mem + offset);
+ else
+ ab->pci.ops->window_write32(ab, offset, value);
+}
+
+void ath11k_pcic_write32(struct ath11k_base *ab, u32 offset, u32 value)
+{
+ int ret = 0;
+ bool wakeup_required;
+
+ /* For an offset beyond BAR + 4K - 32, we may need to wake up
+ * the device to access it.
+ */
+ wakeup_required = test_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags) &&
+ offset >= ATH11K_PCI_ACCESS_ALWAYS_OFF;
+ if (wakeup_required && ab->pci.ops->wakeup)
+ ret = ab->pci.ops->wakeup(ab);
+
+ __ath11k_pcic_write32(ab, offset, value);
+
+ if (wakeup_required && !ret && ab->pci.ops->release)
+ ab->pci.ops->release(ab);
+}
+EXPORT_SYMBOL(ath11k_pcic_write32);
+
+static u32 __ath11k_pcic_read32(struct ath11k_base *ab, u32 offset)
+{
+ u32 val;
+
+ if (offset < ATH11K_PCI_WINDOW_START)
+ val = ioread32(ab->mem + offset);
+ else
+ val = ab->pci.ops->window_read32(ab, offset);
+
+ return val;
+}
+
+u32 ath11k_pcic_read32(struct ath11k_base *ab, u32 offset)
+{
+ int ret = 0;
+ u32 val;
+ bool wakeup_required;
+
+ /* For an offset beyond BAR + 4K - 32, we may need to wake up
+ * the device to access it.
+ */
+ wakeup_required = test_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags) &&
+ offset >= ATH11K_PCI_ACCESS_ALWAYS_OFF;
+ if (wakeup_required && ab->pci.ops->wakeup)
+ ret = ab->pci.ops->wakeup(ab);
+
+ val = __ath11k_pcic_read32(ab, offset);
+
+ if (wakeup_required && !ret && ab->pci.ops->release)
+ ab->pci.ops->release(ab);
+
+ return val;
+}
+EXPORT_SYMBOL(ath11k_pcic_read32);
+
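+/* Copy the register range [start, end] into buf as consecutive 32-bit
+ * reads, waking the device first if the range reaches beyond the
+ * always-accessible region.
+ */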
+int ath11k_pcic_read(struct ath11k_base *ab, void *buf, u32 start, u32 end)
+{
+ int ret = 0;
+ bool wakeup_required;
+ u32 *data = buf;
+ u32 i;
+
+ /* For an offset beyond BAR + 4K - 32, we may need to wake up
+ * the device to access it.
+ */
+ wakeup_required = test_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags) &&
+ end >= ATH11K_PCI_ACCESS_ALWAYS_OFF;
+ if (wakeup_required && ab->pci.ops->wakeup) {
+ ret = ab->pci.ops->wakeup(ab);
+ if (ret) {
+ ath11k_warn(ab,
+ "wakeup failed, data may be invalid: %d",
+ ret);
+ /* Even though wakeup() failed, continue processing rather
+ * than returning because some parts of the data may still
+ * be valid and useful in some cases, e.g. they could give
+ * us some clues on a firmware crash.
+ * Being misled by invalid data can be avoided because we
+ * are aware of the wakeup failure.
+ */
+ }
+ }
+
+ for (i = start; i < end + 1; i += 4)
+ *data++ = __ath11k_pcic_read32(ab, i);
+
+ if (wakeup_required && ab->pci.ops->release)
+ ab->pci.ops->release(ab);
+
+ return 0;
+}
+EXPORT_SYMBOL(ath11k_pcic_read);
+
+void ath11k_pcic_get_msi_address(struct ath11k_base *ab, u32 *msi_addr_lo,
+ u32 *msi_addr_hi)
+{
+ *msi_addr_lo = ab->pci.msi.addr_lo;
+ *msi_addr_hi = ab->pci.msi.addr_hi;
+}
+EXPORT_SYMBOL(ath11k_pcic_get_msi_address);
+
+int ath11k_pcic_get_user_msi_assignment(struct ath11k_base *ab, char *user_name,
+ int *num_vectors, u32 *user_base_data,
+ u32 *base_vector)
+{
+ const struct ath11k_msi_config *msi_config = ab->pci.msi.config;
+ int idx;
+
+ for (idx = 0; idx < msi_config->total_users; idx++) {
+ if (strcmp(user_name, msi_config->users[idx].name) == 0) {
+ *num_vectors = msi_config->users[idx].num_vectors;
+ *base_vector = msi_config->users[idx].base_vector;
+ *user_base_data = *base_vector + ab->pci.msi.ep_base_data;
+
+ ath11k_dbg(ab, ATH11K_DBG_PCI,
+ "msi assignment %s num_vectors %d user_base_data %u base_vector %u\n",
+ user_name, *num_vectors, *user_base_data,
+ *base_vector);
+
+ return 0;
+ }
+ }
+
+ ath11k_err(ab, "Failed to find MSI assignment for %s!\n", user_name);
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL(ath11k_pcic_get_user_msi_assignment);
+
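+/* Compute the MSI vector index for a CE ring by counting only those CEs
+ * that have interrupts enabled, i.e. skipping CE_ATTR_DIS_INTR pipes.
+ */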
+void ath11k_pcic_get_ce_msi_idx(struct ath11k_base *ab, u32 ce_id, u32 *msi_idx)
+{
+ u32 i, msi_data_idx;
+
+ for (i = 0, msi_data_idx = 0; i < ab->hw_params.ce_count; i++) {
+ if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
+ continue;
+
+ if (ce_id == i)
+ break;
+
+ msi_data_idx++;
+ }
+ *msi_idx = msi_data_idx;
+}
+EXPORT_SYMBOL(ath11k_pcic_get_ce_msi_idx);
+
+static void ath11k_pcic_free_ext_irq(struct ath11k_base *ab)
+{
+ int i, j;
+
+ for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
+ struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
+
+ for (j = 0; j < irq_grp->num_irq; j++)
+ free_irq(ab->irq_num[irq_grp->irqs[j]], irq_grp);
+
+ netif_napi_del(&irq_grp->napi);
+ free_netdev(irq_grp->napi_ndev);
+ }
+}
+
+void ath11k_pcic_free_irq(struct ath11k_base *ab)
+{
+ int i, irq_idx;
+
+ for (i = 0; i < ab->hw_params.ce_count; i++) {
+ if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
+ continue;
+ irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i;
+ free_irq(ab->irq_num[irq_idx], &ab->ce.ce_pipe[i]);
+ }
+
+ ath11k_pcic_free_ext_irq(ab);
+}
+EXPORT_SYMBOL(ath11k_pcic_free_irq);
+
+static void ath11k_pcic_ce_irq_enable(struct ath11k_base *ab, u16 ce_id)
+{
+ u32 irq_idx;
+
+ /* In case of one MSI vector, we handle irq enable/disable in a
+ * uniform way since we only have one irq
+ */
+ if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
+ return;
+
+ irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_id;
+ enable_irq(ab->irq_num[irq_idx]);
+}
+
+static void ath11k_pcic_ce_irq_disable(struct ath11k_base *ab, u16 ce_id)
+{
+ u32 irq_idx;
+
+ /* In case of one MSI vector, we handle irq enable/disable in a
+ * uniform way since we only have one irq
+ */
+ if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
+ return;
+
+ irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_id;
+ disable_irq_nosync(ab->irq_num[irq_idx]);
+}
+
+static void ath11k_pcic_ce_irqs_disable(struct ath11k_base *ab)
+{
+ int i;
+
+ clear_bit(ATH11K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags);
+
+ for (i = 0; i < ab->hw_params.ce_count; i++) {
+ if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
+ continue;
+ ath11k_pcic_ce_irq_disable(ab, i);
+ }
+}
+
+static void ath11k_pcic_sync_ce_irqs(struct ath11k_base *ab)
+{
+ int i;
+ int irq_idx;
+
+ for (i = 0; i < ab->hw_params.ce_count; i++) {
+ if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
+ continue;
+
+ irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i;
+ synchronize_irq(ab->irq_num[irq_idx]);
+ }
+}
+
+static void ath11k_pcic_ce_tasklet(struct tasklet_struct *t)
+{
+ struct ath11k_ce_pipe *ce_pipe = from_tasklet(ce_pipe, t, intr_tq);
+ int irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_pipe->pipe_num;
+
+ ath11k_ce_per_engine_service(ce_pipe->ab, ce_pipe->pipe_num);
+
+ enable_irq(ce_pipe->ab->irq_num[irq_idx]);
+}
+
+static irqreturn_t ath11k_pcic_ce_interrupt_handler(int irq, void *arg)
+{
+ struct ath11k_ce_pipe *ce_pipe = arg;
+ struct ath11k_base *ab = ce_pipe->ab;
+ int irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_pipe->pipe_num;
+
+ if (!test_bit(ATH11K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags))
+ return IRQ_HANDLED;
+
+ /* last interrupt received for this CE */
+ ce_pipe->timestamp = jiffies;
+
+ disable_irq_nosync(ab->irq_num[irq_idx]);
+
+ tasklet_schedule(&ce_pipe->intr_tq);
+
+ return IRQ_HANDLED;
+}
+
+static void ath11k_pcic_ext_grp_disable(struct ath11k_ext_irq_grp *irq_grp)
+{
+ struct ath11k_base *ab = irq_grp->ab;
+ int i;
+
+ /* In case of one MSI vector, we handle irq enable/disable
+ * in a uniform way since we only have one irq
+ */
+ if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
+ return;
+
+ for (i = 0; i < irq_grp->num_irq; i++)
+ disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
+}
+
+static void __ath11k_pcic_ext_irq_disable(struct ath11k_base *ab)
+{
+ int i;
+
+ clear_bit(ATH11K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags);
+
+ for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
+ struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
+
+ ath11k_pcic_ext_grp_disable(irq_grp);
+
+ if (irq_grp->napi_enabled) {
+ napi_synchronize(&irq_grp->napi);
+ napi_disable(&irq_grp->napi);
+ irq_grp->napi_enabled = false;
+ }
+ }
+}
+
+static void ath11k_pcic_ext_grp_enable(struct ath11k_ext_irq_grp *irq_grp)
+{
+ struct ath11k_base *ab = irq_grp->ab;
+ int i;
+
+ /* In case of one MSI vector, we handle irq enable/disable in a
+ * uniform way since we only have one irq
+ */
+ if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
+ return;
+
+ for (i = 0; i < irq_grp->num_irq; i++)
+ enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
+}
+
+void ath11k_pcic_ext_irq_enable(struct ath11k_base *ab)
+{
+ int i;
+
+ for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
+ struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
+
+ if (!irq_grp->napi_enabled) {
+ napi_enable(&irq_grp->napi);
+ irq_grp->napi_enabled = true;
+ }
+ ath11k_pcic_ext_grp_enable(irq_grp);
+ }
+
+ set_bit(ATH11K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags);
+}
+EXPORT_SYMBOL(ath11k_pcic_ext_irq_enable);
+
+static void ath11k_pcic_sync_ext_irqs(struct ath11k_base *ab)
+{
+ int i, j, irq_idx;
+
+ for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
+ struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
+
+ for (j = 0; j < irq_grp->num_irq; j++) {
+ irq_idx = irq_grp->irqs[j];
+ synchronize_irq(ab->irq_num[irq_idx]);
+ }
+ }
+}
+
+void ath11k_pcic_ext_irq_disable(struct ath11k_base *ab)
+{
+ __ath11k_pcic_ext_irq_disable(ab);
+ ath11k_pcic_sync_ext_irqs(ab);
+}
+EXPORT_SYMBOL(ath11k_pcic_ext_irq_disable);
+
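+/* NAPI poll handler for a DP interrupt group: service the SRNG rings up
+ * to the budget and re-enable the group's IRQs once all pending work is
+ * done.
+ */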
+static int ath11k_pcic_ext_grp_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct ath11k_ext_irq_grp *irq_grp = container_of(napi,
+ struct ath11k_ext_irq_grp,
+ napi);
+ struct ath11k_base *ab = irq_grp->ab;
+ int work_done;
+ int i;
+
+ work_done = ath11k_dp_service_srng(ab, irq_grp, budget);
+ if (work_done < budget) {
+ napi_complete_done(napi, work_done);
+ for (i = 0; i < irq_grp->num_irq; i++)
+ enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
+ }
+
+ if (work_done > budget)
+ work_done = budget;
+
+ return work_done;
+}
+
+static irqreturn_t ath11k_pcic_ext_interrupt_handler(int irq, void *arg)
+{
+ struct ath11k_ext_irq_grp *irq_grp = arg;
+ struct ath11k_base *ab = irq_grp->ab;
+ int i;
+
+ if (!test_bit(ATH11K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags))
+ return IRQ_HANDLED;
+
+ ath11k_dbg(irq_grp->ab, ATH11K_DBG_PCI, "ext irq %d\n", irq);
+
+ /* last interrupt received for this group */
+ irq_grp->timestamp = jiffies;
+
+ for (i = 0; i < irq_grp->num_irq; i++)
+ disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
+
+ napi_schedule(&irq_grp->napi);
+
+ return IRQ_HANDLED;
+}
+
+static int
+ath11k_pcic_get_msi_irq(struct ath11k_base *ab, unsigned int vector)
+{
+ return ab->pci.ops->get_msi_irq(ab, vector);
+}
+
+static int ath11k_pcic_ext_irq_config(struct ath11k_base *ab)
+{
+ int i, j, n, ret, num_vectors = 0;
+ u32 user_base_data = 0, base_vector = 0;
+ struct ath11k_ext_irq_grp *irq_grp;
+ unsigned long irq_flags;
+
+ ret = ath11k_pcic_get_user_msi_assignment(ab, "DP", &num_vectors,
+ &user_base_data,
+ &base_vector);
+ if (ret < 0)
+ return ret;
+
+ irq_flags = IRQF_SHARED;
+ if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
+ irq_flags |= IRQF_NOBALANCING;
+
+ for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
+ u32 num_irq = 0;
+
+ irq_grp = &ab->ext_irq_grp[i];
+ irq_grp->ab = ab;
+ irq_grp->grp_id = i;
+ irq_grp->napi_ndev = alloc_netdev_dummy(0);
+ if (!irq_grp->napi_ndev) {
+ ret = -ENOMEM;
+ goto fail_allocate;
+ }
+
+ netif_napi_add(irq_grp->napi_ndev, &irq_grp->napi,
+ ath11k_pcic_ext_grp_napi_poll);
+
+ if (ab->hw_params.ring_mask->tx[i] ||
+ ab->hw_params.ring_mask->rx[i] ||
+ ab->hw_params.ring_mask->rx_err[i] ||
+ ab->hw_params.ring_mask->rx_wbm_rel[i] ||
+ ab->hw_params.ring_mask->reo_status[i] ||
+ ab->hw_params.ring_mask->rxdma2host[i] ||
+ ab->hw_params.ring_mask->host2rxdma[i] ||
+ ab->hw_params.ring_mask->rx_mon_status[i]) {
+ num_irq = 1;
+ }
+
+ irq_grp->num_irq = num_irq;
+ irq_grp->irqs[0] = ATH11K_PCI_IRQ_DP_OFFSET + i;
+
+ for (j = 0; j < irq_grp->num_irq; j++) {
+ int irq_idx = irq_grp->irqs[j];
+ int vector = (i % num_vectors) + base_vector;
+ int irq = ath11k_pcic_get_msi_irq(ab, vector);
+
+ if (irq < 0) {
+ ret = irq;
+ goto fail_irq;
+ }
+
+ ab->irq_num[irq_idx] = irq;
+
+ ath11k_dbg(ab, ATH11K_DBG_PCI,
+ "irq %d group %d\n", irq, i);
+
+ irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);
+ ret = request_irq(irq, ath11k_pcic_ext_interrupt_handler,
+ irq_flags, "DP_EXT_IRQ", irq_grp);
+ if (ret) {
+ ath11k_err(ab, "failed request irq %d: %d\n",
+ vector, ret);
+ for (n = 0; n <= i; n++) {
+ irq_grp = &ab->ext_irq_grp[n];
+ free_netdev(irq_grp->napi_ndev);
+ }
+ return ret;
+ }
+ }
+ ath11k_pcic_ext_grp_disable(irq_grp);
+ }
+
+ return 0;
+fail_irq:
+ /* The napi_ndev for group i was properly allocated, so free it as well */
+ i += 1;
+fail_allocate:
+ for (n = 0; n < i; n++) {
+ irq_grp = &ab->ext_irq_grp[n];
+ free_netdev(irq_grp->napi_ndev);
+ }
+ return ret;
+}
+
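+/* Request one IRQ per interrupt-enabled CE pipe, leaving each disabled
+ * until explicitly enabled, then configure the DP external interrupt
+ * groups.
+ */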
+int ath11k_pcic_config_irq(struct ath11k_base *ab)
+{
+ struct ath11k_ce_pipe *ce_pipe;
+ u32 msi_data_start;
+ u32 msi_data_count, msi_data_idx;
+ u32 msi_irq_start;
+ unsigned int msi_data;
+ int irq, i, ret, irq_idx;
+ unsigned long irq_flags;
+
+ ret = ath11k_pcic_get_user_msi_assignment(ab, "CE", &msi_data_count,
+ &msi_data_start, &msi_irq_start);
+ if (ret)
+ return ret;
+
+ irq_flags = IRQF_SHARED;
+ if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
+ irq_flags |= IRQF_NOBALANCING;
+
+ /* Configure CE irqs */
+ for (i = 0, msi_data_idx = 0; i < ab->hw_params.ce_count; i++) {
+ if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
+ continue;
+
+ msi_data = (msi_data_idx % msi_data_count) + msi_irq_start;
+ irq = ath11k_pcic_get_msi_irq(ab, msi_data);
+ if (irq < 0)
+ return irq;
+
+ ce_pipe = &ab->ce.ce_pipe[i];
+
+ irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i;
+
+ tasklet_setup(&ce_pipe->intr_tq, ath11k_pcic_ce_tasklet);
+
+ ret = request_irq(irq, ath11k_pcic_ce_interrupt_handler,
+ irq_flags, irq_name[irq_idx], ce_pipe);
+ if (ret) {
+ ath11k_err(ab, "failed to request irq %d: %d\n",
+ irq_idx, ret);
+ return ret;
+ }
+
+ ab->irq_num[irq_idx] = irq;
+ msi_data_idx++;
+
+ ath11k_pcic_ce_irq_disable(ab, i);
+ }
+
+ ret = ath11k_pcic_ext_irq_config(ab);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL(ath11k_pcic_config_irq);
+
+void ath11k_pcic_ce_irqs_enable(struct ath11k_base *ab)
+{
+ int i;
+
+ set_bit(ATH11K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags);
+
+ for (i = 0; i < ab->hw_params.ce_count; i++) {
+ if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
+ continue;
+ ath11k_pcic_ce_irq_enable(ab, i);
+ }
+}
+EXPORT_SYMBOL(ath11k_pcic_ce_irqs_enable);
+
+static void ath11k_pcic_kill_tasklets(struct ath11k_base *ab)
+{
+ int i;
+
+ for (i = 0; i < ab->hw_params.ce_count; i++) {
+ struct ath11k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i];
+
+ if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
+ continue;
+
+ tasklet_kill(&ce_pipe->intr_tq);
+ }
+}
+
+void ath11k_pcic_ce_irq_disable_sync(struct ath11k_base *ab)
+{
+ ath11k_pcic_ce_irqs_disable(ab);
+ ath11k_pcic_sync_ce_irqs(ab);
+ ath11k_pcic_kill_tasklets(ab);
+}
+EXPORT_SYMBOL(ath11k_pcic_ce_irq_disable_sync);
+
+void ath11k_pcic_stop(struct ath11k_base *ab)
+{
+ ath11k_pcic_ce_irq_disable_sync(ab);
+ ath11k_ce_cleanup_pipes(ab);
+}
+EXPORT_SYMBOL(ath11k_pcic_stop);
+
+int ath11k_pcic_start(struct ath11k_base *ab)
+{
+ set_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags);
+
+ ath11k_pcic_ce_irqs_enable(ab);
+ ath11k_ce_rx_post_buf(ab);
+
+ return 0;
+}
+EXPORT_SYMBOL(ath11k_pcic_start);
+
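+/* Look up the service-to-CE map and report the upload (ul) and download
+ * (dl) pipe numbers for the given service ID.
+ */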
+int ath11k_pcic_map_service_to_pipe(struct ath11k_base *ab, u16 service_id,
+ u8 *ul_pipe, u8 *dl_pipe)
+{
+ const struct service_to_pipe *entry;
+ bool ul_set = false, dl_set = false;
+ int i;
+
+ for (i = 0; i < ab->hw_params.svc_to_ce_map_len; i++) {
+ entry = &ab->hw_params.svc_to_ce_map[i];
+
+ if (__le32_to_cpu(entry->service_id) != service_id)
+ continue;
+
+ switch (__le32_to_cpu(entry->pipedir)) {
+ case PIPEDIR_NONE:
+ break;
+ case PIPEDIR_IN:
+ WARN_ON(dl_set);
+ *dl_pipe = __le32_to_cpu(entry->pipenum);
+ dl_set = true;
+ break;
+ case PIPEDIR_OUT:
+ WARN_ON(ul_set);
+ *ul_pipe = __le32_to_cpu(entry->pipenum);
+ ul_set = true;
+ break;
+ case PIPEDIR_INOUT:
+ WARN_ON(dl_set);
+ WARN_ON(ul_set);
+ *dl_pipe = __le32_to_cpu(entry->pipenum);
+ *ul_pipe = __le32_to_cpu(entry->pipenum);
+ dl_set = true;
+ ul_set = true;
+ break;
+ }
+ }
+
+ if (WARN_ON(!ul_set || !dl_set))
+ return -ENOENT;
+
+ return 0;
+}
+EXPORT_SYMBOL(ath11k_pcic_map_service_to_pipe);
+
+int ath11k_pcic_register_pci_ops(struct ath11k_base *ab,
+ const struct ath11k_pci_ops *pci_ops)
+{
+ if (!pci_ops)
+ return 0;
+
+ /* Return error if mandatory pci_ops callbacks are missing */
+ if (!pci_ops->get_msi_irq || !pci_ops->window_write32 ||
+ !pci_ops->window_read32)
+ return -EINVAL;
+
+ ab->pci.ops = pci_ops;
+ return 0;
+}
+EXPORT_SYMBOL(ath11k_pcic_register_pci_ops);
+
+void ath11k_pci_enable_ce_irqs_except_wake_irq(struct ath11k_base *ab)
+{
+ int i;
+
+ for (i = 0; i < ab->hw_params.ce_count; i++) {
+ if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR ||
+ i == ATH11K_PCI_CE_WAKE_IRQ)
+ continue;
+ ath11k_pcic_ce_irq_enable(ab, i);
+ }
+}
+EXPORT_SYMBOL(ath11k_pci_enable_ce_irqs_except_wake_irq);
+
+void ath11k_pci_disable_ce_irqs_except_wake_irq(struct ath11k_base *ab)
+{
+ int i;
+ int irq_idx;
+ struct ath11k_ce_pipe *ce_pipe;
+
+ for (i = 0; i < ab->hw_params.ce_count; i++) {
+ ce_pipe = &ab->ce.ce_pipe[i];
+ irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i;
+
+ if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR ||
+ i == ATH11K_PCI_CE_WAKE_IRQ)
+ continue;
+
+ disable_irq_nosync(ab->irq_num[irq_idx]);
+ synchronize_irq(ab->irq_num[irq_idx]);
+ tasklet_kill(&ce_pipe->intr_tq);
+ }
+}
+EXPORT_SYMBOL(ath11k_pci_disable_ce_irqs_except_wake_irq);
diff --git a/drivers/net/wireless/ath/ath11k/pcic.h b/drivers/net/wireless/ath/ath11k/pcic.h
new file mode 100644
index 000000000000..ac012e88bf6d
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/pcic.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _ATH11K_PCI_CMN_H
+#define _ATH11K_PCI_CMN_H
+
+#include "core.h"
+
+#define ATH11K_PCI_IRQ_CE0_OFFSET 3
+#define ATH11K_PCI_IRQ_DP_OFFSET 14
+
+#define ATH11K_PCI_CE_WAKE_IRQ 2
+
+#define ATH11K_PCI_WINDOW_ENABLE_BIT 0x40000000
+#define ATH11K_PCI_WINDOW_REG_ADDRESS 0x310c
+#define ATH11K_PCI_WINDOW_VALUE_MASK GENMASK(24, 19)
+#define ATH11K_PCI_WINDOW_START 0x80000
+#define ATH11K_PCI_WINDOW_RANGE_MASK GENMASK(18, 0)
+
+/* BAR0 + 4K is always accessible, so there is no need to force a
+ * wakeup for accesses in that range.
+ * 4K - 32 = 0xFE0
+ */
+#define ATH11K_PCI_ACCESS_ALWAYS_OFF 0xFE0
+
+int ath11k_pcic_get_user_msi_assignment(struct ath11k_base *ab, char *user_name,
+ int *num_vectors, u32 *user_base_data,
+ u32 *base_vector);
+void ath11k_pcic_write32(struct ath11k_base *ab, u32 offset, u32 value);
+u32 ath11k_pcic_read32(struct ath11k_base *ab, u32 offset);
+void ath11k_pcic_get_msi_address(struct ath11k_base *ab, u32 *msi_addr_lo,
+ u32 *msi_addr_hi);
+void ath11k_pcic_get_ce_msi_idx(struct ath11k_base *ab, u32 ce_id, u32 *msi_idx);
+void ath11k_pcic_free_irq(struct ath11k_base *ab);
+int ath11k_pcic_config_irq(struct ath11k_base *ab);
+void ath11k_pcic_ext_irq_enable(struct ath11k_base *ab);
+void ath11k_pcic_ext_irq_disable(struct ath11k_base *ab);
+void ath11k_pcic_stop(struct ath11k_base *ab);
+int ath11k_pcic_start(struct ath11k_base *ab);
+int ath11k_pcic_map_service_to_pipe(struct ath11k_base *ab, u16 service_id,
+ u8 *ul_pipe, u8 *dl_pipe);
+void ath11k_pcic_ce_irqs_enable(struct ath11k_base *ab);
+void ath11k_pcic_ce_irq_disable_sync(struct ath11k_base *ab);
+int ath11k_pcic_init_msi_config(struct ath11k_base *ab);
+int ath11k_pcic_register_pci_ops(struct ath11k_base *ab,
+ const struct ath11k_pci_ops *pci_ops);
+int ath11k_pcic_read(struct ath11k_base *ab, void *buf, u32 start, u32 end);
+void ath11k_pci_enable_ce_irqs_except_wake_irq(struct ath11k_base *ab);
+void ath11k_pci_disable_ce_irqs_except_wake_irq(struct ath11k_base *ab);
+
+#endif
diff --git a/drivers/net/wireless/ath/ath11k/peer.c b/drivers/net/wireless/ath/ath11k/peer.c
new file mode 100644
index 000000000000..6d0126c39301
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/peer.c
@@ -0,0 +1,670 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include "core.h"
+#include "peer.h"
+#include "debug.h"
+
+static struct ath11k_peer *ath11k_peer_find_list_by_id(struct ath11k_base *ab,
+ int peer_id)
+{
+ struct ath11k_peer *peer;
+
+ lockdep_assert_held(&ab->base_lock);
+
+ list_for_each_entry(peer, &ab->peers, list) {
+ if (peer->peer_id != peer_id)
+ continue;
+
+ return peer;
+ }
+
+ return NULL;
+}
+
+struct ath11k_peer *ath11k_peer_find(struct ath11k_base *ab, int vdev_id,
+ const u8 *addr)
+{
+ struct ath11k_peer *peer;
+
+ lockdep_assert_held(&ab->base_lock);
+
+ list_for_each_entry(peer, &ab->peers, list) {
+ if (peer->vdev_id != vdev_id)
+ continue;
+ if (!ether_addr_equal(peer->addr, addr))
+ continue;
+
+ return peer;
+ }
+
+ return NULL;
+}
+
+struct ath11k_peer *ath11k_peer_find_by_addr(struct ath11k_base *ab,
+ const u8 *addr)
+{
+ struct ath11k_peer *peer;
+
+ lockdep_assert_held(&ab->base_lock);
+
+ if (!ab->rhead_peer_addr)
+ return NULL;
+
+ peer = rhashtable_lookup_fast(ab->rhead_peer_addr, addr,
+ ab->rhash_peer_addr_param);
+
+ return peer;
+}
+
+struct ath11k_peer *ath11k_peer_find_by_id(struct ath11k_base *ab,
+ int peer_id)
+{
+ struct ath11k_peer *peer;
+
+ lockdep_assert_held(&ab->base_lock);
+
+ if (!ab->rhead_peer_id)
+ return NULL;
+
+ peer = rhashtable_lookup_fast(ab->rhead_peer_id, &peer_id,
+ ab->rhash_peer_id_param);
+
+ return peer;
+}
+
+struct ath11k_peer *ath11k_peer_find_by_vdev_id(struct ath11k_base *ab,
+ int vdev_id)
+{
+ struct ath11k_peer *peer;
+
+ spin_lock_bh(&ab->base_lock);
+
+ list_for_each_entry(peer, &ab->peers, list) {
+ if (vdev_id == peer->vdev_id) {
+ spin_unlock_bh(&ab->base_lock);
+ return peer;
+ }
+ }
+ spin_unlock_bh(&ab->base_lock);
+ return NULL;
+}
+
+void ath11k_peer_unmap_event(struct ath11k_base *ab, u16 peer_id)
+{
+ struct ath11k_peer *peer;
+
+ spin_lock_bh(&ab->base_lock);
+
+ peer = ath11k_peer_find_list_by_id(ab, peer_id);
+ if (!peer) {
+ ath11k_warn(ab, "peer-unmap-event: unknown peer id %d\n",
+ peer_id);
+ goto exit;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "peer unmap vdev %d peer %pM id %d\n",
+ peer->vdev_id, peer->addr, peer_id);
+
+ list_del(&peer->list);
+ kfree(peer);
+ wake_up(&ab->peer_mapping_wq);
+
+exit:
+ spin_unlock_bh(&ab->base_lock);
+}
+
+void ath11k_peer_map_event(struct ath11k_base *ab, u8 vdev_id, u16 peer_id,
+ u8 *mac_addr, u16 ast_hash, u16 hw_peer_id)
+{
+ struct ath11k_peer *peer;
+
+ spin_lock_bh(&ab->base_lock);
+ peer = ath11k_peer_find(ab, vdev_id, mac_addr);
+ if (!peer) {
+ peer = kzalloc(sizeof(*peer), GFP_ATOMIC);
+ if (!peer)
+ goto exit;
+
+ peer->vdev_id = vdev_id;
+ peer->peer_id = peer_id;
+ peer->ast_hash = ast_hash;
+ peer->hw_peer_id = hw_peer_id;
+ ether_addr_copy(peer->addr, mac_addr);
+ list_add(&peer->list, &ab->peers);
+ wake_up(&ab->peer_mapping_wq);
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "peer map vdev %d peer %pM id %d\n",
+ vdev_id, mac_addr, peer_id);
+
+exit:
+ spin_unlock_bh(&ab->base_lock);
+}
+
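+/* Wait up to three seconds for the firmware's peer map/unmap event to
+ * bring the peer list into the expected state; a crash flush also ends
+ * the wait.
+ */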
+static int ath11k_wait_for_peer_common(struct ath11k_base *ab, int vdev_id,
+ const u8 *addr, bool expect_mapped)
+{
+ int ret;
+
+ ret = wait_event_timeout(ab->peer_mapping_wq, ({
+ bool mapped;
+
+ spin_lock_bh(&ab->base_lock);
+ mapped = !!ath11k_peer_find(ab, vdev_id, addr);
+ spin_unlock_bh(&ab->base_lock);
+
+ (mapped == expect_mapped ||
+ test_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags));
+ }), 3 * HZ);
+
+ if (ret <= 0)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static inline int ath11k_peer_rhash_insert(struct ath11k_base *ab,
+ struct rhashtable *rtbl,
+ struct rhash_head *rhead,
+ struct rhashtable_params *params,
+ void *key)
+{
+ struct ath11k_peer *tmp;
+
+ lockdep_assert_held(&ab->tbl_mtx_lock);
+
+ tmp = rhashtable_lookup_get_insert_fast(rtbl, rhead, *params);
+
+ if (!tmp)
+ return 0;
+ else if (IS_ERR(tmp))
+ return PTR_ERR(tmp);
+ else
+ return -EEXIST;
+}
+
+static inline int ath11k_peer_rhash_remove(struct ath11k_base *ab,
+ struct rhashtable *rtbl,
+ struct rhash_head *rhead,
+ struct rhashtable_params *params)
+{
+ int ret;
+
+ lockdep_assert_held(&ab->tbl_mtx_lock);
+
+ ret = rhashtable_remove_fast(rtbl, rhead, *params);
+ if (ret && ret != -ENOENT)
+ return ret;
+
+ return 0;
+}
+
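+/* Insert the peer into both lookup tables, by peer id and by MAC
+ * address; if the second insert fails, the first is rolled back so the
+ * tables stay consistent.
+ */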
+static int ath11k_peer_rhash_add(struct ath11k_base *ab, struct ath11k_peer *peer)
+{
+ int ret;
+
+ lockdep_assert_held(&ab->base_lock);
+ lockdep_assert_held(&ab->tbl_mtx_lock);
+
+ if (!ab->rhead_peer_id || !ab->rhead_peer_addr)
+ return -EPERM;
+
+ ret = ath11k_peer_rhash_insert(ab, ab->rhead_peer_id, &peer->rhash_id,
+ &ab->rhash_peer_id_param, &peer->peer_id);
+ if (ret) {
+ ath11k_warn(ab, "failed to add peer %pM with id %d in rhash_id ret %d\n",
+ peer->addr, peer->peer_id, ret);
+ return ret;
+ }
+
+ ret = ath11k_peer_rhash_insert(ab, ab->rhead_peer_addr, &peer->rhash_addr,
+ &ab->rhash_peer_addr_param, &peer->addr);
+ if (ret) {
+ ath11k_warn(ab, "failed to add peer %pM with id %d in rhash_addr ret %d\n",
+ peer->addr, peer->peer_id, ret);
+ goto err_clean;
+ }
+
+ return 0;
+
+err_clean:
+ ath11k_peer_rhash_remove(ab, ab->rhead_peer_id, &peer->rhash_id,
+ &ab->rhash_peer_id_param);
+ return ret;
+}
+
+void ath11k_peer_cleanup(struct ath11k *ar, u32 vdev_id)
+{
+ struct ath11k_peer *peer, *tmp;
+ struct ath11k_base *ab = ar->ab;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ mutex_lock(&ab->tbl_mtx_lock);
+ spin_lock_bh(&ab->base_lock);
+ list_for_each_entry_safe(peer, tmp, &ab->peers, list) {
+ if (peer->vdev_id != vdev_id)
+ continue;
+
+ ath11k_warn(ab, "removing stale peer %pM from vdev_id %d\n",
+ peer->addr, vdev_id);
+
+ ath11k_peer_rhash_delete(ab, peer);
+ list_del(&peer->list);
+ kfree(peer);
+ ar->num_peers--;
+ }
+
+ spin_unlock_bh(&ab->base_lock);
+ mutex_unlock(&ab->tbl_mtx_lock);
+}
+
+static int ath11k_wait_for_peer_deleted(struct ath11k *ar, int vdev_id, const u8 *addr)
+{
+ return ath11k_wait_for_peer_common(ar->ab, vdev_id, addr, false);
+}
+
+int ath11k_wait_for_peer_delete_done(struct ath11k *ar, u32 vdev_id,
+ const u8 *addr)
+{
+ int ret;
+ unsigned long time_left;
+
+ ret = ath11k_wait_for_peer_deleted(ar, vdev_id, addr);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed wait for peer deleted");
+ return ret;
+ }
+
+ time_left = wait_for_completion_timeout(&ar->peer_delete_done,
+ 3 * HZ);
+ if (time_left == 0) {
+ ath11k_warn(ar->ab, "Timeout in receiving peer delete response\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int __ath11k_peer_delete(struct ath11k *ar, u32 vdev_id, const u8 *addr)
+{
+ int ret;
+ struct ath11k_peer *peer;
+ struct ath11k_base *ab = ar->ab;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ mutex_lock(&ab->tbl_mtx_lock);
+ spin_lock_bh(&ab->base_lock);
+
+ peer = ath11k_peer_find_by_addr(ab, addr);
+ /* Check if the found peer is the one we want to remove.
+ * While the sta is transitioning to another band we may
+ * have two peers with the same addr assigned to different
+ * vdev_ids. Make sure we are deleting the correct peer.
+ */
+ if (peer && peer->vdev_id == vdev_id)
+ ath11k_peer_rhash_delete(ab, peer);
+
+ /* Fall back to a peer list search if the correct peer can't be found.
+ * Skip deleting the peer from the rhash since it has already
+ * been deleted during peer add.
+ */
+ if (!peer)
+ peer = ath11k_peer_find(ab, vdev_id, addr);
+
+ if (!peer) {
+ spin_unlock_bh(&ab->base_lock);
+ mutex_unlock(&ab->tbl_mtx_lock);
+
+ ath11k_warn(ab,
+ "failed to find peer vdev_id %d addr %pM in delete\n",
+ vdev_id, addr);
+ return -EINVAL;
+ }
+
+ spin_unlock_bh(&ab->base_lock);
+ mutex_unlock(&ab->tbl_mtx_lock);
+
+ reinit_completion(&ar->peer_delete_done);
+
+ ret = ath11k_wmi_send_peer_delete_cmd(ar, addr, vdev_id);
+ if (ret) {
+ ath11k_warn(ab,
+ "failed to delete peer vdev_id %d addr %pM ret %d\n",
+ vdev_id, addr, ret);
+ return ret;
+ }
+
+ ret = ath11k_wait_for_peer_delete_done(ar, vdev_id, addr);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+int ath11k_peer_delete(struct ath11k *ar, u32 vdev_id, u8 *addr)
+{
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ret = __ath11k_peer_delete(ar, vdev_id, addr);
+ if (ret)
+ return ret;
+
+ ar->num_peers--;
+
+ return 0;
+}
+
+static int ath11k_wait_for_peer_created(struct ath11k *ar, int vdev_id, const u8 *addr)
+{
+ return ath11k_wait_for_peer_common(ar->ab, vdev_id, addr, true);
+}
+
+int ath11k_peer_create(struct ath11k *ar, struct ath11k_vif *arvif,
+ struct ieee80211_sta *sta, struct peer_create_params *param)
+{
+ struct ath11k_peer *peer;
+ struct ath11k_sta *arsta;
+ int ret, fbret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (ar->num_peers > (ar->max_num_peers - 1)) {
+ ath11k_warn(ar->ab,
+ "failed to create peer due to insufficient peer entry resource in firmware\n");
+ return -ENOBUFS;
+ }
+
+ mutex_lock(&ar->ab->tbl_mtx_lock);
+ spin_lock_bh(&ar->ab->base_lock);
+ peer = ath11k_peer_find_by_addr(ar->ab, param->peer_addr);
+ if (peer) {
+ if (peer->vdev_id == param->vdev_id) {
+ spin_unlock_bh(&ar->ab->base_lock);
+ mutex_unlock(&ar->ab->tbl_mtx_lock);
+ return -EINVAL;
+ }
+
+ /* Assume the sta is transitioning to another band.
+ * Remove the peer from the rhash here.
+ */
+ ath11k_peer_rhash_delete(ar->ab, peer);
+ }
+ spin_unlock_bh(&ar->ab->base_lock);
+ mutex_unlock(&ar->ab->tbl_mtx_lock);
+
+ ret = ath11k_wmi_send_peer_create_cmd(ar, param);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send peer create vdev_id %d ret %d\n",
+ param->vdev_id, ret);
+ return ret;
+ }
+
+ ret = ath11k_wait_for_peer_created(ar, param->vdev_id,
+ param->peer_addr);
+ if (ret)
+ return ret;
+
+ mutex_lock(&ar->ab->tbl_mtx_lock);
+ spin_lock_bh(&ar->ab->base_lock);
+
+ peer = ath11k_peer_find(ar->ab, param->vdev_id, param->peer_addr);
+ if (!peer) {
+ spin_unlock_bh(&ar->ab->base_lock);
+ mutex_unlock(&ar->ab->tbl_mtx_lock);
+ ath11k_warn(ar->ab, "failed to find peer %pM on vdev %i after creation\n",
+ param->peer_addr, param->vdev_id);
+
+ ret = -ENOENT;
+ goto cleanup;
+ }
+
+ ret = ath11k_peer_rhash_add(ar->ab, peer);
+ if (ret) {
+ spin_unlock_bh(&ar->ab->base_lock);
+ mutex_unlock(&ar->ab->tbl_mtx_lock);
+ goto cleanup;
+ }
+
+ peer->pdev_idx = ar->pdev_idx;
+ peer->sta = sta;
+
+ if (arvif->vif->type == NL80211_IFTYPE_STATION) {
+ arvif->ast_hash = peer->ast_hash;
+ arvif->ast_idx = peer->hw_peer_id;
+ }
+
+ peer->sec_type = HAL_ENCRYPT_TYPE_OPEN;
+ peer->sec_type_grp = HAL_ENCRYPT_TYPE_OPEN;
+
+ if (sta) {
+ arsta = ath11k_sta_to_arsta(sta);
+ arsta->tcl_metadata |= FIELD_PREP(HTT_TCL_META_DATA_TYPE, 0) |
+ FIELD_PREP(HTT_TCL_META_DATA_PEER_ID,
+ peer->peer_id);
+
+ /* set HTT extension valid bit to 0 by default */
+ arsta->tcl_metadata &= ~HTT_TCL_META_DATA_VALID_HTT;
+ }
+
+ ar->num_peers++;
+
+ spin_unlock_bh(&ar->ab->base_lock);
+ mutex_unlock(&ar->ab->tbl_mtx_lock);
+
+ return 0;
+
+cleanup:
+ fbret = __ath11k_peer_delete(ar, param->vdev_id, param->peer_addr);
+ if (fbret)
+ ath11k_warn(ar->ab, "failed peer %pM delete vdev_id %d fallback ret %d\n",
+ param->peer_addr, param->vdev_id, fbret);
+
+ return ret;
+}
+
+int ath11k_peer_rhash_delete(struct ath11k_base *ab, struct ath11k_peer *peer)
+{
+ int ret;
+
+ lockdep_assert_held(&ab->base_lock);
+ lockdep_assert_held(&ab->tbl_mtx_lock);
+
+ if (!ab->rhead_peer_id || !ab->rhead_peer_addr)
+ return -EPERM;
+
+ ret = ath11k_peer_rhash_remove(ab, ab->rhead_peer_addr, &peer->rhash_addr,
+ &ab->rhash_peer_addr_param);
+ if (ret) {
+ ath11k_warn(ab, "failed to remove peer %pM id %d in rhash_addr ret %d\n",
+ peer->addr, peer->peer_id, ret);
+ return ret;
+ }
+
+ ret = ath11k_peer_rhash_remove(ab, ab->rhead_peer_id, &peer->rhash_id,
+ &ab->rhash_peer_id_param);
+ if (ret) {
+ ath11k_warn(ab, "failed to remove peer %pM id %d in rhash_id ret %d\n",
+ peer->addr, peer->peer_id, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
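+/* Lazily allocate and initialize the peer-id rhashtable; the caller
+ * holds tbl_mtx_lock and the table is published under base_lock. If a
+ * table was already published in the meantime, the new one is destroyed.
+ */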
+static int ath11k_peer_rhash_id_tbl_init(struct ath11k_base *ab)
+{
+ struct rhashtable_params *param;
+ struct rhashtable *rhash_id_tbl;
+ int ret;
+ size_t size;
+
+ lockdep_assert_held(&ab->tbl_mtx_lock);
+
+ if (ab->rhead_peer_id)
+ return 0;
+
+ size = sizeof(*ab->rhead_peer_id);
+ rhash_id_tbl = kzalloc(size, GFP_KERNEL);
+ if (!rhash_id_tbl) {
+ ath11k_warn(ab, "failed to init rhash id table due to no mem (size %zu)\n",
+ size);
+ return -ENOMEM;
+ }
+
+ param = &ab->rhash_peer_id_param;
+
+ param->key_offset = offsetof(struct ath11k_peer, peer_id);
+ param->head_offset = offsetof(struct ath11k_peer, rhash_id);
+ param->key_len = sizeof_field(struct ath11k_peer, peer_id);
+ param->automatic_shrinking = true;
+ param->nelem_hint = ab->num_radios * TARGET_NUM_PEERS_PDEV(ab);
+
+ ret = rhashtable_init(rhash_id_tbl, param);
+ if (ret) {
+ ath11k_warn(ab, "failed to init peer id rhash table %d\n", ret);
+ goto err_free;
+ }
+
+ spin_lock_bh(&ab->base_lock);
+
+ if (!ab->rhead_peer_id) {
+ ab->rhead_peer_id = rhash_id_tbl;
+ } else {
+ spin_unlock_bh(&ab->base_lock);
+ goto cleanup_tbl;
+ }
+
+ spin_unlock_bh(&ab->base_lock);
+
+ return 0;
+
+cleanup_tbl:
+ rhashtable_destroy(rhash_id_tbl);
+err_free:
+ kfree(rhash_id_tbl);
+
+ return ret;
+}
+
+static int ath11k_peer_rhash_addr_tbl_init(struct ath11k_base *ab)
+{
+ struct rhashtable_params *param;
+ struct rhashtable *rhash_addr_tbl;
+ int ret;
+ size_t size;
+
+ lockdep_assert_held(&ab->tbl_mtx_lock);
+
+ if (ab->rhead_peer_addr)
+ return 0;
+
+ size = sizeof(*ab->rhead_peer_addr);
+ rhash_addr_tbl = kzalloc(size, GFP_KERNEL);
+ if (!rhash_addr_tbl) {
+ ath11k_warn(ab, "failed to init rhash addr table due to no mem (size %zu)\n",
+ size);
+ return -ENOMEM;
+ }
+
+ param = &ab->rhash_peer_addr_param;
+
+ param->key_offset = offsetof(struct ath11k_peer, addr);
+ param->head_offset = offsetof(struct ath11k_peer, rhash_addr);
+ param->key_len = sizeof_field(struct ath11k_peer, addr);
+ param->automatic_shrinking = true;
+ param->nelem_hint = ab->num_radios * TARGET_NUM_PEERS_PDEV(ab);
+
+ ret = rhashtable_init(rhash_addr_tbl, param);
+ if (ret) {
+ ath11k_warn(ab, "failed to init peer addr rhash table %d\n", ret);
+ goto err_free;
+ }
+
+ spin_lock_bh(&ab->base_lock);
+
+ if (!ab->rhead_peer_addr) {
+ ab->rhead_peer_addr = rhash_addr_tbl;
+ } else {
+ spin_unlock_bh(&ab->base_lock);
+ goto cleanup_tbl;
+ }
+
+ spin_unlock_bh(&ab->base_lock);
+
+ return 0;
+
+cleanup_tbl:
+ rhashtable_destroy(rhash_addr_tbl);
+err_free:
+ kfree(rhash_addr_tbl);
+
+ return ret;
+}
+
+static inline void ath11k_peer_rhash_id_tbl_destroy(struct ath11k_base *ab)
+{
+ lockdep_assert_held(&ab->tbl_mtx_lock);
+
+ if (!ab->rhead_peer_id)
+ return;
+
+ rhashtable_destroy(ab->rhead_peer_id);
+ kfree(ab->rhead_peer_id);
+ ab->rhead_peer_id = NULL;
+}
+
+static inline void ath11k_peer_rhash_addr_tbl_destroy(struct ath11k_base *ab)
+{
+ lockdep_assert_held(&ab->tbl_mtx_lock);
+
+ if (!ab->rhead_peer_addr)
+ return;
+
+ rhashtable_destroy(ab->rhead_peer_addr);
+ kfree(ab->rhead_peer_addr);
+ ab->rhead_peer_addr = NULL;
+}
+
+int ath11k_peer_rhash_tbl_init(struct ath11k_base *ab)
+{
+ int ret;
+
+ mutex_lock(&ab->tbl_mtx_lock);
+
+ ret = ath11k_peer_rhash_id_tbl_init(ab);
+ if (ret)
+ goto out;
+
+ ret = ath11k_peer_rhash_addr_tbl_init(ab);
+ if (ret)
+ goto cleanup_tbl;
+
+ mutex_unlock(&ab->tbl_mtx_lock);
+
+ return 0;
+
+cleanup_tbl:
+ ath11k_peer_rhash_id_tbl_destroy(ab);
+out:
+ mutex_unlock(&ab->tbl_mtx_lock);
+ return ret;
+}
+
+void ath11k_peer_rhash_tbl_destroy(struct ath11k_base *ab)
+{
+ mutex_lock(&ab->tbl_mtx_lock);
+
+ ath11k_peer_rhash_addr_tbl_destroy(ab);
+ ath11k_peer_rhash_id_tbl_destroy(ab);
+
+ mutex_unlock(&ab->tbl_mtx_lock);
+}
diff --git a/drivers/net/wireless/ath/ath11k/peer.h b/drivers/net/wireless/ath/ath11k/peer.h
new file mode 100644
index 000000000000..3ad2f3355b14
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/peer.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef ATH11K_PEER_H
+#define ATH11K_PEER_H
+
+struct ath11k_peer {
+ struct list_head list;
+ struct ieee80211_sta *sta;
+ int vdev_id;
+ u8 addr[ETH_ALEN];
+ int peer_id;
+ u16 ast_hash;
+ u8 pdev_idx;
+ u16 hw_peer_id;
+
+ /* protected by ab->data_lock */
+ struct ieee80211_key_conf *keys[WMI_MAX_KEY_INDEX + 1];
+ struct dp_rx_tid rx_tid[IEEE80211_NUM_TIDS + 1];
+
+ /* peer id based rhashtable list pointer */
+ struct rhash_head rhash_id;
+ /* peer addr based rhashtable list pointer */
+ struct rhash_head rhash_addr;
+
+ /* Info used in MMIC verification of
+ * RX fragments
+ */
+ struct crypto_shash *tfm_mmic;
+ u8 mcast_keyidx;
+ u8 ucast_keyidx;
+ u16 sec_type;
+ u16 sec_type_grp;
+ bool is_authorized;
+ bool dp_setup_done;
+};
+
+void ath11k_peer_unmap_event(struct ath11k_base *ab, u16 peer_id);
+void ath11k_peer_map_event(struct ath11k_base *ab, u8 vdev_id, u16 peer_id,
+ u8 *mac_addr, u16 ast_hash, u16 hw_peer_id);
+struct ath11k_peer *ath11k_peer_find(struct ath11k_base *ab, int vdev_id,
+ const u8 *addr);
+struct ath11k_peer *ath11k_peer_find_by_addr(struct ath11k_base *ab,
+ const u8 *addr);
+struct ath11k_peer *ath11k_peer_find_by_id(struct ath11k_base *ab, int peer_id);
+void ath11k_peer_cleanup(struct ath11k *ar, u32 vdev_id);
+int ath11k_peer_delete(struct ath11k *ar, u32 vdev_id, u8 *addr);
+int ath11k_peer_create(struct ath11k *ar, struct ath11k_vif *arvif,
+ struct ieee80211_sta *sta, struct peer_create_params *param);
+int ath11k_wait_for_peer_delete_done(struct ath11k *ar, u32 vdev_id,
+ const u8 *addr);
+struct ath11k_peer *ath11k_peer_find_by_vdev_id(struct ath11k_base *ab,
+ int vdev_id);
+int ath11k_peer_rhash_tbl_init(struct ath11k_base *ab);
+void ath11k_peer_rhash_tbl_destroy(struct ath11k_base *ab);
+int ath11k_peer_rhash_delete(struct ath11k_base *ab, struct ath11k_peer *peer);
+#endif /* ATH11K_PEER_H */
diff --git a/drivers/net/wireless/ath/ath11k/qmi.c b/drivers/net/wireless/ath/ath11k/qmi.c
new file mode 100644
index 000000000000..ff6a97e328b8
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/qmi.c
@@ -0,0 +1,3366 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#include <linux/elf.h>
+#include <linux/export.h>
+
+#include "qmi.h"
+#include "core.h"
+#include "debug.h"
+#include "hif.h"
+#include <linux/of.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/ioport.h>
+#include <linux/firmware.h>
+#include <linux/of_irq.h>
+
+#define SLEEP_CLOCK_SELECT_INTERNAL_BIT 0x02
+#define HOST_CSTATE_BIT 0x04
+#define PLATFORM_CAP_PCIE_GLOBAL_RESET 0x08
+#define PLATFORM_CAP_PCIE_PME_D3COLD 0x10
+
+#define FW_BUILD_ID_MASK "QC_IMAGE_VERSION_STRING="
+
+bool ath11k_cold_boot_cal = true;
+EXPORT_SYMBOL(ath11k_cold_boot_cal);
+module_param_named(cold_boot_cal, ath11k_cold_boot_cal, bool, 0644);
+MODULE_PARM_DESC(cold_boot_cal,
+ "Decrease the channel switch time but increase the driver load time (Default: true)");
+
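+/* QMI message layout tables. Each qmi_elem_info entry describes one TLV
+ * element in the wire format: tlv_type 0x01/0x02 mark mandatory elements,
+ * types 0x10 and above mark optional elements paired with a *_valid flag
+ * in the message struct, and a QMI_EOTI entry terminates each table.
+ */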
+static const struct qmi_elem_info qmi_wlanfw_host_cap_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ num_clients_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ num_clients),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ wake_msi_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ wake_msi),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ gpios_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ gpios_len),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = QMI_WLFW_MAX_NUM_GPIO_V01,
+ .elem_size = sizeof(u32),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ gpios),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ nm_modem_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ nm_modem),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ bdf_support_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ bdf_support),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ bdf_cache_support_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ bdf_cache_support),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x16,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ m3_support_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x16,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ m3_support),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x17,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ m3_cache_support_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x17,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ m3_cache_support),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x18,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ cal_filesys_support_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x18,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ cal_filesys_support),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x19,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ cal_cache_support_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x19,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ cal_cache_support),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1A,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ cal_done_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1A,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ cal_done),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1B,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ mem_bucket_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1B,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ mem_bucket),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1C,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ mem_cfg_mode_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1C,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ mem_cfg_mode),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static const struct qmi_elem_info qmi_wlanfw_host_cap_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_resp_msg_v01, resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static const struct qmi_elem_info qmi_wlanfw_ind_register_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ fw_ready_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ fw_ready_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ initiate_cal_download_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ initiate_cal_download_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ initiate_cal_update_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ initiate_cal_update_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ msa_ready_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ msa_ready_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ pin_connect_result_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ pin_connect_result_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ client_id_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ client_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x16,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ request_mem_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x16,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ request_mem_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x17,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ fw_mem_ready_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x17,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ fw_mem_ready_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x18,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ fw_init_done_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x18,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ fw_init_done_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x19,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ rejuvenate_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x19,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ rejuvenate_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1A,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ xo_cal_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1A,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ xo_cal_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1B,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ cal_done_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1B,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ cal_done_enable),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static const struct qmi_elem_info qmi_wlanfw_ind_register_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_resp_msg_v01,
+ fw_status_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_resp_msg_v01,
+ fw_status),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static const struct qmi_elem_info qmi_wlanfw_mem_cfg_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_mem_cfg_s_v01, offset),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_mem_cfg_s_v01, size),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_mem_cfg_s_v01, secure_flag),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static const struct qmi_elem_info qmi_wlanfw_mem_seg_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_mem_seg_s_v01,
+ size),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum qmi_wlanfw_mem_type_enum_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_mem_seg_s_v01, type),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_mem_seg_s_v01, mem_cfg_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_WLANFW_MAX_NUM_MEM_CFG_V01,
+ .elem_size = sizeof(struct qmi_wlanfw_mem_cfg_s_v01),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_mem_seg_s_v01, mem_cfg),
+ .ei_array = qmi_wlanfw_mem_cfg_s_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static const struct qmi_elem_info qmi_wlanfw_request_mem_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct qmi_wlanfw_request_mem_ind_msg_v01,
+ mem_seg_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = ATH11K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01,
+ .elem_size = sizeof(struct qmi_wlanfw_mem_seg_s_v01),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct qmi_wlanfw_request_mem_ind_msg_v01,
+ mem_seg),
+ .ei_array = qmi_wlanfw_mem_seg_s_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static const struct qmi_elem_info qmi_wlanfw_mem_seg_resp_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_mem_seg_resp_s_v01, addr),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_mem_seg_resp_s_v01, size),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum qmi_wlanfw_mem_type_enum_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_mem_seg_resp_s_v01, type),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_mem_seg_resp_s_v01, restore),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static const struct qmi_elem_info qmi_wlanfw_respond_mem_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct qmi_wlanfw_respond_mem_req_msg_v01,
+ mem_seg_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = ATH11K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01,
+ .elem_size = sizeof(struct qmi_wlanfw_mem_seg_resp_s_v01),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct qmi_wlanfw_respond_mem_req_msg_v01,
+ mem_seg),
+ .ei_array = qmi_wlanfw_mem_seg_resp_s_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static const struct qmi_elem_info qmi_wlanfw_respond_mem_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct qmi_wlanfw_respond_mem_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static const struct qmi_elem_info qmi_wlanfw_cap_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static const struct qmi_elem_info qmi_wlanfw_device_info_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static const struct qmi_elem_info qmi_wlfw_device_info_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct qmi_wlanfw_device_info_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_device_info_resp_msg_v01,
+ bar_addr_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_device_info_resp_msg_v01,
+ bar_addr),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct qmi_wlanfw_device_info_resp_msg_v01,
+ bar_size_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct qmi_wlanfw_device_info_resp_msg_v01,
+ bar_size),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static const struct qmi_elem_info qmi_wlanfw_rf_chip_info_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_rf_chip_info_s_v01,
+ chip_id),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_rf_chip_info_s_v01,
+ chip_family),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static const struct qmi_elem_info qmi_wlanfw_rf_board_info_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_rf_board_info_s_v01,
+ board_id),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static const struct qmi_elem_info qmi_wlanfw_soc_info_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_soc_info_s_v01, soc_id),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static const struct qmi_elem_info qmi_wlanfw_fw_version_info_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_fw_version_info_s_v01,
+ fw_version),
+ },
+ {
+ .data_type = QMI_STRING,
+ .elem_len = ATH11K_QMI_WLANFW_MAX_TIMESTAMP_LEN_V01 + 1,
+ .elem_size = sizeof(char),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_fw_version_info_s_v01,
+ fw_build_timestamp),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static const struct qmi_elem_info qmi_wlanfw_cap_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ chip_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_wlanfw_rf_chip_info_s_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ chip_info),
+ .ei_array = qmi_wlanfw_rf_chip_info_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ board_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_wlanfw_rf_board_info_s_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ board_info),
+ .ei_array = qmi_wlanfw_rf_board_info_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ soc_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_wlanfw_soc_info_s_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ soc_info),
+ .ei_array = qmi_wlanfw_soc_info_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ fw_version_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_wlanfw_fw_version_info_s_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ fw_version_info),
+ .ei_array = qmi_wlanfw_fw_version_info_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ fw_build_id_valid),
+ },
+ {
+ .data_type = QMI_STRING,
+ .elem_len = ATH11K_QMI_WLANFW_MAX_BUILD_ID_LEN_V01 + 1,
+ .elem_size = sizeof(char),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ fw_build_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ num_macs_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ num_macs),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x16,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ voltage_mv_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x16,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ voltage_mv),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x17,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ time_freq_hz_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x17,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ time_freq_hz),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x18,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ otp_version_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x18,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ otp_version),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x19,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ eeprom_read_timeout_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x19,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ eeprom_read_timeout),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static const struct qmi_elem_info qmi_wlanfw_bdf_download_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+ valid),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+ file_id_valid),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum qmi_wlanfw_cal_temp_id_enum_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+ file_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+ total_size_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+ total_size),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+ seg_id_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+ seg_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+ data_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+ data_len),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = QMI_WLANFW_MAX_DATA_SIZE_V01,
+ .elem_size = sizeof(u8),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+ data),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+ end_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+ end),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+ bdf_type_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+ bdf_type),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static const struct qmi_elem_info qmi_wlanfw_bdf_download_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct qmi_wlanfw_bdf_download_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static const struct qmi_elem_info qmi_wlanfw_m3_info_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct qmi_wlanfw_m3_info_req_msg_v01, addr),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct qmi_wlanfw_m3_info_req_msg_v01, size),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static const struct qmi_elem_info qmi_wlanfw_m3_info_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct qmi_wlanfw_m3_info_resp_msg_v01, resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static const struct qmi_elem_info qmi_wlanfw_ce_tgt_pipe_cfg_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01,
+ pipe_num),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum qmi_wlanfw_pipedir_enum_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01,
+ pipe_dir),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01,
+ nentries),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01,
+ nbytes_max),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01,
+ flags),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static const struct qmi_elem_info qmi_wlanfw_ce_svc_pipe_cfg_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_ce_svc_pipe_cfg_s_v01,
+ service_id),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum qmi_wlanfw_pipedir_enum_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_ce_svc_pipe_cfg_s_v01,
+ pipe_dir),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_ce_svc_pipe_cfg_s_v01,
+ pipe_num),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static const struct qmi_elem_info qmi_wlanfw_shadow_reg_cfg_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_2_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_shadow_reg_cfg_s_v01, id),
+ },
+ {
+ .data_type = QMI_UNSIGNED_2_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_shadow_reg_cfg_s_v01,
+ offset),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static const struct qmi_elem_info qmi_wlanfw_shadow_reg_v2_cfg_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_shadow_reg_v2_cfg_s_v01,
+ addr),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static const struct qmi_elem_info qmi_wlanfw_wlan_mode_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct qmi_wlanfw_wlan_mode_req_msg_v01,
+ mode),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_wlan_mode_req_msg_v01,
+ hw_debug_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_wlan_mode_req_msg_v01,
+ hw_debug),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static const struct qmi_elem_info qmi_wlanfw_wlan_mode_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct qmi_wlanfw_wlan_mode_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static const struct qmi_elem_info qmi_wlanfw_wlan_cfg_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
+ host_version_valid),
+ },
+ {
+ .data_type = QMI_STRING,
+ .elem_len = QMI_WLANFW_MAX_STR_LEN_V01 + 1,
+ .elem_size = sizeof(char),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
+ host_version),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
+ tgt_cfg_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
+ tgt_cfg_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_WLANFW_MAX_NUM_CE_V01,
+ .elem_size = sizeof(struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
+ tgt_cfg),
+ .ei_array = qmi_wlanfw_ce_tgt_pipe_cfg_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
+ svc_cfg_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
+ svc_cfg_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_WLANFW_MAX_NUM_SVC_V01,
+ .elem_size = sizeof(struct qmi_wlanfw_ce_svc_pipe_cfg_s_v01),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
+ svc_cfg),
+ .ei_array = qmi_wlanfw_ce_svc_pipe_cfg_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
+ shadow_reg_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
+ shadow_reg_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_WLANFW_MAX_NUM_SHADOW_REG_V01,
+ .elem_size = sizeof(struct qmi_wlanfw_shadow_reg_cfg_s_v01),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
+ shadow_reg),
+ .ei_array = qmi_wlanfw_shadow_reg_cfg_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
+ shadow_reg_v2_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
+ shadow_reg_v2_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_WLANFW_MAX_NUM_SHADOW_REG_V2_V01,
+ .elem_size = sizeof(struct qmi_wlanfw_shadow_reg_v2_cfg_s_v01),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
+ shadow_reg_v2),
+ .ei_array = qmi_wlanfw_shadow_reg_v2_cfg_s_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static const struct qmi_elem_info qmi_wlanfw_wlan_cfg_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct qmi_wlanfw_wlan_cfg_resp_msg_v01, resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static const struct qmi_elem_info qmi_wlanfw_mem_ready_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static const struct qmi_elem_info qmi_wlanfw_fw_ready_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static const struct qmi_elem_info qmi_wlanfw_cold_boot_cal_done_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static const struct qmi_elem_info qmi_wlanfw_wlan_ini_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_wlan_ini_req_msg_v01,
+ enablefwlog_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_wlan_ini_req_msg_v01,
+ enablefwlog),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static const struct qmi_elem_info qmi_wlanfw_wlan_ini_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct qmi_wlanfw_wlan_ini_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static const struct qmi_elem_info qmi_wlfw_fw_init_done_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
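+/* All QMI exchanges below follow the same pattern: qmi_txn_init() with
+ * the response decoding table, qmi_send_request() with the encoded
+ * request, then qmi_txn_wait() until the firmware replies or
+ * ATH11K_QMI_WLANFW_TIMEOUT_MS expires.
+ */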
+/* clang stack usage explodes if this is inlined */
+static noinline_for_stack
+int ath11k_qmi_host_cap_send(struct ath11k_base *ab)
+{
+ struct qmi_wlanfw_host_cap_req_msg_v01 req;
+ struct qmi_wlanfw_host_cap_resp_msg_v01 resp;
+ struct qmi_txn txn;
+ int ret = 0;
+
+ memset(&req, 0, sizeof(req));
+ memset(&resp, 0, sizeof(resp));
+
+ req.num_clients_valid = 1;
+ req.num_clients = 1;
+ req.mem_cfg_mode = ab->qmi.target_mem_mode;
+ req.mem_cfg_mode_valid = 1;
+ req.bdf_support_valid = 1;
+ req.bdf_support = 1;
+
+ if (ab->hw_params.m3_fw_support) {
+ req.m3_support_valid = 1;
+ req.m3_support = 1;
+ req.m3_cache_support_valid = 1;
+ req.m3_cache_support = 1;
+ } else {
+ req.m3_support_valid = 0;
+ req.m3_support = 0;
+ req.m3_cache_support_valid = 0;
+ req.m3_cache_support = 0;
+ }
+
+ req.cal_done_valid = 1;
+ req.cal_done = ab->qmi.cal_done;
+
+ if (ab->hw_params.internal_sleep_clock) {
+ req.nm_modem_valid = 1;
+
+ /* Notify firmware that this is a non-Qualcomm platform. */
+ req.nm_modem |= HOST_CSTATE_BIT;
+
+ /* Notify firmware about the sleep clock selection;
+ * nm_modem bit[1] is used for this purpose. The host driver on
+ * non-Qualcomm platforms should select the internal sleep
+ * clock.
+ */
+ req.nm_modem |= SLEEP_CLOCK_SELECT_INTERNAL_BIT;
+ }
+
+ if (ab->hw_params.global_reset)
+ req.nm_modem |= PLATFORM_CAP_PCIE_GLOBAL_RESET;
+
+ req.nm_modem |= PLATFORM_CAP_PCIE_PME_D3COLD;
+
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "host cap request\n");
+
+ ret = qmi_txn_init(&ab->qmi.handle, &txn,
+ qmi_wlanfw_host_cap_resp_msg_v01_ei, &resp);
+ if (ret < 0)
+ goto out;
+
+ ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
+ QMI_WLANFW_HOST_CAP_REQ_V01,
+ QMI_WLANFW_HOST_CAP_REQ_MSG_V01_MAX_LEN,
+ qmi_wlanfw_host_cap_req_msg_v01_ei, &req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ ath11k_warn(ab, "failed to send host capability request: %d\n", ret);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS));
+ if (ret < 0)
+ goto out;
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ ath11k_warn(ab, "host capability request failed: %d %d\n",
+ resp.resp.result, resp.resp.error);
+ ret = -EINVAL;
+ goto out;
+ }
+
+out:
+ return ret;
+}
+
+static int ath11k_qmi_fw_ind_register_send(struct ath11k_base *ab)
+{
+ struct qmi_wlanfw_ind_register_req_msg_v01 *req;
+ struct qmi_wlanfw_ind_register_resp_msg_v01 *resp;
+ struct qmi_handle *handle = &ab->qmi.handle;
+ struct qmi_txn txn;
+ int ret;
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ resp = kzalloc(sizeof(*resp), GFP_KERNEL);
+ if (!resp) {
+ ret = -ENOMEM;
+ goto resp_out;
+ }
+
+ req->client_id_valid = 1;
+ req->client_id = QMI_WLANFW_CLIENT_ID;
+ req->fw_ready_enable_valid = 1;
+ req->fw_ready_enable = 1;
+ req->cal_done_enable_valid = 1;
+ req->cal_done_enable = 1;
+ req->fw_init_done_enable_valid = 1;
+ req->fw_init_done_enable = 1;
+
+ req->pin_connect_result_enable_valid = 0;
+ req->pin_connect_result_enable = 0;
+
+ /* WCN6750 doesn't request DDR memory via QMI; instead it
+ * uses a fixed 12 MB reserved memory region in DDR.
+ */
+ if (!ab->hw_params.fixed_fw_mem) {
+ req->request_mem_enable_valid = 1;
+ req->request_mem_enable = 1;
+ req->fw_mem_ready_enable_valid = 1;
+ req->fw_mem_ready_enable = 1;
+ }
+
+ ret = qmi_txn_init(handle, &txn,
+ qmi_wlanfw_ind_register_resp_msg_v01_ei, resp);
+ if (ret < 0)
+ goto out;
+
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "indication register request\n");
+
+ ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
+ QMI_WLANFW_IND_REGISTER_REQ_V01,
+ QMI_WLANFW_IND_REGISTER_REQ_MSG_V01_MAX_LEN,
+ qmi_wlanfw_ind_register_req_msg_v01_ei, req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ ath11k_warn(ab, "failed to send indication register request: %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS));
+ if (ret < 0) {
+ ath11k_warn(ab, "failed to register fw indication: %d\n", ret);
+ goto out;
+ }
+
+ if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
+ ath11k_warn(ab, "firmware indication register request failed: %d %d\n",
+ resp->resp.result, resp->resp.error);
+ ret = -EINVAL;
+ goto out;
+ }
+
+out:
+ kfree(resp);
+resp_out:
+ kfree(req);
+ return ret;
+}
+
+static int ath11k_qmi_respond_fw_mem_request(struct ath11k_base *ab)
+{
+ struct qmi_wlanfw_respond_mem_req_msg_v01 *req;
+ struct qmi_wlanfw_respond_mem_resp_msg_v01 resp;
+ struct qmi_txn txn;
+ int ret = 0, i;
+ bool delayed;
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ memset(&resp, 0, sizeof(resp));
+
+ /* By default the QCA6390 firmware requests a single block of ~4 MB
+ * of contiguous DMA memory, which is hard for the OS to allocate.
+ * The host therefore returns failure to the firmware, which then
+ * re-requests the memory as multiple blocks of small chunk size.
+ */
+ if (!(ab->hw_params.fixed_mem_region ||
+ test_bit(ATH11K_FLAG_FIXED_MEM_RGN, &ab->dev_flags)) &&
+ ab->qmi.target_mem_delayed) {
+ delayed = true;
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "delays mem_request %d\n",
+ ab->qmi.mem_seg_count);
+ memset(req, 0, sizeof(*req));
+ } else {
+ delayed = false;
+ req->mem_seg_len = ab->qmi.mem_seg_count;
+
+ for (i = 0; i < req->mem_seg_len; i++) {
+ req->mem_seg[i].addr = ab->qmi.target_mem[i].paddr;
+ req->mem_seg[i].size = ab->qmi.target_mem[i].size;
+ req->mem_seg[i].type = ab->qmi.target_mem[i].type;
+ ath11k_dbg(ab, ATH11K_DBG_QMI,
+ "req mem_seg[%d] %pad %u %u\n", i,
+ &ab->qmi.target_mem[i].paddr,
+ ab->qmi.target_mem[i].size,
+ ab->qmi.target_mem[i].type);
+ }
+ }
+
+ ret = qmi_txn_init(&ab->qmi.handle, &txn,
+ qmi_wlanfw_respond_mem_resp_msg_v01_ei, &resp);
+ if (ret < 0)
+ goto out;
+
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "respond memory request delayed %i\n",
+ delayed);
+
+ ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
+ QMI_WLANFW_RESPOND_MEM_REQ_V01,
+ QMI_WLANFW_RESPOND_MEM_REQ_MSG_V01_MAX_LEN,
+ qmi_wlanfw_respond_mem_req_msg_v01_ei, req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ ath11k_warn(ab, "failed to respond qmi memory request: %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS));
+ if (ret < 0) {
+ ath11k_warn(ab, "failed to wait qmi memory request: %d\n", ret);
+ goto out;
+ }
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ /* the error response is expected when
+ * target_mem_delayed is true.
+ */
+ if (delayed && resp.resp.error == 0)
+ goto out;
+
+ ath11k_warn(ab, "qmi respond memory request failed: %d %d\n",
+ resp.resp.result, resp.resp.error);
+ ret = -EINVAL;
+ goto out;
+ }
+out:
+ kfree(req);
+ return ret;
+}
+
+static void ath11k_qmi_free_target_mem_chunk(struct ath11k_base *ab)
+{
+ int i;
+
+ for (i = 0; i < ab->qmi.mem_seg_count; i++) {
+ if (!ab->qmi.target_mem[i].anyaddr)
+ continue;
+
+ if (ab->hw_params.fixed_mem_region ||
+ test_bit(ATH11K_FLAG_FIXED_MEM_RGN, &ab->dev_flags)) {
+ iounmap(ab->qmi.target_mem[i].iaddr);
+ ab->qmi.target_mem[i].iaddr = NULL;
+ continue;
+ }
+
+ dma_free_coherent(ab->dev,
+ ab->qmi.target_mem[i].prev_size,
+ ab->qmi.target_mem[i].vaddr,
+ ab->qmi.target_mem[i].paddr);
+ ab->qmi.target_mem[i].vaddr = NULL;
+ }
+}
+
+static int ath11k_qmi_alloc_target_mem_chunk(struct ath11k_base *ab)
+{
+ int i;
+ struct target_mem_chunk *chunk;
+
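+ /* target_mem_delayed is set below when a large contiguous
+ * allocation fails; an empty respond_mem message then prompts
+ * the firmware to re-request the memory as smaller segments.
+ */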
+ ab->qmi.target_mem_delayed = false;
+
+ for (i = 0; i < ab->qmi.mem_seg_count; i++) {
+ chunk = &ab->qmi.target_mem[i];
+
+ /* Firmware reloads during cold boot and firmware recovery;
+ * in such cases there is no need to allocate memory for the
+ * FW again.
+ */
+ if (chunk->vaddr) {
+ if (chunk->prev_type == chunk->type &&
+ chunk->prev_size == chunk->size)
+ continue;
+
+ if (ab->qmi.mem_seg_count <= ATH11K_QMI_FW_MEM_REQ_SEGMENT_CNT) {
+ ath11k_dbg(ab, ATH11K_DBG_QMI,
+ "size/type mismatch (current %d %u) (prev %d %u), try later with small size\n",
+ chunk->size, chunk->type,
+ chunk->prev_size, chunk->prev_type);
+ ab->qmi.target_mem_delayed = true;
+ return 0;
+ }
+
+ /* cannot reuse the existing chunk */
+ dma_free_coherent(ab->dev, chunk->prev_size,
+ chunk->vaddr, chunk->paddr);
+ chunk->vaddr = NULL;
+ }
+
+ chunk->vaddr = dma_alloc_coherent(ab->dev,
+ chunk->size,
+ &chunk->paddr,
+ GFP_KERNEL | __GFP_NOWARN);
+ if (!chunk->vaddr) {
+ if (ab->qmi.mem_seg_count <= ATH11K_QMI_FW_MEM_REQ_SEGMENT_CNT) {
+ ath11k_dbg(ab, ATH11K_DBG_QMI,
+ "dma allocation failed (%d B type %u), will try later with small size\n",
+ chunk->size,
+ chunk->type);
+ ath11k_qmi_free_target_mem_chunk(ab);
+ ab->qmi.target_mem_delayed = true;
+ return 0;
+ }
+
+ ath11k_err(ab, "failed to allocate dma memory for qmi (%d B type %u)\n",
+ chunk->size,
+ chunk->type);
+ return -EINVAL;
+ }
+ chunk->prev_type = chunk->type;
+ chunk->prev_size = chunk->size;
+ }
+
+ return 0;
+}
+
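+/* On platforms with a fixed firmware memory region the segments are
+ * carved out of a reserved DDR area described in the devicetree and
+ * ioremapped, rather than DMA-allocated by the host.
+ */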
+static int ath11k_qmi_assign_target_mem_chunk(struct ath11k_base *ab)
+{
+ struct device *dev = ab->dev;
+ struct resource res = {};
+ u32 host_ddr_sz;
+ int i, idx, ret;
+
+ for (i = 0, idx = 0; i < ab->qmi.mem_seg_count; i++) {
+ switch (ab->qmi.target_mem[i].type) {
+ case HOST_DDR_REGION_TYPE:
+ ret = of_reserved_mem_region_to_resource(dev->of_node, 0, &res);
+ if (ret) {
+ ath11k_dbg(ab, ATH11K_DBG_QMI,
+ "fail to get reg from hremote\n");
+ return ret;
+ }
+
+ if (res.end - res.start + 1 < ab->qmi.target_mem[i].size) {
+ ath11k_dbg(ab, ATH11K_DBG_QMI,
+ "fail to assign memory of sz\n");
+ return -EINVAL;
+ }
+
+ ab->qmi.target_mem[idx].paddr = res.start;
+ ab->qmi.target_mem[idx].iaddr =
+ ioremap(ab->qmi.target_mem[idx].paddr,
+ ab->qmi.target_mem[i].size);
+ if (!ab->qmi.target_mem[idx].iaddr)
+ return -EIO;
+
+ ab->qmi.target_mem[idx].size = ab->qmi.target_mem[i].size;
+ host_ddr_sz = ab->qmi.target_mem[i].size;
+ ab->qmi.target_mem[idx].type = ab->qmi.target_mem[i].type;
+ idx++;
+ break;
+ case BDF_MEM_REGION_TYPE:
+ ab->qmi.target_mem[idx].paddr = ab->hw_params.bdf_addr;
+ ab->qmi.target_mem[idx].iaddr = NULL;
+ ab->qmi.target_mem[idx].size = ab->qmi.target_mem[i].size;
+ ab->qmi.target_mem[idx].type = ab->qmi.target_mem[i].type;
+ idx++;
+ break;
+ case CALDB_MEM_REGION_TYPE:
+ if (ab->qmi.target_mem[i].size > ATH11K_QMI_CALDB_SIZE) {
+ ath11k_warn(ab, "qmi mem size is low to load caldata\n");
+ return -EINVAL;
+ }
+
+ if (ath11k_core_coldboot_cal_support(ab)) {
+ if (resource_size(&res)) {
+ ab->qmi.target_mem[idx].paddr =
+ res.start + host_ddr_sz;
+ ab->qmi.target_mem[idx].iaddr =
+ ioremap(ab->qmi.target_mem[idx].paddr,
+ ab->qmi.target_mem[i].size);
+ if (!ab->qmi.target_mem[idx].iaddr)
+ return -EIO;
+ } else {
+ ab->qmi.target_mem[idx].paddr =
+ ATH11K_QMI_CALDB_ADDRESS;
+ ab->qmi.target_mem[idx].iaddr = NULL;
+ }
+ } else {
+ ab->qmi.target_mem[idx].paddr = 0;
+ ab->qmi.target_mem[idx].iaddr = NULL;
+ }
+ ab->qmi.target_mem[idx].size = ab->qmi.target_mem[i].size;
+ ab->qmi.target_mem[idx].type = ab->qmi.target_mem[i].type;
+ idx++;
+ break;
+ default:
+ ath11k_warn(ab, "qmi ignore invalid mem req type %d\n",
+ ab->qmi.target_mem[i].type);
+ break;
+ }
+ }
+ ab->qmi.mem_seg_count = idx;
+
+ return 0;
+}
+
+static int ath11k_qmi_request_device_info(struct ath11k_base *ab)
+{
+ struct qmi_wlanfw_device_info_req_msg_v01 req = {};
+ struct qmi_wlanfw_device_info_resp_msg_v01 resp = {};
+ struct qmi_txn txn;
+ void __iomem *bar_addr_va;
+ int ret;
+
+ /* device info message req is only sent for hybrid bus devices */
+ if (!ab->hw_params.hybrid_bus_type)
+ return 0;
+
+ ret = qmi_txn_init(&ab->qmi.handle, &txn,
+ qmi_wlfw_device_info_resp_msg_v01_ei, &resp);
+ if (ret < 0)
+ goto out;
+
+ ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
+ QMI_WLANFW_DEVICE_INFO_REQ_V01,
+ QMI_WLANFW_DEVICE_INFO_REQ_MSG_V01_MAX_LEN,
+ qmi_wlanfw_device_info_req_msg_v01_ei, &req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ ath11k_warn(ab, "failed to send qmi target device info request: %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS));
+ if (ret < 0) {
+ ath11k_warn(ab, "failed to wait qmi target device info request: %d\n",
+ ret);
+ goto out;
+ }
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ ath11k_warn(ab, "qmi device info request failed: %d %d\n",
+ resp.resp.result, resp.resp.error);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (!resp.bar_addr_valid || !resp.bar_size_valid) {
+ ath11k_warn(ab, "qmi device info response invalid: %d %d\n",
+ resp.resp.result, resp.resp.error);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (!resp.bar_addr ||
+ resp.bar_size != ATH11K_QMI_DEVICE_BAR_SIZE) {
+ ath11k_warn(ab, "qmi device info invalid address and size: %llu %u\n",
+ resp.bar_addr, resp.bar_size);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ bar_addr_va = devm_ioremap(ab->dev, resp.bar_addr, resp.bar_size);
+
+ if (!bar_addr_va) {
+ ath11k_warn(ab, "qmi device info ioremap failed\n");
+ ab->mem_len = 0;
+ ret = -EIO;
+ goto out;
+ }
+
+ ab->mem = bar_addr_va;
+ ab->mem_len = resp.bar_size;
+
+ if (!ab->hw_params.ce_remap)
+ ab->mem_ce = ab->mem;
+
+ return 0;
+out:
+ return ret;
+}
+
+static int ath11k_qmi_request_target_cap(struct ath11k_base *ab)
+{
+ struct qmi_wlanfw_cap_req_msg_v01 req;
+ struct qmi_wlanfw_cap_resp_msg_v01 resp;
+ struct qmi_txn txn;
+ int ret = 0;
+ int r;
+ char *fw_build_id;
+ int fw_build_id_mask_len;
+
+ memset(&req, 0, sizeof(req));
+ memset(&resp, 0, sizeof(resp));
+
+ ret = qmi_txn_init(&ab->qmi.handle, &txn, qmi_wlanfw_cap_resp_msg_v01_ei,
+ &resp);
+ if (ret < 0)
+ goto out;
+
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "target cap request\n");
+
+ ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
+ QMI_WLANFW_CAP_REQ_V01,
+ QMI_WLANFW_CAP_REQ_MSG_V01_MAX_LEN,
+ qmi_wlanfw_cap_req_msg_v01_ei, &req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ ath11k_warn(ab, "failed to send qmi cap request: %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS));
+ if (ret < 0) {
+ ath11k_warn(ab, "failed to wait qmi cap request: %d\n", ret);
+ goto out;
+ }
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ ath11k_warn(ab, "qmi cap request failed: %d %d\n",
+ resp.resp.result, resp.resp.error);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (resp.chip_info_valid) {
+ ab->qmi.target.chip_id = resp.chip_info.chip_id;
+ ab->qmi.target.chip_family = resp.chip_info.chip_family;
+ }
+
+ if (resp.board_info_valid)
+ ab->qmi.target.board_id = resp.board_info.board_id;
+ else
+ ab->qmi.target.board_id = 0xFF;
+
+ if (resp.soc_info_valid)
+ ab->qmi.target.soc_id = resp.soc_info.soc_id;
+
+ if (resp.fw_version_info_valid) {
+ ab->qmi.target.fw_version = resp.fw_version_info.fw_version;
+ strscpy(ab->qmi.target.fw_build_timestamp,
+ resp.fw_version_info.fw_build_timestamp,
+ sizeof(ab->qmi.target.fw_build_timestamp));
+ }
+
+ if (resp.fw_build_id_valid)
+ strscpy(ab->qmi.target.fw_build_id, resp.fw_build_id,
+ sizeof(ab->qmi.target.fw_build_id));
+
+ if (resp.eeprom_read_timeout_valid) {
+ ab->qmi.target.eeprom_caldata =
+ resp.eeprom_read_timeout;
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "cal data supported from eeprom\n");
+ }
+
+ fw_build_id = ab->qmi.target.fw_build_id;
+ fw_build_id_mask_len = strlen(FW_BUILD_ID_MASK);
+ if (!strncmp(fw_build_id, FW_BUILD_ID_MASK, fw_build_id_mask_len))
+ fw_build_id = fw_build_id + fw_build_id_mask_len;
+
+ ath11k_info(ab, "chip_id 0x%x chip_family 0x%x board_id 0x%x soc_id 0x%x\n",
+ ab->qmi.target.chip_id, ab->qmi.target.chip_family,
+ ab->qmi.target.board_id, ab->qmi.target.soc_id);
+
+ ath11k_info(ab, "fw_version 0x%x fw_build_timestamp %s fw_build_id %s",
+ ab->qmi.target.fw_version,
+ ab->qmi.target.fw_build_timestamp,
+ fw_build_id);
+
+ r = ath11k_core_check_smbios(ab);
+ if (r)
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "SMBIOS bdf variant name not set.\n");
+
+ r = ath11k_core_check_dt(ab);
+ if (r)
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "DT bdf variant name not set.\n");
+
+out:
+ return ret;
+}
+
+static int ath11k_qmi_load_file_target_mem(struct ath11k_base *ab,
+ const u8 *data, u32 len, u8 type)
+{
+ struct qmi_wlanfw_bdf_download_req_msg_v01 *req;
+ struct qmi_wlanfw_bdf_download_resp_msg_v01 resp;
+ struct qmi_txn txn;
+ const u8 *temp = data;
+ void __iomem *bdf_addr = NULL;
+ int ret = 0;
+ u32 remaining = len;
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ memset(&resp, 0, sizeof(resp));
+
+ if (ab->hw_params.fixed_bdf_addr) {
+ bdf_addr = ioremap(ab->hw_params.bdf_addr, ab->hw_params.fw.board_size);
+ if (!bdf_addr) {
+ ath11k_warn(ab, "qmi ioremap error for bdf_addr\n");
+ ret = -EIO;
+ goto err_free_req;
+ }
+ }
+
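+ /* Send the file in segments: each request carries at most
+ * QMI_WLANFW_MAX_DATA_SIZE_V01 bytes of payload and the final
+ * segment is flagged with end = 1.
+ */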
+ while (remaining) {
+ req->valid = 1;
+ req->file_id_valid = 1;
+ req->file_id = ab->qmi.target.board_id;
+ req->total_size_valid = 1;
+ req->total_size = remaining;
+ req->seg_id_valid = 1;
+ req->data_valid = 1;
+ req->bdf_type = type;
+ req->bdf_type_valid = 1;
+ req->end_valid = 1;
+ req->end = 0;
+
+ if (remaining > QMI_WLANFW_MAX_DATA_SIZE_V01) {
+ req->data_len = QMI_WLANFW_MAX_DATA_SIZE_V01;
+ } else {
+ req->data_len = remaining;
+ req->end = 1;
+ }
+
+ if (ab->hw_params.fixed_bdf_addr ||
+ type == ATH11K_QMI_FILE_TYPE_EEPROM) {
+ req->data_valid = 0;
+ req->end = 1;
+ req->data_len = ATH11K_QMI_MAX_BDF_FILE_NAME_SIZE;
+ } else {
+ memcpy(req->data, temp, req->data_len);
+ }
+
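+ /* Fixed-address targets take the whole file through a device
+ * memory window, so the QMI request below carries no payload
+ * (data_valid = 0) and only announces the transfer.
+ */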
+ if (ab->hw_params.fixed_bdf_addr) {
+ if (type == ATH11K_QMI_FILE_TYPE_CALDATA)
+ bdf_addr += ab->hw_params.fw.cal_offset;
+
+ memcpy_toio(bdf_addr, temp, len);
+ }
+
+ ret = qmi_txn_init(&ab->qmi.handle, &txn,
+ qmi_wlanfw_bdf_download_resp_msg_v01_ei,
+ &resp);
+ if (ret < 0)
+ goto err_iounmap;
+
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "bdf download req fixed addr type %d\n",
+ type);
+
+ ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
+ QMI_WLANFW_BDF_DOWNLOAD_REQ_V01,
+ QMI_WLANFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_LEN,
+ qmi_wlanfw_bdf_download_req_msg_v01_ei, req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ goto err_iounmap;
+ }
+
+ ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS));
+ if (ret < 0) {
+ ath11k_warn(ab, "failed to wait board file download request: %d\n",
+ ret);
+ goto err_iounmap;
+ }
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ ath11k_warn(ab, "board file download request failed: %d %d\n",
+ resp.resp.result, resp.resp.error);
+ ret = -EINVAL;
+ goto err_iounmap;
+ }
+
+ if (ab->hw_params.fixed_bdf_addr ||
+ type == ATH11K_QMI_FILE_TYPE_EEPROM) {
+ remaining = 0;
+ } else {
+ remaining -= req->data_len;
+ temp += req->data_len;
+ req->seg_id++;
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "bdf download request remaining %i\n",
+ remaining);
+ }
+ }
+
+err_iounmap:
+ if (ab->hw_params.fixed_bdf_addr)
+ iounmap(bdf_addr);
+
+err_free_req:
+ kfree(req);
+
+ return ret;
+}
+
+static int ath11k_qmi_load_bdf_qmi(struct ath11k_base *ab,
+ bool regdb)
+{
+ struct device *dev = ab->dev;
+ char filename[ATH11K_QMI_MAX_BDF_FILE_NAME_SIZE];
+ const struct firmware *fw_entry;
+ struct ath11k_board_data bd;
+ u32 fw_size, file_type;
+ int ret = 0, bdf_type;
+ const u8 *tmp;
+
+ memset(&bd, 0, sizeof(bd));
+
+ if (regdb) {
+ ret = ath11k_core_fetch_regdb(ab, &bd);
+ } else {
+ ret = ath11k_core_fetch_bdf(ab, &bd);
+ if (ret)
+ ath11k_warn(ab, "qmi failed to fetch board file: %d\n", ret);
+ }
+
+ if (ret)
+ goto out;
+
+ if (regdb)
+ bdf_type = ATH11K_QMI_BDF_TYPE_REGDB;
+ else if (bd.len >= SELFMAG && memcmp(bd.data, ELFMAG, SELFMAG) == 0)
+ bdf_type = ATH11K_QMI_BDF_TYPE_ELF;
+ else
+ bdf_type = ATH11K_QMI_BDF_TYPE_BIN;
+
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "bdf_type %d\n", bdf_type);
+
+ fw_size = min_t(u32, ab->hw_params.fw.board_size, bd.len);
+
+ ret = ath11k_qmi_load_file_target_mem(ab, bd.data, fw_size, bdf_type);
+ if (ret < 0) {
+ ath11k_warn(ab, "qmi failed to load bdf file\n");
+ goto out;
+ }
+
+ /* QCA6390/WCN6855 does not support cal data, skip it */
+ if (bdf_type == ATH11K_QMI_BDF_TYPE_ELF || bdf_type == ATH11K_QMI_BDF_TYPE_REGDB)
+ goto out;
+
+ if (ab->qmi.target.eeprom_caldata) {
+ file_type = ATH11K_QMI_FILE_TYPE_EEPROM;
+ tmp = filename;
+ fw_size = ATH11K_QMI_MAX_BDF_FILE_NAME_SIZE;
+ } else {
+ file_type = ATH11K_QMI_FILE_TYPE_CALDATA;
+
+ /* cal-<bus>-<id>.bin */
+ snprintf(filename, sizeof(filename), "cal-%s-%s.bin",
+ ath11k_bus_str(ab->hif.bus), dev_name(dev));
+ fw_entry = ath11k_core_firmware_request(ab, filename);
+ if (!IS_ERR(fw_entry))
+ goto success;
+
+ fw_entry = ath11k_core_firmware_request(ab, ATH11K_DEFAULT_CAL_FILE);
+ if (IS_ERR(fw_entry)) {
+ /* Cal data may not be present during first-time calibration
+ * in the factory, so allow booting without it in FTM mode.
+ */
+ if (ath11k_ftm_mode) {
+ ath11k_info(ab,
+ "Booting without cal data file in factory test mode\n");
+ return 0;
+ }
+ ret = PTR_ERR(fw_entry);
+ ath11k_warn(ab,
+ "qmi failed to load CAL data file:%s\n",
+ filename);
+ goto out;
+ }
+success:
+ fw_size = min_t(u32, ab->hw_params.fw.board_size, fw_entry->size);
+ tmp = fw_entry->data;
+ }
+
+ ret = ath11k_qmi_load_file_target_mem(ab, tmp, fw_size, file_type);
+ if (ret < 0) {
+ ath11k_warn(ab, "qmi failed to load caldata\n");
+ goto out_qmi_cal;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "caldata type: %u\n", file_type);
+
+out_qmi_cal:
+ if (!ab->qmi.target.eeprom_caldata)
+ release_firmware(fw_entry);
+out:
+ ath11k_core_free_bdf(ab, &bd);
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "BDF download sequence completed\n");
+
+ return ret;
+}
+
+static int ath11k_qmi_m3_load(struct ath11k_base *ab)
+{
+ struct m3_mem_region *m3_mem = &ab->qmi.m3_mem;
+ const struct firmware *fw = NULL;
+ const void *m3_data;
+ char path[100];
+ size_t m3_len;
+ int ret;
+
+ if (m3_mem->vaddr)
+ /* m3 firmware buffer is already available in the DMA buffer */
+ return 0;
+
+ if (ab->fw.m3_data && ab->fw.m3_len > 0) {
+ /* firmware-N.bin had a m3 firmware file so use that */
+ m3_data = ab->fw.m3_data;
+ m3_len = ab->fw.m3_len;
+ } else {
+ /* No m3 file in firmware-N.bin so try to request old
+ * separate m3.bin.
+ */
+ fw = ath11k_core_firmware_request(ab, ATH11K_M3_FILE);
+ if (IS_ERR(fw)) {
+ ret = PTR_ERR(fw);
+ ath11k_core_create_firmware_path(ab, ATH11K_M3_FILE,
+ path, sizeof(path));
+ ath11k_err(ab, "failed to load %s: %d\n", path, ret);
+ return ret;
+ }
+
+ m3_data = fw->data;
+ m3_len = fw->size;
+ }
+
+ m3_mem->vaddr = dma_alloc_coherent(ab->dev,
+ m3_len, &m3_mem->paddr,
+ GFP_KERNEL);
+ if (!m3_mem->vaddr) {
+ ath11k_err(ab, "failed to allocate memory for M3 with size %zu\n",
+ m3_len);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ memcpy(m3_mem->vaddr, m3_data, m3_len);
+ m3_mem->size = m3_len;
+
+ ret = 0;
+
+out:
+ release_firmware(fw);
+
+ return ret;
+}
+
+static void ath11k_qmi_m3_free(struct ath11k_base *ab)
+{
+ struct m3_mem_region *m3_mem = &ab->qmi.m3_mem;
+
+ if (!ab->hw_params.m3_fw_support || !m3_mem->vaddr)
+ return;
+
+ dma_free_coherent(ab->dev, m3_mem->size,
+ m3_mem->vaddr, m3_mem->paddr);
+ m3_mem->vaddr = NULL;
+ m3_mem->size = 0;
+}
+
+/* clang stack usage explodes if this is inlined */
+static noinline_for_stack
+int ath11k_qmi_wlanfw_m3_info_send(struct ath11k_base *ab)
+{
+ struct m3_mem_region *m3_mem = &ab->qmi.m3_mem;
+ struct qmi_wlanfw_m3_info_req_msg_v01 req;
+ struct qmi_wlanfw_m3_info_resp_msg_v01 resp;
+ struct qmi_txn txn;
+ int ret = 0;
+
+ memset(&req, 0, sizeof(req));
+ memset(&resp, 0, sizeof(resp));
+
+ if (ab->hw_params.m3_fw_support) {
+ ret = ath11k_qmi_m3_load(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to load m3 firmware: %d", ret);
+ return ret;
+ }
+
+ req.addr = m3_mem->paddr;
+ req.size = m3_mem->size;
+ } else {
+ req.addr = 0;
+ req.size = 0;
+ }
+
+ ret = qmi_txn_init(&ab->qmi.handle, &txn,
+ qmi_wlanfw_m3_info_resp_msg_v01_ei, &resp);
+ if (ret < 0)
+ goto out;
+
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "m3 info req\n");
+
+ ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
+ QMI_WLANFW_M3_INFO_REQ_V01,
+ QMI_WLANFW_M3_INFO_REQ_MSG_V01_MAX_MSG_LEN,
+ qmi_wlanfw_m3_info_req_msg_v01_ei, &req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ ath11k_warn(ab, "failed to send m3 information request: %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS));
+ if (ret < 0) {
+ ath11k_warn(ab, "failed to wait m3 information request: %d\n", ret);
+ goto out;
+ }
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ ath11k_warn(ab, "m3 info request failed: %d %d\n",
+ resp.resp.result, resp.resp.error);
+ ret = -EINVAL;
+ goto out;
+ }
+out:
+ return ret;
+}
+
+static int ath11k_qmi_wlanfw_mode_send(struct ath11k_base *ab,
+ u32 mode)
+{
+ struct qmi_wlanfw_wlan_mode_req_msg_v01 req;
+ struct qmi_wlanfw_wlan_mode_resp_msg_v01 resp;
+ struct qmi_txn txn;
+ int ret = 0;
+
+ memset(&req, 0, sizeof(req));
+ memset(&resp, 0, sizeof(resp));
+
+ req.mode = mode;
+ req.hw_debug_valid = 1;
+ req.hw_debug = 0;
+
+ ret = qmi_txn_init(&ab->qmi.handle, &txn,
+ qmi_wlanfw_wlan_mode_resp_msg_v01_ei, &resp);
+ if (ret < 0)
+ goto out;
+
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "wlan mode req mode %d\n", mode);
+
+ ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
+ QMI_WLANFW_WLAN_MODE_REQ_V01,
+ QMI_WLANFW_WLAN_MODE_REQ_MSG_V01_MAX_LEN,
+ qmi_wlanfw_wlan_mode_req_msg_v01_ei, &req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ ath11k_warn(ab, "failed to send wlan mode request (mode %d): %d\n",
+ mode, ret);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS));
+ if (ret < 0) {
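+ /* A QMI link reset while turning the firmware off just means
+ * the WLFW service already went away, so treat it as success.
+ */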
+ if (mode == ATH11K_FIRMWARE_MODE_OFF && ret == -ENETRESET) {
+ ath11k_warn(ab, "WLFW service is dis-connected\n");
+ return 0;
+ }
+ ath11k_warn(ab, "failed to wait wlan mode request (mode %d): %d\n",
+ mode, ret);
+ goto out;
+ }
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ ath11k_warn(ab, "wlan mode request failed (mode: %d): %d %d\n",
+ mode, resp.resp.result, resp.resp.error);
+ ret = -EINVAL;
+ goto out;
+ }
+
+out:
+ return ret;
+}
+
+static int ath11k_qmi_wlanfw_wlan_cfg_send(struct ath11k_base *ab)
+{
+ struct qmi_wlanfw_wlan_cfg_req_msg_v01 *req;
+ struct qmi_wlanfw_wlan_cfg_resp_msg_v01 resp;
+ struct ce_pipe_config *ce_cfg;
+ struct service_to_pipe *svc_cfg;
+ struct qmi_txn txn;
+ int ret = 0, pipe_num;
+
+ ce_cfg = (struct ce_pipe_config *)ab->qmi.ce_cfg.tgt_ce;
+ svc_cfg = (struct service_to_pipe *)ab->qmi.ce_cfg.svc_to_ce_map;
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ memset(&resp, 0, sizeof(resp));
+
+ req->host_version_valid = 1;
+ strscpy(req->host_version, ATH11K_HOST_VERSION_STRING,
+ sizeof(req->host_version));
+
+ req->tgt_cfg_valid = 1;
+ /* This is the number of CE configs */
+ req->tgt_cfg_len = ab->qmi.ce_cfg.tgt_ce_len;
+ for (pipe_num = 0; pipe_num < req->tgt_cfg_len; pipe_num++) {
+ req->tgt_cfg[pipe_num].pipe_num = ce_cfg[pipe_num].pipenum;
+ req->tgt_cfg[pipe_num].pipe_dir = ce_cfg[pipe_num].pipedir;
+ req->tgt_cfg[pipe_num].nentries = ce_cfg[pipe_num].nentries;
+ req->tgt_cfg[pipe_num].nbytes_max = ce_cfg[pipe_num].nbytes_max;
+ req->tgt_cfg[pipe_num].flags = ce_cfg[pipe_num].flags;
+ }
+
+ req->svc_cfg_valid = 1;
+ /* This is the number of Service/CE configs */
+ req->svc_cfg_len = ab->qmi.ce_cfg.svc_to_ce_map_len;
+ for (pipe_num = 0; pipe_num < req->svc_cfg_len; pipe_num++) {
+ req->svc_cfg[pipe_num].service_id = svc_cfg[pipe_num].service_id;
+ req->svc_cfg[pipe_num].pipe_dir = svc_cfg[pipe_num].pipedir;
+ req->svc_cfg[pipe_num].pipe_num = svc_cfg[pipe_num].pipenum;
+ }
+ req->shadow_reg_valid = 0;
+
+ /* set shadow v2 configuration */
+ if (ab->hw_params.supports_shadow_regs) {
+ req->shadow_reg_v2_valid = 1;
+ req->shadow_reg_v2_len = min_t(u32,
+ ab->qmi.ce_cfg.shadow_reg_v2_len,
+ QMI_WLANFW_MAX_NUM_SHADOW_REG_V2_V01);
+ memcpy(&req->shadow_reg_v2, ab->qmi.ce_cfg.shadow_reg_v2,
+ sizeof(u32) * req->shadow_reg_v2_len);
+ } else {
+ req->shadow_reg_v2_valid = 0;
+ }
+
+ ret = qmi_txn_init(&ab->qmi.handle, &txn,
+ qmi_wlanfw_wlan_cfg_resp_msg_v01_ei, &resp);
+ if (ret < 0)
+ goto out;
+
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "wlan cfg req\n");
+
+ ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
+ QMI_WLANFW_WLAN_CFG_REQ_V01,
+ QMI_WLANFW_WLAN_CFG_REQ_MSG_V01_MAX_LEN,
+ qmi_wlanfw_wlan_cfg_req_msg_v01_ei, req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ ath11k_warn(ab, "failed to send wlan config request: %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS));
+ if (ret < 0) {
+ ath11k_warn(ab, "failed to wait wlan config request: %d\n", ret);
+ goto out;
+ }
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ ath11k_warn(ab, "wlan config request failed: %d %d\n",
+ resp.resp.result, resp.resp.error);
+ ret = -EINVAL;
+ goto out;
+ }
+
+out:
+ kfree(req);
+ return ret;
+}
+
+static int ath11k_qmi_wlanfw_wlan_ini_send(struct ath11k_base *ab, bool enable)
+{
+ int ret;
+ struct qmi_txn txn;
+ struct qmi_wlanfw_wlan_ini_req_msg_v01 req = {};
+ struct qmi_wlanfw_wlan_ini_resp_msg_v01 resp = {};
+
+ req.enablefwlog_valid = true;
+ req.enablefwlog = enable ? 1 : 0;
+
+ ret = qmi_txn_init(&ab->qmi.handle, &txn,
+ qmi_wlanfw_wlan_ini_resp_msg_v01_ei, &resp);
+ if (ret < 0)
+ goto out;
+
+ ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
+ QMI_WLANFW_WLAN_INI_REQ_V01,
+ QMI_WLANFW_WLAN_INI_REQ_MSG_V01_MAX_LEN,
+ qmi_wlanfw_wlan_ini_req_msg_v01_ei, &req);
+ if (ret < 0) {
+ ath11k_warn(ab, "qmi failed to send wlan ini request, err = %d\n",
+ ret);
+ qmi_txn_cancel(&txn);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS));
+ if (ret < 0) {
+ ath11k_warn(ab, "qmi failed wlan ini request, err = %d\n", ret);
+ goto out;
+ }
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ ath11k_warn(ab, "qmi wlan ini request failed, result: %d, err: %d\n",
+ resp.resp.result, resp.resp.error);
+ ret = -EINVAL;
+ }
+
+out:
+ return ret;
+}
+
+void ath11k_qmi_firmware_stop(struct ath11k_base *ab)
+{
+ int ret;
+
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "firmware stop\n");
+
+ ret = ath11k_qmi_wlanfw_mode_send(ab, ATH11K_FIRMWARE_MODE_OFF);
+ if (ret < 0) {
+ ath11k_warn(ab, "qmi failed to send wlan mode off: %d\n", ret);
+ return;
+ }
+}
+
+int ath11k_qmi_firmware_start(struct ath11k_base *ab,
+ u32 mode)
+{
+ int ret;
+
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "firmware start\n");
+
+ if (ab->hw_params.fw_wmi_diag_event) {
+ ret = ath11k_qmi_wlanfw_wlan_ini_send(ab, true);
+ if (ret < 0) {
+ ath11k_warn(ab, "qmi failed to send wlan fw ini:%d\n", ret);
+ return ret;
+ }
+ }
+
+ ret = ath11k_qmi_wlanfw_wlan_cfg_send(ab);
+ if (ret < 0) {
+ ath11k_warn(ab, "qmi failed to send wlan cfg: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath11k_qmi_wlanfw_mode_send(ab, mode);
+ if (ret < 0) {
+ ath11k_warn(ab, "qmi failed to send wlan fw mode: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+int ath11k_qmi_fwreset_from_cold_boot(struct ath11k_base *ab)
+{
+ long time_left;
+
+ if (!ath11k_core_coldboot_cal_support(ab) ||
+ ab->hw_params.cbcal_restart_fw == 0)
+ return 0;
+
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "wait for cold boot done\n");
+
+ time_left = wait_event_timeout(ab->qmi.cold_boot_waitq,
+ (ab->qmi.cal_done == 1),
+ ATH11K_COLD_BOOT_FW_RESET_DELAY);
+
+ if (time_left <= 0) {
+ ath11k_warn(ab, "Coldboot Calibration timed out\n");
+ return -ETIMEDOUT;
+ }
+
+ /* reset the firmware */
+ ath11k_hif_power_down(ab, false);
+ ath11k_hif_power_up(ab);
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "exit wait for cold boot done\n");
+ return 0;
+}
+EXPORT_SYMBOL(ath11k_qmi_fwreset_from_cold_boot);
+
+static int ath11k_qmi_process_coldboot_calibration(struct ath11k_base *ab)
+{
+ long time_left;
+ int ret;
+
+ ret = ath11k_qmi_wlanfw_mode_send(ab, ATH11K_FIRMWARE_MODE_COLD_BOOT);
+ if (ret < 0) {
+ ath11k_warn(ab, "qmi failed to send wlan fw mode: %d\n", ret);
+ return ret;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "Coldboot calibration wait started\n");
+
+ time_left = wait_event_timeout(ab->qmi.cold_boot_waitq,
+ (ab->qmi.cal_done == 1),
+ ATH11K_COLD_BOOT_FW_RESET_DELAY);
+ if (time_left <= 0) {
+ ath11k_warn(ab, "coldboot calibration timed out\n");
+ return 0;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "Coldboot calibration done\n");
+
+ return 0;
+}
+
+static int
+ath11k_qmi_driver_event_post(struct ath11k_qmi *qmi,
+ enum ath11k_qmi_event_type type,
+ void *data)
+{
+ struct ath11k_qmi_driver_event *event;
+
+ event = kzalloc(sizeof(*event), GFP_ATOMIC);
+ if (!event)
+ return -ENOMEM;
+
+ event->type = type;
+ event->data = data;
+
+ spin_lock(&qmi->event_lock);
+ list_add_tail(&event->list, &qmi->event_list);
+ spin_unlock(&qmi->event_lock);
+
+ queue_work(qmi->event_wq, &qmi->event_work);
+
+ return 0;
+}
+
+static int ath11k_qmi_event_mem_request(struct ath11k_qmi *qmi)
+{
+ struct ath11k_base *ab = qmi->ab;
+ int ret;
+
+ ret = ath11k_qmi_respond_fw_mem_request(ab);
+ if (ret < 0) {
+ ath11k_warn(ab, "qmi failed to respond fw mem req: %d\n", ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+static int ath11k_qmi_event_load_bdf(struct ath11k_qmi *qmi)
+{
+ struct ath11k_base *ab = qmi->ab;
+ int ret;
+
+ ret = ath11k_qmi_request_target_cap(ab);
+ if (ret < 0) {
+ ath11k_warn(ab, "failed to request qmi target capabilities: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = ath11k_qmi_request_device_info(ab);
+ if (ret < 0) {
+ ath11k_warn(ab, "failed to request qmi device info: %d\n", ret);
+ return ret;
+ }
+
+ if (ab->hw_params.supports_regdb)
+ ath11k_qmi_load_bdf_qmi(ab, true);
+
+ ret = ath11k_qmi_load_bdf_qmi(ab, false);
+ if (ret < 0) {
+ ath11k_warn(ab, "failed to load board data file: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath11k_qmi_event_server_arrive(struct ath11k_qmi *qmi)
+{
+ struct ath11k_base *ab = qmi->ab;
+ int ret;
+
+ ret = ath11k_qmi_fw_ind_register_send(ab);
+ if (ret < 0) {
+ ath11k_warn(ab, "failed to send qmi firmware indication: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = ath11k_qmi_host_cap_send(ab);
+ if (ret < 0) {
+ ath11k_warn(ab, "failed to send qmi host cap: %d\n", ret);
+ return ret;
+ }
+
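+ /* For targets without fixed firmware memory, BDF download is
+ * deferred until the firmware requests memory and
+ * ATH11K_QMI_EVENT_FW_MEM_READY arrives.
+ */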
+ if (!ab->hw_params.fixed_fw_mem)
+ return ret;
+
+ ret = ath11k_qmi_event_load_bdf(qmi);
+ if (ret < 0) {
+ ath11k_warn(ab, "qmi failed to download BDF:%d\n", ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+static void ath11k_qmi_msg_mem_request_cb(struct qmi_handle *qmi_hdl,
+ struct sockaddr_qrtr *sq,
+ struct qmi_txn *txn,
+ const void *data)
+{
+ struct ath11k_qmi *qmi = container_of(qmi_hdl, struct ath11k_qmi, handle);
+ struct ath11k_base *ab = qmi->ab;
+ const struct qmi_wlanfw_request_mem_ind_msg_v01 *msg = data;
+ int i, ret;
+
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "firmware request memory request\n");
+
+ if (msg->mem_seg_len == 0 ||
+ msg->mem_seg_len > ATH11K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01)
+ ath11k_warn(ab, "invalid memory segment length: %u\n",
+ msg->mem_seg_len);
+
+ ab->qmi.mem_seg_count = msg->mem_seg_len;
+
+ for (i = 0; i < qmi->mem_seg_count; i++) {
+ ab->qmi.target_mem[i].type = msg->mem_seg[i].type;
+ ab->qmi.target_mem[i].size = msg->mem_seg[i].size;
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "mem seg type %d size %d\n",
+ msg->mem_seg[i].type, msg->mem_seg[i].size);
+ }
+
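+ /* Fixed-memory-region targets use preassigned addresses (see
+ * ath11k_qmi_assign_target_mem_chunk()); all others get fresh
+ * DMA allocations.
+ */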
+ if (ab->hw_params.fixed_mem_region ||
+ test_bit(ATH11K_FLAG_FIXED_MEM_RGN, &ab->dev_flags)) {
+ ret = ath11k_qmi_assign_target_mem_chunk(ab);
+ if (ret) {
+ ath11k_warn(ab, "failed to assign qmi target memory: %d\n",
+ ret);
+ return;
+ }
+ } else {
+ ret = ath11k_qmi_alloc_target_mem_chunk(ab);
+ if (ret) {
+ ath11k_warn(ab, "failed to allocate qmi target memory: %d\n",
+ ret);
+ return;
+ }
+ }
+
+ ath11k_qmi_driver_event_post(qmi, ATH11K_QMI_EVENT_REQUEST_MEM, NULL);
+}
+
+static void ath11k_qmi_msg_mem_ready_cb(struct qmi_handle *qmi_hdl,
+ struct sockaddr_qrtr *sq,
+ struct qmi_txn *txn,
+ const void *decoded)
+{
+ struct ath11k_qmi *qmi = container_of(qmi_hdl, struct ath11k_qmi, handle);
+ struct ath11k_base *ab = qmi->ab;
+
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "firmware memory ready indication\n");
+ ath11k_qmi_driver_event_post(qmi, ATH11K_QMI_EVENT_FW_MEM_READY, NULL);
+}
+
+static void ath11k_qmi_msg_fw_ready_cb(struct qmi_handle *qmi_hdl,
+ struct sockaddr_qrtr *sq,
+ struct qmi_txn *txn,
+ const void *decoded)
+{
+ struct ath11k_qmi *qmi = container_of(qmi_hdl, struct ath11k_qmi, handle);
+ struct ath11k_base *ab = qmi->ab;
+
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "firmware ready\n");
+
+ if (!ab->qmi.cal_done) {
+ ab->qmi.cal_done = 1;
+ wake_up(&ab->qmi.cold_boot_waitq);
+ }
+
+ ath11k_qmi_driver_event_post(qmi, ATH11K_QMI_EVENT_FW_READY, NULL);
+}
+
+static void ath11k_qmi_msg_cold_boot_cal_done_cb(struct qmi_handle *qmi_hdl,
+ struct sockaddr_qrtr *sq,
+ struct qmi_txn *txn,
+ const void *decoded)
+{
+ struct ath11k_qmi *qmi = container_of(qmi_hdl,
+ struct ath11k_qmi, handle);
+ struct ath11k_base *ab = qmi->ab;
+
+ ab->qmi.cal_done = 1;
+ wake_up(&ab->qmi.cold_boot_waitq);
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "cold boot calibration done\n");
+}
+
+static void ath11k_qmi_msg_fw_init_done_cb(struct qmi_handle *qmi_hdl,
+ struct sockaddr_qrtr *sq,
+ struct qmi_txn *txn,
+ const void *decoded)
+{
+ struct ath11k_qmi *qmi = container_of(qmi_hdl,
+ struct ath11k_qmi, handle);
+ struct ath11k_base *ab = qmi->ab;
+
+ ath11k_qmi_driver_event_post(qmi, ATH11K_QMI_EVENT_FW_INIT_DONE, NULL);
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "firmware init done\n");
+}
+
+static const struct qmi_msg_handler ath11k_qmi_msg_handlers[] = {
+ {
+ .type = QMI_INDICATION,
+ .msg_id = QMI_WLFW_REQUEST_MEM_IND_V01,
+ .ei = qmi_wlanfw_request_mem_ind_msg_v01_ei,
+ .decoded_size = sizeof(struct qmi_wlanfw_request_mem_ind_msg_v01),
+ .fn = ath11k_qmi_msg_mem_request_cb,
+ },
+ {
+ .type = QMI_INDICATION,
+ .msg_id = QMI_WLFW_FW_MEM_READY_IND_V01,
+ .ei = qmi_wlanfw_mem_ready_ind_msg_v01_ei,
+ .decoded_size = sizeof(struct qmi_wlanfw_fw_mem_ready_ind_msg_v01),
+ .fn = ath11k_qmi_msg_mem_ready_cb,
+ },
+ {
+ .type = QMI_INDICATION,
+ .msg_id = QMI_WLFW_FW_READY_IND_V01,
+ .ei = qmi_wlanfw_fw_ready_ind_msg_v01_ei,
+ .decoded_size = sizeof(struct qmi_wlanfw_fw_ready_ind_msg_v01),
+ .fn = ath11k_qmi_msg_fw_ready_cb,
+ },
+ {
+ .type = QMI_INDICATION,
+ .msg_id = QMI_WLFW_COLD_BOOT_CAL_DONE_IND_V01,
+ .ei = qmi_wlanfw_cold_boot_cal_done_ind_msg_v01_ei,
+ .decoded_size =
+ sizeof(struct qmi_wlanfw_fw_cold_cal_done_ind_msg_v01),
+ .fn = ath11k_qmi_msg_cold_boot_cal_done_cb,
+ },
+ {
+ .type = QMI_INDICATION,
+ .msg_id = QMI_WLFW_FW_INIT_DONE_IND_V01,
+ .ei = qmi_wlfw_fw_init_done_ind_msg_v01_ei,
+ .decoded_size =
+ sizeof(struct qmi_wlfw_fw_init_done_ind_msg_v01),
+ .fn = ath11k_qmi_msg_fw_init_done_cb,
+ },
+
+ /* end of list */
+ {},
+};
+
+static int ath11k_qmi_ops_new_server(struct qmi_handle *qmi_hdl,
+ struct qmi_service *service)
+{
+ struct ath11k_qmi *qmi = container_of(qmi_hdl, struct ath11k_qmi, handle);
+ struct ath11k_base *ab = qmi->ab;
+ struct sockaddr_qrtr *sq = &qmi->sq;
+ int ret;
+
+ sq->sq_family = AF_QIPCRTR;
+ sq->sq_node = service->node;
+ sq->sq_port = service->port;
+
+ ret = kernel_connect(qmi_hdl->sock, (struct sockaddr_unsized *)sq,
+ sizeof(*sq), 0);
+ if (ret) {
+ ath11k_warn(ab, "failed to connect to qmi remote service: %d\n", ret);
+ return ret;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "wifi fw qmi service connected\n");
+ ath11k_qmi_driver_event_post(qmi, ATH11K_QMI_EVENT_SERVER_ARRIVE, NULL);
+
+ return ret;
+}
+
+static void ath11k_qmi_ops_del_server(struct qmi_handle *qmi_hdl,
+ struct qmi_service *service)
+{
+ struct ath11k_qmi *qmi = container_of(qmi_hdl, struct ath11k_qmi, handle);
+ struct ath11k_base *ab = qmi->ab;
+
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "wifi fw del server\n");
+ ath11k_qmi_driver_event_post(qmi, ATH11K_QMI_EVENT_SERVER_EXIT, NULL);
+}
+
+static const struct qmi_ops ath11k_qmi_ops = {
+ .new_server = ath11k_qmi_ops_new_server,
+ .del_server = ath11k_qmi_ops_del_server,
+};
+
+static void ath11k_qmi_driver_event_work(struct work_struct *work)
+{
+ struct ath11k_qmi *qmi = container_of(work, struct ath11k_qmi,
+ event_work);
+ struct ath11k_qmi_driver_event *event;
+ struct ath11k_base *ab = qmi->ab;
+ int ret;
+
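+ /* Drain the event list. The lock is dropped while each event is
+ * handled so the QMI callbacks can keep queueing new events.
+ */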
+ spin_lock(&qmi->event_lock);
+ while (!list_empty(&qmi->event_list)) {
+ event = list_first_entry(&qmi->event_list,
+ struct ath11k_qmi_driver_event, list);
+ list_del(&event->list);
+ spin_unlock(&qmi->event_lock);
+
+ if (test_bit(ATH11K_FLAG_UNREGISTERING, &ab->dev_flags)) {
+ kfree(event);
+ return;
+ }
+
+ switch (event->type) {
+ case ATH11K_QMI_EVENT_SERVER_ARRIVE:
+ ret = ath11k_qmi_event_server_arrive(qmi);
+ if (ret < 0)
+ set_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags);
+ break;
+ case ATH11K_QMI_EVENT_SERVER_EXIT:
+ set_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags);
+ set_bit(ATH11K_FLAG_RECOVERY, &ab->dev_flags);
+
+ if (!ab->is_reset)
+ ath11k_core_pre_reconfigure_recovery(ab);
+ break;
+ case ATH11K_QMI_EVENT_REQUEST_MEM:
+ ret = ath11k_qmi_event_mem_request(qmi);
+ if (ret < 0)
+ set_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags);
+ break;
+ case ATH11K_QMI_EVENT_FW_MEM_READY:
+ ret = ath11k_qmi_event_load_bdf(qmi);
+ if (ret < 0) {
+ set_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags);
+ break;
+ }
+
+ ret = ath11k_qmi_wlanfw_m3_info_send(ab);
+ if (ret < 0) {
+ ath11k_warn(ab,
+ "failed to send qmi m3 info req: %d\n", ret);
+ set_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags);
+ }
+
+ break;
+ case ATH11K_QMI_EVENT_FW_INIT_DONE:
+ clear_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags);
+ if (test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags)) {
+ if (ab->is_reset)
+ ath11k_hal_dump_srng_stats(ab);
+ queue_work(ab->workqueue, &ab->restart_work);
+ break;
+ }
+
+ if (ab->qmi.cal_done == 0 &&
+ ath11k_core_coldboot_cal_support(ab)) {
+ ath11k_qmi_process_coldboot_calibration(ab);
+ } else {
+ clear_bit(ATH11K_FLAG_CRASH_FLUSH,
+ &ab->dev_flags);
+ clear_bit(ATH11K_FLAG_RECOVERY, &ab->dev_flags);
+ ret = ath11k_core_qmi_firmware_ready(ab);
+ if (ret) {
+ set_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags);
+ break;
+ }
+ set_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags);
+ }
+
+ break;
+ case ATH11K_QMI_EVENT_FW_READY:
+ /* For targets requiring a FW restart upon cold
+ * boot completion, there is no need to process
+ * FW ready; such targets will receive FW init
+ * done message after FW restart.
+ */
+ if (ab->hw_params.cbcal_restart_fw)
+ break;
+
+ clear_bit(ATH11K_FLAG_CRASH_FLUSH,
+ &ab->dev_flags);
+ clear_bit(ATH11K_FLAG_RECOVERY, &ab->dev_flags);
+ ath11k_core_qmi_firmware_ready(ab);
+ set_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags);
+
+ break;
+ case ATH11K_QMI_EVENT_COLD_BOOT_CAL_DONE:
+ break;
+ default:
+ ath11k_warn(ab, "invalid qmi event type: %d", event->type);
+ break;
+ }
+ kfree(event);
+ spin_lock(&qmi->event_lock);
+ }
+ spin_unlock(&qmi->event_lock);
+}
+
+int ath11k_qmi_init_service(struct ath11k_base *ab)
+{
+ int ret;
+
+ memset(&ab->qmi.target, 0, sizeof(struct target_info));
+ memset(&ab->qmi.target_mem, 0, sizeof(ab->qmi.target_mem));
+ ab->qmi.ab = ab;
+
+ ab->qmi.target_mem_mode = ab->hw_params.fw_mem_mode;
+ ret = qmi_handle_init(&ab->qmi.handle, ATH11K_QMI_RESP_LEN_MAX,
+ &ath11k_qmi_ops, ath11k_qmi_msg_handlers);
+ if (ret < 0) {
+ ath11k_warn(ab, "failed to initialize qmi handle: %d\n", ret);
+ return ret;
+ }
+
+ ab->qmi.event_wq = alloc_ordered_workqueue("ath11k_qmi_driver_event", 0);
+ if (!ab->qmi.event_wq) {
+ ath11k_err(ab, "failed to allocate workqueue\n");
+ return -EFAULT;
+ }
+
+ INIT_LIST_HEAD(&ab->qmi.event_list);
+ spin_lock_init(&ab->qmi.event_lock);
+ INIT_WORK(&ab->qmi.event_work, ath11k_qmi_driver_event_work);
+
+ ret = qmi_add_lookup(&ab->qmi.handle, ATH11K_QMI_WLFW_SERVICE_ID_V01,
+ ATH11K_QMI_WLFW_SERVICE_VERS_V01,
+ ab->qmi.service_ins_id);
+ if (ret < 0) {
+ ath11k_warn(ab, "failed to add qmi lookup: %d\n", ret);
+ destroy_workqueue(ab->qmi.event_wq);
+ return ret;
+ }
+
+ return ret;
+}
+
+void ath11k_qmi_deinit_service(struct ath11k_base *ab)
+{
+ qmi_handle_release(&ab->qmi.handle);
+ cancel_work_sync(&ab->qmi.event_work);
+ destroy_workqueue(ab->qmi.event_wq);
+ ath11k_qmi_m3_free(ab);
+ ath11k_qmi_free_target_mem_chunk(ab);
+}
+EXPORT_SYMBOL(ath11k_qmi_deinit_service);
+
+void ath11k_qmi_free_resource(struct ath11k_base *ab)
+{
+ ath11k_qmi_free_target_mem_chunk(ab);
+ ath11k_qmi_m3_free(ab);
+}
diff --git a/drivers/net/wireless/ath/ath11k/qmi.h b/drivers/net/wireless/ath/ath11k/qmi.h
new file mode 100644
index 000000000000..7968ab122b65
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/qmi.h
@@ -0,0 +1,526 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef ATH11K_QMI_H
+#define ATH11K_QMI_H
+
+#include <linux/mutex.h>
+#include <linux/soc/qcom/qmi.h>
+
+#define ATH11K_HOST_VERSION_STRING "WIN"
+#define ATH11K_QMI_WLANFW_TIMEOUT_MS 10000
+#define ATH11K_QMI_MAX_BDF_FILE_NAME_SIZE 64
+#define ATH11K_QMI_CALDB_ADDRESS 0x4BA00000
+#define ATH11K_QMI_WLANFW_MAX_BUILD_ID_LEN_V01 128
+#define ATH11K_QMI_WLFW_SERVICE_ID_V01 0x45
+#define ATH11K_QMI_WLFW_SERVICE_VERS_V01 0x01
+#define ATH11K_QMI_WLFW_SERVICE_INS_ID_V01 0x02
+#define ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_QCA6390 0x01
+#define ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_IPQ8074 0x02
+#define ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_QCN9074 0x07
+#define ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_WCN6750 0x03
+#define ATH11K_QMI_WLANFW_MAX_TIMESTAMP_LEN_V01 32
+#define ATH11K_QMI_RESP_LEN_MAX 8192
+#define ATH11K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01 52
+#define ATH11K_QMI_CALDB_SIZE 0x480000
+#define ATH11K_QMI_BDF_EXT_STR_LENGTH 0x20
+#define ATH11K_QMI_FW_MEM_REQ_SEGMENT_CNT 5
+
+#define QMI_WLFW_REQUEST_MEM_IND_V01 0x0035
+#define QMI_WLFW_FW_MEM_READY_IND_V01 0x0037
+#define QMI_WLFW_COLD_BOOT_CAL_DONE_IND_V01 0x003E
+#define QMI_WLFW_FW_READY_IND_V01 0x0021
+#define QMI_WLFW_FW_INIT_DONE_IND_V01 0x0038
+
+#define QMI_WLANFW_MAX_DATA_SIZE_V01 6144
+#define ATH11K_FIRMWARE_MODE_OFF 4
+#define ATH11K_COLD_BOOT_FW_RESET_DELAY (60 * HZ)
+
+#define ATH11K_QMI_DEVICE_BAR_SIZE 0x200000
+
+struct ath11k_base;
+
+enum ath11k_qmi_file_type {
+ ATH11K_QMI_FILE_TYPE_BDF_GOLDEN,
+ ATH11K_QMI_FILE_TYPE_CALDATA = 2,
+ ATH11K_QMI_FILE_TYPE_EEPROM,
+ ATH11K_QMI_MAX_FILE_TYPE,
+};
+
+enum ath11k_qmi_bdf_type {
+ ATH11K_QMI_BDF_TYPE_BIN = 0,
+ ATH11K_QMI_BDF_TYPE_ELF = 1,
+ ATH11K_QMI_BDF_TYPE_REGDB = 4,
+};
+
+enum ath11k_qmi_event_type {
+ ATH11K_QMI_EVENT_SERVER_ARRIVE,
+ ATH11K_QMI_EVENT_SERVER_EXIT,
+ ATH11K_QMI_EVENT_REQUEST_MEM,
+ ATH11K_QMI_EVENT_FW_MEM_READY,
+ ATH11K_QMI_EVENT_FW_READY,
+ ATH11K_QMI_EVENT_COLD_BOOT_CAL_START,
+ ATH11K_QMI_EVENT_COLD_BOOT_CAL_DONE,
+ ATH11K_QMI_EVENT_REGISTER_DRIVER,
+ ATH11K_QMI_EVENT_UNREGISTER_DRIVER,
+ ATH11K_QMI_EVENT_RECOVERY,
+ ATH11K_QMI_EVENT_FORCE_FW_ASSERT,
+ ATH11K_QMI_EVENT_POWER_UP,
+ ATH11K_QMI_EVENT_POWER_DOWN,
+ ATH11K_QMI_EVENT_FW_INIT_DONE,
+ ATH11K_QMI_EVENT_MAX,
+};
+
+struct ath11k_qmi_driver_event {
+ struct list_head list;
+ enum ath11k_qmi_event_type type;
+ void *data;
+};
+
+struct ath11k_qmi_ce_cfg {
+ const struct ce_pipe_config *tgt_ce;
+ int tgt_ce_len;
+ const struct service_to_pipe *svc_to_ce_map;
+ int svc_to_ce_map_len;
+ const u8 *shadow_reg;
+ int shadow_reg_len;
+ u32 *shadow_reg_v2;
+ int shadow_reg_v2_len;
+};
+
+struct ath11k_qmi_event_msg {
+ struct list_head list;
+ enum ath11k_qmi_event_type type;
+};
+
+struct target_mem_chunk {
+ u32 size;
+ u32 type;
+ u32 prev_size;
+ u32 prev_type;
+ dma_addr_t paddr;
+ union {
+ u32 *vaddr;
+ void __iomem *iaddr;
+ void *anyaddr;
+ };
+};
+
+struct target_info {
+ u32 chip_id;
+ u32 chip_family;
+ u32 board_id;
+ u32 soc_id;
+ u32 fw_version;
+ u32 eeprom_caldata;
+ char fw_build_timestamp[ATH11K_QMI_WLANFW_MAX_TIMESTAMP_LEN_V01 + 1];
+ char fw_build_id[ATH11K_QMI_WLANFW_MAX_BUILD_ID_LEN_V01 + 1];
+ char bdf_ext[ATH11K_QMI_BDF_EXT_STR_LENGTH];
+};
+
+struct m3_mem_region {
+ u32 size;
+ dma_addr_t paddr;
+ void *vaddr;
+};
+
+struct ath11k_qmi {
+ struct ath11k_base *ab;
+ struct qmi_handle handle;
+ struct sockaddr_qrtr sq;
+ struct work_struct event_work;
+ struct workqueue_struct *event_wq;
+ struct list_head event_list;
+ spinlock_t event_lock; /* spinlock for qmi event list */
+ struct ath11k_qmi_ce_cfg ce_cfg;
+ struct target_mem_chunk target_mem[ATH11K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01];
+ u32 mem_seg_count;
+ u32 target_mem_mode;
+ bool target_mem_delayed;
+ u8 cal_done;
+ struct target_info target;
+ struct m3_mem_region m3_mem;
+ unsigned int service_ins_id;
+ wait_queue_head_t cold_boot_waitq;
+};
+
+#define QMI_WLANFW_HOST_CAP_REQ_MSG_V01_MAX_LEN 261
+#define QMI_WLANFW_HOST_CAP_REQ_V01 0x0034
+#define QMI_WLANFW_HOST_CAP_RESP_MSG_V01_MAX_LEN 7
+#define QMI_WLFW_HOST_CAP_RESP_V01 0x0034
+#define QMI_WLFW_MAX_NUM_GPIO_V01 32
+#define QMI_IPQ8074_FW_MEM_MODE 0xFF
+#define HOST_DDR_REGION_TYPE 0x1
+#define BDF_MEM_REGION_TYPE 0x2
+#define M3_DUMP_REGION_TYPE 0x3
+#define CALDB_MEM_REGION_TYPE 0x4
+#define PAGEABLE_MEM_REGION_TYPE 0x9
+
+struct qmi_wlanfw_host_cap_req_msg_v01 {
+ u8 num_clients_valid;
+ u32 num_clients;
+ u8 wake_msi_valid;
+ u32 wake_msi;
+ u8 gpios_valid;
+ u32 gpios_len;
+ u32 gpios[QMI_WLFW_MAX_NUM_GPIO_V01];
+ u8 nm_modem_valid;
+ u8 nm_modem;
+ u8 bdf_support_valid;
+ u8 bdf_support;
+ u8 bdf_cache_support_valid;
+ u8 bdf_cache_support;
+ u8 m3_support_valid;
+ u8 m3_support;
+ u8 m3_cache_support_valid;
+ u8 m3_cache_support;
+ u8 cal_filesys_support_valid;
+ u8 cal_filesys_support;
+ u8 cal_cache_support_valid;
+ u8 cal_cache_support;
+ u8 cal_done_valid;
+ u8 cal_done;
+ u8 mem_bucket_valid;
+ u32 mem_bucket;
+ u8 mem_cfg_mode_valid;
+ u8 mem_cfg_mode;
+};
+
+struct qmi_wlanfw_host_cap_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define QMI_WLANFW_IND_REGISTER_REQ_MSG_V01_MAX_LEN 54
+#define QMI_WLANFW_IND_REGISTER_REQ_V01 0x0020
+#define QMI_WLANFW_IND_REGISTER_RESP_MSG_V01_MAX_LEN 18
+#define QMI_WLANFW_IND_REGISTER_RESP_V01 0x0020
+#define QMI_WLANFW_CLIENT_ID 0x4b4e454c
+
+struct qmi_wlanfw_ind_register_req_msg_v01 {
+ u8 fw_ready_enable_valid;
+ u8 fw_ready_enable;
+ u8 initiate_cal_download_enable_valid;
+ u8 initiate_cal_download_enable;
+ u8 initiate_cal_update_enable_valid;
+ u8 initiate_cal_update_enable;
+ u8 msa_ready_enable_valid;
+ u8 msa_ready_enable;
+ u8 pin_connect_result_enable_valid;
+ u8 pin_connect_result_enable;
+ u8 client_id_valid;
+ u32 client_id;
+ u8 request_mem_enable_valid;
+ u8 request_mem_enable;
+ u8 fw_mem_ready_enable_valid;
+ u8 fw_mem_ready_enable;
+ u8 fw_init_done_enable_valid;
+ u8 fw_init_done_enable;
+ u8 rejuvenate_enable_valid;
+ u32 rejuvenate_enable;
+ u8 xo_cal_enable_valid;
+ u8 xo_cal_enable;
+ u8 cal_done_enable_valid;
+ u8 cal_done_enable;
+};
+
+struct qmi_wlanfw_ind_register_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+ u8 fw_status_valid;
+ u64 fw_status;
+};
+
+#define QMI_WLANFW_REQUEST_MEM_IND_MSG_V01_MAX_LEN 1824
+#define QMI_WLANFW_RESPOND_MEM_REQ_MSG_V01_MAX_LEN 888
+#define QMI_WLANFW_RESPOND_MEM_RESP_MSG_V01_MAX_LEN 7
+#define QMI_WLANFW_REQUEST_MEM_IND_V01 0x0035
+#define QMI_WLANFW_RESPOND_MEM_REQ_V01 0x0036
+#define QMI_WLANFW_RESPOND_MEM_RESP_V01 0x0036
+#define QMI_WLANFW_MAX_NUM_MEM_CFG_V01 2
+
+struct qmi_wlanfw_mem_cfg_s_v01 {
+ u64 offset;
+ u32 size;
+ u8 secure_flag;
+};
+
+enum qmi_wlanfw_mem_type_enum_v01 {
+ WLANFW_MEM_TYPE_ENUM_MIN_VAL_V01 = INT_MIN,
+ QMI_WLANFW_MEM_TYPE_MSA_V01 = 0,
+ QMI_WLANFW_MEM_TYPE_DDR_V01 = 1,
+ QMI_WLANFW_MEM_BDF_V01 = 2,
+ QMI_WLANFW_MEM_M3_V01 = 3,
+ QMI_WLANFW_MEM_CAL_V01 = 4,
+ QMI_WLANFW_MEM_DPD_V01 = 5,
+ WLANFW_MEM_TYPE_ENUM_MAX_VAL_V01 = INT_MAX,
+};
+
+struct qmi_wlanfw_mem_seg_s_v01 {
+ u32 size;
+ enum qmi_wlanfw_mem_type_enum_v01 type;
+ u32 mem_cfg_len;
+ struct qmi_wlanfw_mem_cfg_s_v01 mem_cfg[QMI_WLANFW_MAX_NUM_MEM_CFG_V01];
+};
+
+struct qmi_wlanfw_request_mem_ind_msg_v01 {
+ u32 mem_seg_len;
+ struct qmi_wlanfw_mem_seg_s_v01 mem_seg[ATH11K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01];
+};
+
+struct qmi_wlanfw_mem_seg_resp_s_v01 {
+ u64 addr;
+ u32 size;
+ enum qmi_wlanfw_mem_type_enum_v01 type;
+ u8 restore;
+};
+
+struct qmi_wlanfw_respond_mem_req_msg_v01 {
+ u32 mem_seg_len;
+ struct qmi_wlanfw_mem_seg_resp_s_v01 mem_seg[ATH11K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01];
+};
+
+struct qmi_wlanfw_respond_mem_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+struct qmi_wlanfw_fw_mem_ready_ind_msg_v01 {
+ char placeholder;
+};
+
+struct qmi_wlanfw_fw_ready_ind_msg_v01 {
+ char placeholder;
+};
+
+struct qmi_wlanfw_fw_cold_cal_done_ind_msg_v01 {
+ char placeholder;
+};
+
+struct qmi_wlfw_fw_init_done_ind_msg_v01 {
+ char placeholder;
+};
+
+#define QMI_WLANFW_CAP_REQ_MSG_V01_MAX_LEN 0
+#define QMI_WLANFW_CAP_RESP_MSG_V01_MAX_LEN 235
+#define QMI_WLANFW_CAP_REQ_V01 0x0024
+#define QMI_WLANFW_CAP_RESP_V01 0x0024
+#define QMI_WLANFW_DEVICE_INFO_REQ_V01 0x004C
+#define QMI_WLANFW_DEVICE_INFO_REQ_MSG_V01_MAX_LEN 0
+
+enum qmi_wlanfw_pipedir_enum_v01 {
+ QMI_WLFW_PIPEDIR_NONE_V01 = 0,
+ QMI_WLFW_PIPEDIR_IN_V01 = 1,
+ QMI_WLFW_PIPEDIR_OUT_V01 = 2,
+ QMI_WLFW_PIPEDIR_INOUT_V01 = 3,
+};
+
+struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01 {
+ __le32 pipe_num;
+ __le32 pipe_dir;
+ __le32 nentries;
+ __le32 nbytes_max;
+ __le32 flags;
+};
+
+struct qmi_wlanfw_ce_svc_pipe_cfg_s_v01 {
+ __le32 service_id;
+ __le32 pipe_dir;
+ __le32 pipe_num;
+};
+
+struct qmi_wlanfw_shadow_reg_cfg_s_v01 {
+ u16 id;
+ u16 offset;
+};
+
+struct qmi_wlanfw_shadow_reg_v2_cfg_s_v01 {
+ u32 addr;
+};
+
+struct qmi_wlanfw_memory_region_info_s_v01 {
+ u64 region_addr;
+ u32 size;
+ u8 secure_flag;
+};
+
+struct qmi_wlanfw_rf_chip_info_s_v01 {
+ u32 chip_id;
+ u32 chip_family;
+};
+
+struct qmi_wlanfw_rf_board_info_s_v01 {
+ u32 board_id;
+};
+
+struct qmi_wlanfw_soc_info_s_v01 {
+ u32 soc_id;
+};
+
+struct qmi_wlanfw_fw_version_info_s_v01 {
+ u32 fw_version;
+ char fw_build_timestamp[ATH11K_QMI_WLANFW_MAX_TIMESTAMP_LEN_V01 + 1];
+};
+
+enum qmi_wlanfw_cal_temp_id_enum_v01 {
+ QMI_WLANFW_CAL_TEMP_IDX_0_V01 = 0,
+ QMI_WLANFW_CAL_TEMP_IDX_1_V01 = 1,
+ QMI_WLANFW_CAL_TEMP_IDX_2_V01 = 2,
+ QMI_WLANFW_CAL_TEMP_IDX_3_V01 = 3,
+ QMI_WLANFW_CAL_TEMP_IDX_4_V01 = 4,
+ QMI_WLANFW_CAL_TEMP_ID_MAX_V01 = 0xFF,
+};
+
+struct qmi_wlanfw_cap_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+ u8 chip_info_valid;
+ struct qmi_wlanfw_rf_chip_info_s_v01 chip_info;
+ u8 board_info_valid;
+ struct qmi_wlanfw_rf_board_info_s_v01 board_info;
+ u8 soc_info_valid;
+ struct qmi_wlanfw_soc_info_s_v01 soc_info;
+ u8 fw_version_info_valid;
+ struct qmi_wlanfw_fw_version_info_s_v01 fw_version_info;
+ u8 fw_build_id_valid;
+ char fw_build_id[ATH11K_QMI_WLANFW_MAX_BUILD_ID_LEN_V01 + 1];
+ u8 num_macs_valid;
+ u8 num_macs;
+ u8 voltage_mv_valid;
+ u32 voltage_mv;
+ u8 time_freq_hz_valid;
+ u32 time_freq_hz;
+ u8 otp_version_valid;
+ u32 otp_version;
+ u8 eeprom_read_timeout_valid;
+ u32 eeprom_read_timeout;
+};
+
+struct qmi_wlanfw_cap_req_msg_v01 {
+ char placeholder;
+};
+
+struct qmi_wlanfw_device_info_req_msg_v01 {
+ char placeholder;
+};
+
+struct qmi_wlanfw_device_info_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+ u64 bar_addr;
+ u32 bar_size;
+ u8 bar_addr_valid;
+ u8 bar_size_valid;
+};
+
+#define QMI_WLANFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_LEN 6182
+#define QMI_WLANFW_BDF_DOWNLOAD_RESP_MSG_V01_MAX_LEN 7
+#define QMI_WLANFW_BDF_DOWNLOAD_RESP_V01 0x0025
+#define QMI_WLANFW_BDF_DOWNLOAD_REQ_V01 0x0025
+/* TODO: Need to check with MCL and FW team that data can be pointer and
+ * can be last element in structure
+ */
+struct qmi_wlanfw_bdf_download_req_msg_v01 {
+ u8 valid;
+ u8 file_id_valid;
+ enum qmi_wlanfw_cal_temp_id_enum_v01 file_id;
+ u8 total_size_valid;
+ u32 total_size;
+ u8 seg_id_valid;
+ u32 seg_id;
+ u8 data_valid;
+ u32 data_len;
+ u8 data[QMI_WLANFW_MAX_DATA_SIZE_V01];
+ u8 end_valid;
+ u8 end;
+ u8 bdf_type_valid;
+ u8 bdf_type;
+
+};
+
+struct qmi_wlanfw_bdf_download_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define QMI_WLANFW_M3_INFO_REQ_MSG_V01_MAX_MSG_LEN 18
+#define QMI_WLANFW_M3_INFO_RESP_MSG_V01_MAX_MSG_LEN 7
+#define QMI_WLANFW_M3_INFO_RESP_V01 0x003C
+#define QMI_WLANFW_M3_INFO_REQ_V01 0x003C
+
+struct qmi_wlanfw_m3_info_req_msg_v01 {
+ u64 addr;
+ u32 size;
+};
+
+struct qmi_wlanfw_m3_info_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define QMI_WLANFW_WLAN_MODE_REQ_MSG_V01_MAX_LEN 11
+#define QMI_WLANFW_WLAN_MODE_RESP_MSG_V01_MAX_LEN 7
+#define QMI_WLANFW_WLAN_CFG_REQ_MSG_V01_MAX_LEN 803
+#define QMI_WLANFW_WLAN_CFG_RESP_MSG_V01_MAX_LEN 7
+#define QMI_WLANFW_WLAN_INI_REQ_MSG_V01_MAX_LEN 4
+#define QMI_WLANFW_WLAN_MODE_REQ_V01 0x0022
+#define QMI_WLANFW_WLAN_MODE_RESP_V01 0x0022
+#define QMI_WLANFW_WLAN_CFG_REQ_V01 0x0023
+#define QMI_WLANFW_WLAN_CFG_RESP_V01 0x0023
+#define QMI_WLANFW_WLAN_INI_REQ_V01 0x002F
+#define QMI_WLANFW_MAX_STR_LEN_V01 16
+#define QMI_WLANFW_MAX_NUM_CE_V01 12
+#define QMI_WLANFW_MAX_NUM_SVC_V01 24
+#define QMI_WLANFW_MAX_NUM_SHADOW_REG_V01 24
+#define QMI_WLANFW_MAX_NUM_SHADOW_REG_V2_V01 36
+
+struct qmi_wlanfw_wlan_mode_req_msg_v01 {
+ u32 mode;
+ u8 hw_debug_valid;
+ u8 hw_debug;
+};
+
+struct qmi_wlanfw_wlan_mode_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+struct qmi_wlanfw_wlan_cfg_req_msg_v01 {
+ u8 host_version_valid;
+ char host_version[QMI_WLANFW_MAX_STR_LEN_V01 + 1];
+ u8 tgt_cfg_valid;
+ u32 tgt_cfg_len;
+ struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01
+ tgt_cfg[QMI_WLANFW_MAX_NUM_CE_V01];
+ u8 svc_cfg_valid;
+ u32 svc_cfg_len;
+ struct qmi_wlanfw_ce_svc_pipe_cfg_s_v01
+ svc_cfg[QMI_WLANFW_MAX_NUM_SVC_V01];
+ u8 shadow_reg_valid;
+ u32 shadow_reg_len;
+ struct qmi_wlanfw_shadow_reg_cfg_s_v01
+ shadow_reg[QMI_WLANFW_MAX_NUM_SHADOW_REG_V01];
+ u8 shadow_reg_v2_valid;
+ u32 shadow_reg_v2_len;
+ struct qmi_wlanfw_shadow_reg_v2_cfg_s_v01
+ shadow_reg_v2[QMI_WLANFW_MAX_NUM_SHADOW_REG_V2_V01];
+};
+
+struct qmi_wlanfw_wlan_cfg_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+struct qmi_wlanfw_wlan_ini_req_msg_v01 {
+ /* Must be set to true if enablefwlog is being passed */
+ u8 enablefwlog_valid;
+ u8 enablefwlog;
+};
+
+struct qmi_wlanfw_wlan_ini_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+int ath11k_qmi_firmware_start(struct ath11k_base *ab,
+ u32 mode);
+void ath11k_qmi_firmware_stop(struct ath11k_base *ab);
+void ath11k_qmi_deinit_service(struct ath11k_base *ab);
+int ath11k_qmi_init_service(struct ath11k_base *ab);
+void ath11k_qmi_free_resource(struct ath11k_base *ab);
+int ath11k_qmi_fwreset_from_cold_boot(struct ath11k_base *ab);
+
+#endif
diff --git a/drivers/net/wireless/ath/ath11k/reg.c b/drivers/net/wireless/ath/ath11k/reg.c
new file mode 100644
index 000000000000..d62a2014315a
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/reg.c
@@ -0,0 +1,1066 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+#include <linux/rtnetlink.h>
+
+#include "core.h"
+#include "debug.h"
+
+/* World regdom to be used in case default regd from fw is unavailable */
+#define ATH11K_2GHZ_CH01_11 REG_RULE(2412 - 10, 2462 + 10, 40, 0, 20, 0)
+#define ATH11K_5GHZ_5150_5350 REG_RULE(5150 - 10, 5350 + 10, 80, 0, 30,\
+ NL80211_RRF_NO_IR)
+#define ATH11K_5GHZ_5725_5850 REG_RULE(5725 - 10, 5850 + 10, 80, 0, 30,\
+ NL80211_RRF_NO_IR)
+
+#define ETSI_WEATHER_RADAR_BAND_LOW 5590
+#define ETSI_WEATHER_RADAR_BAND_HIGH 5650
+#define ETSI_WEATHER_RADAR_BAND_CAC_TIMEOUT 600000
+
+static const struct ieee80211_regdomain ath11k_world_regd = {
+ .n_reg_rules = 3,
+ .alpha2 = "00",
+ .reg_rules = {
+ ATH11K_2GHZ_CH01_11,
+ ATH11K_5GHZ_5150_5350,
+ ATH11K_5GHZ_5725_5850,
+ }
+};
+
+static bool ath11k_regdom_changes(struct ath11k *ar, char *alpha2)
+{
+ const struct ieee80211_regdomain *regd;
+
+ regd = rcu_dereference_rtnl(ar->hw->wiphy->regd);
+ /* This can happen during wiphy registration where the previous
+ * user request is received before we update the regd received
+ * from firmware.
+ */
+ if (!regd)
+ return true;
+
+ return memcmp(regd->alpha2, alpha2, 2) != 0;
+}
+
+static void
+ath11k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
+{
+ struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
+ struct wmi_init_country_params init_country_param;
+ struct ath11k *ar = hw->priv;
+ int ret;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_REG,
+ "Regulatory Notification received for %s\n", wiphy_name(wiphy));
+
+ if (request->initiator == NL80211_REGDOM_SET_BY_DRIVER) {
+ ath11k_dbg(ar->ab, ATH11K_DBG_REG,
+ "driver initiated regd update\n");
+ if (ar->state != ATH11K_STATE_ON)
+ return;
+
+ ret = ath11k_reg_update_chan_list(ar, true);
+ if (ret)
+ ath11k_warn(ar->ab, "failed to update channel list: %d\n", ret);
+
+ return;
+ }
+
+ /* Currently only general user hints are supported. Cell-based
+ * user hints are to be handled later. Hints from other sources
+ * like core and beacons are not expected for self-managed wiphys.
+ */
+ if (!(request->initiator == NL80211_REGDOM_SET_BY_USER &&
+ request->user_reg_hint_type == NL80211_USER_REG_HINT_USER)) {
+ ath11k_warn(ar->ab, "Unexpected Regulatory event for this wiphy\n");
+ return;
+ }
+
+ if (!IS_ENABLED(CONFIG_ATH_REG_DYNAMIC_USER_REG_HINTS)) {
+ ath11k_dbg(ar->ab, ATH11K_DBG_REG,
+ "Country Setting is not allowed\n");
+ return;
+ }
+
+ if (!ath11k_regdom_changes(ar, request->alpha2)) {
+ ath11k_dbg(ar->ab, ATH11K_DBG_REG, "Country is already set\n");
+ return;
+ }
+
+ /* Send the country code to the firmware; the
+ * WMI_REG_CHAN_LIST_CC event will be received in response
+ * with the updated reg info.
+ */
+ if (ar->ab->hw_params.current_cc_support) {
+ memcpy(&ar->alpha2, request->alpha2, 2);
+ ret = ath11k_reg_set_cc(ar);
+ if (ret)
+ ath11k_warn(ar->ab,
+ "failed set current country code: %d\n", ret);
+ } else {
+ init_country_param.flags = ALPHA_IS_SET;
+ memcpy(&init_country_param.cc_info.alpha2, request->alpha2, 2);
+ init_country_param.cc_info.alpha2[2] = 0;
+
+ ret = ath11k_wmi_send_init_country_cmd(ar, init_country_param);
+ if (ret)
+ ath11k_warn(ar->ab,
+ "INIT Country code set to fw failed : %d\n", ret);
+ }
+
+ ath11k_mac_11d_scan_stop(ar);
+ ar->regdom_set_by_user = true;
+}
+
+int ath11k_reg_update_chan_list(struct ath11k *ar, bool wait)
+{
+ struct ieee80211_supported_band **bands;
+ struct scan_chan_list_params *params;
+ struct ieee80211_channel *channel;
+ struct ieee80211_hw *hw = ar->hw;
+ struct channel_param *ch;
+ enum nl80211_band band;
+ int num_channels = 0;
+ int i, ret = 0;
+
+ if (ar->state == ATH11K_STATE_RESTARTING)
+ return 0;
+
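+ /* Two passes over the bands: first count the enabled channels
+ * to size the WMI request, then fill one channel_param per
+ * channel.
+ */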
+ bands = hw->wiphy->bands;
+ for (band = 0; band < NUM_NL80211_BANDS; band++) {
+ if (!bands[band])
+ continue;
+
+ for (i = 0; i < bands[band]->n_channels; i++) {
+ if (bands[band]->channels[i].flags &
+ IEEE80211_CHAN_DISABLED)
+ continue;
+
+ num_channels++;
+ }
+ }
+
+ if (WARN_ON(!num_channels))
+ return -EINVAL;
+
+ params = kzalloc(struct_size(params, ch_param, num_channels),
+ GFP_KERNEL);
+ if (!params)
+ return -ENOMEM;
+
+ params->pdev_id = ar->pdev->pdev_id;
+ params->nallchans = num_channels;
+
+ ch = params->ch_param;
+
+ for (band = 0; band < NUM_NL80211_BANDS; band++) {
+ if (!bands[band])
+ continue;
+
+ for (i = 0; i < bands[band]->n_channels; i++) {
+ channel = &bands[band]->channels[i];
+
+ if (channel->flags & IEEE80211_CHAN_DISABLED)
+ continue;
+
+ /* TODO: Set to true/false based on some condition? */
+ ch->allow_ht = true;
+ ch->allow_vht = true;
+ ch->allow_he = true;
+
+ ch->dfs_set =
+ !!(channel->flags & IEEE80211_CHAN_RADAR);
+ ch->is_chan_passive = !!(channel->flags &
+ IEEE80211_CHAN_NO_IR);
+ ch->is_chan_passive |= ch->dfs_set;
+ ch->mhz = channel->center_freq;
+ ch->cfreq1 = channel->center_freq;
+ ch->minpower = 0;
+ ch->maxpower = channel->max_power * 2;
+ ch->maxregpower = channel->max_reg_power * 2;
+ ch->antennamax = channel->max_antenna_gain * 2;
+
+ /* TODO: Use appropriate phymodes */
+ if (channel->band == NL80211_BAND_2GHZ)
+ ch->phy_mode = MODE_11G;
+ else
+ ch->phy_mode = MODE_11A;
+
+ if (channel->band == NL80211_BAND_6GHZ &&
+ cfg80211_channel_is_psc(channel))
+ ch->psc_channel = true;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "mac channel [%d/%d] freq %d maxpower %d regpower %d antenna %d mode %d\n",
+ i, params->nallchans,
+ ch->mhz, ch->maxpower, ch->maxregpower,
+ ch->antennamax, ch->phy_mode);
+
+ ch++;
+ /* TODO: use quarter/half rate, cfreq12, dfs_cfreq2,
+ * set_agile, reg_class_idx
+ */
+ }
+ }
+
+ if (wait) {
+ spin_lock_bh(&ar->data_lock);
+ list_add_tail(&params->list, &ar->channel_update_queue);
+ spin_unlock_bh(&ar->data_lock);
+
+ queue_work(ar->ab->workqueue, &ar->channel_update_work);
+
+ return 0;
+ }
+
+ ret = ath11k_wmi_send_scan_chan_list_cmd(ar, params);
+ kfree(params);
+
+ return ret;
+}
+
+static void ath11k_copy_regd(struct ieee80211_regdomain *regd_orig,
+ struct ieee80211_regdomain *regd_copy)
+{
+ u8 i;
+
+ /* The caller should have checked error conditions */
+ memcpy(regd_copy, regd_orig, sizeof(*regd_orig));
+
+ for (i = 0; i < regd_orig->n_reg_rules; i++)
+ memcpy(&regd_copy->reg_rules[i], &regd_orig->reg_rules[i],
+ sizeof(struct ieee80211_reg_rule));
+}
+
+int ath11k_regd_update(struct ath11k *ar)
+{
+ struct ieee80211_regdomain *regd, *regd_copy = NULL;
+ int ret, regd_len, pdev_id;
+ struct ath11k_base *ab;
+
+ ab = ar->ab;
+ pdev_id = ar->pdev_idx;
+
+ spin_lock_bh(&ab->base_lock);
+
+ /* Prefer the latest regd update over default if it's available */
+ if (ab->new_regd[pdev_id]) {
+ regd = ab->new_regd[pdev_id];
+ } else {
+ /* Apply the regd received during init through
+ * WMI_REG_CHAN_LIST_CC event. In case of failure to
+ * receive the regd, initialize with a default world
+ * regulatory.
+ */
+ if (ab->default_regd[pdev_id]) {
+ regd = ab->default_regd[pdev_id];
+ } else {
+ ath11k_warn(ab,
+ "failed to receive default regd during init\n");
+ regd = (struct ieee80211_regdomain *)&ath11k_world_regd;
+ }
+ }
+
+ if (!regd) {
+ ret = -EINVAL;
+ spin_unlock_bh(&ab->base_lock);
+ goto err;
+ }
+
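+ /* Copy the regd under the lock (hence GFP_ATOMIC) so it can be
+ * handed to regulatory_set_wiphy_regd() once the lock is
+ * dropped.
+ */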
+ regd_len = sizeof(*regd) + (regd->n_reg_rules *
+ sizeof(struct ieee80211_reg_rule));
+
+ regd_copy = kzalloc(regd_len, GFP_ATOMIC);
+ if (regd_copy)
+ ath11k_copy_regd(regd, regd_copy);
+
+ spin_unlock_bh(&ab->base_lock);
+
+ if (!regd_copy) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ret = regulatory_set_wiphy_regd(ar->hw->wiphy, regd_copy);
+
+ kfree(regd_copy);
+
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ ath11k_warn(ab, "failed to perform regd update : %d\n", ret);
+ return ret;
+}
+
+static enum nl80211_dfs_regions
+ath11k_map_fw_dfs_region(enum ath11k_dfs_region dfs_region)
+{
+ switch (dfs_region) {
+ case ATH11K_DFS_REG_FCC:
+ case ATH11K_DFS_REG_CN:
+ return NL80211_DFS_FCC;
+ case ATH11K_DFS_REG_ETSI:
+ case ATH11K_DFS_REG_KR:
+ return NL80211_DFS_ETSI;
+ case ATH11K_DFS_REG_MKK:
+ case ATH11K_DFS_REG_MKK_N:
+ return NL80211_DFS_JP;
+ default:
+ return NL80211_DFS_UNSET;
+ }
+}
+
+static u32 ath11k_map_fw_reg_flags(u16 reg_flags)
+{
+ u32 flags = 0;
+
+ if (reg_flags & REGULATORY_CHAN_NO_IR)
+ flags = NL80211_RRF_NO_IR;
+
+ if (reg_flags & REGULATORY_CHAN_RADAR)
+ flags |= NL80211_RRF_DFS;
+
+ if (reg_flags & REGULATORY_CHAN_NO_OFDM)
+ flags |= NL80211_RRF_NO_OFDM;
+
+ if (reg_flags & REGULATORY_CHAN_INDOOR_ONLY)
+ flags |= NL80211_RRF_NO_OUTDOOR;
+
+ if (reg_flags & REGULATORY_CHAN_NO_HT40)
+ flags |= NL80211_RRF_NO_HT40;
+
+ if (reg_flags & REGULATORY_CHAN_NO_80MHZ)
+ flags |= NL80211_RRF_NO_80MHZ;
+
+ if (reg_flags & REGULATORY_CHAN_NO_160MHZ)
+ flags |= NL80211_RRF_NO_160MHZ;
+
+ return flags;
+}
+
+static u32 ath11k_map_fw_phy_flags(u32 phy_flags)
+{
+ u32 flags = 0;
+
+ if (phy_flags & ATH11K_REG_PHY_BITMAP_NO11AX)
+ flags |= NL80211_RRF_NO_HE;
+
+ return flags;
+}
+
+static bool
+ath11k_reg_can_intersect(struct ieee80211_reg_rule *rule1,
+ struct ieee80211_reg_rule *rule2)
+{
+ u32 start_freq1, end_freq1;
+ u32 start_freq2, end_freq2;
+
+ start_freq1 = rule1->freq_range.start_freq_khz;
+ start_freq2 = rule2->freq_range.start_freq_khz;
+
+ end_freq1 = rule1->freq_range.end_freq_khz;
+ end_freq2 = rule2->freq_range.end_freq_khz;
+
+ if ((start_freq1 >= start_freq2 &&
+ start_freq1 < end_freq2) ||
+ (start_freq2 > start_freq1 &&
+ start_freq2 < end_freq1))
+ return true;
+
+ /* TODO: Should we restrict intersection feasibility
+ * based on min bandwidth of the intersected region also,
+ * say the intersected rule should have a min bandwidth
+ * of 20MHz?
+ */
+
+ return false;
+}
+
+static void ath11k_reg_intersect_rules(struct ieee80211_reg_rule *rule1,
+ struct ieee80211_reg_rule *rule2,
+ struct ieee80211_reg_rule *new_rule)
+{
+ u32 start_freq1, end_freq1;
+ u32 start_freq2, end_freq2;
+ u32 freq_diff, max_bw;
+
+ start_freq1 = rule1->freq_range.start_freq_khz;
+ start_freq2 = rule2->freq_range.start_freq_khz;
+
+ end_freq1 = rule1->freq_range.end_freq_khz;
+ end_freq2 = rule2->freq_range.end_freq_khz;
+
+ new_rule->freq_range.start_freq_khz = max_t(u32, start_freq1,
+ start_freq2);
+ new_rule->freq_range.end_freq_khz = min_t(u32, end_freq1, end_freq2);
+
+ freq_diff = new_rule->freq_range.end_freq_khz -
+ new_rule->freq_range.start_freq_khz;
+ max_bw = min_t(u32, rule1->freq_range.max_bandwidth_khz,
+ rule2->freq_range.max_bandwidth_khz);
+ new_rule->freq_range.max_bandwidth_khz = min_t(u32, max_bw, freq_diff);
+
+ new_rule->power_rule.max_antenna_gain =
+ min_t(u32, rule1->power_rule.max_antenna_gain,
+ rule2->power_rule.max_antenna_gain);
+
+ new_rule->power_rule.max_eirp = min_t(u32, rule1->power_rule.max_eirp,
+ rule2->power_rule.max_eirp);
+
+ /* Use the flags of both the rules */
+ new_rule->flags = rule1->flags | rule2->flags;
+
+ if ((rule1->flags & NL80211_RRF_PSD) && (rule2->flags & NL80211_RRF_PSD))
+ new_rule->psd = min_t(s8, rule1->psd, rule2->psd);
+ else
+ new_rule->flags &= ~NL80211_RRF_PSD;
+
+ /* To be safe, let's use the max CAC timeout of both rules */
+ new_rule->dfs_cac_ms = max_t(u32, rule1->dfs_cac_ms,
+ rule2->dfs_cac_ms);
+}
+
+static struct ieee80211_regdomain *
+ath11k_regd_intersect(struct ieee80211_regdomain *default_regd,
+ struct ieee80211_regdomain *curr_regd)
+{
+ u8 num_old_regd_rules, num_curr_regd_rules, num_new_regd_rules;
+ struct ieee80211_reg_rule *old_rule, *curr_rule, *new_rule;
+ struct ieee80211_regdomain *new_regd = NULL;
+ u8 i, j, k;
+
+ num_old_regd_rules = default_regd->n_reg_rules;
+ num_curr_regd_rules = curr_regd->n_reg_rules;
+ num_new_regd_rules = 0;
+
+ /* Find the number of intersecting rules to allocate new regd memory */
+ for (i = 0; i < num_old_regd_rules; i++) {
+ old_rule = default_regd->reg_rules + i;
+ for (j = 0; j < num_curr_regd_rules; j++) {
+ curr_rule = curr_regd->reg_rules + j;
+
+ if (ath11k_reg_can_intersect(old_rule, curr_rule))
+ num_new_regd_rules++;
+ }
+ }
+
+ if (!num_new_regd_rules)
+ return NULL;
+
+ new_regd = kzalloc(sizeof(*new_regd) + (num_new_regd_rules *
+ sizeof(struct ieee80211_reg_rule)),
+ GFP_ATOMIC);
+
+ if (!new_regd)
+ return NULL;
+
+ /* We set the new country and dfs region directly and only trim
+ * the freq, power, antenna gain by intersecting with the
+ * default regdomain. Also MAX of the dfs cac timeout is selected.
+ */
+ new_regd->n_reg_rules = num_new_regd_rules;
+ memcpy(new_regd->alpha2, curr_regd->alpha2, sizeof(new_regd->alpha2));
+ new_regd->dfs_region = curr_regd->dfs_region;
+ new_rule = new_regd->reg_rules;
+
+ for (i = 0, k = 0; i < num_old_regd_rules; i++) {
+ old_rule = default_regd->reg_rules + i;
+ for (j = 0; j < num_curr_regd_rules; j++) {
+ curr_rule = curr_regd->reg_rules + j;
+
+ if (ath11k_reg_can_intersect(old_rule, curr_rule))
+ ath11k_reg_intersect_rules(old_rule, curr_rule,
+ (new_rule + k++));
+ }
+ }
+ return new_regd;
+}
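+
+/* Note on the helper above: the O(n * m) rule comparison intentionally runs
+ * twice, once to size the allocation and once to fill it, so that new_regd
+ * can be allocated with a single kzalloc() of exactly num_new_regd_rules
+ * entries.
+ */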
+
+static const char *
+ath11k_reg_get_regdom_str(enum nl80211_dfs_regions dfs_region)
+{
+ switch (dfs_region) {
+ case NL80211_DFS_FCC:
+ return "FCC";
+ case NL80211_DFS_ETSI:
+ return "ETSI";
+ case NL80211_DFS_JP:
+ return "JP";
+ default:
+ return "UNSET";
+ }
+}
+
+static u16
+ath11k_reg_adjust_bw(u16 start_freq, u16 end_freq, u16 max_bw)
+{
+ u16 bw;
+
+ if (end_freq <= start_freq)
+ return 0;
+
+ bw = end_freq - start_freq;
+ bw = min_t(u16, bw, max_bw);
+
+ if (bw >= 80 && bw < 160)
+ bw = 80;
+ else if (bw >= 40 && bw < 80)
+ bw = 40;
+ else if (bw >= 20 && bw < 40)
+ bw = 20;
+ else
+ bw = 0;
+
+ return bw;
+}
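+
+/* Worked example (hypothetical values): for start_freq = 5490, end_freq =
+ * 5590 and max_bw = 160 the raw width is 100 MHz, which is clamped to 100
+ * and then snapped down to the nearest supported width, i.e. 80 MHz.
+ * Widths below 20 MHz yield 0, which callers treat as "no usable rule".
+ */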
+
+static void
+ath11k_reg_update_rule(struct ieee80211_reg_rule *reg_rule, u32 start_freq,
+ u32 end_freq, u32 bw, u32 ant_gain, u32 reg_pwr,
+ s8 psd, u32 reg_flags)
+{
+ reg_rule->freq_range.start_freq_khz = MHZ_TO_KHZ(start_freq);
+ reg_rule->freq_range.end_freq_khz = MHZ_TO_KHZ(end_freq);
+ reg_rule->freq_range.max_bandwidth_khz = MHZ_TO_KHZ(bw);
+ reg_rule->power_rule.max_antenna_gain = DBI_TO_MBI(ant_gain);
+ reg_rule->power_rule.max_eirp = DBM_TO_MBM(reg_pwr);
+ reg_rule->psd = psd;
+ reg_rule->flags = reg_flags;
+}
+
+static void
+ath11k_reg_update_weather_radar_band(struct ath11k_base *ab,
+ struct ieee80211_regdomain *regd,
+ struct cur_reg_rule *reg_rule,
+ u8 *rule_idx, u32 flags, u16 max_bw)
+{
+ u32 start_freq;
+ u32 end_freq;
+ u16 bw;
+ u8 i;
+
+ i = *rule_idx;
+
+ /* there might be situations when even the input rule must be dropped */
+ i--;
+
+ /* frequencies below weather radar */
+ bw = ath11k_reg_adjust_bw(reg_rule->start_freq,
+ ETSI_WEATHER_RADAR_BAND_LOW, max_bw);
+ if (bw > 0) {
+ i++;
+
+ ath11k_reg_update_rule(regd->reg_rules + i,
+ reg_rule->start_freq,
+ ETSI_WEATHER_RADAR_BAND_LOW, bw,
+ reg_rule->ant_gain, reg_rule->reg_power,
+ reg_rule->psd_eirp, flags);
+
+ ath11k_dbg(ab, ATH11K_DBG_REG,
+ "\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n",
+ i + 1, reg_rule->start_freq,
+ ETSI_WEATHER_RADAR_BAND_LOW, bw, reg_rule->ant_gain,
+ reg_rule->reg_power, regd->reg_rules[i].dfs_cac_ms,
+ flags);
+ }
+
+ /* weather radar frequencies */
+ start_freq = max_t(u32, reg_rule->start_freq,
+ ETSI_WEATHER_RADAR_BAND_LOW);
+ end_freq = min_t(u32, reg_rule->end_freq, ETSI_WEATHER_RADAR_BAND_HIGH);
+
+ bw = ath11k_reg_adjust_bw(start_freq, end_freq, max_bw);
+ if (bw > 0) {
+ i++;
+
+ ath11k_reg_update_rule(regd->reg_rules + i, start_freq,
+ end_freq, bw, reg_rule->ant_gain,
+ reg_rule->reg_power, reg_rule->psd_eirp, flags);
+
+ regd->reg_rules[i].dfs_cac_ms = ETSI_WEATHER_RADAR_BAND_CAC_TIMEOUT;
+
+ ath11k_dbg(ab, ATH11K_DBG_REG,
+ "\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n",
+ i + 1, start_freq, end_freq, bw,
+ reg_rule->ant_gain, reg_rule->reg_power,
+ regd->reg_rules[i].dfs_cac_ms, flags);
+ }
+
+ /* frequencies above weather radar */
+ bw = ath11k_reg_adjust_bw(ETSI_WEATHER_RADAR_BAND_HIGH,
+ reg_rule->end_freq, max_bw);
+ if (bw > 0) {
+ i++;
+
+ ath11k_reg_update_rule(regd->reg_rules + i,
+ ETSI_WEATHER_RADAR_BAND_HIGH,
+ reg_rule->end_freq, bw,
+ reg_rule->ant_gain, reg_rule->reg_power,
+ reg_rule->psd_eirp, flags);
+
+ ath11k_dbg(ab, ATH11K_DBG_REG,
+ "\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n",
+ i + 1, ETSI_WEATHER_RADAR_BAND_HIGH,
+ reg_rule->end_freq, bw, reg_rule->ant_gain,
+ reg_rule->reg_power, regd->reg_rules[i].dfs_cac_ms,
+ flags);
+ }
+
+ *rule_idx = i;
+}
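+
+/* Illustrative sketch of the split above (assuming ETSI_WEATHER_RADAR_BAND_LOW
+ * and ETSI_WEATHER_RADAR_BAND_HIGH, defined elsewhere in this driver, bound
+ * the weather radar band): an input rule spanning the whole band is split
+ * into up to three rules,
+ *
+ *	input:  (start_freq - end_freq)
+ *	output: (start_freq - LOW), (LOW - HIGH, CAC = weather radar timeout),
+ *	        (HIGH - end_freq)
+ *
+ * where sub-rules narrower than 20 MHz are dropped; this is why *rule_idx
+ * can even decrease by one when the input rule itself yields nothing.
+ */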
+
+enum wmi_reg_6ghz_ap_type
+ath11k_reg_ap_pwr_convert(enum ieee80211_ap_reg_power power_type)
+{
+ switch (power_type) {
+ case IEEE80211_REG_LPI_AP:
+ return WMI_REG_INDOOR_AP;
+ case IEEE80211_REG_SP_AP:
+ return WMI_REG_STANDARD_POWER_AP;
+ case IEEE80211_REG_VLP_AP:
+ return WMI_REG_VERY_LOW_POWER_AP;
+ default:
+ return WMI_REG_MAX_AP_TYPE;
+ }
+}
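+
+/* Usage note: ath11k_reg_build_regd() below maps the mac80211 power type
+ * with this helper and, on WMI_REG_MAX_AP_TYPE (unknown type), falls back
+ * to WMI_REG_INDOOR_AP.
+ */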
+
+struct ieee80211_regdomain *
+ath11k_reg_build_regd(struct ath11k_base *ab,
+ struct cur_regulatory_info *reg_info, bool intersect,
+ enum wmi_vdev_type vdev_type,
+ enum ieee80211_ap_reg_power power_type)
+{
+ struct ieee80211_regdomain *tmp_regd, *default_regd, *new_regd = NULL;
+ struct cur_reg_rule *reg_rule, *reg_rule_6ghz;
+ u8 i = 0, j = 0, k = 0;
+ u8 num_rules;
+ u16 max_bw;
+ u32 flags, reg_6ghz_number, max_bw_6ghz;
+ char alpha2[3];
+
+ num_rules = reg_info->num_5ghz_reg_rules + reg_info->num_2ghz_reg_rules;
+
+ if (reg_info->is_ext_reg_event) {
+ if (vdev_type == WMI_VDEV_TYPE_STA) {
+ enum wmi_reg_6ghz_ap_type ap_type;
+
+ ap_type = ath11k_reg_ap_pwr_convert(power_type);
+
+ if (ap_type == WMI_REG_MAX_AP_TYPE)
+ ap_type = WMI_REG_INDOOR_AP;
+
+ reg_6ghz_number = reg_info->num_6ghz_rules_client
+ [ap_type][WMI_REG_DEFAULT_CLIENT];
+
+ if (reg_6ghz_number == 0) {
+ ap_type = WMI_REG_INDOOR_AP;
+ reg_6ghz_number = reg_info->num_6ghz_rules_client
+ [ap_type][WMI_REG_DEFAULT_CLIENT];
+ }
+
+ reg_rule_6ghz = reg_info->reg_rules_6ghz_client_ptr
+ [ap_type][WMI_REG_DEFAULT_CLIENT];
+ max_bw_6ghz = reg_info->max_bw_6ghz_client
+ [ap_type][WMI_REG_DEFAULT_CLIENT];
+ } else {
+ reg_6ghz_number = reg_info->num_6ghz_rules_ap[WMI_REG_INDOOR_AP];
+ reg_rule_6ghz =
+ reg_info->reg_rules_6ghz_ap_ptr[WMI_REG_INDOOR_AP];
+ max_bw_6ghz = reg_info->max_bw_6ghz_ap[WMI_REG_INDOOR_AP];
+ }
+
+ num_rules += reg_6ghz_number;
+ }
+
+ if (!num_rules)
+ goto ret;
+
+ /* Add max additional rules to accommodate weather radar band */
+ if (reg_info->dfs_region == ATH11K_DFS_REG_ETSI)
+ num_rules += 2;
+
+ tmp_regd = kzalloc(sizeof(*tmp_regd) +
+ (num_rules * sizeof(struct ieee80211_reg_rule)),
+ GFP_ATOMIC);
+ if (!tmp_regd)
+ goto ret;
+
+ memcpy(tmp_regd->alpha2, reg_info->alpha2, REG_ALPHA2_LEN + 1);
+ memcpy(alpha2, reg_info->alpha2, REG_ALPHA2_LEN + 1);
+ alpha2[2] = '\0';
+ tmp_regd->dfs_region = ath11k_map_fw_dfs_region(reg_info->dfs_region);
+
+ ath11k_dbg(ab, ATH11K_DBG_REG,
+ "Country %s, CFG Regdomain %s FW Regdomain %d, num_reg_rules %d\n",
+ alpha2, ath11k_reg_get_regdom_str(tmp_regd->dfs_region),
+ reg_info->dfs_region, num_rules);
+ /* Update reg_rules[] below. Firmware is expected to
+ * send these rules in order (2 GHz rules first, then 5 GHz).
+ */
+ for (; i < num_rules; i++) {
+ if (reg_info->num_2ghz_reg_rules &&
+ (i < reg_info->num_2ghz_reg_rules)) {
+ reg_rule = reg_info->reg_rules_2ghz_ptr + i;
+ max_bw = min_t(u16, reg_rule->max_bw,
+ reg_info->max_bw_2ghz);
+ flags = 0;
+ } else if (reg_info->num_5ghz_reg_rules &&
+ (j < reg_info->num_5ghz_reg_rules)) {
+ reg_rule = reg_info->reg_rules_5ghz_ptr + j++;
+ max_bw = min_t(u16, reg_rule->max_bw,
+ reg_info->max_bw_5ghz);
+
+ /* FW doesn't pass the NL80211_RRF_AUTO_BW flag for
+ * BW auto correction, so we can enable it by default
+ * for all 5 GHz rules here. The regulatory core performs
+ * BW correction if required and applies flags as
+ * per the other BW rule flags we pass from here.
+ */
+ flags = NL80211_RRF_AUTO_BW;
+ } else if (reg_info->is_ext_reg_event && reg_6ghz_number &&
+ k < reg_6ghz_number) {
+ reg_rule = reg_rule_6ghz + k++;
+ max_bw = min_t(u16, reg_rule->max_bw, max_bw_6ghz);
+ flags = NL80211_RRF_AUTO_BW;
+ if (reg_rule->psd_flag)
+ flags |= NL80211_RRF_PSD;
+ } else {
+ break;
+ }
+
+ flags |= ath11k_map_fw_reg_flags(reg_rule->flags);
+ flags |= ath11k_map_fw_phy_flags(reg_info->phybitmap);
+
+ ath11k_reg_update_rule(tmp_regd->reg_rules + i,
+ reg_rule->start_freq,
+ reg_rule->end_freq, max_bw,
+ reg_rule->ant_gain, reg_rule->reg_power,
+ reg_rule->psd_eirp, flags);
+
+ /* Update dfs cac timeout if the dfs domain is ETSI and the
+ * new rule covers weather radar band.
+ * Default value of '0' corresponds to 60s timeout, so no
+ * need to update that for other rules.
+ */
+ if (flags & NL80211_RRF_DFS &&
+ reg_info->dfs_region == ATH11K_DFS_REG_ETSI &&
+ (reg_rule->end_freq > ETSI_WEATHER_RADAR_BAND_LOW &&
+ reg_rule->start_freq < ETSI_WEATHER_RADAR_BAND_HIGH)) {
+ ath11k_reg_update_weather_radar_band(ab, tmp_regd,
+ reg_rule, &i,
+ flags, max_bw);
+ continue;
+ }
+
+ if (reg_info->is_ext_reg_event) {
+ ath11k_dbg(ab, ATH11K_DBG_REG,
+ "\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d) (%d, %d)\n",
+ i + 1, reg_rule->start_freq, reg_rule->end_freq,
+ max_bw, reg_rule->ant_gain, reg_rule->reg_power,
+ tmp_regd->reg_rules[i].dfs_cac_ms, flags,
+ reg_rule->psd_flag, reg_rule->psd_eirp);
+ } else {
+ ath11k_dbg(ab, ATH11K_DBG_REG,
+ "\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n",
+ i + 1, reg_rule->start_freq, reg_rule->end_freq,
+ max_bw, reg_rule->ant_gain, reg_rule->reg_power,
+ tmp_regd->reg_rules[i].dfs_cac_ms,
+ flags);
+ }
+ }
+
+ tmp_regd->n_reg_rules = i;
+
+ if (intersect) {
+ default_regd = ab->default_regd[reg_info->phy_id];
+
+ /* Get a new regd by intersecting the received regd with
+ * our default regd.
+ */
+ new_regd = ath11k_regd_intersect(default_regd, tmp_regd);
+ kfree(tmp_regd);
+ if (!new_regd) {
+ ath11k_warn(ab, "Unable to create intersected regdomain\n");
+ goto ret;
+ }
+ } else {
+ new_regd = tmp_regd;
+ }
+
+ret:
+ return new_regd;
+}
+
+void ath11k_regd_update_chan_list_work(struct work_struct *work)
+{
+ struct ath11k *ar = container_of(work, struct ath11k,
+ channel_update_work);
+ struct scan_chan_list_params *params;
+ struct list_head local_update_list;
+ int left;
+
+ INIT_LIST_HEAD(&local_update_list);
+
+ spin_lock_bh(&ar->data_lock);
+ list_splice_tail_init(&ar->channel_update_queue, &local_update_list);
+ spin_unlock_bh(&ar->data_lock);
+
+ while ((params = list_first_entry_or_null(&local_update_list,
+ struct scan_chan_list_params,
+ list))) {
+ if (ar->state_11d != ATH11K_11D_IDLE) {
+ left = wait_for_completion_timeout(&ar->completed_11d_scan,
+ ATH11K_SCAN_TIMEOUT_HZ);
+ if (!left) {
+ ath11k_dbg(ar->ab, ATH11K_DBG_REG,
+ "failed to receive 11d scan complete: timed out\n");
+ ar->state_11d = ATH11K_11D_IDLE;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_REG,
+ "reg 11d scan wait left time %d\n", left);
+ }
+
+ if ((ar->scan.state == ATH11K_SCAN_STARTING ||
+ ar->scan.state == ATH11K_SCAN_RUNNING)) {
+ left = wait_for_completion_timeout(&ar->scan.completed,
+ ATH11K_SCAN_TIMEOUT_HZ);
+ if (!left)
+ ath11k_dbg(ar->ab, ATH11K_DBG_REG,
+ "failed to receive hw scan complete: timed out\n");
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_REG,
+ "reg hw scan wait left time %d\n", left);
+ }
+
+ ath11k_wmi_send_scan_chan_list_cmd(ar, params);
+ list_del(&params->list);
+ kfree(params);
+ }
+}
+
+static bool ath11k_reg_is_world_alpha(char *alpha)
+{
+ if (alpha[0] == '0' && alpha[1] == '0')
+ return true;
+
+ if (alpha[0] == 'n' && alpha[1] == 'a')
+ return true;
+
+ return false;
+}
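+
+/* Example: the world-regd alpha2 codes "00" and "na" return true here,
+ * while a concrete country code (say "US", an arbitrary example) returns
+ * false and therefore enables the intersect path in
+ * ath11k_reg_handle_chan_list().
+ */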
+
+static enum wmi_vdev_type ath11k_reg_get_ar_vdev_type(struct ath11k *ar)
+{
+ struct ath11k_vif *arvif;
+
+ /* Currently each struct ath11k maps to one struct ieee80211_hw/wiphy
+ * and one struct ieee80211_regdomain, so it can only store one group
+ * of reg rules. This means multi-interface concurrency in the same ath11k
+ * is not supported for the regdomain, so get the vdev type of the first
+ * entry for now. After concurrency support for the regdomain, this should change.
+ */
+ arvif = list_first_entry_or_null(&ar->arvifs, struct ath11k_vif, list);
+ if (arvif)
+ return arvif->vdev_type;
+
+ return WMI_VDEV_TYPE_UNSPEC;
+}
+
+int ath11k_reg_handle_chan_list(struct ath11k_base *ab,
+ struct cur_regulatory_info *reg_info,
+ enum ieee80211_ap_reg_power power_type)
+{
+ struct ieee80211_regdomain *regd;
+ bool intersect = false;
+ int pdev_idx;
+ struct ath11k *ar;
+ enum wmi_vdev_type vdev_type;
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "event reg handle chan list");
+
+ if (reg_info->status_code != REG_SET_CC_STATUS_PASS) {
+ /* In case of failure to set the requested country,
+ * fw retains the current regd. We print a warning
+ * and return from here.
+ */
+ ath11k_warn(ab, "Failed to set the requested Country regulatory setting\n");
+ return -EINVAL;
+ }
+
+ pdev_idx = reg_info->phy_id;
+
+ /* Avoid default reg rule updates sent during FW recovery if
+ * it is already available
+ */
+ spin_lock_bh(&ab->base_lock);
+ if (test_bit(ATH11K_FLAG_RECOVERY, &ab->dev_flags) &&
+ ab->default_regd[pdev_idx]) {
+ spin_unlock_bh(&ab->base_lock);
+ goto retfail;
+ }
+ spin_unlock_bh(&ab->base_lock);
+
+ if (pdev_idx >= ab->num_radios) {
+ /* Process the event for phy0 only if single_pdev_only
+ * is true. If pdev_idx is valid but not 0, discard the
+ * event. Otherwise, it goes to fallback. In either case
+ * ath11k_reg_reset_info() needs to be called to avoid
+ * a memory leak.
+ */
+ ath11k_reg_reset_info(reg_info);
+
+ if (ab->hw_params.single_pdev_only &&
+ pdev_idx < ab->hw_params.num_rxdma_per_pdev)
+ return 0;
+ goto fallback;
+ }
+
+ /* Avoid multiple overwrites to default regd, during core
+ * stop-start after mac registration.
+ */
+ if (ab->default_regd[pdev_idx] && !ab->new_regd[pdev_idx] &&
+ !memcmp((char *)ab->default_regd[pdev_idx]->alpha2,
+ (char *)reg_info->alpha2, 2))
+ goto retfail;
+
+ /* Intersect new rules with default regd if a new country setting was
+ * requested, i.e a default regd was already set during initialization
+ * and the regd coming from this event has a valid country info.
+ */
+ if (ab->default_regd[pdev_idx] &&
+ !ath11k_reg_is_world_alpha((char *)
+ ab->default_regd[pdev_idx]->alpha2) &&
+ !ath11k_reg_is_world_alpha((char *)reg_info->alpha2))
+ intersect = true;
+
+ ar = ab->pdevs[pdev_idx].ar;
+ vdev_type = ath11k_reg_get_ar_vdev_type(ar);
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "wmi handle chan list power type %d vdev type %d intersect %d\n",
+ power_type, vdev_type, intersect);
+
+ regd = ath11k_reg_build_regd(ab, reg_info, intersect, vdev_type, power_type);
+ if (!regd) {
+ ath11k_warn(ab, "failed to build regd from reg_info\n");
+ goto fallback;
+ }
+
+ if (power_type == IEEE80211_REG_UNSET_AP) {
+ ath11k_reg_reset_info(&ab->reg_info_store[pdev_idx]);
+ ab->reg_info_store[pdev_idx] = *reg_info;
+ }
+
+ spin_lock_bh(&ab->base_lock);
+ if (ab->default_regd[pdev_idx]) {
+ /* The initial rules from the FW after WMI init are used to build
+ * the default regd. From then on, any rules updated for
+ * the pdev could be due to user reg changes.
+ * Free the previously built regd before assigning the newly
+ * generated regd to ar. NULL pointer handling will be
+ * taken care of by kfree itself.
+ */
+ ar = ab->pdevs[pdev_idx].ar;
+ kfree(ab->new_regd[pdev_idx]);
+ ab->new_regd[pdev_idx] = regd;
+ queue_work(ab->workqueue, &ar->regd_update_work);
+ } else {
+ /* This regd will be applied during mac registration and is
+ * held constant throughout for regd intersection purposes.
+ */
+ ab->default_regd[pdev_idx] = regd;
+ }
+ ab->dfs_region = reg_info->dfs_region;
+ spin_unlock_bh(&ab->base_lock);
+
+ return 0;
+
+fallback:
+ /* Fall back to the older regd (by sending the previous country
+ * setting again) if the fw succeeded but we failed to process
+ * the result here. The regdomain should be uniform across driver
+ * and fw. Since the FW has processed the command and sent a success
+ * status, we expect this function to succeed as well. If it doesn't,
+ * the country needs to be reverted at the fw and the old
+ * SCAN_CHAN_LIST cmd needs to be sent.
+ */
+ /* TODO: This is rare, but still should also be handled */
+ WARN_ON(1);
+
+retfail:
+
+ return -EINVAL;
+}
+
+void ath11k_regd_update_work(struct work_struct *work)
+{
+ struct ath11k *ar = container_of(work, struct ath11k,
+ regd_update_work);
+ int ret;
+
+ ret = ath11k_regd_update(ar);
+ if (ret) {
+ /* Firmware has already moved to the new regd. We need
+ * to maintain channel consistency across FW, host driver
+ * and userspace. Hence as a fallback mechanism we can send
+ * the previous or default country code to the firmware.
+ */
+ /* TODO: Implement Fallback Mechanism */
+ }
+}
+
+void ath11k_reg_init(struct ath11k *ar)
+{
+ ar->hw->wiphy->regulatory_flags = REGULATORY_WIPHY_SELF_MANAGED;
+ ar->hw->wiphy->flags |= WIPHY_FLAG_NOTIFY_REGDOM_BY_DRIVER;
+ ar->hw->wiphy->reg_notifier = ath11k_reg_notifier;
+}
+
+void ath11k_reg_reset_info(struct cur_regulatory_info *reg_info)
+{
+ int i, j;
+
+ if (!reg_info)
+ return;
+
+ kfree(reg_info->reg_rules_2ghz_ptr);
+ kfree(reg_info->reg_rules_5ghz_ptr);
+
+ for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++) {
+ kfree(reg_info->reg_rules_6ghz_ap_ptr[i]);
+
+ for (j = 0; j < WMI_REG_MAX_CLIENT_TYPE; j++)
+ kfree(reg_info->reg_rules_6ghz_client_ptr[i][j]);
+ }
+
+ memset(reg_info, 0, sizeof(*reg_info));
+}
+
+void ath11k_reg_free(struct ath11k_base *ab)
+{
+ int i;
+
+ for (i = 0; i < ab->num_radios; i++)
+ ath11k_reg_reset_info(&ab->reg_info_store[i]);
+
+ kfree(ab->reg_info_store);
+ ab->reg_info_store = NULL;
+
+ for (i = 0; i < ab->hw_params.max_radios; i++) {
+ kfree(ab->default_regd[i]);
+ kfree(ab->new_regd[i]);
+ }
+}
+
+int ath11k_reg_set_cc(struct ath11k *ar)
+{
+ struct wmi_set_current_country_params set_current_param = {};
+
+ memcpy(&set_current_param.alpha2, ar->alpha2, 2);
+ return ath11k_wmi_send_set_current_country_cmd(ar, &set_current_param);
+}
diff --git a/drivers/net/wireless/ath/ath11k/reg.h b/drivers/net/wireless/ath/ath11k/reg.h
new file mode 100644
index 000000000000..72b483594015
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/reg.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef ATH11K_REG_H
+#define ATH11K_REG_H
+
+#include <linux/kernel.h>
+#include <net/regulatory.h>
+
+struct ath11k_base;
+struct ath11k;
+
+/* DFS regdomains supported by Firmware */
+enum ath11k_dfs_region {
+ ATH11K_DFS_REG_UNSET,
+ ATH11K_DFS_REG_FCC,
+ ATH11K_DFS_REG_ETSI,
+ ATH11K_DFS_REG_MKK,
+ ATH11K_DFS_REG_CN,
+ ATH11K_DFS_REG_KR,
+ ATH11K_DFS_REG_MKK_N,
+ ATH11K_DFS_REG_UNDEF,
+};
+
+/* Phy bitmaps */
+#define ATH11K_REG_PHY_BITMAP_NO11AX BIT(5)
+
+/* ATH11K regulatory APIs */
+void ath11k_reg_init(struct ath11k *ar);
+void ath11k_reg_reset_info(struct cur_regulatory_info *reg_info);
+void ath11k_reg_free(struct ath11k_base *ab);
+void ath11k_regd_update_work(struct work_struct *work);
+void ath11k_regd_update_chan_list_work(struct work_struct *work);
+struct ieee80211_regdomain *
+ath11k_reg_build_regd(struct ath11k_base *ab,
+ struct cur_regulatory_info *reg_info, bool intersect,
+ enum wmi_vdev_type vdev_type,
+ enum ieee80211_ap_reg_power power_type);
+int ath11k_regd_update(struct ath11k *ar);
+int ath11k_reg_update_chan_list(struct ath11k *ar, bool wait);
+enum wmi_reg_6ghz_ap_type
+ath11k_reg_ap_pwr_convert(enum ieee80211_ap_reg_power power_type);
+int ath11k_reg_handle_chan_list(struct ath11k_base *ab,
+ struct cur_regulatory_info *reg_info,
+ enum ieee80211_ap_reg_power power_type);
+int ath11k_reg_set_cc(struct ath11k *ar);
+#endif
diff --git a/drivers/net/wireless/ath/ath11k/rx_desc.h b/drivers/net/wireless/ath/ath11k/rx_desc.h
new file mode 100644
index 000000000000..2da6da727278
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/rx_desc.h
@@ -0,0 +1,1506 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+#ifndef ATH11K_RX_DESC_H
+#define ATH11K_RX_DESC_H
+
+enum rx_desc_rxpcu_filter {
+ RX_DESC_RXPCU_FILTER_PASS,
+ RX_DESC_RXPCU_FILTER_MONITOR_CLIENT,
+ RX_DESC_RXPCU_FILTER_MONITOR_OTHER,
+};
+
+/* rxpcu_filter_pass
+ * This MPDU passed the normal frame filter programming of rxpcu.
+ *
+ * rxpcu_filter_monitor_client
+ * This MPDU did not pass the regular frame filter and would
+ * have been dropped, were it not for the frame fitting into the
+ * 'monitor_client' category.
+ *
+ * rxpcu_filter_monitor_other
+ * This MPDU did not pass the regular frame filter and also did
+ * not pass the rxpcu_monitor_client filter. It would have been
+ * dropped except that it did pass the 'monitor_other' category.
+ */
+
+#define RX_DESC_INFO0_RXPCU_MPDU_FITLER GENMASK(1, 0)
+#define RX_DESC_INFO0_SW_FRAME_GRP_ID GENMASK(8, 2)
+
+enum rx_desc_sw_frame_grp_id {
+ RX_DESC_SW_FRAME_GRP_ID_NDP_FRAME,
+ RX_DESC_SW_FRAME_GRP_ID_MCAST_DATA,
+ RX_DESC_SW_FRAME_GRP_ID_UCAST_DATA,
+ RX_DESC_SW_FRAME_GRP_ID_NULL_DATA,
+ RX_DESC_SW_FRAME_GRP_ID_MGMT_0000,
+ RX_DESC_SW_FRAME_GRP_ID_MGMT_0001,
+ RX_DESC_SW_FRAME_GRP_ID_MGMT_0010,
+ RX_DESC_SW_FRAME_GRP_ID_MGMT_0011,
+ RX_DESC_SW_FRAME_GRP_ID_MGMT_0100,
+ RX_DESC_SW_FRAME_GRP_ID_MGMT_0101,
+ RX_DESC_SW_FRAME_GRP_ID_MGMT_0110,
+ RX_DESC_SW_FRAME_GRP_ID_MGMT_0111,
+ RX_DESC_SW_FRAME_GRP_ID_MGMT_1000,
+ RX_DESC_SW_FRAME_GRP_ID_MGMT_1001,
+ RX_DESC_SW_FRAME_GRP_ID_MGMT_1010,
+ RX_DESC_SW_FRAME_GRP_ID_MGMT_1011,
+ RX_DESC_SW_FRAME_GRP_ID_MGMT_1100,
+ RX_DESC_SW_FRAME_GRP_ID_MGMT_1101,
+ RX_DESC_SW_FRAME_GRP_ID_MGMT_1110,
+ RX_DESC_SW_FRAME_GRP_ID_MGMT_1111,
+ RX_DESC_SW_FRAME_GRP_ID_CTRL_0000,
+ RX_DESC_SW_FRAME_GRP_ID_CTRL_0001,
+ RX_DESC_SW_FRAME_GRP_ID_CTRL_0010,
+ RX_DESC_SW_FRAME_GRP_ID_CTRL_0011,
+ RX_DESC_SW_FRAME_GRP_ID_CTRL_0100,
+ RX_DESC_SW_FRAME_GRP_ID_CTRL_0101,
+ RX_DESC_SW_FRAME_GRP_ID_CTRL_0110,
+ RX_DESC_SW_FRAME_GRP_ID_CTRL_0111,
+ RX_DESC_SW_FRAME_GRP_ID_CTRL_1000,
+ RX_DESC_SW_FRAME_GRP_ID_CTRL_1001,
+ RX_DESC_SW_FRAME_GRP_ID_CTRL_1010,
+ RX_DESC_SW_FRAME_GRP_ID_CTRL_1011,
+ RX_DESC_SW_FRAME_GRP_ID_CTRL_1100,
+ RX_DESC_SW_FRAME_GRP_ID_CTRL_1101,
+ RX_DESC_SW_FRAME_GRP_ID_CTRL_1110,
+ RX_DESC_SW_FRAME_GRP_ID_CTRL_1111,
+ RX_DESC_SW_FRAME_GRP_ID_UNSUPPORTED,
+ RX_DESC_SW_FRAME_GRP_ID_PHY_ERR,
+};
+
+enum rx_desc_decap_type {
+ RX_DESC_DECAP_TYPE_RAW,
+ RX_DESC_DECAP_TYPE_NATIVE_WIFI,
+ RX_DESC_DECAP_TYPE_ETHERNET2_DIX,
+ RX_DESC_DECAP_TYPE_8023,
+};
+
+enum rx_desc_decrypt_status_code {
+ RX_DESC_DECRYPT_STATUS_CODE_OK,
+ RX_DESC_DECRYPT_STATUS_CODE_UNPROTECTED_FRAME,
+ RX_DESC_DECRYPT_STATUS_CODE_DATA_ERR,
+ RX_DESC_DECRYPT_STATUS_CODE_KEY_INVALID,
+ RX_DESC_DECRYPT_STATUS_CODE_PEER_ENTRY_INVALID,
+ RX_DESC_DECRYPT_STATUS_CODE_OTHER,
+};
+
+#define RX_ATTENTION_INFO1_FIRST_MPDU BIT(0)
+#define RX_ATTENTION_INFO1_RSVD_1A BIT(1)
+#define RX_ATTENTION_INFO1_MCAST_BCAST BIT(2)
+#define RX_ATTENTION_INFO1_AST_IDX_NOT_FOUND BIT(3)
+#define RX_ATTENTION_INFO1_AST_IDX_TIMEDOUT BIT(4)
+#define RX_ATTENTION_INFO1_POWER_MGMT BIT(5)
+#define RX_ATTENTION_INFO1_NON_QOS BIT(6)
+#define RX_ATTENTION_INFO1_NULL_DATA BIT(7)
+#define RX_ATTENTION_INFO1_MGMT_TYPE BIT(8)
+#define RX_ATTENTION_INFO1_CTRL_TYPE BIT(9)
+#define RX_ATTENTION_INFO1_MORE_DATA BIT(10)
+#define RX_ATTENTION_INFO1_EOSP BIT(11)
+#define RX_ATTENTION_INFO1_A_MSDU_ERROR BIT(12)
+#define RX_ATTENTION_INFO1_FRAGMENT BIT(13)
+#define RX_ATTENTION_INFO1_ORDER BIT(14)
+#define RX_ATTENTION_INFO1_CCE_MATCH BIT(15)
+#define RX_ATTENTION_INFO1_OVERFLOW_ERR BIT(16)
+#define RX_ATTENTION_INFO1_MSDU_LEN_ERR BIT(17)
+#define RX_ATTENTION_INFO1_TCP_UDP_CKSUM_FAIL BIT(18)
+#define RX_ATTENTION_INFO1_IP_CKSUM_FAIL BIT(19)
+#define RX_ATTENTION_INFO1_SA_IDX_INVALID BIT(20)
+#define RX_ATTENTION_INFO1_DA_IDX_INVALID BIT(21)
+#define RX_ATTENTION_INFO1_RSVD_1B BIT(22)
+#define RX_ATTENTION_INFO1_RX_IN_TX_DECRYPT_BYP BIT(23)
+#define RX_ATTENTION_INFO1_ENCRYPT_REQUIRED BIT(24)
+#define RX_ATTENTION_INFO1_DIRECTED BIT(25)
+#define RX_ATTENTION_INFO1_BUFFER_FRAGMENT BIT(26)
+#define RX_ATTENTION_INFO1_MPDU_LEN_ERR BIT(27)
+#define RX_ATTENTION_INFO1_TKIP_MIC_ERR BIT(28)
+#define RX_ATTENTION_INFO1_DECRYPT_ERR BIT(29)
+#define RX_ATTENTION_INFO1_UNDECRYPT_FRAME_ERR BIT(30)
+#define RX_ATTENTION_INFO1_FCS_ERR BIT(31)
+
+#define RX_ATTENTION_INFO2_FLOW_IDX_TIMEOUT BIT(0)
+#define RX_ATTENTION_INFO2_FLOW_IDX_INVALID BIT(1)
+#define RX_ATTENTION_INFO2_WIFI_PARSER_ERR BIT(2)
+#define RX_ATTENTION_INFO2_AMSDU_PARSER_ERR BIT(3)
+#define RX_ATTENTION_INFO2_SA_IDX_TIMEOUT BIT(4)
+#define RX_ATTENTION_INFO2_DA_IDX_TIMEOUT BIT(5)
+#define RX_ATTENTION_INFO2_MSDU_LIMIT_ERR BIT(6)
+#define RX_ATTENTION_INFO2_DA_IS_VALID BIT(7)
+#define RX_ATTENTION_INFO2_DA_IS_MCBC BIT(8)
+#define RX_ATTENTION_INFO2_SA_IS_VALID BIT(9)
+#define RX_ATTENTION_INFO2_DCRYPT_STATUS_CODE GENMASK(12, 10)
+#define RX_ATTENTION_INFO2_RX_BITMAP_NOT_UPDED BIT(13)
+#define RX_ATTENTION_INFO2_MSDU_DONE BIT(31)
+
+struct rx_attention {
+ __le16 info0;
+ __le16 phy_ppdu_id;
+ __le32 info1;
+ __le32 info2;
+} __packed;
+
+/* rx_attention
+ *
+ * rxpcu_mpdu_filter_in_category
+ * Field indicates what the reason was that this mpdu frame
+ * was allowed to come into the receive path by rxpcu. Values
+ * are defined in enum %RX_DESC_RXPCU_FILTER_*.
+ *
+ * sw_frame_group_id
+ * SW processes frames based on certain classifications. Values
+ * are defined in enum %RX_DESC_SW_FRAME_GRP_ID_*.
+ *
+ * phy_ppdu_id
+ * A ppdu counter value that PHY increments for every PPDU
+ * received. The counter value wraps around.
+ *
+ * first_mpdu
+ * Indicates the first MPDU of the PPDU. If both first_mpdu
+ * and last_mpdu are set in the MSDU then this is not an
+ * A-MPDU frame but a stand-alone MPDU. Interior MPDUs in an
+ * A-MPDU shall have both first_mpdu and last_mpdu bits set to
+ * 0. The PPDU start status will only be valid when this bit
+ * is set.
+ *
+ * mcast_bcast
+ * Multicast / broadcast indicator. Only set when the MAC
+ * address 1 bit 0 is set indicating mcast/bcast and the BSSID
+ * matches one of the 4 BSSID registers. Only set when
+ * first_msdu is set.
+ *
+ * ast_index_not_found
+ * Only valid when first_msdu is set. Indicates no AST matching
+ * entries within the max search count.
+ *
+ * ast_index_timeout
+ * Only valid when first_msdu is set. Indicates an unsuccessful
+ * search in the address search table due to timeout.
+ *
+ * power_mgmt
+ * Power management bit set in the 802.11 header. Only set
+ * when first_msdu is set.
+ *
+ * non_qos
+ * Set if the packet is a non-QoS data frame. Only set when
+ * first_msdu is set.
+ *
+ * null_data
+ * Set if frame type indicates either null data or QoS null
+ * data format. Only set when first_msdu is set.
+ *
+ * mgmt_type
+ * Set if packet is a management packet. Only set when
+ * first_msdu is set.
+ *
+ * ctrl_type
+ * Set if packet is a control packet. Only set when first_msdu
+ * is set.
+ *
+ * more_data
+ * Set if more bit in frame control is set. Only set when
+ * first_msdu is set.
+ *
+ * eosp
+ * Set if the EOSP (end of service period) bit in the QoS
+ * control field is set. Only set when first_msdu is set.
+ *
+ * a_msdu_error
+ * Set if the number of MSDUs in the A-MSDU is above a threshold or if the
+ * size of the MSDU is invalid. This receive buffer will contain
+ * all of the remaining MSDUs in this MPDU w/o decapsulation.
+ *
+ * fragment
+ * Indicates that this is an 802.11 fragment frame. This is
+ * set when either the more_frag bit is set in the frame
+ * control or the fragment number is not zero. Only set when
+ * first_msdu is set.
+ *
+ * order
+ * Set if the order bit in the frame control is set. Only set
+ * when first_msdu is set.
+ *
+ * cce_match
+ * Indicates that this status has a corresponding MSDU that
+ * requires FW processing. The OLE will have classification
+ * ring mask registers which will indicate the ring(s) for
+ * packets and descriptors which need FW attention.
+ *
+ * overflow_err
+ * PCU receive FIFO does not have enough space to store the
+ * full receive packet. Enough space is reserved in the
+ * receive FIFO so that the status can be written. This MPDU and the
+ * remaining packets in the PPDU will be filtered and no Ack response
+ * will be transmitted.
+ *
+ * msdu_length_err
+ * Indicates that the MSDU length from the 802.3 encapsulated
+ * length field extends beyond the MPDU boundary.
+ *
+ * tcp_udp_chksum_fail
+ * Indicates that the computed checksum (tcp_udp_chksum) did
+ * not match the checksum in the TCP/UDP header.
+ *
+ * ip_chksum_fail
+ * Indicates that the computed checksum did not match the
+ * checksum in the IP header.
+ *
+ * sa_idx_invalid
+ * Indicates no matching entry was found in the address search
+ * table for the source MAC address.
+ *
+ * da_idx_invalid
+ * Indicates no matching entry was found in the address search
+ * table for the destination MAC address.
+ *
+ * rx_in_tx_decrypt_byp
+ * Indicates that RX packet is not decrypted as Crypto is busy
+ * with TX packet processing.
+ *
+ * encrypt_required
+ * Indicates that this data type frame is not encrypted even if
+ * the policy for this MPDU requires encryption as indicated in
+ * the peer table key type.
+ *
+ * directed
+ * MPDU is a directed packet which means that the RA matched
+ * our STA addresses. In proxySTA it means that the TA matched
+ * an entry in our address search table with the corresponding
+ * 'no_ack' bit in the address search entry cleared.
+ *
+ * buffer_fragment
+ * Indicates that at least one of the rx buffers has been
+ * fragmented. If set the FW should look at the rx_frag_info
+ * descriptor described below.
+ *
+ * mpdu_length_err
+ * Indicates that the MPDU was prematurely terminated,
+ * resulting in a truncated MPDU. Don't trust the MPDU length
+ * field.
+ *
+ * tkip_mic_err
+ * Indicates that the MPDU Michael integrity check failed
+ *
+ * decrypt_err
+ * Indicates that the MPDU decrypt integrity check failed
+ *
+ * fcs_err
+ * Indicates that the MPDU FCS check failed
+ *
+ * flow_idx_timeout
+ * Indicates an unsuccessful flow search due to the expiring of
+ * the search timer.
+ *
+ * flow_idx_invalid
+ * flow id is not valid.
+ *
+ * amsdu_parser_error
+ * A-MSDU could not be properly de-aggregated.
+ *
+ * sa_idx_timeout
+ * Indicates an unsuccessful search for the source MAC address
+ * due to the expiring of the search timer.
+ *
+ * da_idx_timeout
+ * Indicates an unsuccessful search for the destination MAC
+ * address due to the expiring of the search timer.
+ *
+ * msdu_limit_error
+ * Indicates that the MSDU threshold was exceeded and thus
+ * all the rest of the MSDUs will not be scattered and will not
+ * be decapsulated but will be DMA'ed in RAW format as a single
+ * MSDU buffer.
+ *
+ * da_is_valid
+ * Indicates that OLE found a valid DA entry.
+ *
+ * da_is_mcbc
+ * Field only valid if da_is_valid is set. Indicates the DA address
+ * was a multicast or broadcast address.
+ *
+ * sa_is_valid
+ * Indicates that OLE found a valid SA entry.
+ *
+ * decrypt_status_code
+ * Field provides insight into the decryption performed. Values are
+ * defined in enum %RX_DESC_DECRYPT_STATUS_CODE*.
+ *
+ * rx_bitmap_not_updated
+ * Frame is received, but RXPCU could not update the receive bitmap
+ * due to (temporary) fifo constraints.
+ *
+ * msdu_done
+ * If set indicates that the RX packet data, RX header data, RX
+ * PPDU start descriptor, RX MPDU start/end descriptor, RX MSDU
+ * start/end descriptors and RX Attention descriptor are all
+ * valid. This bit must be in the last octet of the
+ * descriptor.
+ */
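+
+/* Minimal extraction sketch (illustrative only, assuming a valid
+ * struct rx_attention *attn; FIELD_GET() comes from <linux/bitfield.h>):
+ *
+ *	u32 info2 = le32_to_cpu(attn->info2);
+ *
+ *	(bail out unless the descriptor is fully written)
+ *	if (!(info2 & RX_ATTENTION_INFO2_MSDU_DONE))
+ *		return;
+ *	status = FIELD_GET(RX_ATTENTION_INFO2_DCRYPT_STATUS_CODE, info2);
+ */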
+
+#define RX_MPDU_START_INFO0_NDP_FRAME BIT(9)
+#define RX_MPDU_START_INFO0_PHY_ERR BIT(10)
+#define RX_MPDU_START_INFO0_PHY_ERR_MPDU_HDR BIT(11)
+#define RX_MPDU_START_INFO0_PROTO_VER_ERR BIT(12)
+#define RX_MPDU_START_INFO0_AST_LOOKUP_VALID BIT(13)
+
+#define RX_MPDU_START_INFO1_MPDU_FCTRL_VALID BIT(0)
+#define RX_MPDU_START_INFO1_MPDU_DUR_VALID BIT(1)
+#define RX_MPDU_START_INFO1_MAC_ADDR1_VALID BIT(2)
+#define RX_MPDU_START_INFO1_MAC_ADDR2_VALID BIT(3)
+#define RX_MPDU_START_INFO1_MAC_ADDR3_VALID BIT(4)
+#define RX_MPDU_START_INFO1_MAC_ADDR4_VALID BIT(5)
+#define RX_MPDU_START_INFO1_MPDU_SEQ_CTRL_VALID BIT(6)
+#define RX_MPDU_START_INFO1_MPDU_QOS_CTRL_VALID BIT(7)
+#define RX_MPDU_START_INFO1_MPDU_HT_CTRL_VALID BIT(8)
+#define RX_MPDU_START_INFO1_ENCRYPT_INFO_VALID BIT(9)
+#define RX_MPDU_START_INFO1_MPDU_FRAG_NUMBER GENMASK(13, 10)
+#define RX_MPDU_START_INFO1_MORE_FRAG_FLAG BIT(14)
+#define RX_MPDU_START_INFO1_FROM_DS BIT(16)
+#define RX_MPDU_START_INFO1_TO_DS BIT(17)
+#define RX_MPDU_START_INFO1_ENCRYPTED BIT(18)
+#define RX_MPDU_START_INFO1_MPDU_RETRY BIT(19)
+#define RX_MPDU_START_INFO1_MPDU_SEQ_NUM GENMASK(31, 20)
+
+#define RX_MPDU_START_INFO2_EPD_EN BIT(0)
+#define RX_MPDU_START_INFO2_ALL_FRAME_ENCPD BIT(1)
+#define RX_MPDU_START_INFO2_ENC_TYPE GENMASK(5, 2)
+#define RX_MPDU_START_INFO2_VAR_WEP_KEY_WIDTH GENMASK(7, 6)
+#define RX_MPDU_START_INFO2_MESH_STA BIT(8)
+#define RX_MPDU_START_INFO2_BSSID_HIT BIT(9)
+#define RX_MPDU_START_INFO2_BSSID_NUM GENMASK(13, 10)
+#define RX_MPDU_START_INFO2_TID GENMASK(17, 14)
+#define RX_MPDU_START_INFO2_TID_WCN6855 GENMASK(18, 15)
+
+#define RX_MPDU_START_INFO3_REO_DEST_IND GENMASK(4, 0)
+#define RX_MPDU_START_INFO3_FLOW_ID_TOEPLITZ BIT(7)
+#define RX_MPDU_START_INFO3_PKT_SEL_FP_UCAST_DATA BIT(8)
+#define RX_MPDU_START_INFO3_PKT_SEL_FP_MCAST_DATA BIT(9)
+#define RX_MPDU_START_INFO3_PKT_SEL_FP_CTRL_BAR BIT(10)
+#define RX_MPDU_START_INFO3_RXDMA0_SRC_RING_SEL GENMASK(12, 11)
+#define RX_MPDU_START_INFO3_RXDMA0_DST_RING_SEL GENMASK(14, 13)
+
+#define RX_MPDU_START_INFO4_REO_QUEUE_DESC_HI GENMASK(7, 0)
+#define RX_MPDU_START_INFO4_RECV_QUEUE_NUM GENMASK(23, 8)
+#define RX_MPDU_START_INFO4_PRE_DELIM_ERR_WARN BIT(24)
+#define RX_MPDU_START_INFO4_FIRST_DELIM_ERR BIT(25)
+
+#define RX_MPDU_START_INFO5_KEY_ID GENMASK(7, 0)
+#define RX_MPDU_START_INFO5_NEW_PEER_ENTRY BIT(8)
+#define RX_MPDU_START_INFO5_DECRYPT_NEEDED BIT(9)
+#define RX_MPDU_START_INFO5_DECAP_TYPE GENMASK(11, 10)
+#define RX_MPDU_START_INFO5_VLAN_TAG_C_PADDING BIT(12)
+#define RX_MPDU_START_INFO5_VLAN_TAG_S_PADDING BIT(13)
+#define RX_MPDU_START_INFO5_STRIP_VLAN_TAG_C BIT(14)
+#define RX_MPDU_START_INFO5_STRIP_VLAN_TAG_S BIT(15)
+#define RX_MPDU_START_INFO5_PRE_DELIM_COUNT GENMASK(27, 16)
+#define RX_MPDU_START_INFO5_AMPDU_FLAG BIT(28)
+#define RX_MPDU_START_INFO5_BAR_FRAME BIT(29)
+
+#define RX_MPDU_START_INFO6_MPDU_LEN GENMASK(13, 0)
+#define RX_MPDU_START_INFO6_FIRST_MPDU BIT(14)
+#define RX_MPDU_START_INFO6_MCAST_BCAST BIT(15)
+#define RX_MPDU_START_INFO6_AST_IDX_NOT_FOUND BIT(16)
+#define RX_MPDU_START_INFO6_AST_IDX_TIMEOUT BIT(17)
+#define RX_MPDU_START_INFO6_POWER_MGMT BIT(18)
+#define RX_MPDU_START_INFO6_NON_QOS BIT(19)
+#define RX_MPDU_START_INFO6_NULL_DATA BIT(20)
+#define RX_MPDU_START_INFO6_MGMT_TYPE BIT(21)
+#define RX_MPDU_START_INFO6_CTRL_TYPE BIT(22)
+#define RX_MPDU_START_INFO6_MORE_DATA BIT(23)
+#define RX_MPDU_START_INFO6_EOSP BIT(24)
+#define RX_MPDU_START_INFO6_FRAGMENT BIT(25)
+#define RX_MPDU_START_INFO6_ORDER BIT(26)
+#define RX_MPDU_START_INFO6_UAPSD_TRIGGER BIT(27)
+#define RX_MPDU_START_INFO6_ENCRYPT_REQUIRED BIT(28)
+#define RX_MPDU_START_INFO6_DIRECTED BIT(29)
+
+#define RX_MPDU_START_RAW_MPDU BIT(0)
+
+struct rx_mpdu_start_ipq8074 {
+ __le16 info0;
+ __le16 phy_ppdu_id;
+ __le16 ast_index;
+ __le16 sw_peer_id;
+ __le32 info1;
+ __le32 info2;
+ __le32 pn[4];
+ __le32 peer_meta_data;
+ __le32 info3;
+ __le32 reo_queue_desc_lo;
+ __le32 info4;
+ __le32 info5;
+ __le32 info6;
+ __le16 frame_ctrl;
+ __le16 duration;
+ u8 addr1[ETH_ALEN];
+ u8 addr2[ETH_ALEN];
+ u8 addr3[ETH_ALEN];
+ __le16 seq_ctrl;
+ u8 addr4[ETH_ALEN];
+ __le16 qos_ctrl;
+ __le32 ht_ctrl;
+ __le32 raw;
+} __packed;
+
+#define RX_MPDU_START_INFO7_REO_DEST_IND GENMASK(4, 0)
+#define RX_MPDU_START_INFO7_LMAC_PEER_ID_MSB GENMASK(6, 5)
+#define RX_MPDU_START_INFO7_FLOW_ID_TOEPLITZ BIT(7)
+#define RX_MPDU_START_INFO7_PKT_SEL_FP_UCAST_DATA BIT(8)
+#define RX_MPDU_START_INFO7_PKT_SEL_FP_MCAST_DATA BIT(9)
+#define RX_MPDU_START_INFO7_PKT_SEL_FP_CTRL_BAR BIT(10)
+#define RX_MPDU_START_INFO7_RXDMA0_SRC_RING_SEL GENMASK(12, 11)
+#define RX_MPDU_START_INFO7_RXDMA0_DST_RING_SEL GENMASK(14, 13)
+
+#define RX_MPDU_START_INFO8_REO_QUEUE_DESC_HI GENMASK(7, 0)
+#define RX_MPDU_START_INFO8_RECV_QUEUE_NUM GENMASK(23, 8)
+#define RX_MPDU_START_INFO8_PRE_DELIM_ERR_WARN BIT(24)
+#define RX_MPDU_START_INFO8_FIRST_DELIM_ERR BIT(25)
+
+#define RX_MPDU_START_INFO9_EPD_EN BIT(0)
+#define RX_MPDU_START_INFO9_ALL_FRAME_ENCPD BIT(1)
+#define RX_MPDU_START_INFO9_ENC_TYPE GENMASK(5, 2)
+#define RX_MPDU_START_INFO9_VAR_WEP_KEY_WIDTH GENMASK(7, 6)
+#define RX_MPDU_START_INFO9_MESH_STA GENMASK(9, 8)
+#define RX_MPDU_START_INFO9_BSSID_HIT BIT(10)
+#define RX_MPDU_START_INFO9_BSSID_NUM GENMASK(14, 11)
+#define RX_MPDU_START_INFO9_TID GENMASK(18, 15)
+
+#define RX_MPDU_START_INFO10_RXPCU_MPDU_FLTR GENMASK(1, 0)
+#define RX_MPDU_START_INFO10_SW_FRAME_GRP_ID GENMASK(8, 2)
+#define RX_MPDU_START_INFO10_NDP_FRAME BIT(9)
+#define RX_MPDU_START_INFO10_PHY_ERR BIT(10)
+#define RX_MPDU_START_INFO10_PHY_ERR_MPDU_HDR BIT(11)
+#define RX_MPDU_START_INFO10_PROTO_VER_ERR BIT(12)
+#define RX_MPDU_START_INFO10_AST_LOOKUP_VALID BIT(13)
+
+#define RX_MPDU_START_INFO11_MPDU_FCTRL_VALID BIT(0)
+#define RX_MPDU_START_INFO11_MPDU_DUR_VALID BIT(1)
+#define RX_MPDU_START_INFO11_MAC_ADDR1_VALID BIT(2)
+#define RX_MPDU_START_INFO11_MAC_ADDR2_VALID BIT(3)
+#define RX_MPDU_START_INFO11_MAC_ADDR3_VALID BIT(4)
+#define RX_MPDU_START_INFO11_MAC_ADDR4_VALID BIT(5)
+#define RX_MPDU_START_INFO11_MPDU_SEQ_CTRL_VALID BIT(6)
+#define RX_MPDU_START_INFO11_MPDU_QOS_CTRL_VALID BIT(7)
+#define RX_MPDU_START_INFO11_MPDU_HT_CTRL_VALID BIT(8)
+#define RX_MPDU_START_INFO11_ENCRYPT_INFO_VALID BIT(9)
+#define RX_MPDU_START_INFO11_MPDU_FRAG_NUMBER GENMASK(13, 10)
+#define RX_MPDU_START_INFO11_MORE_FRAG_FLAG BIT(14)
+#define RX_MPDU_START_INFO11_FROM_DS BIT(16)
+#define RX_MPDU_START_INFO11_TO_DS BIT(17)
+#define RX_MPDU_START_INFO11_ENCRYPTED BIT(18)
+#define RX_MPDU_START_INFO11_MPDU_RETRY BIT(19)
+#define RX_MPDU_START_INFO11_MPDU_SEQ_NUM GENMASK(31, 20)
+
+#define RX_MPDU_START_INFO12_KEY_ID GENMASK(7, 0)
+#define RX_MPDU_START_INFO12_NEW_PEER_ENTRY BIT(8)
+#define RX_MPDU_START_INFO12_DECRYPT_NEEDED BIT(9)
+#define RX_MPDU_START_INFO12_DECAP_TYPE GENMASK(11, 10)
+#define RX_MPDU_START_INFO12_VLAN_TAG_C_PADDING BIT(12)
+#define RX_MPDU_START_INFO12_VLAN_TAG_S_PADDING BIT(13)
+#define RX_MPDU_START_INFO12_STRIP_VLAN_TAG_C BIT(14)
+#define RX_MPDU_START_INFO12_STRIP_VLAN_TAG_S BIT(15)
+#define RX_MPDU_START_INFO12_PRE_DELIM_COUNT GENMASK(27, 16)
+#define RX_MPDU_START_INFO12_AMPDU_FLAG BIT(28)
+#define RX_MPDU_START_INFO12_BAR_FRAME BIT(29)
+#define RX_MPDU_START_INFO12_RAW_MPDU BIT(30)
+
+#define RX_MPDU_START_INFO13_MPDU_LEN GENMASK(13, 0)
+#define RX_MPDU_START_INFO13_FIRST_MPDU BIT(14)
+#define RX_MPDU_START_INFO13_MCAST_BCAST BIT(15)
+#define RX_MPDU_START_INFO13_AST_IDX_NOT_FOUND BIT(16)
+#define RX_MPDU_START_INFO13_AST_IDX_TIMEOUT BIT(17)
+#define RX_MPDU_START_INFO13_POWER_MGMT BIT(18)
+#define RX_MPDU_START_INFO13_NON_QOS BIT(19)
+#define RX_MPDU_START_INFO13_NULL_DATA BIT(20)
+#define RX_MPDU_START_INFO13_MGMT_TYPE BIT(21)
+#define RX_MPDU_START_INFO13_CTRL_TYPE BIT(22)
+#define RX_MPDU_START_INFO13_MORE_DATA BIT(23)
+#define RX_MPDU_START_INFO13_EOSP BIT(24)
+#define RX_MPDU_START_INFO13_FRAGMENT BIT(25)
+#define RX_MPDU_START_INFO13_ORDER BIT(26)
+#define RX_MPDU_START_INFO13_UAPSD_TRIGGER BIT(27)
+#define RX_MPDU_START_INFO13_ENCRYPT_REQUIRED BIT(28)
+#define RX_MPDU_START_INFO13_DIRECTED BIT(29)
+#define RX_MPDU_START_INFO13_AMSDU_PRESENT BIT(30)
+
+struct rx_mpdu_start_qcn9074 {
+ __le32 info7;
+ __le32 reo_queue_desc_lo;
+ __le32 info8;
+ __le32 pn[4];
+ __le32 info9;
+ __le32 peer_meta_data;
+ __le16 info10;
+ __le16 phy_ppdu_id;
+ __le16 ast_index;
+ __le16 sw_peer_id;
+ __le32 info11;
+ __le32 info12;
+ __le32 info13;
+ __le16 frame_ctrl;
+ __le16 duration;
+ u8 addr1[ETH_ALEN];
+ u8 addr2[ETH_ALEN];
+ u8 addr3[ETH_ALEN];
+ __le16 seq_ctrl;
+ u8 addr4[ETH_ALEN];
+ __le16 qos_ctrl;
+ __le32 ht_ctrl;
+} __packed;
+
+struct rx_mpdu_start_wcn6855 {
+ __le32 info3;
+ __le32 reo_queue_desc_lo;
+ __le32 info4;
+ __le32 pn[4];
+ __le32 info2;
+ __le32 peer_meta_data;
+ __le16 info0;
+ __le16 phy_ppdu_id;
+ __le16 ast_index;
+ __le16 sw_peer_id;
+ __le32 info1;
+ __le32 info5;
+ __le32 info6;
+ __le16 frame_ctrl;
+ __le16 duration;
+ u8 addr1[ETH_ALEN];
+ u8 addr2[ETH_ALEN];
+ u8 addr3[ETH_ALEN];
+ __le16 seq_ctrl;
+ u8 addr4[ETH_ALEN];
+ __le16 qos_ctrl;
+ __le32 ht_ctrl;
+} __packed;
+
+/* rx_mpdu_start
+ *
+ * rxpcu_mpdu_filter_in_category
+ * Field indicates what the reason was that this mpdu frame
+ * was allowed to come into the receive path by rxpcu. Values
+ * are defined in enum %RX_DESC_RXPCU_FILTER_*.
+ * Note: for an ndp frame, if it was expected because the preceding
+ * NDPA was filter_pass, the setting rxpcu_filter_pass will be
+ * used. This setting will also be used for every ndp frame when
+ * promiscuous mode is enabled.
+ *
+ * sw_frame_group_id
+ * SW processes frames based on certain classifications. Values
+ * are defined in enum %RX_DESC_SW_FRAME_GRP_ID_*.
+ *
+ * ndp_frame
+ * Indicates that the received frame was an NDP frame.
+ *
+ * phy_err
+ * Indicates that PHY error was received before MAC received data.
+ *
+ * phy_err_during_mpdu_header
+ * PHY error was received before MAC received the complete MPDU
+ * header which was needed for proper decoding.
+ *
+ * protocol_version_err
+ * RXPCU detected a version error in the frame control field.
+ *
+ * ast_based_lookup_valid
+ * AST based lookup for this frame has found a valid result.
+ *
+ * phy_ppdu_id
+ * A ppdu counter value that PHY increments for every PPDU
+ * received. The counter value wraps around.
+ *
+ * ast_index
+ * This field indicates the index of the AST entry corresponding
+ * to this MPDU. It is provided by the GSE module instantiated in
+ * RXPCU. A value of 0xFFFF indicates an invalid AST index.
+ *
+ * sw_peer_id
+ * This field indicates a unique peer identifier. It is set equal
+ * to field 'sw_peer_id' from the AST entry.
+ *
+ * mpdu_frame_control_valid, mpdu_duration_valid, mpdu_qos_control_valid,
+ * mpdu_ht_control_valid, frame_encryption_info_valid
+ * Indicate that the corresponding fields contain valid entries.
+ *
+ * mac_addr_adx_valid
+ * Corresponding mac_addr_adx_{lo/hi} has valid entries.
+ *
+ * from_ds, to_ds
+ * Valid only when mpdu_frame_control_valid is set. Indicate whether
+ * the frame was received from the DS and/or is destined to the DS.
+ *
+ * encrypted
+ * Protected bit from the frame control.
+ *
+ * mpdu_retry
+ * Retry bit from frame control. Only valid when first_msdu is set.
+ *
+ * mpdu_sequence_number
+ * The sequence number from the 802.11 header.
+ *
+ * epd_en
+ * If set, use EPD instead of LPD.
+ *
+ * all_frames_shall_be_encrypted
+ * If set, all frames (data only?) shall be encrypted. If not,
+ * RX CRYPTO shall set an error flag.
+ *
+ * encrypt_type
+ * Values are defined in enum %HAL_ENCRYPT_TYPE_.
+ *
+ * mesh_sta
+ * Indicates a Mesh (11s) STA.
+ *
+ * bssid_hit
+ * BSSID of the incoming frame matched one of the 8 BSSID
+ * register values.
+ *
+ * bssid_number
+ * This number indicates which one out of the 8 BSSID register
+ * values matched the incoming frame.
+ *
+ * tid
+ * TID field in the QoS control field
+ *
+ * pn
+ * The PN number.
+ *
+ * peer_meta_data
+ * Meta data that SW has programmed in the Peer table entry
+ * of the transmitting STA.
+ *
+ * rx_reo_queue_desc_addr_lo
+ * Address (lower 32 bits) of the REO queue descriptor.
+ *
+ * rx_reo_queue_desc_addr_hi
+ * Address (upper 8 bits) of the REO queue descriptor.
+ *
+ * receive_queue_number
+ * Indicates the MPDU queue ID to which this MPDU link
+ * descriptor belongs.
+ *
+ * pre_delim_err_warning
+ * Indicates that a delimiter FCS error was found in between the
+ * previous MPDU and this MPDU. Note that this is just a warning,
+ * and does not mean that this MPDU is corrupted in any way. If
+ * it is, there will be other errors indicated such as FCS or
+ * decrypt errors.
+ *
+ * first_delim_err
+ * Indicates that the first delimiter had a FCS failure.
+ *
+ * key_id
+ * The key ID octet from the IV.
+ *
+ * new_peer_entry
+ * Set if a new RX_PEER_ENTRY TLV follows. If clear, RX_PEER_ENTRY
+ * doesn't follow, so the RX DECRYPTION module either uses the old
+ * peer entry or does not decrypt.
+ *
+ * decrypt_needed
+ * When RXPCU sets bit 'ast_index_not_found' or 'ast_index_timeout',
+ * RXPCU will also ensure that this bit is NOT set. CRYPTO for that
+ * reason only needs to evaluate this bit and none of the other ones.
+ *
+ * decap_type
+ * Used by the OLE during decapsulation. Values are defined in
+ * enum %MPDU_START_DECAP_TYPE_*.
+ *
+ * rx_insert_vlan_c_tag_padding
+ * rx_insert_vlan_s_tag_padding
+ * Insert 4 byte of all zeros as VLAN tag or double VLAN tag if
+ * the rx payload does not have VLAN.
+ *
+ * strip_vlan_c_tag_decap
+ * strip_vlan_s_tag_decap
+ * Strip VLAN or double VLAN during decapsulation.
+ *
+ * pre_delim_count
+ * The number of delimiters before this MPDU. Note that this
+ * number is cleared at PPDU start. If this MPDU is the first
+ * received MPDU in the PPDU and this MPDU gets filtered-in,
+ * this field will indicate the number of delimiters located
+ * after the last MPDU in the previous PPDU.
+ *
+ * If this MPDU is located after the first received MPDU in
+ * a PPDU, this field will indicate the number of delimiters
+ * located between the previous MPDU and this MPDU.
+ *
+ * ampdu_flag
+ * Received frame was part of an A-MPDU.
+ *
+ * bar_frame
+ * Received frame is a BAR frame
+ *
+ * mpdu_length
+ * MPDU length before decapsulation.
+ *
+ * first_mpdu..directed
+ * See definition in RX attention descriptor
+ *
+ */
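+
+/* Extraction sketch (illustrative only, assuming a valid
+ * struct rx_mpdu_start_ipq8074 *mpdu laid out as above):
+ *
+ *	u32 info1 = le32_to_cpu(mpdu->info1);
+ *	u16 seq_num = FIELD_GET(RX_MPDU_START_INFO1_MPDU_SEQ_NUM, info1);
+ *	bool more_frag = info1 & RX_MPDU_START_INFO1_MORE_FRAG_FLAG;
+ *
+ * The qcn9074 and wcn6855 variants reorder the words, so the INFO7..INFO13
+ * (or *_WCN6855) masks must be paired with the matching struct.
+ */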
+
+enum rx_msdu_start_pkt_type {
+ RX_MSDU_START_PKT_TYPE_11A,
+ RX_MSDU_START_PKT_TYPE_11B,
+ RX_MSDU_START_PKT_TYPE_11N,
+ RX_MSDU_START_PKT_TYPE_11AC,
+ RX_MSDU_START_PKT_TYPE_11AX,
+};
+
+enum rx_msdu_start_sgi {
+ RX_MSDU_START_SGI_0_8_US,
+ RX_MSDU_START_SGI_0_4_US,
+ RX_MSDU_START_SGI_1_6_US,
+ RX_MSDU_START_SGI_3_2_US,
+};
+
+enum rx_msdu_start_recv_bw {
+ RX_MSDU_START_RECV_BW_20MHZ,
+ RX_MSDU_START_RECV_BW_40MHZ,
+ RX_MSDU_START_RECV_BW_80MHZ,
+ RX_MSDU_START_RECV_BW_160MHZ,
+};
+
+enum rx_msdu_start_reception_type {
+ RX_MSDU_START_RECEPTION_TYPE_SU,
+ RX_MSDU_START_RECEPTION_TYPE_DL_MU_MIMO,
+ RX_MSDU_START_RECEPTION_TYPE_DL_MU_OFDMA,
+ RX_MSDU_START_RECEPTION_TYPE_DL_MU_OFDMA_MIMO,
+ RX_MSDU_START_RECEPTION_TYPE_UL_MU_MIMO,
+ RX_MSDU_START_RECEPTION_TYPE_UL_MU_OFDMA,
+ RX_MSDU_START_RECEPTION_TYPE_UL_MU_OFDMA_MIMO,
+};
+
+#define RX_MSDU_START_INFO1_MSDU_LENGTH GENMASK(13, 0)
+#define RX_MSDU_START_INFO1_RSVD_1A BIT(14)
+#define RX_MSDU_START_INFO1_IPSEC_ESP BIT(15)
+#define RX_MSDU_START_INFO1_L3_OFFSET GENMASK(22, 16)
+#define RX_MSDU_START_INFO1_IPSEC_AH BIT(23)
+#define RX_MSDU_START_INFO1_L4_OFFSET GENMASK(31, 24)
+
+#define RX_MSDU_START_INFO2_MSDU_NUMBER GENMASK(7, 0)
+#define RX_MSDU_START_INFO2_DECAP_TYPE GENMASK(9, 8)
+#define RX_MSDU_START_INFO2_IPV4 BIT(10)
+#define RX_MSDU_START_INFO2_IPV6 BIT(11)
+#define RX_MSDU_START_INFO2_TCP BIT(12)
+#define RX_MSDU_START_INFO2_UDP BIT(13)
+#define RX_MSDU_START_INFO2_IP_FRAG BIT(14)
+#define RX_MSDU_START_INFO2_TCP_ONLY_ACK BIT(15)
+#define RX_MSDU_START_INFO2_DA_IS_BCAST_MCAST BIT(16)
+#define RX_MSDU_START_INFO2_SELECTED_TOEPLITZ_HASH GENMASK(18, 17)
+#define RX_MSDU_START_INFO2_IP_FIXED_HDR_VALID BIT(19)
+#define RX_MSDU_START_INFO2_IP_EXTN_HDR_VALID BIT(20)
+#define RX_MSDU_START_INFO2_IP_TCP_UDP_HDR_VALID BIT(21)
+#define RX_MSDU_START_INFO2_MESH_CTRL_PRESENT BIT(22)
+#define RX_MSDU_START_INFO2_LDPC BIT(23)
+#define RX_MSDU_START_INFO2_IP4_IP6_NXT_HDR GENMASK(31, 24)
+#define RX_MSDU_START_INFO2_DECAP_FORMAT GENMASK(9, 8)
+
+#define RX_MSDU_START_INFO3_USER_RSSI GENMASK(7, 0)
+#define RX_MSDU_START_INFO3_PKT_TYPE GENMASK(11, 8)
+#define RX_MSDU_START_INFO3_STBC BIT(12)
+#define RX_MSDU_START_INFO3_SGI GENMASK(14, 13)
+#define RX_MSDU_START_INFO3_RATE_MCS GENMASK(18, 15)
+#define RX_MSDU_START_INFO3_RECV_BW GENMASK(20, 19)
+#define RX_MSDU_START_INFO3_RECEPTION_TYPE GENMASK(23, 21)
+#define RX_MSDU_START_INFO3_MIMO_SS_BITMAP GENMASK(31, 24)
+
+struct rx_msdu_start_ipq8074 {
+ __le16 info0;
+ __le16 phy_ppdu_id;
+ __le32 info1;
+ __le32 info2;
+ __le32 toeplitz_hash;
+ __le32 flow_id_toeplitz;
+ __le32 info3;
+ __le32 ppdu_start_timestamp;
+ __le32 phy_meta_data;
+} __packed;
+
+struct rx_msdu_start_qcn9074 {
+ __le16 info0;
+ __le16 phy_ppdu_id;
+ __le32 info1;
+ __le32 info2;
+ __le32 toeplitz_hash;
+ __le32 flow_id_toeplitz;
+ __le32 info3;
+ __le32 ppdu_start_timestamp;
+ __le32 phy_meta_data;
+ __le16 vlan_ctag_c1;
+ __le16 vlan_stag_c1;
+} __packed;
+
+struct rx_msdu_start_wcn6855 {
+ __le16 info0;
+ __le16 phy_ppdu_id;
+ __le32 info1;
+ __le32 info2;
+ __le32 toeplitz_hash;
+ __le32 flow_id_toeplitz;
+ __le32 info3;
+ __le32 ppdu_start_timestamp;
+ __le32 phy_meta_data;
+ __le16 vlan_ctag_ci;
+ __le16 vlan_stag_ci;
+} __packed;
+
+/* rx_msdu_start
+ *
+ * rxpcu_mpdu_filter_in_category
+ * Field indicates what the reason was that this mpdu frame
+ * was allowed to come into the receive path by rxpcu. Values
+ * are defined in enum %RX_DESC_RXPCU_FILTER_*.
+ *
+ * sw_frame_group_id
+ * SW processes frames based on certain classifications. Values
+ * are defined in enum %RX_DESC_SW_FRAME_GRP_ID_*.
+ *
+ * phy_ppdu_id
+ * A ppdu counter value that PHY increments for every PPDU
+ * received. The counter value wraps around.
+ *
+ * msdu_length
+ * MSDU length in bytes after decapsulation.
+ *
+ * ipsec_esp
+ * Set if IPv4/v6 packet is using IPsec ESP.
+ *
+ * l3_offset
+ * Depending upon mode bit, this field either indicates the
+ * L3 offset in bytes from the start of the RX_HEADER or the IP
+ * offset in bytes from the start of the packet after
+ * decapsulation. The latter is only valid if ipv4_proto or
+ * ipv6_proto is set.
+ *
+ * ipsec_ah
+ * Set if IPv4/v6 packet is using IPsec AH
+ *
+ * l4_offset
+ * Depending upon mode bit, this field either indicates the
+ * L4 offset in bytes from the start of RX_HEADER (only valid
+ * if either ipv4_proto or ipv6_proto is set to 1) or indicates
+ * the offset in bytes to the start of TCP or UDP header from
+ * the start of the IP header after decapsulation (Only valid if
+ * tcp_proto or udp_proto is set). The value 0 indicates that
+ * the offset is longer than 127 bytes.
+ *
+ * msdu_number
+ * Indicates the MSDU number within an MPDU. This value is
+ * reset to zero at the start of each MPDU. If the number of
+ * MSDUs exceeds 255 this number will wrap using modulo 256.
+ *
+ * decap_type
+ * Indicates the format after decapsulation. Values are defined in
+ * enum %MPDU_START_DECAP_TYPE_*.
+ *
+ * ipv4_proto
+ * Set if L2 layer indicates IPv4 protocol.
+ *
+ * ipv6_proto
+ * Set if L2 layer indicates IPv6 protocol.
+ *
+ * tcp_proto
+ * Set if the ipv4_proto or ipv6_proto are set and the IP protocol
+ * indicates TCP.
+ *
+ * udp_proto
+ * Set if the ipv4_proto or ipv6_proto are set and the IP protocol
+ * indicates UDP.
+ *
+ * ip_frag
+ * Indicates that either the IP More frag bit is set or IP frag
+ * number is non-zero. If set indicates that this is a fragmented
+ * IP packet.
+ *
+ * tcp_only_ack
+ * Set if only the TCP Ack bit is set in the TCP flags and if
+ * the TCP payload is 0.
+ *
+ * da_is_bcast_mcast
+ * The destination address is broadcast or multicast.
+ *
+ * toeplitz_hash
+ * Actual chosen Hash.
+ * 0 - Toeplitz hash of 2-tuple (IP source address, IP
+ * destination address)
+ * 1 - Toeplitz hash of 4-tuple (IP source address,
+ * IP destination address, L4 (TCP/UDP) source port,
+ * L4 (TCP/UDP) destination port)
+ * 2 - Toeplitz of flow_id
+ * 3 - Zero is used
+ *
+ * ip_fixed_header_valid
+ * Fixed 20-byte IPv4 header or 40-byte IPv6 header parsed
+ * fully within the first 256 bytes of the packet
+ *
+ * ip_extn_header_valid
+ * IPv4/IPv6 header, including IPv4 options and
+ * recognizable extension headers, parsed fully within the first 256
+ * bytes of the packet
+ *
+ * tcp_udp_header_valid
+ * Fixed 20-byte TCP (excluding TCP options) or 8-byte UDP
+ * header parsed fully within the first 256 bytes of the packet
+ *
+ * mesh_control_present
+ * When set, this MSDU includes the 'Mesh Control' field
+ *
+ * ldpc
+ *
+ * ip4_protocol_ip6_next_header
+ * For IPv4, this is the 8-bit protocol field. For IPv6 this
+ * is the 8-bit next_header field.
+ *
+ * toeplitz_hash_2_or_4
+ * Controlled by RxOLE register - If register bit set to 0,
+ * Toeplitz hash is computed over 2-tuple IPv4 or IPv6 src/dest
+ * addresses; otherwise, toeplitz hash is computed over 4-tuple
+ * IPv4 or IPv6 src/dest addresses and src/dest ports.
+ *
+ * flow_id_toeplitz
+ * Toeplitz hash of 5-tuple
+ * {IP source address, IP destination address, IP source port, IP
+ * destination port, L4 protocol} in case of non-IPSec.
+ *
+ * In case of IPSec - Toeplitz hash of 4-tuple
+ * {IP source address, IP destination address, SPI, L4 protocol}
+ *
+ * The relevant Toeplitz key registers are provided in RxOLE's
+ * instance of common parser module. These registers are separate
+ * from the Toeplitz keys used by ASE/FSE modules inside RxOLE.
+ * The actual value will be passed on from common parser module
+ * to RxOLE in one of the WHO_* TLVs.
+ *
+ * user_rssi
+ * RSSI for this user
+ *
+ * pkt_type
+ * Values are defined in enum %RX_MSDU_START_PKT_TYPE_*.
+ *
+ * stbc
+ * When set, use STBC transmission rates.
+ *
+ * sgi
+ * Field only valid when pkt type is HT, VHT or HE. Values are
+ * defined in enum %RX_MSDU_START_SGI_*.
+ *
+ * rate_mcs
+ * MCS Rate used.
+ *
+ * receive_bandwidth
+ * Full receive Bandwidth. Values are defined in enum
+ * %RX_MSDU_START_RECV_*.
+ *
+ * reception_type
+ * Indicates what type of reception this is and defined in enum
+ * %RX_MSDU_START_RECEPTION_TYPE_*.
+ *
+ * mimo_ss_bitmap
+ * Field only valid when
+ * Reception_type is RX_MSDU_START_RECEPTION_TYPE_DL_MU_MIMO or
+ * RX_MSDU_START_RECEPTION_TYPE_DL_MU_OFDMA_MIMO.
+ *
+ * Bitmap, with each bit indicating if the related spatial
+ * stream is used for this STA
+ *
+ * LSB related to SS 0
+ *
+ * 0 - spatial stream not used for this reception
+ * 1 - spatial stream used for this reception
+ *
+ * ppdu_start_timestamp
+ * Timestamp that indicates when the PPDU that contained this MPDU
+ * started on the medium.
+ *
+ * phy_meta_data
+ * SW programmed Meta data provided by the PHY. Can be used for SW
+ * to indicate the channel the device is on.
+ */
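+
+/* Extraction sketch (illustrative only, assuming a valid
+ * struct rx_msdu_start_ipq8074 *msdu laid out as above):
+ *
+ *	u32 info1 = le32_to_cpu(msdu->info1);
+ *	u32 info3 = le32_to_cpu(msdu->info3);
+ *	u16 msdu_len = FIELD_GET(RX_MSDU_START_INFO1_MSDU_LENGTH, info1);
+ *	u8 pkt_type = FIELD_GET(RX_MSDU_START_INFO3_PKT_TYPE, info3);
+ *	u8 sgi = FIELD_GET(RX_MSDU_START_INFO3_SGI, info3);
+ */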
+
+#define RX_MSDU_END_INFO0_RXPCU_MPDU_FITLER GENMASK(1, 0)
+#define RX_MSDU_END_INFO0_SW_FRAME_GRP_ID GENMASK(8, 2)
+
+#define RX_MSDU_END_INFO1_KEY_ID GENMASK(7, 0)
+#define RX_MSDU_END_INFO1_CCE_SUPER_RULE GENMASK(13, 8)
+#define RX_MSDU_END_INFO1_CCND_TRUNCATE BIT(14)
+#define RX_MSDU_END_INFO1_CCND_CCE_DIS BIT(15)
+#define RX_MSDU_END_INFO1_EXT_WAPI_PN GENMASK(31, 16)
+
+#define RX_MSDU_END_INFO2_REPORTED_MPDU_LEN GENMASK(13, 0)
+#define RX_MSDU_END_INFO2_FIRST_MSDU BIT(14)
+#define RX_MSDU_END_INFO2_FIRST_MSDU_WCN6855 BIT(28)
+#define RX_MSDU_END_INFO2_LAST_MSDU BIT(15)
+#define RX_MSDU_END_INFO2_LAST_MSDU_WCN6855 BIT(29)
+#define RX_MSDU_END_INFO2_SA_IDX_TIMEOUT BIT(16)
+#define RX_MSDU_END_INFO2_DA_IDX_TIMEOUT BIT(17)
+#define RX_MSDU_END_INFO2_MSDU_LIMIT_ERR BIT(18)
+#define RX_MSDU_END_INFO2_FLOW_IDX_TIMEOUT BIT(19)
+#define RX_MSDU_END_INFO2_FLOW_IDX_INVALID BIT(20)
+#define RX_MSDU_END_INFO2_WIFI_PARSER_ERR BIT(21)
+#define RX_MSDU_END_INFO2_AMSDU_PARSET_ERR BIT(22)
+#define RX_MSDU_END_INFO2_SA_IS_VALID BIT(23)
+#define RX_MSDU_END_INFO2_DA_IS_VALID BIT(24)
+#define RX_MSDU_END_INFO2_DA_IS_MCBC BIT(25)
+#define RX_MSDU_END_INFO2_L3_HDR_PADDING GENMASK(27, 26)
+
+#define RX_MSDU_END_INFO3_TCP_FLAG GENMASK(8, 0)
+#define RX_MSDU_END_INFO3_LRO_ELIGIBLE BIT(9)
+
+#define RX_MSDU_END_INFO4_DA_OFFSET GENMASK(5, 0)
+#define RX_MSDU_END_INFO4_SA_OFFSET GENMASK(11, 6)
+#define RX_MSDU_END_INFO4_DA_OFFSET_VALID BIT(12)
+#define RX_MSDU_END_INFO4_SA_OFFSET_VALID BIT(13)
+#define RX_MSDU_END_INFO4_L3_TYPE GENMASK(31, 16)
+
+#define RX_MSDU_END_INFO5_MSDU_DROP BIT(0)
+#define RX_MSDU_END_INFO5_REO_DEST_IND GENMASK(5, 1)
+#define RX_MSDU_END_INFO5_FLOW_IDX GENMASK(25, 6)
+
+struct rx_msdu_end_ipq8074 {
+ __le16 info0;
+ __le16 phy_ppdu_id;
+ __le16 ip_hdr_cksum;
+ __le16 tcp_udp_cksum;
+ __le32 info1;
+ __le32 ext_wapi_pn[2];
+ __le32 info2;
+ __le32 ipv6_options_crc;
+ __le32 tcp_seq_num;
+ __le32 tcp_ack_num;
+ __le16 info3;
+ __le16 window_size;
+ __le32 info4;
+ __le32 rule_indication[2];
+ __le16 sa_idx;
+ __le16 da_idx;
+ __le32 info5;
+ __le32 fse_metadata;
+ __le16 cce_metadata;
+ __le16 sa_sw_peer_id;
+} __packed;
+
+struct rx_msdu_end_wcn6855 {
+ __le16 info0;
+ __le16 phy_ppdu_id;
+ __le16 ip_hdr_cksum;
+ __le16 reported_mpdu_len;
+ __le32 info1;
+ __le32 ext_wapi_pn[2];
+ __le32 info4;
+ __le32 ipv6_options_crc;
+ __le32 tcp_seq_num;
+ __le32 tcp_ack_num;
+ __le16 info3;
+ __le16 window_size;
+ __le32 info2;
+ __le16 sa_idx;
+ __le16 da_idx;
+ __le32 info5;
+ __le32 fse_metadata;
+ __le16 cce_metadata;
+ __le16 sa_sw_peer_id;
+ __le32 rule_indication[2];
+ __le32 info6;
+ __le32 info7;
+} __packed;
+
+#define RX_MSDU_END_MPDU_LENGTH_INFO GENMASK(13, 0)
+
+#define RX_MSDU_END_INFO2_DA_OFFSET GENMASK(5, 0)
+#define RX_MSDU_END_INFO2_SA_OFFSET GENMASK(11, 6)
+#define RX_MSDU_END_INFO2_DA_OFFSET_VALID BIT(12)
+#define RX_MSDU_END_INFO2_SA_OFFSET_VALID BIT(13)
+#define RX_MSDU_END_INFO2_L3_TYPE GENMASK(31, 16)
+
+#define RX_MSDU_END_INFO4_SA_IDX_TIMEOUT BIT(0)
+#define RX_MSDU_END_INFO4_DA_IDX_TIMEOUT BIT(1)
+#define RX_MSDU_END_INFO4_MSDU_LIMIT_ERR BIT(2)
+#define RX_MSDU_END_INFO4_FLOW_IDX_TIMEOUT BIT(3)
+#define RX_MSDU_END_INFO4_FLOW_IDX_INVALID BIT(4)
+#define RX_MSDU_END_INFO4_WIFI_PARSER_ERR BIT(5)
+#define RX_MSDU_END_INFO4_AMSDU_PARSER_ERR BIT(6)
+#define RX_MSDU_END_INFO4_SA_IS_VALID BIT(7)
+#define RX_MSDU_END_INFO4_DA_IS_VALID BIT(8)
+#define RX_MSDU_END_INFO4_DA_IS_MCBC BIT(9)
+#define RX_MSDU_END_INFO4_L3_HDR_PADDING GENMASK(11, 10)
+#define RX_MSDU_END_INFO4_FIRST_MSDU BIT(12)
+#define RX_MSDU_END_INFO4_LAST_MSDU BIT(13)
+
+#define RX_MSDU_END_INFO6_AGGR_COUNT GENMASK(7, 0)
+#define RX_MSDU_END_INFO6_FLOW_AGGR_CONTN BIT(8)
+#define RX_MSDU_END_INFO6_FISA_TIMEOUT BIT(9)
+
+struct rx_msdu_end_qcn9074 {
+ __le16 info0;
+ __le16 phy_ppdu_id;
+ __le16 ip_hdr_cksum;
+ __le16 mpdu_length_info;
+ __le32 info1;
+ __le32 rule_indication[2];
+ __le32 info2;
+ __le32 ipv6_options_crc;
+ __le32 tcp_seq_num;
+ __le32 tcp_ack_num;
+ __le16 info3;
+ __le16 window_size;
+ __le16 tcp_udp_cksum;
+ __le16 info4;
+ __le16 sa_idx;
+ __le16 da_idx;
+ __le32 info5;
+ __le32 fse_metadata;
+ __le16 cce_metadata;
+ __le16 sa_sw_peer_id;
+ __le32 info6;
+ __le16 cum_l4_cksum;
+ __le16 cum_ip_length;
+} __packed;
+
+/* rx_msdu_end
+ *
+ * rxpcu_mpdu_filter_in_category
+ *		Field indicates the reason why this MPDU frame was allowed to
+ *		come into the receive path by RXPCU. Values are defined in
+ *		enum %RX_DESC_RXPCU_FILTER_*.
+ *
+ * sw_frame_group_id
+ * SW processes frames based on certain classifications. Values
+ * are defined in enum %RX_DESC_SW_FRAME_GRP_ID_*.
+ *
+ * phy_ppdu_id
+ * A ppdu counter value that PHY increments for every PPDU
+ * received. The counter value wraps around.
+ *
+ * ip_hdr_cksum
+ * This can include the IP header checksum or the pseudo
+ * header checksum used by TCP/UDP checksum.
+ *
+ * tcp_udp_cksum
+ * The value of the computed TCP/UDP checksum. A mode bit
+ * selects whether this checksum is the full checksum or the
+ * partial checksum which does not include the pseudo header.
+ *
+ * key_id
+ * The key ID octet from the IV. Only valid when first_msdu is set.
+ *
+ * cce_super_rule
+ * Indicates the super filter rule.
+ *
+ * cce_classify_not_done_truncate
+ * Classification failed due to truncated frame.
+ *
+ * cce_classify_not_done_cce_dis
+ * Classification failed due to CCE global disable
+ *
+ * ext_wapi_pn*
+ * Extension PN (packet number) which is only used by WAPI.
+ *
+ * reported_mpdu_length
+ * MPDU length before decapsulation. Only valid when first_msdu is
+ * set. This field is taken directly from the length field of the
+ * A-MPDU delimiter or the preamble length field for non-A-MPDU
+ * frames.
+ *
+ * first_msdu
+ *		Indicates the first MSDU of the A-MSDU. If both first_msdu and
+ *		last_msdu are set in the MSDU then this is a non-aggregated MSDU
+ *		frame: normal MPDU. An interior MSDU in an A-MSDU shall have both
+ *		first_msdu and last_msdu bits set to 0.
+ *
+ * last_msdu
+ * Indicates the last MSDU of the A-MSDU. MPDU end status is only
+ * valid when last_msdu is set.
+ *
+ * sa_idx_timeout
+ * Indicates an unsuccessful MAC source address search due to the
+ * expiring of the search timer.
+ *
+ * da_idx_timeout
+ * Indicates an unsuccessful MAC destination address search due to
+ * the expiring of the search timer.
+ *
+ * msdu_limit_error
+ * Indicates that the MSDU threshold was exceeded and thus all the
+ * rest of the MSDUs will not be scattered and will not be
+ * decapsulated but will be DMA'ed in RAW format as a single MSDU.
+ *
+ * flow_idx_timeout
+ * Indicates an unsuccessful flow search due to the expiring of
+ * the search timer.
+ *
+ * flow_idx_invalid
+ * flow id is not valid.
+ *
+ * amsdu_parser_error
+ *		A-MSDU could not be properly de-aggregated.
+ *
+ * sa_is_valid
+ * Indicates that OLE found a valid SA entry.
+ *
+ * da_is_valid
+ * Indicates that OLE found a valid DA entry.
+ *
+ * da_is_mcbc
+ *		Field only valid if da_is_valid is set. Indicates the DA address
+ *		was a multicast or broadcast address.
+ *
+ * l3_header_padding
+ *		Number of bytes padded to make sure that the L3 header will
+ *		always start at a Dword boundary.
+ *
+ * ipv6_options_crc
+ * 32 bit CRC computed out of IP v6 extension headers.
+ *
+ * tcp_seq_number
+ * TCP sequence number.
+ *
+ * tcp_ack_number
+ * TCP acknowledge number.
+ *
+ * tcp_flag
+ * TCP flags {NS, CWR, ECE, URG, ACK, PSH, RST, SYN, FIN}.
+ *
+ * lro_eligible
+ * Computed out of TCP and IP fields to indicate that this
+ * MSDU is eligible for LRO.
+ *
+ * window_size
+ * TCP receive window size.
+ *
+ * da_offset
+ * Offset into MSDU buffer for DA.
+ *
+ * sa_offset
+ * Offset into MSDU buffer for SA.
+ *
+ * da_offset_valid
+ * da_offset field is valid. This will be set to 0 in case
+ * of a dynamic A-MSDU when DA is compressed.
+ *
+ * sa_offset_valid
+ * sa_offset field is valid. This will be set to 0 in case
+ * of a dynamic A-MSDU when SA is compressed.
+ *
+ * l3_type
+ *		The 16-bit type value indicating the type of the L3 layer
+ * extracted from LLC/SNAP, set to zero if SNAP is not
+ * available.
+ *
+ * rule_indication
+ *		Bitmap indicating which of the rules have matched.
+ *
+ * sa_idx
+ *		The offset in the address table which matches the MAC source
+ *		address.
+ *
+ * da_idx
+ *		The offset in the address table which matches the MAC
+ *		destination address.
+ *
+ * msdu_drop
+ * REO shall drop this MSDU and not forward it to any other ring.
+ *
+ * reo_destination_indication
+ *		The id of the REO exit ring where the MSDU frame shall be
+ *		pushed after (MPDU level) reordering has finished. Values are
+ *		defined in enum %HAL_RX_MSDU_DESC_REO_DEST_IND_*.
+ *
+ * flow_idx
+ * Flow table index.
+ *
+ * fse_metadata
+ * FSE related meta data.
+ *
+ * cce_metadata
+ * CCE related meta data.
+ *
+ * sa_sw_peer_id
+ * sw_peer_id from the address search entry corresponding to the
+ * source address of the MSDU.
+ */
+
+enum rx_mpdu_end_rxdma_dest_ring {
+ RX_MPDU_END_RXDMA_DEST_RING_RELEASE,
+ RX_MPDU_END_RXDMA_DEST_RING_FW,
+ RX_MPDU_END_RXDMA_DEST_RING_SW,
+ RX_MPDU_END_RXDMA_DEST_RING_REO,
+};
+
+#define RX_MPDU_END_INFO1_UNSUP_KTYPE_SHORT_FRAME BIT(11)
+#define RX_MPDU_END_INFO1_RX_IN_TX_DECRYPT_BYT BIT(12)
+#define RX_MPDU_END_INFO1_OVERFLOW_ERR BIT(13)
+#define RX_MPDU_END_INFO1_MPDU_LEN_ERR BIT(14)
+#define RX_MPDU_END_INFO1_TKIP_MIC_ERR BIT(15)
+#define RX_MPDU_END_INFO1_DECRYPT_ERR BIT(16)
+#define RX_MPDU_END_INFO1_UNENCRYPTED_FRAME_ERR BIT(17)
+#define RX_MPDU_END_INFO1_PN_FIELDS_VALID BIT(18)
+#define RX_MPDU_END_INFO1_FCS_ERR BIT(19)
+#define RX_MPDU_END_INFO1_MSDU_LEN_ERR BIT(20)
+#define RX_MPDU_END_INFO1_RXDMA0_DEST_RING GENMASK(22, 21)
+#define RX_MPDU_END_INFO1_RXDMA1_DEST_RING GENMASK(24, 23)
+#define RX_MPDU_END_INFO1_DECRYPT_STATUS_CODE GENMASK(27, 25)
+#define RX_MPDU_END_INFO1_RX_BITMAP_NOT_UPD BIT(28)
+
+struct rx_mpdu_end {
+ __le16 info0;
+ __le16 phy_ppdu_id;
+ __le32 info1;
+} __packed;
+
+/* rx_mpdu_end
+ *
+ * rxpcu_mpdu_filter_in_category
+ *		Field indicates the reason why this MPDU frame was allowed to
+ *		come into the receive path by RXPCU. Values are defined in
+ *		enum %RX_DESC_RXPCU_FILTER_*.
+ *
+ * sw_frame_group_id
+ * SW processes frames based on certain classifications. Values
+ * are defined in enum %RX_DESC_SW_FRAME_GRP_ID_*.
+ *
+ * phy_ppdu_id
+ * A ppdu counter value that PHY increments for every PPDU
+ * received. The counter value wraps around.
+ *
+ * unsup_ktype_short_frame
+ *		This bit will be '1' when a WEP, TKIP or WAPI key type is
+ *		received for an 11ah short frame. Crypto will pass the received
+ *		packet to RxOLE without decryption after setting this bit.
+ *
+ * rx_in_tx_decrypt_byp
+ *		Indicates that the RX packet is not decrypted as Crypto is
+ * busy with TX packet processing.
+ *
+ * overflow_err
+ * RXPCU Receive FIFO ran out of space to receive the full MPDU.
+ * Therefore this MPDU is terminated early and is thus corrupted.
+ *
+ * This MPDU will not be ACKed.
+ *
+ * RXPCU might still be able to correctly receive the following
+ * MPDUs in the PPDU if enough fifo space became available in time.
+ *
+ * mpdu_length_err
+ * Set by RXPCU if the expected MPDU length does not correspond
+ * with the actually received number of bytes in the MPDU.
+ *
+ * tkip_mic_err
+ * Set by Rx crypto when crypto detected a TKIP MIC error for
+ * this MPDU.
+ *
+ * decrypt_err
+ * Set by RX CRYPTO when CRYPTO detected a decrypt error for this
+ * MPDU or CRYPTO received an encrypted frame, but did not get a
+ * valid corresponding key id in the peer entry.
+ *
+ * unencrypted_frame_err
+ * Set by RX CRYPTO when CRYPTO detected an unencrypted frame while
+ *		the peer entry field 'All_frames_shall_be_encrypted' is set.
+ *
+ * pn_fields_contain_valid_info
+ * Set by RX CRYPTO to indicate that there is a valid PN field
+ * present in this MPDU.
+ *
+ * fcs_err
+ * Set by RXPCU when there is an FCS error detected for this MPDU.
+ *
+ * msdu_length_err
+ *		Set by RXOLE when there is an MSDU length error detected
+ *		in at least one of the MSDUs embedded within the MPDU.
+ *
+ * rxdma0_destination_ring
+ * rxdma1_destination_ring
+ * The ring to which RXDMA0/1 shall push the frame, assuming
+ * no MPDU level errors are detected. In case of MPDU level
+ * errors, RXDMA0/1 might change the RXDMA0/1 destination. Values
+ *		are defined in enum %RX_MPDU_END_RXDMA_DEST_RING_*.
+ *
+ * decrypt_status_code
+ * Field provides insight into the decryption performed. Values
+ * are defined in enum %RX_DESC_DECRYPT_STATUS_CODE_*.
+ *
+ * rx_bitmap_not_updated
+ * Frame is received, but RXPCU could not update the receive bitmap
+ * due to (temporary) fifo constraints.
+ */
+
+/* Padding bytes to avoid TLVs spanning across a 128 byte boundary */
+#define HAL_RX_DESC_PADDING0_BYTES 4
+#define HAL_RX_DESC_PADDING1_BYTES 16
+
+#define HAL_RX_DESC_HDR_STATUS_LEN 120
+
+struct hal_rx_desc_ipq8074 {
+ __le32 msdu_end_tag;
+ struct rx_msdu_end_ipq8074 msdu_end;
+ __le32 rx_attn_tag;
+ struct rx_attention attention;
+ __le32 msdu_start_tag;
+ struct rx_msdu_start_ipq8074 msdu_start;
+ u8 rx_padding0[HAL_RX_DESC_PADDING0_BYTES];
+ __le32 mpdu_start_tag;
+ struct rx_mpdu_start_ipq8074 mpdu_start;
+ __le32 mpdu_end_tag;
+ struct rx_mpdu_end mpdu_end;
+ u8 rx_padding1[HAL_RX_DESC_PADDING1_BYTES];
+ __le32 hdr_status_tag;
+ __le32 phy_ppdu_id;
+ u8 hdr_status[HAL_RX_DESC_HDR_STATUS_LEN];
+ u8 msdu_payload[];
+} __packed;
+
+struct hal_rx_desc_qcn9074 {
+ __le32 msdu_end_tag;
+ struct rx_msdu_end_qcn9074 msdu_end;
+ __le32 rx_attn_tag;
+ struct rx_attention attention;
+ __le32 msdu_start_tag;
+ struct rx_msdu_start_qcn9074 msdu_start;
+ u8 rx_padding0[HAL_RX_DESC_PADDING0_BYTES];
+ __le32 mpdu_start_tag;
+ struct rx_mpdu_start_qcn9074 mpdu_start;
+ __le32 mpdu_end_tag;
+ struct rx_mpdu_end mpdu_end;
+ u8 rx_padding1[HAL_RX_DESC_PADDING1_BYTES];
+ __le32 hdr_status_tag;
+ __le32 phy_ppdu_id;
+ u8 hdr_status[HAL_RX_DESC_HDR_STATUS_LEN];
+ u8 msdu_payload[];
+} __packed;
+
+struct hal_rx_desc_wcn6855 {
+ __le32 msdu_end_tag;
+ struct rx_msdu_end_wcn6855 msdu_end;
+ __le32 rx_attn_tag;
+ struct rx_attention attention;
+ __le32 msdu_start_tag;
+ struct rx_msdu_start_wcn6855 msdu_start;
+ u8 rx_padding0[HAL_RX_DESC_PADDING0_BYTES];
+ __le32 mpdu_start_tag;
+ struct rx_mpdu_start_wcn6855 mpdu_start;
+ __le32 mpdu_end_tag;
+ struct rx_mpdu_end mpdu_end;
+ u8 rx_padding1[HAL_RX_DESC_PADDING1_BYTES];
+ __le32 hdr_status_tag;
+ __le32 phy_ppdu_id;
+ u8 hdr_status[HAL_RX_DESC_HDR_STATUS_LEN];
+ u8 msdu_payload[];
+} __packed;
+
+struct hal_rx_desc {
+ union {
+ struct hal_rx_desc_ipq8074 ipq8074;
+ struct hal_rx_desc_qcn9074 qcn9074;
+ struct hal_rx_desc_wcn6855 wcn6855;
+ } u;
+} __packed;
+
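+/* Illustrative sketch: the union above lets common code size one RX
+ * descriptor for all supported hw variants and pick the layout at runtime,
+ * e.g.
+ *
+ *	struct hal_rx_desc *desc = (struct hal_rx_desc *)skb->data;
+ *	u16 id = __le16_to_cpu(desc->u.ipq8074.msdu_end.phy_ppdu_id);
+ */
+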
+#define HAL_RX_RU_ALLOC_TYPE_MAX 6
+#define RU_26 1
+#define RU_52 2
+#define RU_106 4
+#define RU_242 9
+#define RU_484 18
+#define RU_996 37
+
+#endif /* ATH11K_RX_DESC_H */
diff --git a/drivers/net/wireless/ath/ath11k/spectral.c b/drivers/net/wireless/ath/ath11k/spectral.c
new file mode 100644
index 000000000000..b6b0516819a6
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/spectral.c
@@ -0,0 +1,1056 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#include <linux/relay.h>
+#include "core.h"
+#include "debug.h"
+
+#define ATH11K_SPECTRAL_NUM_RESP_PER_EVENT 2
+#define ATH11K_SPECTRAL_EVENT_TIMEOUT_MS 1
+
+#define ATH11K_SPECTRAL_DWORD_SIZE 4
+#define ATH11K_SPECTRAL_MIN_BINS 32
+#define ATH11K_SPECTRAL_MIN_IB_BINS (ATH11K_SPECTRAL_MIN_BINS >> 1)
+#define ATH11K_SPECTRAL_MAX_IB_BINS(x) ((x)->hw_params.spectral.max_fft_bins >> 1)
+
+#define ATH11K_SPECTRAL_SCAN_COUNT_MAX 4095
+
+/* Max channels, computed as the sum of the 2 GHz and 5 GHz band channels */
+#define ATH11K_SPECTRAL_TOTAL_CHANNEL 41
+#define ATH11K_SPECTRAL_SAMPLES_PER_CHANNEL 70
+#define ATH11K_SPECTRAL_PER_SAMPLE_SIZE(x) (sizeof(struct fft_sample_ath11k) + \
+ ATH11K_SPECTRAL_MAX_IB_BINS(x))
+#define ATH11K_SPECTRAL_TOTAL_SAMPLE (ATH11K_SPECTRAL_TOTAL_CHANNEL * \
+ ATH11K_SPECTRAL_SAMPLES_PER_CHANNEL)
+#define ATH11K_SPECTRAL_SUB_BUFF_SIZE(x) ATH11K_SPECTRAL_PER_SAMPLE_SIZE(x)
+#define ATH11K_SPECTRAL_NUM_SUB_BUF ATH11K_SPECTRAL_TOTAL_SAMPLE
+
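+/* Illustrative sketch of the relay buffer sizing above: each sub-buffer
+ * holds exactly one sample and the number of sub-buffers covers one full
+ * scan of all channels, i.e.
+ *
+ *	sub_buf_size = sizeof(struct fft_sample_ath11k) + max_ib_bins;
+ *	num_sub_buf  = 41 channels * 70 samples/channel = 2870 sub-buffers;
+ */
+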
+#define ATH11K_SPECTRAL_20MHZ 20
+#define ATH11K_SPECTRAL_40MHZ 40
+#define ATH11K_SPECTRAL_80MHZ 80
+#define ATH11K_SPECTRAL_160MHZ 160
+
+#define ATH11K_SPECTRAL_SIGNATURE 0xFA
+
+#define ATH11K_SPECTRAL_TAG_RADAR_SUMMARY 0x0
+#define ATH11K_SPECTRAL_TAG_RADAR_FFT 0x1
+#define ATH11K_SPECTRAL_TAG_SCAN_SUMMARY 0x2
+#define ATH11K_SPECTRAL_TAG_SCAN_SEARCH 0x3
+
+#define SPECTRAL_TLV_HDR_LEN GENMASK(15, 0)
+#define SPECTRAL_TLV_HDR_TAG GENMASK(23, 16)
+#define SPECTRAL_TLV_HDR_SIGN GENMASK(31, 24)
+
+#define SPECTRAL_SUMMARY_INFO0_AGC_TOTAL_GAIN GENMASK(7, 0)
+#define SPECTRAL_SUMMARY_INFO0_OB_FLAG BIT(8)
+#define SPECTRAL_SUMMARY_INFO0_GRP_IDX GENMASK(16, 9)
+#define SPECTRAL_SUMMARY_INFO0_RECENT_RFSAT BIT(17)
+#define SPECTRAL_SUMMARY_INFO0_INBAND_PWR_DB GENMASK(27, 18)
+#define SPECTRAL_SUMMARY_INFO0_FALSE_SCAN BIT(28)
+#define SPECTRAL_SUMMARY_INFO0_DETECTOR_ID GENMASK(30, 29)
+#define SPECTRAL_SUMMARY_INFO0_PRI80 BIT(31)
+
+#define SPECTRAL_SUMMARY_INFO2_PEAK_SIGNED_IDX GENMASK(11, 0)
+#define SPECTRAL_SUMMARY_INFO2_PEAK_MAGNITUDE GENMASK(21, 12)
+#define SPECTRAL_SUMMARY_INFO2_NARROWBAND_MASK GENMASK(29, 22)
+#define SPECTRAL_SUMMARY_INFO2_GAIN_CHANGE BIT(30)
+
+struct spectral_tlv {
+ __le32 timestamp;
+ __le32 header;
+} __packed;
+
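+/* Illustrative sketch (assumes a valid TLV at hand): the 32-bit TLV header
+ * above is decoded with the SPECTRAL_TLV_HDR_* masks; note the length field
+ * counts DWORDs, not bytes:
+ *
+ *	u32 hdr = __le32_to_cpu(tlv->header);
+ *	u8 sign = FIELD_GET(SPECTRAL_TLV_HDR_SIGN, hdr);
+ *	u8 tag = FIELD_GET(SPECTRAL_TLV_HDR_TAG, hdr);
+ *	u32 len = FIELD_GET(SPECTRAL_TLV_HDR_LEN, hdr) *
+ *		  ATH11K_SPECTRAL_DWORD_SIZE;
+ */
+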
+struct spectral_summary_fft_report {
+ __le32 timestamp;
+ __le32 tlv_header;
+ __le32 info0;
+ __le32 reserve0;
+ __le32 info2;
+ __le32 reserve1;
+} __packed;
+
+struct ath11k_spectral_summary_report {
+ struct wmi_dma_buf_release_meta_data meta;
+ u32 timestamp;
+ u8 agc_total_gain;
+ u8 grp_idx;
+ u16 inb_pwr_db;
+ s16 peak_idx;
+ u16 peak_mag;
+ u8 detector_id;
+ bool out_of_band_flag;
+ bool rf_saturation;
+ bool primary80;
+ bool gain_change;
+ bool false_scan;
+};
+
+#define SPECTRAL_FFT_REPORT_INFO0_DETECTOR_ID GENMASK(1, 0)
+#define SPECTRAL_FFT_REPORT_INFO0_FFT_NUM GENMASK(4, 2)
+#define SPECTRAL_FFT_REPORT_INFO0_RADAR_CHECK GENMASK(16, 5)
+#define SPECTRAL_FFT_REPORT_INFO0_PEAK_SIGNED_IDX GENMASK(27, 17)
+#define SPECTRAL_FFT_REPORT_INFO0_CHAIN_IDX GENMASK(30, 28)
+
+#define SPECTRAL_FFT_REPORT_INFO1_BASE_PWR_DB GENMASK(8, 0)
+#define SPECTRAL_FFT_REPORT_INFO1_TOTAL_GAIN_DB GENMASK(16, 9)
+
+#define SPECTRAL_FFT_REPORT_INFO2_NUM_STRONG_BINS GENMASK(7, 0)
+#define SPECTRAL_FFT_REPORT_INFO2_PEAK_MAGNITUDE GENMASK(17, 8)
+#define SPECTRAL_FFT_REPORT_INFO2_AVG_PWR_DB GENMASK(24, 18)
+#define SPECTRAL_FFT_REPORT_INFO2_REL_PWR_DB GENMASK(31, 25)
+
+struct spectral_search_fft_report {
+ __le32 timestamp;
+ __le32 tlv_header;
+ __le32 info0;
+ __le32 info1;
+ __le32 info2;
+ __le32 reserve0;
+ u8 bins[];
+} __packed;
+
+struct ath11k_spectral_search_report {
+ u32 timestamp;
+ u8 detector_id;
+ u8 fft_count;
+ u16 radar_check;
+ s16 peak_idx;
+ u8 chain_idx;
+ u16 base_pwr_db;
+ u8 total_gain_db;
+ u8 strong_bin_count;
+ u16 peak_mag;
+ u8 avg_pwr_db;
+ u8 rel_pwr_db;
+};
+
+static struct dentry *create_buf_file_handler(const char *filename,
+ struct dentry *parent,
+ umode_t mode,
+ struct rchan_buf *buf,
+ int *is_global)
+{
+ struct dentry *buf_file;
+
+ buf_file = debugfs_create_file(filename, mode, parent, buf,
+ &relay_file_operations);
+ *is_global = 1;
+ return buf_file;
+}
+
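+/* Note: setting *is_global to 1 above requests a single relay buffer shared
+ * by all CPUs instead of relay's default per-CPU buffers.
+ */
+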
+static int remove_buf_file_handler(struct dentry *dentry)
+{
+ debugfs_remove(dentry);
+
+ return 0;
+}
+
+static const struct rchan_callbacks rfs_scan_cb = {
+ .create_buf_file = create_buf_file_handler,
+ .remove_buf_file = remove_buf_file_handler,
+};
+
+static struct ath11k_vif *ath11k_spectral_get_vdev(struct ath11k *ar)
+{
+ struct ath11k_vif *arvif;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (list_empty(&ar->arvifs))
+ return NULL;
+
+ /* if there already is a vif doing spectral, return that. */
+ list_for_each_entry(arvif, &ar->arvifs, list)
+ if (arvif->spectral_enabled)
+ return arvif;
+
+ /* otherwise, return the first vif. */
+ return list_first_entry(&ar->arvifs, typeof(*arvif), list);
+}
+
+static int ath11k_spectral_scan_trigger(struct ath11k *ar)
+{
+ struct ath11k_vif *arvif;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ arvif = ath11k_spectral_get_vdev(ar);
+ if (!arvif)
+ return -ENODEV;
+
+ if (ar->spectral.mode == ATH11K_SPECTRAL_DISABLED)
+ return 0;
+
+ ar->spectral.is_primary = true;
+
+ ret = ath11k_wmi_vdev_spectral_enable(ar, arvif->vdev_id,
+ ATH11K_WMI_SPECTRAL_TRIGGER_CMD_CLEAR,
+ ATH11K_WMI_SPECTRAL_ENABLE_CMD_ENABLE);
+ if (ret)
+ return ret;
+
+ ret = ath11k_wmi_vdev_spectral_enable(ar, arvif->vdev_id,
+ ATH11K_WMI_SPECTRAL_TRIGGER_CMD_TRIGGER,
+ ATH11K_WMI_SPECTRAL_ENABLE_CMD_ENABLE);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int ath11k_spectral_scan_config(struct ath11k *ar,
+ enum ath11k_spectral_mode mode)
+{
+ struct ath11k_wmi_vdev_spectral_conf_param param = {};
+ struct ath11k_vif *arvif;
+ int ret, count;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ arvif = ath11k_spectral_get_vdev(ar);
+ if (!arvif)
+ return -ENODEV;
+
+ arvif->spectral_enabled = (mode != ATH11K_SPECTRAL_DISABLED);
+
+ spin_lock_bh(&ar->spectral.lock);
+ ar->spectral.mode = mode;
+ spin_unlock_bh(&ar->spectral.lock);
+
+ ret = ath11k_wmi_vdev_spectral_enable(ar, arvif->vdev_id,
+ ATH11K_WMI_SPECTRAL_TRIGGER_CMD_CLEAR,
+ ATH11K_WMI_SPECTRAL_ENABLE_CMD_DISABLE);
+ if (ret) {
+		ath11k_warn(ar->ab, "failed to disable spectral scan: %d\n", ret);
+ return ret;
+ }
+
+ if (mode == ATH11K_SPECTRAL_DISABLED)
+ return 0;
+
+ if (mode == ATH11K_SPECTRAL_BACKGROUND)
+ count = ATH11K_WMI_SPECTRAL_COUNT_DEFAULT;
+ else
+ count = max_t(u16, 1, ar->spectral.count);
+
+ param.vdev_id = arvif->vdev_id;
+ param.scan_count = count;
+ param.scan_fft_size = ar->spectral.fft_size;
+ param.scan_period = ATH11K_WMI_SPECTRAL_PERIOD_DEFAULT;
+ param.scan_priority = ATH11K_WMI_SPECTRAL_PRIORITY_DEFAULT;
+ param.scan_gc_ena = ATH11K_WMI_SPECTRAL_GC_ENA_DEFAULT;
+ param.scan_restart_ena = ATH11K_WMI_SPECTRAL_RESTART_ENA_DEFAULT;
+ param.scan_noise_floor_ref = ATH11K_WMI_SPECTRAL_NOISE_FLOOR_REF_DEFAULT;
+ param.scan_init_delay = ATH11K_WMI_SPECTRAL_INIT_DELAY_DEFAULT;
+ param.scan_nb_tone_thr = ATH11K_WMI_SPECTRAL_NB_TONE_THR_DEFAULT;
+ param.scan_str_bin_thr = ATH11K_WMI_SPECTRAL_STR_BIN_THR_DEFAULT;
+ param.scan_wb_rpt_mode = ATH11K_WMI_SPECTRAL_WB_RPT_MODE_DEFAULT;
+ param.scan_rssi_rpt_mode = ATH11K_WMI_SPECTRAL_RSSI_RPT_MODE_DEFAULT;
+ param.scan_rssi_thr = ATH11K_WMI_SPECTRAL_RSSI_THR_DEFAULT;
+ param.scan_pwr_format = ATH11K_WMI_SPECTRAL_PWR_FORMAT_DEFAULT;
+ param.scan_rpt_mode = ATH11K_WMI_SPECTRAL_RPT_MODE_DEFAULT;
+ param.scan_bin_scale = ATH11K_WMI_SPECTRAL_BIN_SCALE_DEFAULT;
+ param.scan_dbm_adj = ATH11K_WMI_SPECTRAL_DBM_ADJ_DEFAULT;
+ param.scan_chn_mask = ATH11K_WMI_SPECTRAL_CHN_MASK_DEFAULT;
+
+ ret = ath11k_wmi_vdev_spectral_conf(ar, &param);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to configure spectral scan: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static ssize_t ath11k_read_file_spec_scan_ctl(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ char *mode = "";
+ size_t len;
+ enum ath11k_spectral_mode spectral_mode;
+
+ mutex_lock(&ar->conf_mutex);
+ spectral_mode = ar->spectral.mode;
+ mutex_unlock(&ar->conf_mutex);
+
+ switch (spectral_mode) {
+ case ATH11K_SPECTRAL_DISABLED:
+ mode = "disable";
+ break;
+ case ATH11K_SPECTRAL_BACKGROUND:
+ mode = "background";
+ break;
+ case ATH11K_SPECTRAL_MANUAL:
+ mode = "manual";
+ break;
+ }
+
+ len = strlen(mode);
+ return simple_read_from_buffer(user_buf, count, ppos, mode, len);
+}
+
+static ssize_t ath11k_write_file_spec_scan_ctl(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ char buf[32];
+ ssize_t len;
+ int ret;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+
+ buf[len] = '\0';
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (strncmp("trigger", buf, 7) == 0) {
+ if (ar->spectral.mode == ATH11K_SPECTRAL_MANUAL ||
+ ar->spectral.mode == ATH11K_SPECTRAL_BACKGROUND) {
+ /* reset the configuration to adopt possibly changed
+ * debugfs parameters
+ */
+ ret = ath11k_spectral_scan_config(ar, ar->spectral.mode);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to reconfigure spectral scan: %d\n",
+ ret);
+ goto unlock;
+ }
+
+ ret = ath11k_spectral_scan_trigger(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to trigger spectral scan: %d\n",
+ ret);
+ }
+ } else {
+ ret = -EINVAL;
+ }
+ } else if (strncmp("background", buf, 10) == 0) {
+ ret = ath11k_spectral_scan_config(ar, ATH11K_SPECTRAL_BACKGROUND);
+ } else if (strncmp("manual", buf, 6) == 0) {
+ ret = ath11k_spectral_scan_config(ar, ATH11K_SPECTRAL_MANUAL);
+ } else if (strncmp("disable", buf, 7) == 0) {
+ ret = ath11k_spectral_scan_config(ar, ATH11K_SPECTRAL_DISABLED);
+ } else {
+ ret = -EINVAL;
+ }
+
+unlock:
+ mutex_unlock(&ar->conf_mutex);
+
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static const struct file_operations fops_scan_ctl = {
+ .read = ath11k_read_file_spec_scan_ctl,
+ .write = ath11k_write_file_spec_scan_ctl,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
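+/* Illustrative usage from user space (debugfs paths abbreviated and
+ * assumed):
+ *
+ *	echo background > .../ath11k/.../spectral_scan_ctl
+ *	echo trigger > .../spectral_scan_ctl
+ *	echo disable > .../spectral_scan_ctl
+ */
+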
+static ssize_t ath11k_read_file_spectral_count(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ char buf[32];
+ size_t len;
+ u16 spectral_count;
+
+ mutex_lock(&ar->conf_mutex);
+ spectral_count = ar->spectral.count;
+ mutex_unlock(&ar->conf_mutex);
+
+ len = sprintf(buf, "%d\n", spectral_count);
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t ath11k_write_file_spectral_count(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ unsigned long val;
+ ssize_t ret;
+
+ ret = kstrtoul_from_user(user_buf, count, 0, &val);
+ if (ret)
+ return ret;
+
+ if (val > ATH11K_SPECTRAL_SCAN_COUNT_MAX)
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+ ar->spectral.count = val;
+ mutex_unlock(&ar->conf_mutex);
+
+ return count;
+}
+
+static const struct file_operations fops_scan_count = {
+ .read = ath11k_read_file_spectral_count,
+ .write = ath11k_write_file_spectral_count,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath11k_read_file_spectral_bins(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ char buf[32];
+ unsigned int bins, fft_size;
+ size_t len;
+
+ mutex_lock(&ar->conf_mutex);
+
+ fft_size = ar->spectral.fft_size;
+ bins = 1 << fft_size;
+
+ mutex_unlock(&ar->conf_mutex);
+
+ len = sprintf(buf, "%d\n", bins);
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t ath11k_write_file_spectral_bins(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ unsigned long val;
+ ssize_t ret;
+
+ ret = kstrtoul_from_user(user_buf, count, 0, &val);
+ if (ret)
+ return ret;
+
+ if (val < ATH11K_SPECTRAL_MIN_BINS ||
+ val > ar->ab->hw_params.spectral.max_fft_bins)
+ return -EINVAL;
+
+ if (!is_power_of_2(val))
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+ ar->spectral.fft_size = ilog2(val);
+ mutex_unlock(&ar->conf_mutex);
+
+ return count;
+}
+
+static const struct file_operations fops_scan_bins = {
+ .read = ath11k_read_file_spectral_bins,
+ .write = ath11k_write_file_spectral_bins,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static int ath11k_spectral_pull_summary(struct ath11k *ar,
+ struct wmi_dma_buf_release_meta_data *meta,
+ struct spectral_summary_fft_report *summary,
+ struct ath11k_spectral_summary_report *report)
+{
+ report->timestamp = __le32_to_cpu(summary->timestamp);
+ report->agc_total_gain = FIELD_GET(SPECTRAL_SUMMARY_INFO0_AGC_TOTAL_GAIN,
+ __le32_to_cpu(summary->info0));
+ report->out_of_band_flag = FIELD_GET(SPECTRAL_SUMMARY_INFO0_OB_FLAG,
+ __le32_to_cpu(summary->info0));
+ report->grp_idx = FIELD_GET(SPECTRAL_SUMMARY_INFO0_GRP_IDX,
+ __le32_to_cpu(summary->info0));
+ report->rf_saturation = FIELD_GET(SPECTRAL_SUMMARY_INFO0_RECENT_RFSAT,
+ __le32_to_cpu(summary->info0));
+ report->inb_pwr_db = FIELD_GET(SPECTRAL_SUMMARY_INFO0_INBAND_PWR_DB,
+ __le32_to_cpu(summary->info0));
+ report->false_scan = FIELD_GET(SPECTRAL_SUMMARY_INFO0_FALSE_SCAN,
+ __le32_to_cpu(summary->info0));
+ report->detector_id = FIELD_GET(SPECTRAL_SUMMARY_INFO0_DETECTOR_ID,
+ __le32_to_cpu(summary->info0));
+ report->primary80 = FIELD_GET(SPECTRAL_SUMMARY_INFO0_PRI80,
+ __le32_to_cpu(summary->info0));
+ report->peak_idx = FIELD_GET(SPECTRAL_SUMMARY_INFO2_PEAK_SIGNED_IDX,
+ __le32_to_cpu(summary->info2));
+ report->peak_mag = FIELD_GET(SPECTRAL_SUMMARY_INFO2_PEAK_MAGNITUDE,
+ __le32_to_cpu(summary->info2));
+ report->gain_change = FIELD_GET(SPECTRAL_SUMMARY_INFO2_GAIN_CHANGE,
+ __le32_to_cpu(summary->info2));
+
+ memcpy(&report->meta, meta, sizeof(*meta));
+
+ return 0;
+}
+
+static int ath11k_spectral_pull_search(struct ath11k *ar,
+ struct spectral_search_fft_report *search,
+ struct ath11k_spectral_search_report *report)
+{
+ report->timestamp = __le32_to_cpu(search->timestamp);
+ report->detector_id = FIELD_GET(SPECTRAL_FFT_REPORT_INFO0_DETECTOR_ID,
+ __le32_to_cpu(search->info0));
+ report->fft_count = FIELD_GET(SPECTRAL_FFT_REPORT_INFO0_FFT_NUM,
+ __le32_to_cpu(search->info0));
+ report->radar_check = FIELD_GET(SPECTRAL_FFT_REPORT_INFO0_RADAR_CHECK,
+ __le32_to_cpu(search->info0));
+ report->peak_idx = FIELD_GET(SPECTRAL_FFT_REPORT_INFO0_PEAK_SIGNED_IDX,
+ __le32_to_cpu(search->info0));
+ report->chain_idx = FIELD_GET(SPECTRAL_FFT_REPORT_INFO0_CHAIN_IDX,
+ __le32_to_cpu(search->info0));
+ report->base_pwr_db = FIELD_GET(SPECTRAL_FFT_REPORT_INFO1_BASE_PWR_DB,
+ __le32_to_cpu(search->info1));
+ report->total_gain_db = FIELD_GET(SPECTRAL_FFT_REPORT_INFO1_TOTAL_GAIN_DB,
+ __le32_to_cpu(search->info1));
+ report->strong_bin_count = FIELD_GET(SPECTRAL_FFT_REPORT_INFO2_NUM_STRONG_BINS,
+ __le32_to_cpu(search->info2));
+ report->peak_mag = FIELD_GET(SPECTRAL_FFT_REPORT_INFO2_PEAK_MAGNITUDE,
+ __le32_to_cpu(search->info2));
+ report->avg_pwr_db = FIELD_GET(SPECTRAL_FFT_REPORT_INFO2_AVG_PWR_DB,
+ __le32_to_cpu(search->info2));
+ report->rel_pwr_db = FIELD_GET(SPECTRAL_FFT_REPORT_INFO2_REL_PWR_DB,
+ __le32_to_cpu(search->info2));
+
+ return 0;
+}
+
+static u8 ath11k_spectral_get_max_exp(s8 max_index, u8 max_magnitude,
+ int bin_len, u8 *bins)
+{
+ int dc_pos;
+ u8 max_exp;
+
+ dc_pos = bin_len / 2;
+
+ /* peak index outside of bins */
+ if (dc_pos <= max_index || -dc_pos >= max_index)
+ return 0;
+
+ for (max_exp = 0; max_exp < 8; max_exp++) {
+ if (bins[dc_pos + max_index] == (max_magnitude >> max_exp))
+ break;
+ }
+
+ /* max_exp not found */
+ if (bins[dc_pos + max_index] != (max_magnitude >> max_exp))
+ return 0;
+
+ return max_exp;
+}
+
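+/* Note on ath11k_spectral_get_max_exp() above (illustrative): the reported
+ * bins hold the magnitude right-shifted by max_exp, so a consumer recovers
+ * the linear peak magnitude with
+ *
+ *	magnitude = bins[dc_pos + max_index] << max_exp;
+ */
+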
+static void ath11k_spectral_parse_fft(u8 *outbins, u8 *inbins, int num_bins, u8 fft_sz)
+{
+ int i, j;
+
+ i = 0;
+ j = 0;
+ while (i < num_bins) {
+ outbins[i] = inbins[j];
+ i++;
+ j += fft_sz;
+ }
+}
+
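+/* Illustrative equivalent of ath11k_spectral_parse_fft() above: it keeps
+ * the first byte of every fft_sz-sized group in the raw bin stream, i.e.
+ *
+ *	for (i = 0; i < num_bins; i++)
+ *		outbins[i] = inbins[i * fft_sz];
+ */
+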
+static
+int ath11k_spectral_process_fft(struct ath11k *ar,
+ struct ath11k_spectral_summary_report *summary,
+ void *data,
+ struct fft_sample_ath11k *fft_sample,
+ u32 data_len)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct spectral_search_fft_report *fft_report = data;
+ struct ath11k_spectral_search_report search;
+ struct spectral_tlv *tlv;
+ int tlv_len, bin_len, num_bins;
+ u16 length, freq;
+ u8 chan_width_mhz, bin_sz;
+ int ret;
+ u32 check_length;
+ bool fragment_sample = false;
+
+ lockdep_assert_held(&ar->spectral.lock);
+
+ if (!ab->hw_params.spectral.fft_sz) {
+ ath11k_warn(ab, "invalid bin size type for hw rev %d\n",
+ ab->hw_rev);
+ return -EINVAL;
+ }
+
+ tlv = data;
+ tlv_len = FIELD_GET(SPECTRAL_TLV_HDR_LEN, __le32_to_cpu(tlv->header));
+	/* convert DWORD count into bytes */
+ tlv_len *= ATH11K_SPECTRAL_DWORD_SIZE;
+ bin_len = tlv_len - ab->hw_params.spectral.fft_hdr_len;
+
+ if (data_len < (bin_len + sizeof(*fft_report))) {
+ ath11k_warn(ab, "mismatch in expected bin len %d and data len %d\n",
+ bin_len, data_len);
+ return -EINVAL;
+ }
+
+ bin_sz = ab->hw_params.spectral.fft_sz + ab->hw_params.spectral.fft_pad_sz;
+ num_bins = bin_len / bin_sz;
+	/* Only in-band bins are useful for the user to visualize */
+ num_bins >>= 1;
+
+ if (num_bins < ATH11K_SPECTRAL_MIN_IB_BINS ||
+ num_bins > ATH11K_SPECTRAL_MAX_IB_BINS(ab) ||
+ !is_power_of_2(num_bins)) {
+ ath11k_warn(ab, "Invalid num of bins %d\n", num_bins);
+ return -EINVAL;
+ }
+
+ check_length = sizeof(*fft_report) + (num_bins * ab->hw_params.spectral.fft_sz);
+ ret = ath11k_dbring_validate_buffer(ar, data, check_length);
+ if (ret) {
+ ath11k_warn(ar->ab, "found magic value in fft data, dropping\n");
+ return ret;
+ }
+
+ ret = ath11k_spectral_pull_search(ar, data, &search);
+ if (ret) {
+ ath11k_warn(ab, "failed to pull search report %d\n", ret);
+ return ret;
+ }
+
+ chan_width_mhz = summary->meta.ch_width;
+
+ switch (chan_width_mhz) {
+ case ATH11K_SPECTRAL_20MHZ:
+ case ATH11K_SPECTRAL_40MHZ:
+ case ATH11K_SPECTRAL_80MHZ:
+ fft_sample->chan_width_mhz = chan_width_mhz;
+ break;
+ case ATH11K_SPECTRAL_160MHZ:
+ if (ab->hw_params.spectral.fragment_160mhz) {
+ chan_width_mhz /= 2;
+ fragment_sample = true;
+ }
+ fft_sample->chan_width_mhz = chan_width_mhz;
+ break;
+ default:
+ ath11k_warn(ab, "invalid channel width %d\n", chan_width_mhz);
+ return -EINVAL;
+ }
+
+ length = sizeof(*fft_sample) - sizeof(struct fft_sample_tlv) + num_bins;
+ fft_sample->tlv.type = ATH_FFT_SAMPLE_ATH11K;
+ fft_sample->tlv.length = __cpu_to_be16(length);
+
+ fft_sample->tsf = __cpu_to_be32(search.timestamp);
+ fft_sample->max_magnitude = __cpu_to_be16(search.peak_mag);
+ fft_sample->max_index = FIELD_GET(SPECTRAL_FFT_REPORT_INFO0_PEAK_SIGNED_IDX,
+ __le32_to_cpu(fft_report->info0));
+
+ summary->inb_pwr_db >>= 1;
+ fft_sample->rssi = __cpu_to_be16(summary->inb_pwr_db);
+ fft_sample->noise = __cpu_to_be32(summary->meta.noise_floor[search.chain_idx]);
+
+ freq = summary->meta.freq1;
+ fft_sample->freq1 = __cpu_to_be16(freq);
+
+ freq = summary->meta.freq2;
+ fft_sample->freq2 = __cpu_to_be16(freq);
+
+ /* If freq2 is available then the spectral scan results are fragmented
+ * as primary and secondary
+ */
+ if (fragment_sample && freq) {
+ if (!ar->spectral.is_primary)
+ fft_sample->freq1 = cpu_to_be16(freq);
+
+		/* We have to toggle is_primary to handle the next report */
+ ar->spectral.is_primary = !ar->spectral.is_primary;
+ }
+
+ ath11k_spectral_parse_fft(fft_sample->data, fft_report->bins, num_bins,
+ ab->hw_params.spectral.fft_sz);
+
+ fft_sample->max_exp = ath11k_spectral_get_max_exp(fft_sample->max_index,
+ search.peak_mag,
+ num_bins,
+ fft_sample->data);
+
+ if (ar->spectral.rfs_scan)
+ relay_write(ar->spectral.rfs_scan, fft_sample,
+ length + sizeof(struct fft_sample_tlv));
+
+ return 0;
+}
+
+static int ath11k_spectral_process_data(struct ath11k *ar,
+ struct ath11k_dbring_data *param)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct spectral_tlv *tlv;
+ struct spectral_summary_fft_report *summary = NULL;
+ struct ath11k_spectral_summary_report summ_rpt;
+ struct fft_sample_ath11k *fft_sample = NULL;
+ u8 *data;
+ u32 data_len, i;
+ u8 sign, tag;
+ int tlv_len, sample_sz;
+ int ret;
+ bool quit = false;
+
+ spin_lock_bh(&ar->spectral.lock);
+
+ if (!ar->spectral.enabled) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ sample_sz = sizeof(*fft_sample) + ATH11K_SPECTRAL_MAX_IB_BINS(ab);
+ fft_sample = kmalloc(sample_sz, GFP_ATOMIC);
+ if (!fft_sample) {
+ ret = -ENOBUFS;
+ goto unlock;
+ }
+
+ data = param->data;
+ data_len = param->data_sz;
+ i = 0;
+ while (!quit && (i < data_len)) {
+ if ((i + sizeof(*tlv)) > data_len) {
+ ath11k_warn(ab, "failed to parse spectral tlv hdr at bytes %d\n",
+ i);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ tlv = (struct spectral_tlv *)&data[i];
+ sign = FIELD_GET(SPECTRAL_TLV_HDR_SIGN,
+ __le32_to_cpu(tlv->header));
+ if (sign != ATH11K_SPECTRAL_SIGNATURE) {
+ ath11k_warn(ab, "Invalid sign 0x%x at bytes %d\n",
+ sign, i);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ tlv_len = FIELD_GET(SPECTRAL_TLV_HDR_LEN,
+ __le32_to_cpu(tlv->header));
+		/* convert DWORD count into bytes */
+ tlv_len *= ATH11K_SPECTRAL_DWORD_SIZE;
+ if ((i + sizeof(*tlv) + tlv_len) > data_len) {
+ ath11k_warn(ab, "failed to parse spectral tlv payload at bytes %d tlv_len:%d data_len:%d\n",
+ i, tlv_len, data_len);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ tag = FIELD_GET(SPECTRAL_TLV_HDR_TAG,
+ __le32_to_cpu(tlv->header));
+ switch (tag) {
+ case ATH11K_SPECTRAL_TAG_SCAN_SUMMARY:
+			/* HW bug in the TLV length of the summary report:
+			 * the HW reports 3 DWORDs but the data payload
+			 * is 4 DWORDs (16 bytes).
+			 * Remove this workaround once the HW bug is fixed.
+			 */
+ tlv_len = sizeof(*summary) - sizeof(*tlv) +
+ ab->hw_params.spectral.summary_pad_sz;
+
+ if (tlv_len < (sizeof(*summary) - sizeof(*tlv))) {
+ ath11k_warn(ab, "failed to parse spectral summary at bytes %d tlv_len:%d\n",
+ i, tlv_len);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ ret = ath11k_dbring_validate_buffer(ar, data, tlv_len);
+ if (ret) {
+ ath11k_warn(ar->ab, "found magic value in spectral summary, dropping\n");
+ goto err;
+ }
+
+ summary = (struct spectral_summary_fft_report *)tlv;
+ ath11k_spectral_pull_summary(ar, &param->meta,
+ summary, &summ_rpt);
+ break;
+ case ATH11K_SPECTRAL_TAG_SCAN_SEARCH:
+ if (tlv_len < (sizeof(struct spectral_search_fft_report) -
+ sizeof(*tlv))) {
+ ath11k_warn(ab, "failed to parse spectral search fft at bytes %d\n",
+ i);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ memset(fft_sample, 0, sample_sz);
+ ret = ath11k_spectral_process_fft(ar, &summ_rpt, tlv,
+ fft_sample,
+ data_len - i);
+ if (ret) {
+ ath11k_warn(ab, "failed to process spectral fft at bytes %d\n",
+ i);
+ goto err;
+ }
+ quit = true;
+ break;
+ }
+
+ i += sizeof(*tlv) + tlv_len;
+ }
+
+ ret = 0;
+
+err:
+ kfree(fft_sample);
+unlock:
+ spin_unlock_bh(&ar->spectral.lock);
+ return ret;
+}
+
+static int ath11k_spectral_ring_alloc(struct ath11k *ar,
+ struct ath11k_dbring_cap *db_cap)
+{
+ struct ath11k_spectral *sp = &ar->spectral;
+ int ret;
+
+ ret = ath11k_dbring_srng_setup(ar, &sp->rx_ring,
+ 0, db_cap->min_elem);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to setup db ring\n");
+ return ret;
+ }
+
+ ath11k_dbring_set_cfg(ar, &sp->rx_ring,
+ ATH11K_SPECTRAL_NUM_RESP_PER_EVENT,
+ ATH11K_SPECTRAL_EVENT_TIMEOUT_MS,
+ ath11k_spectral_process_data);
+
+ ret = ath11k_dbring_buf_setup(ar, &sp->rx_ring, db_cap);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to setup db ring buffer\n");
+ goto srng_cleanup;
+ }
+
+ ret = ath11k_dbring_wmi_cfg_setup(ar, &sp->rx_ring,
+ WMI_DIRECT_BUF_SPECTRAL);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to setup db ring cfg\n");
+ goto buffer_cleanup;
+ }
+
+ return 0;
+
+buffer_cleanup:
+ ath11k_dbring_buf_cleanup(ar, &sp->rx_ring);
+srng_cleanup:
+ ath11k_dbring_srng_cleanup(ar, &sp->rx_ring);
+ return ret;
+}
+
+static inline void ath11k_spectral_ring_free(struct ath11k *ar)
+{
+ struct ath11k_spectral *sp = &ar->spectral;
+
+ ath11k_dbring_srng_cleanup(ar, &sp->rx_ring);
+ ath11k_dbring_buf_cleanup(ar, &sp->rx_ring);
+}
+
+static inline void ath11k_spectral_debug_unregister(struct ath11k *ar)
+{
+ debugfs_remove(ar->spectral.scan_bins);
+ ar->spectral.scan_bins = NULL;
+
+ debugfs_remove(ar->spectral.scan_count);
+ ar->spectral.scan_count = NULL;
+
+ debugfs_remove(ar->spectral.scan_ctl);
+ ar->spectral.scan_ctl = NULL;
+
+ if (ar->spectral.rfs_scan) {
+ relay_close(ar->spectral.rfs_scan);
+ ar->spectral.rfs_scan = NULL;
+ }
+}
+
+int ath11k_spectral_vif_stop(struct ath11k_vif *arvif)
+{
+ if (!arvif->spectral_enabled)
+ return 0;
+
+ return ath11k_spectral_scan_config(arvif->ar, ATH11K_SPECTRAL_DISABLED);
+}
+
+void ath11k_spectral_reset_buffer(struct ath11k *ar)
+{
+ if (!ar->spectral.enabled)
+ return;
+
+ if (ar->spectral.rfs_scan)
+ relay_reset(ar->spectral.rfs_scan);
+}
+
+void ath11k_spectral_deinit(struct ath11k_base *ab)
+{
+ struct ath11k *ar;
+ struct ath11k_spectral *sp;
+ int i;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ ar = ab->pdevs[i].ar;
+ sp = &ar->spectral;
+
+ if (!sp->enabled)
+ continue;
+
+ mutex_lock(&ar->conf_mutex);
+ ath11k_spectral_scan_config(ar, ATH11K_SPECTRAL_DISABLED);
+ mutex_unlock(&ar->conf_mutex);
+
+ spin_lock_bh(&sp->lock);
+ sp->enabled = false;
+ spin_unlock_bh(&sp->lock);
+
+ ath11k_spectral_debug_unregister(ar);
+ ath11k_spectral_ring_free(ar);
+ }
+}
+
+static inline int ath11k_spectral_debug_register(struct ath11k *ar)
+{
+ int ret;
+
+ ar->spectral.rfs_scan = relay_open("spectral_scan",
+ ar->debug.debugfs_pdev,
+ ATH11K_SPECTRAL_SUB_BUFF_SIZE(ar->ab),
+ ATH11K_SPECTRAL_NUM_SUB_BUF,
+ &rfs_scan_cb, NULL);
+ if (!ar->spectral.rfs_scan) {
+ ath11k_warn(ar->ab, "failed to open relay in pdev %d\n",
+ ar->pdev_idx);
+ return -EINVAL;
+ }
+
+ ar->spectral.scan_ctl = debugfs_create_file("spectral_scan_ctl",
+ 0600,
+ ar->debug.debugfs_pdev, ar,
+ &fops_scan_ctl);
+ if (!ar->spectral.scan_ctl) {
+ ath11k_warn(ar->ab, "failed to open debugfs in pdev %d\n",
+ ar->pdev_idx);
+ ret = -EINVAL;
+ goto debug_unregister;
+ }
+
+ ar->spectral.scan_count = debugfs_create_file("spectral_count",
+ 0600,
+ ar->debug.debugfs_pdev, ar,
+ &fops_scan_count);
+ if (!ar->spectral.scan_count) {
+ ath11k_warn(ar->ab, "failed to open debugfs in pdev %d\n",
+ ar->pdev_idx);
+ ret = -EINVAL;
+ goto debug_unregister;
+ }
+
+ ar->spectral.scan_bins = debugfs_create_file("spectral_bins",
+ 0600,
+ ar->debug.debugfs_pdev, ar,
+ &fops_scan_bins);
+ if (!ar->spectral.scan_bins) {
+ ath11k_warn(ar->ab, "failed to open debugfs in pdev %d\n",
+ ar->pdev_idx);
+ ret = -EINVAL;
+ goto debug_unregister;
+ }
+
+ return 0;
+
+debug_unregister:
+ ath11k_spectral_debug_unregister(ar);
+ return ret;
+}
+
+int ath11k_spectral_init(struct ath11k_base *ab)
+{
+ struct ath11k *ar;
+ struct ath11k_spectral *sp;
+ struct ath11k_dbring_cap db_cap;
+ int ret;
+ int i;
+
+ if (!test_bit(WMI_TLV_SERVICE_FREQINFO_IN_METADATA,
+ ab->wmi_ab.svc_map))
+ return 0;
+
+ if (!ab->hw_params.spectral.fft_sz)
+ return 0;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ ar = ab->pdevs[i].ar;
+ sp = &ar->spectral;
+
+ ret = ath11k_dbring_get_cap(ar->ab, ar->pdev_idx,
+ WMI_DIRECT_BUF_SPECTRAL,
+ &db_cap);
+ if (ret)
+ continue;
+
+ idr_init(&sp->rx_ring.bufs_idr);
+ spin_lock_init(&sp->rx_ring.idr_lock);
+ spin_lock_init(&sp->lock);
+
+ ret = ath11k_spectral_ring_alloc(ar, &db_cap);
+ if (ret) {
+ ath11k_warn(ab, "failed to init spectral ring for pdev %d\n",
+ i);
+ goto deinit;
+ }
+
+ spin_lock_bh(&sp->lock);
+
+ sp->mode = ATH11K_SPECTRAL_DISABLED;
+ sp->count = ATH11K_WMI_SPECTRAL_COUNT_DEFAULT;
+ sp->fft_size = ATH11K_WMI_SPECTRAL_FFT_SIZE_DEFAULT;
+ sp->enabled = true;
+
+ spin_unlock_bh(&sp->lock);
+
+ ret = ath11k_spectral_debug_register(ar);
+ if (ret) {
+ ath11k_warn(ab, "failed to register spectral for pdev %d\n",
+ i);
+ goto deinit;
+ }
+ }
+
+ return 0;
+
+deinit:
+ ath11k_spectral_deinit(ab);
+ return ret;
+}
+
+enum ath11k_spectral_mode ath11k_spectral_get_mode(struct ath11k *ar)
+{
+ if (ar->spectral.enabled)
+ return ar->spectral.mode;
+ else
+ return ATH11K_SPECTRAL_DISABLED;
+}
+
+struct ath11k_dbring *ath11k_spectral_get_dbring(struct ath11k *ar)
+{
+ if (ar->spectral.enabled)
+ return &ar->spectral.rx_ring;
+ else
+ return NULL;
+}
diff --git a/drivers/net/wireless/ath/ath11k/spectral.h b/drivers/net/wireless/ath/ath11k/spectral.h
new file mode 100644
index 000000000000..789cff7c64a7
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/spectral.h
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef ATH11K_SPECTRAL_H
+#define ATH11K_SPECTRAL_H
+
+#include "../spectral_common.h"
+#include "dbring.h"
+
+/* enum ath11k_spectral_mode:
+ *
+ * @ATH11K_SPECTRAL_DISABLED: spectral mode is disabled
+ * @ATH11K_SPECTRAL_BACKGROUND: hardware sends samples when it is not busy
+ * with something else.
+ * @ATH11K_SPECTRAL_MANUAL: spectral scan is enabled, triggering for samples
+ * is performed manually.
+ */
+enum ath11k_spectral_mode {
+ ATH11K_SPECTRAL_DISABLED = 0,
+ ATH11K_SPECTRAL_BACKGROUND,
+ ATH11K_SPECTRAL_MANUAL,
+};
+
+struct ath11k_spectral {
+ struct ath11k_dbring rx_ring;
+ /* Protects enabled */
+ spinlock_t lock;
+ struct rchan *rfs_scan; /* relay(fs) channel for spectral scan */
+ struct dentry *scan_ctl;
+ struct dentry *scan_count;
+ struct dentry *scan_bins;
+ enum ath11k_spectral_mode mode;
+ u16 count;
+ u8 fft_size;
+ bool enabled;
+ bool is_primary;
+};
+
+#ifdef CONFIG_ATH11K_SPECTRAL
+
+int ath11k_spectral_init(struct ath11k_base *ab);
+void ath11k_spectral_deinit(struct ath11k_base *ab);
+int ath11k_spectral_vif_stop(struct ath11k_vif *arvif);
+void ath11k_spectral_reset_buffer(struct ath11k *ar);
+enum ath11k_spectral_mode ath11k_spectral_get_mode(struct ath11k *ar);
+struct ath11k_dbring *ath11k_spectral_get_dbring(struct ath11k *ar);
+
+#else
+
+static inline int ath11k_spectral_init(struct ath11k_base *ab)
+{
+ return 0;
+}
+
+static inline void ath11k_spectral_deinit(struct ath11k_base *ab)
+{
+}
+
+static inline int ath11k_spectral_vif_stop(struct ath11k_vif *arvif)
+{
+ return 0;
+}
+
+static inline void ath11k_spectral_reset_buffer(struct ath11k *ar)
+{
+}
+
+static inline
+enum ath11k_spectral_mode ath11k_spectral_get_mode(struct ath11k *ar)
+{
+ return ATH11K_SPECTRAL_DISABLED;
+}
+
+static inline
+struct ath11k_dbring *ath11k_spectral_get_dbring(struct ath11k *ar)
+{
+ return NULL;
+}
+
+#endif /* CONFIG_ATH11K_SPECTRAL */
+#endif /* ATH11K_SPECTRAL_H */
diff --git a/drivers/net/wireless/ath/ath11k/testmode.c b/drivers/net/wireless/ath/ath11k/testmode.c
new file mode 100644
index 000000000000..a9751ea2a0b7
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/testmode.c
@@ -0,0 +1,502 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include "testmode.h"
+#include <net/netlink.h>
+#include "debug.h"
+#include "wmi.h"
+#include "hw.h"
+#include "core.h"
+#include "../testmode_i.h"
+
+#define ATH11K_FTM_SEGHDR_CURRENT_SEQ GENMASK(3, 0)
+#define ATH11K_FTM_SEGHDR_TOTAL_SEGMENTS GENMASK(7, 4)
+
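+/* Illustrative sketch: the segmentinfo field of the FTM segment header is
+ * packed and unpacked with the masks above, e.g.
+ *
+ *	seginfo = FIELD_PREP(ATH11K_FTM_SEGHDR_TOTAL_SEGMENTS, num_segments) |
+ *		  FIELD_PREP(ATH11K_FTM_SEGHDR_CURRENT_SEQ, segnumber);
+ *	cur_seq = FIELD_GET(ATH11K_FTM_SEGHDR_CURRENT_SEQ, seginfo);
+ */
+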
+static const struct nla_policy ath11k_tm_policy[ATH_TM_ATTR_MAX + 1] = {
+ [ATH_TM_ATTR_CMD] = { .type = NLA_U32 },
+ [ATH_TM_ATTR_DATA] = { .type = NLA_BINARY,
+ .len = ATH_TM_DATA_MAX_LEN },
+ [ATH_TM_ATTR_WMI_CMDID] = { .type = NLA_U32 },
+ [ATH_TM_ATTR_VERSION_MAJOR] = { .type = NLA_U32 },
+ [ATH_TM_ATTR_VERSION_MINOR] = { .type = NLA_U32 },
+};
+
+static struct ath11k *ath11k_tm_get_ar(struct ath11k_base *ab)
+{
+ struct ath11k_pdev *pdev;
+ struct ath11k *ar = NULL;
+ int i;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = &ab->pdevs[i];
+ ar = pdev->ar;
+
+ if (ar && ar->state == ATH11K_STATE_FTM)
+ break;
+ }
+
+ return ar;
+}
+
+/* This function handles unsegmented events. Data from various events is
+ * aggregated in the application layer; from the host's perspective the
+ * event is unsegmented.
+ */
+static void ath11k_tm_wmi_event_unsegmented(struct ath11k_base *ab, u32 cmd_id,
+ struct sk_buff *skb)
+{
+ struct sk_buff *nl_skb;
+ struct ath11k *ar;
+
+ ath11k_dbg(ab, ATH11K_DBG_TESTMODE,
+ "event wmi cmd_id %d skb length %d\n",
+ cmd_id, skb->len);
+ ath11k_dbg_dump(ab, ATH11K_DBG_TESTMODE, NULL, "", skb->data, skb->len);
+
+ ar = ath11k_tm_get_ar(ab);
+ if (!ar) {
+ ath11k_warn(ab, "testmode event not handled due to invalid pdev\n");
+ return;
+ }
+
+ spin_lock_bh(&ar->data_lock);
+
+ nl_skb = cfg80211_testmode_alloc_event_skb(ar->hw->wiphy,
+ 2 * nla_total_size(sizeof(u32)) +
+ nla_total_size(skb->len),
+ GFP_ATOMIC);
+ if (!nl_skb) {
+ ath11k_warn(ab,
+ "failed to allocate skb for unsegmented testmode wmi event\n");
+ goto out;
+ }
+
+ if (nla_put_u32(nl_skb, ATH_TM_ATTR_CMD, ATH_TM_CMD_WMI) ||
+ nla_put_u32(nl_skb, ATH_TM_ATTR_WMI_CMDID, cmd_id) ||
+ nla_put(nl_skb, ATH_TM_ATTR_DATA, skb->len, skb->data)) {
+ ath11k_warn(ab, "failed to populate testmode unsegmented event\n");
+ kfree_skb(nl_skb);
+ goto out;
+ }
+
+ cfg80211_testmode_event(nl_skb, GFP_ATOMIC);
+ spin_unlock_bh(&ar->data_lock);
+ return;
+
+out:
+ spin_unlock_bh(&ar->data_lock);
+ ath11k_warn(ab, "Failed to send testmode event to higher layers\n");
+}
+
+/* This function handles segmented events. Data from various events received
+ * from firmware is aggregated and sent to the application layer.
+ */
+static int ath11k_tm_process_event(struct ath11k_base *ab, u32 cmd_id,
+ const struct wmi_ftm_event_msg *ftm_msg,
+ u16 length)
+{
+ struct sk_buff *nl_skb;
+ int ret = 0;
+ struct ath11k *ar;
+ u8 const *buf_pos;
+ u16 datalen;
+ u8 total_segments, current_seq;
+ u32 data_pos;
+ u32 pdev_id;
+
+ ath11k_dbg(ab, ATH11K_DBG_TESTMODE,
+ "event wmi cmd_id %d ftm event msg %p datalen %d\n",
+ cmd_id, ftm_msg, length);
+ ath11k_dbg_dump(ab, ATH11K_DBG_TESTMODE, NULL, "", ftm_msg, length);
+ pdev_id = DP_HW2SW_MACID(ftm_msg->seg_hdr.pdev_id);
+
+ if (pdev_id >= ab->num_radios) {
+ ath11k_warn(ab, "testmode event not handled due to invalid pdev id: %d\n",
+ pdev_id);
+ return -EINVAL;
+ }
+
+ ar = ab->pdevs[pdev_id].ar;
+ if (!ar) {
+ ath11k_warn(ab, "testmode event not handled due to absence of pdev\n");
+ return -ENODEV;
+ }
+
+ current_seq = FIELD_GET(ATH11K_FTM_SEGHDR_CURRENT_SEQ,
+ ftm_msg->seg_hdr.segmentinfo);
+ total_segments = FIELD_GET(ATH11K_FTM_SEGHDR_TOTAL_SEGMENTS,
+ ftm_msg->seg_hdr.segmentinfo);
+ datalen = length - (sizeof(struct wmi_ftm_seg_hdr));
+ buf_pos = ftm_msg->data;
+
+ spin_lock_bh(&ar->data_lock);
+
+ if (current_seq == 0) {
+ ab->testmode.expected_seq = 0;
+ ab->testmode.data_pos = 0;
+ }
+
+ data_pos = ab->testmode.data_pos;
+
+ if ((data_pos + datalen) > ATH_FTM_EVENT_MAX_BUF_LENGTH) {
+ ath11k_warn(ab, "Invalid ftm event length at %d: %d\n",
+ data_pos, datalen);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ memcpy(&ab->testmode.eventdata[data_pos], buf_pos, datalen);
+ data_pos += datalen;
+
+ if (++ab->testmode.expected_seq != total_segments) {
+ ab->testmode.data_pos = data_pos;
+ ath11k_dbg(ab, ATH11K_DBG_TESTMODE,
+ "partial data received current_seq %d total_seg %d\n",
+ current_seq, total_segments);
+ goto out;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_TESTMODE,
+ "total data length pos %d len %d\n",
+ data_pos, ftm_msg->seg_hdr.len);
+ nl_skb = cfg80211_testmode_alloc_event_skb(ar->hw->wiphy,
+ 2 * nla_total_size(sizeof(u32)) +
+ nla_total_size(data_pos),
+ GFP_ATOMIC);
+ if (!nl_skb) {
+ ath11k_warn(ab,
+ "failed to allocate skb for segmented testmode wmi event\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ if (nla_put_u32(nl_skb, ATH_TM_ATTR_CMD,
+ ATH_TM_CMD_WMI_FTM) ||
+ nla_put_u32(nl_skb, ATH_TM_ATTR_WMI_CMDID, cmd_id) ||
+ nla_put(nl_skb, ATH_TM_ATTR_DATA, data_pos,
+ &ab->testmode.eventdata[0])) {
+		ath11k_warn(ab, "failed to populate segmented testmode event\n");
+ kfree_skb(nl_skb);
+ ret = -ENOBUFS;
+ goto out;
+ }
+
+ cfg80211_testmode_event(nl_skb, GFP_ATOMIC);
+
+out:
+ spin_unlock_bh(&ar->data_lock);
+ return ret;
+}
+
+static void ath11k_tm_wmi_event_segmented(struct ath11k_base *ab, u32 cmd_id,
+ struct sk_buff *skb)
+{
+ const void **tb;
+ const struct wmi_ftm_event_msg *ev;
+ u16 length;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse ftm event tlv: %d\n", ret);
+ return;
+ }
+
+ ev = tb[WMI_TAG_ARRAY_BYTE];
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch ftm msg\n");
+ kfree(tb);
+ return;
+ }
+
+ length = skb->len - TLV_HDR_SIZE;
+ ret = ath11k_tm_process_event(ab, cmd_id, ev, length);
+ if (ret)
+ ath11k_warn(ab, "Failed to process ftm event\n");
+
+ kfree(tb);
+}
+
+void ath11k_tm_wmi_event(struct ath11k_base *ab, u32 cmd_id, struct sk_buff *skb)
+{
+ if (test_bit(ATH11K_FLAG_FTM_SEGMENTED, &ab->dev_flags))
+ ath11k_tm_wmi_event_segmented(ab, cmd_id, skb);
+ else
+ ath11k_tm_wmi_event_unsegmented(ab, cmd_id, skb);
+}
+
+static int ath11k_tm_cmd_get_version(struct ath11k *ar, struct nlattr *tb[])
+{
+ struct sk_buff *skb;
+ int ret;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_TESTMODE,
+ "cmd get version_major %d version_minor %d\n",
+ ATH_TESTMODE_VERSION_MAJOR,
+ ATH_TESTMODE_VERSION_MINOR);
+
+ skb = cfg80211_testmode_alloc_reply_skb(ar->hw->wiphy,
+ nla_total_size(sizeof(u32)));
+ if (!skb)
+ return -ENOMEM;
+
+ ret = nla_put_u32(skb, ATH_TM_ATTR_VERSION_MAJOR,
+ ATH_TESTMODE_VERSION_MAJOR);
+ if (ret) {
+ kfree_skb(skb);
+ return ret;
+ }
+
+ ret = nla_put_u32(skb, ATH_TM_ATTR_VERSION_MINOR,
+ ATH_TESTMODE_VERSION_MINOR);
+ if (ret) {
+ kfree_skb(skb);
+ return ret;
+ }
+
+ return cfg80211_testmode_reply(skb);
+}
+
+static int ath11k_tm_cmd_testmode_start(struct ath11k *ar, struct nlattr *tb[])
+{
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state == ATH11K_STATE_FTM) {
+ ret = -EALREADY;
+ goto err;
+ }
+
+	/* start UTF only when the driver is not in use */
+ if (ar->state != ATH11K_STATE_OFF) {
+ ret = -EBUSY;
+ goto err;
+ }
+
+ ar->ab->testmode.eventdata = kzalloc(ATH_FTM_EVENT_MAX_BUF_LENGTH,
+ GFP_KERNEL);
+ if (!ar->ab->testmode.eventdata) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ar->state = ATH11K_STATE_FTM;
+ ar->ftm_msgref = 0;
+
+ mutex_unlock(&ar->conf_mutex);
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_TESTMODE, "cmd start\n");
+ return 0;
+
+err:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static int ath11k_tm_cmd_wmi(struct ath11k *ar, struct nlattr *tb[],
+ struct ieee80211_vif *vif)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct sk_buff *skb;
+ struct ath11k_vif *arvif;
+ u32 cmd_id, buf_len;
+ int ret, tag;
+ void *buf;
+ u32 *ptr;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (!tb[ATH_TM_ATTR_DATA]) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (!tb[ATH_TM_ATTR_WMI_CMDID]) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ buf = nla_data(tb[ATH_TM_ATTR_DATA]);
+ buf_len = nla_len(tb[ATH_TM_ATTR_DATA]);
+ if (!buf_len) {
+ ath11k_warn(ar->ab, "No data present in testmode wmi command\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ cmd_id = nla_get_u32(tb[ATH_TM_ATTR_WMI_CMDID]);
+
+ /* Make sure that the buffer length is long enough to
+ * hold TLV and pdev/vdev id.
+ */
+ if (buf_len < sizeof(struct wmi_tlv) + sizeof(u32)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ptr = buf;
+ tag = FIELD_GET(WMI_TLV_TAG, *ptr);
+
+	/* the pdev/vdev id starts right after the TLV header */
+ ptr++;
+
+ if (tag == WMI_TAG_PDEV_SET_PARAM_CMD)
+ *ptr = ar->pdev->pdev_id;
+
+ if (ar->ab->fw_mode != ATH11K_FIRMWARE_MODE_FTM &&
+ (tag == WMI_TAG_VDEV_SET_PARAM_CMD || tag == WMI_TAG_UNIT_TEST_CMD)) {
+ if (vif) {
+ arvif = ath11k_vif_to_arvif(vif);
+ *ptr = arvif->vdev_id;
+ } else {
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_TESTMODE,
+ "cmd wmi cmd_id %d buf length %d\n",
+ cmd_id, buf_len);
+
+ ath11k_dbg_dump(ar->ab, ATH11K_DBG_TESTMODE, NULL, "", buf, buf_len);
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, buf_len);
+ if (!skb) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ memcpy(skb->data, buf, buf_len);
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, cmd_id);
+ if (ret) {
+ dev_kfree_skb(skb);
+ ath11k_warn(ar->ab, "failed to transmit wmi command (testmode): %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = 0;
+
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static int ath11k_tm_cmd_wmi_ftm(struct ath11k *ar, struct nlattr *tb[])
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct ath11k_base *ab = ar->ab;
+ struct sk_buff *skb;
+ u32 cmd_id, buf_len, hdr_info;
+ int ret;
+ void *buf;
+ u8 segnumber = 0, seginfo;
+ u16 chunk_len, total_bytes, num_segments;
+ u8 *bufpos;
+ struct wmi_ftm_cmd *ftm_cmd;
+
+ set_bit(ATH11K_FLAG_FTM_SEGMENTED, &ab->dev_flags);
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH11K_STATE_FTM) {
+ ret = -ENETDOWN;
+ goto out;
+ }
+
+ if (!tb[ATH_TM_ATTR_DATA]) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ buf = nla_data(tb[ATH_TM_ATTR_DATA]);
+ buf_len = nla_len(tb[ATH_TM_ATTR_DATA]);
+ cmd_id = WMI_PDEV_UTF_CMDID;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_TESTMODE,
+ "cmd wmi ftm cmd_id %d buffer length %d\n",
+ cmd_id, buf_len);
+ ath11k_dbg_dump(ar->ab, ATH11K_DBG_TESTMODE, NULL, "", buf, buf_len);
+
+ bufpos = buf;
+ total_bytes = buf_len;
+ num_segments = total_bytes / MAX_WMI_UTF_LEN;
+
+ if (buf_len - (num_segments * MAX_WMI_UTF_LEN))
+ num_segments++;
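+	/* The two lines above are a ceiling division; an equivalent
+	 * (illustrative) form:
+	 *
+	 *	num_segments = DIV_ROUND_UP(total_bytes, MAX_WMI_UTF_LEN);
+	 */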
+
+ while (buf_len) {
+ chunk_len = min_t(u16, buf_len, MAX_WMI_UTF_LEN);
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, (chunk_len +
+ sizeof(struct wmi_ftm_cmd)));
+ if (!skb) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ftm_cmd = (struct wmi_ftm_cmd *)skb->data;
+ hdr_info = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
+ FIELD_PREP(WMI_TLV_LEN, (chunk_len +
+ sizeof(struct wmi_ftm_seg_hdr)));
+ ftm_cmd->tlv_header = hdr_info;
+ ftm_cmd->seg_hdr.len = total_bytes;
+ ftm_cmd->seg_hdr.msgref = ar->ftm_msgref;
+ seginfo = FIELD_PREP(ATH11K_FTM_SEGHDR_TOTAL_SEGMENTS, num_segments) |
+ FIELD_PREP(ATH11K_FTM_SEGHDR_CURRENT_SEQ, segnumber);
+ ftm_cmd->seg_hdr.segmentinfo = seginfo;
+ segnumber++;
+
+ memcpy(&ftm_cmd->data, bufpos, chunk_len);
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, cmd_id);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send wmi ftm command: %d\n", ret);
+ goto out;
+ }
+
+ buf_len -= chunk_len;
+ bufpos += chunk_len;
+ }
+
+ ar->ftm_msgref++;
+ ret = 0;
+
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+int ath11k_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ void *data, int len)
+{
+ struct ath11k *ar = hw->priv;
+ struct nlattr *tb[ATH_TM_ATTR_MAX + 1];
+ int ret;
+
+ ret = nla_parse(tb, ATH_TM_ATTR_MAX, data, len, ath11k_tm_policy,
+ NULL);
+ if (ret)
+ return ret;
+
+ if (!tb[ATH_TM_ATTR_CMD])
+ return -EINVAL;
+
+ switch (nla_get_u32(tb[ATH_TM_ATTR_CMD])) {
+ case ATH_TM_CMD_GET_VERSION:
+ return ath11k_tm_cmd_get_version(ar, tb);
+ case ATH_TM_CMD_WMI:
+ return ath11k_tm_cmd_wmi(ar, tb, vif);
+ case ATH_TM_CMD_TESTMODE_START:
+ return ath11k_tm_cmd_testmode_start(ar, tb);
+ case ATH_TM_CMD_WMI_FTM:
+ return ath11k_tm_cmd_wmi_ftm(ar, tb);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
diff --git a/drivers/net/wireless/ath/ath11k/testmode.h b/drivers/net/wireless/ath/ath11k/testmode.h
new file mode 100644
index 000000000000..2f62f2c4422f
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/testmode.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include "core.h"
+
+#ifdef CONFIG_NL80211_TESTMODE
+
+void ath11k_tm_wmi_event(struct ath11k_base *ab, u32 cmd_id, struct sk_buff *skb);
+int ath11k_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ void *data, int len);
+
+#else
+
+static inline void ath11k_tm_wmi_event(struct ath11k_base *ab, u32 cmd_id,
+ struct sk_buff *skb)
+{
+}
+
+static inline int ath11k_tm_cmd(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ void *data, int len)
+{
+ return 0;
+}
+
+#endif
diff --git a/drivers/net/wireless/ath/ath11k/thermal.c b/drivers/net/wireless/ath/ath11k/thermal.c
new file mode 100644
index 000000000000..18d6eab5cce3
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/thermal.c
@@ -0,0 +1,230 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2020 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/device.h>
+#include <linux/sysfs.h>
+#include <linux/thermal.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include "core.h"
+#include "debug.h"
+
+static int
+ath11k_thermal_get_max_throttle_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ *state = ATH11K_THERMAL_THROTTLE_MAX;
+
+ return 0;
+}
+
+static int
+ath11k_thermal_get_cur_throttle_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ struct ath11k *ar = cdev->devdata;
+
+ mutex_lock(&ar->conf_mutex);
+ *state = ar->thermal.throttle_state;
+ mutex_unlock(&ar->conf_mutex);
+
+ return 0;
+}
+
+static int
+ath11k_thermal_set_cur_throttle_state(struct thermal_cooling_device *cdev,
+ unsigned long throttle_state)
+{
+ struct ath11k *ar = cdev->devdata;
+ int ret;
+
+ if (throttle_state > ATH11K_THERMAL_THROTTLE_MAX) {
+ ath11k_warn(ar->ab, "throttle state %ld is exceeding the limit %d\n",
+ throttle_state, ATH11K_THERMAL_THROTTLE_MAX);
+ return -EINVAL;
+ }
+ mutex_lock(&ar->conf_mutex);
+ ret = ath11k_thermal_set_throttling(ar, throttle_state);
+ if (ret == 0)
+ ar->thermal.throttle_state = throttle_state;
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static const struct thermal_cooling_device_ops ath11k_thermal_ops = {
+ .get_max_state = ath11k_thermal_get_max_throttle_state,
+ .get_cur_state = ath11k_thermal_get_cur_throttle_state,
+ .set_cur_state = ath11k_thermal_set_cur_throttle_state,
+};
+
+static ssize_t ath11k_thermal_show_temp(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ath11k *ar = dev_get_drvdata(dev);
+ int ret, temperature;
+ unsigned long time_left;
+
+ mutex_lock(&ar->conf_mutex);
+
+ /* Can't get temperature when the card is off */
+ if (ar->state != ATH11K_STATE_ON) {
+ ret = -ENETDOWN;
+ goto out;
+ }
+
+ reinit_completion(&ar->thermal.wmi_sync);
+ ret = ath11k_wmi_send_pdev_temperature_cmd(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to read temperature %d\n", ret);
+ goto out;
+ }
+
+ if (test_bit(ATH11K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags)) {
+ ret = -ESHUTDOWN;
+ goto out;
+ }
+
+ time_left = wait_for_completion_timeout(&ar->thermal.wmi_sync,
+ ATH11K_THERMAL_SYNC_TIMEOUT_HZ);
+ if (!time_left) {
+ ath11k_warn(ar->ab, "failed to synchronize thermal read\n");
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ spin_lock_bh(&ar->data_lock);
+ temperature = ar->thermal.temperature;
+ spin_unlock_bh(&ar->data_lock);
+
+ /* display in millidegree Celsius */
+ ret = sysfs_emit(buf, "%d\n", temperature * 1000);
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+void ath11k_thermal_event_temperature(struct ath11k *ar, int temperature)
+{
+ spin_lock_bh(&ar->data_lock);
+ ar->thermal.temperature = temperature;
+ spin_unlock_bh(&ar->data_lock);
+ complete(&ar->thermal.wmi_sync);
+}
+
+static SENSOR_DEVICE_ATTR(temp1_input, 0444, ath11k_thermal_show_temp,
+ NULL, 0);
+
+static struct attribute *ath11k_hwmon_attrs[] = {
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(ath11k_hwmon);
+
+int ath11k_thermal_set_throttling(struct ath11k *ar, u32 throttle_state)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct thermal_mitigation_params param;
+ int ret = 0;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (ar->state != ATH11K_STATE_ON)
+ return 0;
+
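+ /* throttle_state (0-100) is programmed as dcoffpercent: the share of
+ * each ATH11K_THERMAL_DEFAULT_DUTY_CYCLE window for which tx is
+ * quiesced. (Assumed firmware semantics; priority 0 below throttles
+ * all data tx queues.)
+ */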
+ memset(&param, 0, sizeof(param));
+ param.pdev_id = ar->pdev->pdev_id;
+ param.enable = throttle_state ? 1 : 0;
+ param.dc = ATH11K_THERMAL_DEFAULT_DUTY_CYCLE;
+ param.dc_per_event = 0xFFFFFFFF;
+
+ param.levelconf[0].tmplwm = ATH11K_THERMAL_TEMP_LOW_MARK;
+ param.levelconf[0].tmphwm = ATH11K_THERMAL_TEMP_HIGH_MARK;
+ param.levelconf[0].dcoffpercent = throttle_state;
+ param.levelconf[0].priority = 0; /* disable all data tx queues */
+
+ ret = ath11k_wmi_send_thermal_mitigation_param_cmd(ar, &param);
+ if (ret) {
+ ath11k_warn(ab, "failed to send thermal mitigation duty cycle %u ret %d\n",
+ throttle_state, ret);
+ }
+
+ return ret;
+}
+
+int ath11k_thermal_register(struct ath11k_base *ab)
+{
+ struct thermal_cooling_device *cdev;
+ struct device *hwmon_dev;
+ struct ath11k *ar;
+ struct ath11k_pdev *pdev;
+ int i, ret;
+
+ if (test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags))
+ return 0;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = &ab->pdevs[i];
+ ar = pdev->ar;
+ if (!ar)
+ continue;
+
+ cdev = thermal_cooling_device_register("ath11k_thermal", ar,
+ &ath11k_thermal_ops);
+
+ if (IS_ERR(cdev)) {
+ ath11k_err(ab, "failed to setup thermal device result: %ld\n",
+ PTR_ERR(cdev));
+ ret = -EINVAL;
+ goto err_thermal_destroy;
+ }
+
+ ar->thermal.cdev = cdev;
+
+ ret = sysfs_create_link(&ar->hw->wiphy->dev.kobj, &cdev->device.kobj,
+ "cooling_device");
+ if (ret) {
+ ath11k_err(ab, "failed to create cooling device symlink\n");
+ goto err_thermal_destroy;
+ }
+
+ if (!IS_REACHABLE(CONFIG_HWMON))
+ return 0;
+
+ hwmon_dev = devm_hwmon_device_register_with_groups(&ar->hw->wiphy->dev,
+ "ath11k_hwmon", ar,
+ ath11k_hwmon_groups);
+ if (IS_ERR(hwmon_dev)) {
+ ath11k_err(ar->ab, "failed to register hwmon device: %ld\n",
+ PTR_ERR(hwmon_dev));
+ ret = -EINVAL;
+ goto err_thermal_destroy;
+ }
+ }
+
+ return 0;
+
+err_thermal_destroy:
+ ath11k_thermal_unregister(ab);
+ return ret;
+}
+
+void ath11k_thermal_unregister(struct ath11k_base *ab)
+{
+ struct ath11k *ar;
+ struct ath11k_pdev *pdev;
+ int i;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = &ab->pdevs[i];
+ ar = pdev->ar;
+ if (!ar)
+ continue;
+
+ sysfs_remove_link(&ar->hw->wiphy->dev.kobj, "cooling_device");
+ thermal_cooling_device_unregister(ar->thermal.cdev);
+ }
+}
diff --git a/drivers/net/wireless/ath/ath11k/thermal.h b/drivers/net/wireless/ath/ath11k/thermal.h
new file mode 100644
index 000000000000..cdaf4e01d92e
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/thermal.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2020 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _ATH11K_THERMAL_
+#define _ATH11K_THERMAL_
+
+#define ATH11K_THERMAL_TEMP_LOW_MARK -100
+#define ATH11K_THERMAL_TEMP_HIGH_MARK 150
+#define ATH11K_THERMAL_THROTTLE_MAX 100
+#define ATH11K_THERMAL_DEFAULT_DUTY_CYCLE 100
+#define ATH11K_HWMON_NAME_LEN 15
+#define ATH11K_THERMAL_SYNC_TIMEOUT_HZ (5 * HZ)
+
+struct ath11k_thermal {
+ struct thermal_cooling_device *cdev;
+ struct completion wmi_sync;
+
+ /* protected by conf_mutex */
+ u32 throttle_state;
+ /* temperature value in degrees Celsius,
+ * protected by data_lock
+ */
+ int temperature;
+};
+
+#if IS_REACHABLE(CONFIG_THERMAL)
+int ath11k_thermal_register(struct ath11k_base *ab);
+void ath11k_thermal_unregister(struct ath11k_base *ab);
+int ath11k_thermal_set_throttling(struct ath11k *ar, u32 throttle_state);
+void ath11k_thermal_event_temperature(struct ath11k *ar, int temperature);
+#else
+static inline int ath11k_thermal_register(struct ath11k_base *ab)
+{
+ return 0;
+}
+
+static inline void ath11k_thermal_unregister(struct ath11k_base *ab)
+{
+}
+
+static inline int ath11k_thermal_set_throttling(struct ath11k *ar, u32 throttle_state)
+{
+ return 0;
+}
+
+static inline void ath11k_thermal_event_temperature(struct ath11k *ar,
+ int temperature)
+{
+}
+
+#endif
+#endif /* _ATH11K_THERMAL_ */
diff --git a/drivers/net/wireless/ath/ath11k/trace.c b/drivers/net/wireless/ath/ath11k/trace.c
new file mode 100644
index 000000000000..44ff8e9eff5d
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/trace.c
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#include <linux/export.h>
+#include <linux/module.h>
+
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+EXPORT_SYMBOL(__tracepoint_ath11k_log_dbg);
diff --git a/drivers/net/wireless/ath/ath11k/trace.h b/drivers/net/wireless/ath/ath11k/trace.h
new file mode 100644
index 000000000000..75246b0a82e3
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/trace.h
@@ -0,0 +1,346 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#if !defined(_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+
+#include <linux/tracepoint.h>
+#include "core.h"
+
+#define _TRACE_H_
+
+/* create empty functions when tracing is disabled */
+#if !defined(CONFIG_ATH11K_TRACING) || defined(__CHECKER__)
+#undef TRACE_EVENT
+#define TRACE_EVENT(name, proto, ...) \
+static inline void trace_ ## name(proto) {} \
+static inline bool trace_##name##_enabled(void) \
+{ \
+ return false; \
+}
+
+#undef DECLARE_EVENT_CLASS
+#define DECLARE_EVENT_CLASS(...)
+#undef DEFINE_EVENT
+#define DEFINE_EVENT(evt_class, name, proto, ...) \
+static inline void trace_ ## name(proto) {}
+#endif /* !CONFIG_ATH11K_TRACING || __CHECKER__ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM ath11k
+
+#define ATH11K_MSG_MAX 400
+
+TRACE_EVENT(ath11k_htt_pktlog,
+ TP_PROTO(struct ath11k *ar, const void *buf, u16 buf_len,
+ u32 pktlog_checksum),
+
+ TP_ARGS(ar, buf, buf_len, pktlog_checksum),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(ar->ab->dev))
+ __string(driver, dev_driver_string(ar->ab->dev))
+ __field(u16, buf_len)
+ __field(u32, pktlog_checksum)
+ __dynamic_array(u8, pktlog, buf_len)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device);
+ __assign_str(driver);
+ __entry->buf_len = buf_len;
+ __entry->pktlog_checksum = pktlog_checksum;
+ memcpy(__get_dynamic_array(pktlog), buf, buf_len);
+ ),
+
+ TP_printk(
+ "%s %s size %u pktlog_checksum %d",
+ __get_str(driver),
+ __get_str(device),
+ __entry->buf_len,
+ __entry->pktlog_checksum
+ )
+);
+
+TRACE_EVENT(ath11k_htt_ppdu_stats,
+ TP_PROTO(struct ath11k *ar, const void *data, size_t len),
+
+ TP_ARGS(ar, data, len),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(ar->ab->dev))
+ __string(driver, dev_driver_string(ar->ab->dev))
+ __field(u16, len)
+ __dynamic_array(u8, ppdu, len)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device);
+ __assign_str(driver);
+ __entry->len = len;
+ memcpy(__get_dynamic_array(ppdu), data, len);
+ ),
+
+ TP_printk(
+ "%s %s ppdu len %d",
+ __get_str(driver),
+ __get_str(device),
+ __entry->len
+ )
+);
+
+TRACE_EVENT(ath11k_htt_rxdesc,
+ TP_PROTO(struct ath11k *ar, const void *data, size_t log_type, size_t len),
+
+ TP_ARGS(ar, data, log_type, len),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(ar->ab->dev))
+ __string(driver, dev_driver_string(ar->ab->dev))
+ __field(u16, len)
+ __field(u16, log_type)
+ __dynamic_array(u8, rxdesc, len)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device);
+ __assign_str(driver);
+ __entry->len = len;
+ __entry->log_type = log_type;
+ memcpy(__get_dynamic_array(rxdesc), data, len);
+ ),
+
+ TP_printk(
+ "%s %s rxdesc len %d type %d",
+ __get_str(driver),
+ __get_str(device),
+ __entry->len,
+ __entry->log_type
+ )
+);
+
+DECLARE_EVENT_CLASS(ath11k_log_event,
+ TP_PROTO(struct ath11k_base *ab, struct va_format *vaf),
+ TP_ARGS(ab, vaf),
+ TP_STRUCT__entry(
+ __string(device, dev_name(ab->dev))
+ __string(driver, dev_driver_string(ab->dev))
+ __vstring(msg, vaf->fmt, vaf->va)
+ ),
+ TP_fast_assign(
+ __assign_str(device);
+ __assign_str(driver);
+ __assign_vstr(msg, vaf->fmt, vaf->va);
+ ),
+ TP_printk(
+ "%s %s %s",
+ __get_str(driver),
+ __get_str(device),
+ __get_str(msg)
+ )
+);
+
+DEFINE_EVENT(ath11k_log_event, ath11k_log_err,
+ TP_PROTO(struct ath11k_base *ab, struct va_format *vaf),
+ TP_ARGS(ab, vaf)
+);
+
+DEFINE_EVENT(ath11k_log_event, ath11k_log_warn,
+ TP_PROTO(struct ath11k_base *ab, struct va_format *vaf),
+ TP_ARGS(ab, vaf)
+);
+
+DEFINE_EVENT(ath11k_log_event, ath11k_log_info,
+ TP_PROTO(struct ath11k_base *ab, struct va_format *vaf),
+ TP_ARGS(ab, vaf)
+);
+
+TRACE_EVENT(ath11k_wmi_cmd,
+ TP_PROTO(struct ath11k_base *ab, int id, const void *buf, size_t buf_len),
+
+ TP_ARGS(ab, id, buf, buf_len),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(ab->dev))
+ __string(driver, dev_driver_string(ab->dev))
+ __field(unsigned int, id)
+ __field(size_t, buf_len)
+ __dynamic_array(u8, buf, buf_len)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device);
+ __assign_str(driver);
+ __entry->id = id;
+ __entry->buf_len = buf_len;
+ memcpy(__get_dynamic_array(buf), buf, buf_len);
+ ),
+
+ TP_printk(
+ "%s %s id %d len %zu",
+ __get_str(driver),
+ __get_str(device),
+ __entry->id,
+ __entry->buf_len
+ )
+);
+
+TRACE_EVENT(ath11k_wmi_event,
+ TP_PROTO(struct ath11k_base *ab, int id, const void *buf, size_t buf_len),
+
+ TP_ARGS(ab, id, buf, buf_len),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(ab->dev))
+ __string(driver, dev_driver_string(ab->dev))
+ __field(unsigned int, id)
+ __field(size_t, buf_len)
+ __dynamic_array(u8, buf, buf_len)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device);
+ __assign_str(driver);
+ __entry->id = id;
+ __entry->buf_len = buf_len;
+ memcpy(__get_dynamic_array(buf), buf, buf_len);
+ ),
+
+ TP_printk(
+ "%s %s id %d len %zu",
+ __get_str(driver),
+ __get_str(device),
+ __entry->id,
+ __entry->buf_len
+ )
+);
+
+TRACE_EVENT(ath11k_log_dbg,
+ TP_PROTO(struct ath11k_base *ab, unsigned int level, struct va_format *vaf),
+
+ TP_ARGS(ab, level, vaf),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(ab->dev))
+ __string(driver, dev_driver_string(ab->dev))
+ __field(unsigned int, level)
+ __dynamic_array(char, msg, ATH11K_MSG_MAX)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device);
+ __assign_str(driver);
+ __entry->level = level;
+ WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
+ ATH11K_MSG_MAX, vaf->fmt,
+ *vaf->va) >= ATH11K_MSG_MAX);
+ ),
+
+ TP_printk(
+ "%s %s %s",
+ __get_str(driver),
+ __get_str(device),
+ __get_str(msg)
+ )
+);
+
+TRACE_EVENT(ath11k_log_dbg_dump,
+ TP_PROTO(struct ath11k_base *ab, const char *msg, const char *prefix,
+ const void *buf, size_t buf_len),
+
+ TP_ARGS(ab, msg, prefix, buf, buf_len),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(ab->dev))
+ __string(driver, dev_driver_string(ab->dev))
+ __string(msg, msg)
+ __string(prefix, prefix)
+ __field(size_t, buf_len)
+ __dynamic_array(u8, buf, buf_len)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device);
+ __assign_str(driver);
+ __assign_str(msg);
+ __assign_str(prefix);
+ __entry->buf_len = buf_len;
+ memcpy(__get_dynamic_array(buf), buf, buf_len);
+ ),
+
+ TP_printk(
+ "%s %s %s/%s\n",
+ __get_str(driver),
+ __get_str(device),
+ __get_str(prefix),
+ __get_str(msg)
+ )
+);
+
+TRACE_EVENT(ath11k_wmi_diag,
+ TP_PROTO(struct ath11k_base *ab, const void *data, size_t len),
+
+ TP_ARGS(ab, data, len),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(ab->dev))
+ __string(driver, dev_driver_string(ab->dev))
+ __field(u16, len)
+ __dynamic_array(u8, data, len)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device);
+ __assign_str(driver);
+ __entry->len = len;
+ memcpy(__get_dynamic_array(data), data, len);
+ ),
+
+ TP_printk(
+ "%s %s tlv diag len %d",
+ __get_str(driver),
+ __get_str(device),
+ __entry->len
+ )
+);
+
+TRACE_EVENT(ath11k_ps_timekeeper,
+ TP_PROTO(struct ath11k *ar, const void *peer_addr,
+ u32 peer_ps_timestamp, u8 peer_ps_state),
+ TP_ARGS(ar, peer_addr, peer_ps_timestamp, peer_ps_state),
+
+ TP_STRUCT__entry(__string(device, dev_name(ar->ab->dev))
+ __string(driver, dev_driver_string(ar->ab->dev))
+ __dynamic_array(u8, peer_addr, ETH_ALEN)
+ __field(u8, peer_ps_state)
+ __field(u32, peer_ps_timestamp)
+ ),
+
+ TP_fast_assign(__assign_str(device);
+ __assign_str(driver);
+ memcpy(__get_dynamic_array(peer_addr), peer_addr,
+ ETH_ALEN);
+ __entry->peer_ps_state = peer_ps_state;
+ __entry->peer_ps_timestamp = peer_ps_timestamp;
+ ),
+
+ TP_printk("%s %s %u %u",
+ __get_str(driver),
+ __get_str(device),
+ __entry->peer_ps_state,
+ __entry->peer_ps_timestamp
+ )
+);
+
+#endif /* _TRACE_H_ || TRACE_HEADER_MULTI_READ */
+
+/* we don't want to use include/trace/events */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c
new file mode 100644
index 000000000000..110035dae8a6
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/wmi.c
@@ -0,0 +1,9909 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+#include <linux/skbuff.h>
+#include <linux/ctype.h>
+#include <net/mac80211.h>
+#include <net/cfg80211.h>
+#include <linux/completion.h>
+#include <linux/if_ether.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/uuid.h>
+#include <linux/time.h>
+#include <linux/of.h>
+#include "core.h"
+#include "debug.h"
+#include "mac.h"
+#include "hw.h"
+#include "peer.h"
+#include "testmode.h"
+#include "p2p.h"
+
+struct wmi_tlv_policy {
+ size_t min_len;
+};
+
+struct wmi_tlv_svc_ready_parse {
+ bool wmi_svc_bitmap_done;
+};
+
+struct wmi_tlv_dma_ring_caps_parse {
+ struct wmi_dma_ring_capabilities *dma_ring_caps;
+ u32 n_dma_ring_caps;
+};
+
+struct wmi_tlv_svc_rdy_ext_parse {
+ struct ath11k_service_ext_param param;
+ struct wmi_soc_mac_phy_hw_mode_caps *hw_caps;
+ struct wmi_hw_mode_capabilities *hw_mode_caps;
+ u32 n_hw_mode_caps;
+ u32 tot_phy_id;
+ struct wmi_hw_mode_capabilities pref_hw_mode_caps;
+ struct wmi_mac_phy_capabilities *mac_phy_caps;
+ u32 n_mac_phy_caps;
+ struct wmi_soc_hal_reg_capabilities *soc_hal_reg_caps;
+ struct wmi_hal_reg_capabilities_ext *ext_hal_reg_caps;
+ u32 n_ext_hal_reg_caps;
+ struct wmi_tlv_dma_ring_caps_parse dma_caps_parse;
+ bool hw_mode_done;
+ bool mac_phy_done;
+ bool ext_hal_reg_done;
+ bool mac_phy_chainmask_combo_done;
+ bool mac_phy_chainmask_cap_done;
+ bool oem_dma_ring_cap_done;
+ bool dma_ring_cap_done;
+};
+
+struct wmi_tlv_svc_rdy_ext2_parse {
+ struct wmi_tlv_dma_ring_caps_parse dma_caps_parse;
+ bool dma_ring_cap_done;
+};
+
+struct wmi_tlv_rdy_parse {
+ u32 num_extra_mac_addr;
+};
+
+struct wmi_tlv_dma_buf_release_parse {
+ struct ath11k_wmi_dma_buf_release_fixed_param fixed;
+ struct wmi_dma_buf_release_entry *buf_entry;
+ struct wmi_dma_buf_release_meta_data *meta_data;
+ u32 num_buf_entry;
+ u32 num_meta;
+ bool buf_entry_done;
+ bool meta_data_done;
+};
+
+struct wmi_tlv_fw_stats_parse {
+ const struct wmi_stats_event *ev;
+ const struct wmi_per_chain_rssi_stats *rssi;
+ struct ath11k_fw_stats *stats;
+ int rssi_num;
+ bool chain_rssi_done;
+};
+
+struct wmi_tlv_mgmt_rx_parse {
+ const struct wmi_mgmt_rx_hdr *fixed;
+ const u8 *frame_buf;
+ bool frame_buf_done;
+};
+
+static const struct wmi_tlv_policy wmi_tlv_policies[] = {
+ [WMI_TAG_ARRAY_BYTE]
+ = { .min_len = 0 },
+ [WMI_TAG_ARRAY_UINT32]
+ = { .min_len = 0 },
+ [WMI_TAG_SERVICE_READY_EVENT]
+ = { .min_len = sizeof(struct wmi_service_ready_event) },
+ [WMI_TAG_SERVICE_READY_EXT_EVENT]
+ = { .min_len = sizeof(struct wmi_service_ready_ext_event) },
+ [WMI_TAG_SOC_MAC_PHY_HW_MODE_CAPS]
+ = { .min_len = sizeof(struct wmi_soc_mac_phy_hw_mode_caps) },
+ [WMI_TAG_SOC_HAL_REG_CAPABILITIES]
+ = { .min_len = sizeof(struct wmi_soc_hal_reg_capabilities) },
+ [WMI_TAG_VDEV_START_RESPONSE_EVENT]
+ = { .min_len = sizeof(struct wmi_vdev_start_resp_event) },
+ [WMI_TAG_PEER_DELETE_RESP_EVENT]
+ = { .min_len = sizeof(struct wmi_peer_delete_resp_event) },
+ [WMI_TAG_OFFLOAD_BCN_TX_STATUS_EVENT]
+ = { .min_len = sizeof(struct wmi_bcn_tx_status_event) },
+ [WMI_TAG_VDEV_STOPPED_EVENT]
+ = { .min_len = sizeof(struct wmi_vdev_stopped_event) },
+ [WMI_TAG_REG_CHAN_LIST_CC_EVENT]
+ = { .min_len = sizeof(struct wmi_reg_chan_list_cc_event) },
+ [WMI_TAG_REG_CHAN_LIST_CC_EXT_EVENT]
+ = { .min_len = sizeof(struct wmi_reg_chan_list_cc_ext_event) },
+ [WMI_TAG_MGMT_RX_HDR]
+ = { .min_len = sizeof(struct wmi_mgmt_rx_hdr) },
+ [WMI_TAG_MGMT_TX_COMPL_EVENT]
+ = { .min_len = sizeof(struct wmi_mgmt_tx_compl_event) },
+ [WMI_TAG_SCAN_EVENT]
+ = { .min_len = sizeof(struct wmi_scan_event) },
+ [WMI_TAG_PEER_STA_KICKOUT_EVENT]
+ = { .min_len = sizeof(struct wmi_peer_sta_kickout_event) },
+ [WMI_TAG_ROAM_EVENT]
+ = { .min_len = sizeof(struct wmi_roam_event) },
+ [WMI_TAG_CHAN_INFO_EVENT]
+ = { .min_len = sizeof(struct wmi_chan_info_event) },
+ [WMI_TAG_PDEV_BSS_CHAN_INFO_EVENT]
+ = { .min_len = sizeof(struct wmi_pdev_bss_chan_info_event) },
+ [WMI_TAG_VDEV_INSTALL_KEY_COMPLETE_EVENT]
+ = { .min_len = sizeof(struct wmi_vdev_install_key_compl_event) },
+ [WMI_TAG_READY_EVENT] = {
+ .min_len = sizeof(struct wmi_ready_event_min) },
+ [WMI_TAG_SERVICE_AVAILABLE_EVENT]
+ = {.min_len = sizeof(struct wmi_service_available_event) },
+ [WMI_TAG_PEER_ASSOC_CONF_EVENT]
+ = { .min_len = sizeof(struct wmi_peer_assoc_conf_event) },
+ [WMI_TAG_STATS_EVENT]
+ = { .min_len = sizeof(struct wmi_stats_event) },
+ [WMI_TAG_PDEV_CTL_FAILSAFE_CHECK_EVENT]
+ = { .min_len = sizeof(struct wmi_pdev_ctl_failsafe_chk_event) },
+ [WMI_TAG_HOST_SWFDA_EVENT] = {
+ .min_len = sizeof(struct wmi_fils_discovery_event) },
+ [WMI_TAG_OFFLOAD_PRB_RSP_TX_STATUS_EVENT] = {
+ .min_len = sizeof(struct wmi_probe_resp_tx_status_event) },
+ [WMI_TAG_VDEV_DELETE_RESP_EVENT] = {
+ .min_len = sizeof(struct wmi_vdev_delete_resp_event) },
+ [WMI_TAG_OBSS_COLOR_COLLISION_EVT] = {
+ .min_len = sizeof(struct wmi_obss_color_collision_event) },
+ [WMI_TAG_11D_NEW_COUNTRY_EVENT] = {
+ .min_len = sizeof(struct wmi_11d_new_cc_ev) },
+ [WMI_TAG_PER_CHAIN_RSSI_STATS] = {
+ .min_len = sizeof(struct wmi_per_chain_rssi_stats) },
+ [WMI_TAG_TWT_ADD_DIALOG_COMPLETE_EVENT] = {
+ .min_len = sizeof(struct wmi_twt_add_dialog_event) },
+ [WMI_TAG_P2P_NOA_INFO] = {
+ .min_len = sizeof(struct ath11k_wmi_p2p_noa_info) },
+ [WMI_TAG_P2P_NOA_EVENT] = {
+ .min_len = sizeof(struct wmi_p2p_noa_event) },
+};
+
+#define PRIMAP(_hw_mode_) \
+ [_hw_mode_] = _hw_mode_##_PRI
+
+static const int ath11k_hw_mode_pri_map[] = {
+ PRIMAP(WMI_HOST_HW_MODE_SINGLE),
+ PRIMAP(WMI_HOST_HW_MODE_DBS),
+ PRIMAP(WMI_HOST_HW_MODE_SBS_PASSIVE),
+ PRIMAP(WMI_HOST_HW_MODE_SBS),
+ PRIMAP(WMI_HOST_HW_MODE_DBS_SBS),
+ PRIMAP(WMI_HOST_HW_MODE_DBS_OR_SBS),
+ /* keep last */
+ PRIMAP(WMI_HOST_HW_MODE_MAX),
+};
+
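+/* A WMI TLV stream is a sequence of { u32 header | payload } records; the
+ * header packs a tag and the payload length (excluding the header itself).
+ * The iterator below length-checks every record against wmi_tlv_policies
+ * before handing its payload to @iter.
+ */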
+static int
+ath11k_wmi_tlv_iter(struct ath11k_base *ab, const void *ptr, size_t len,
+ int (*iter)(struct ath11k_base *ab, u16 tag, u16 len,
+ const void *ptr, void *data),
+ void *data)
+{
+ const void *begin = ptr;
+ const struct wmi_tlv *tlv;
+ u16 tlv_tag, tlv_len;
+ int ret;
+
+ while (len > 0) {
+ if (len < sizeof(*tlv)) {
+ ath11k_err(ab, "wmi tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n",
+ ptr - begin, len, sizeof(*tlv));
+ return -EINVAL;
+ }
+
+ tlv = ptr;
+ tlv_tag = FIELD_GET(WMI_TLV_TAG, tlv->header);
+ tlv_len = FIELD_GET(WMI_TLV_LEN, tlv->header);
+ ptr += sizeof(*tlv);
+ len -= sizeof(*tlv);
+
+ if (tlv_len > len) {
+ ath11k_err(ab, "wmi tlv parse failure of tag %u at byte %zd (%zu bytes left, %u expected)\n",
+ tlv_tag, ptr - begin, len, tlv_len);
+ return -EINVAL;
+ }
+
+ if (tlv_tag < ARRAY_SIZE(wmi_tlv_policies) &&
+ wmi_tlv_policies[tlv_tag].min_len &&
+ wmi_tlv_policies[tlv_tag].min_len > tlv_len) {
+ ath11k_err(ab, "wmi tlv parse failure of tag %u at byte %zd (%u bytes is less than min length %zu)\n",
+ tlv_tag, ptr - begin, tlv_len,
+ wmi_tlv_policies[tlv_tag].min_len);
+ return -EINVAL;
+ }
+
+ ret = iter(ab, tlv_tag, tlv_len, ptr, data);
+ if (ret)
+ return ret;
+
+ ptr += tlv_len;
+ len -= tlv_len;
+ }
+
+ return 0;
+}
+
+static int ath11k_wmi_tlv_iter_parse(struct ath11k_base *ab, u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ const void **tb = data;
+
+ if (tag < WMI_TAG_MAX)
+ tb[tag] = ptr;
+
+ return 0;
+}
+
+static int ath11k_wmi_tlv_parse(struct ath11k_base *ar, const void **tb,
+ const void *ptr, size_t len)
+{
+ return ath11k_wmi_tlv_iter(ar, ptr, len, ath11k_wmi_tlv_iter_parse,
+ (void *)tb);
+}
+
+const void **ath11k_wmi_tlv_parse_alloc(struct ath11k_base *ab,
+ struct sk_buff *skb, gfp_t gfp)
+{
+ const void **tb;
+ int ret;
+
+ tb = kcalloc(WMI_TAG_MAX, sizeof(*tb), gfp);
+ if (!tb)
+ return ERR_PTR(-ENOMEM);
+
+ ret = ath11k_wmi_tlv_parse(ab, tb, skb->data, skb->len);
+ if (ret) {
+ kfree(tb);
+ return ERR_PTR(ret);
+ }
+
+ return tb;
+}
+
+static int ath11k_wmi_cmd_send_nowait(struct ath11k_pdev_wmi *wmi, struct sk_buff *skb,
+ u32 cmd_id)
+{
+ struct ath11k_skb_cb *skb_cb = ATH11K_SKB_CB(skb);
+ struct ath11k_base *ab = wmi->wmi_ab->ab;
+ struct wmi_cmd_hdr *cmd_hdr;
+ int ret;
+ u32 cmd = 0;
+
+ if (skb_push(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
+ return -ENOMEM;
+
+ cmd |= FIELD_PREP(WMI_CMD_HDR_CMD_ID, cmd_id);
+
+ cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
+ cmd_hdr->cmd_id = cmd;
+
+ trace_ath11k_wmi_cmd(ab, cmd_id, skb->data, skb->len);
+
+ memset(skb_cb, 0, sizeof(*skb_cb));
+ ret = ath11k_htc_send(&ab->htc, wmi->eid, skb);
+
+ if (ret)
+ goto err_pull;
+
+ return 0;
+
+err_pull:
+ skb_pull(skb, sizeof(struct wmi_cmd_hdr));
+ return ret;
+}
+
+int ath11k_wmi_cmd_send(struct ath11k_pdev_wmi *wmi, struct sk_buff *skb,
+ u32 cmd_id)
+{
+ struct ath11k_wmi_base *wmi_ab = wmi->wmi_ab;
+ int ret = -EOPNOTSUPP;
+ struct ath11k_base *ab = wmi_ab->ab;
+
+ might_sleep();
+
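+ /* The wait condition below is a GCC statement expression: every
+ * wakeup retries the send, and the wait completes once the result is
+ * no longer -EAGAIN (out of wmi tx credits) or, in the non-credit-flow
+ * case, -ENOBUFS (no free CE descriptors), or when it times out.
+ */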
+ if (ab->hw_params.credit_flow) {
+ wait_event_timeout(wmi_ab->tx_credits_wq, ({
+ ret = ath11k_wmi_cmd_send_nowait(wmi, skb, cmd_id);
+
+ if (ret && test_bit(ATH11K_FLAG_CRASH_FLUSH,
+ &wmi_ab->ab->dev_flags))
+ ret = -ESHUTDOWN;
+
+ (ret != -EAGAIN);
+ }), WMI_SEND_TIMEOUT_HZ);
+ } else {
+ wait_event_timeout(wmi->tx_ce_desc_wq, ({
+ ret = ath11k_wmi_cmd_send_nowait(wmi, skb, cmd_id);
+
+ if (ret && test_bit(ATH11K_FLAG_CRASH_FLUSH,
+ &wmi_ab->ab->dev_flags))
+ ret = -ESHUTDOWN;
+
+ (ret != -ENOBUFS);
+ }), WMI_SEND_TIMEOUT_HZ);
+ }
+
+ if (ret == -EAGAIN)
+ ath11k_warn(wmi_ab->ab, "wmi command %d timeout\n", cmd_id);
+
+ if (ret == -ENOBUFS)
+ ath11k_warn(wmi_ab->ab, "ce desc not available for wmi command %d\n",
+ cmd_id);
+
+ return ret;
+}
+
+static int ath11k_pull_svc_ready_ext(struct ath11k_pdev_wmi *wmi_handle,
+ const void *ptr,
+ struct ath11k_service_ext_param *param)
+{
+ const struct wmi_service_ready_ext_event *ev = ptr;
+
+ if (!ev)
+ return -EINVAL;
+
+ /* Move this to host based bitmap */
+ param->default_conc_scan_config_bits = ev->default_conc_scan_config_bits;
+ param->default_fw_config_bits = ev->default_fw_config_bits;
+ param->he_cap_info = ev->he_cap_info;
+ param->mpdu_density = ev->mpdu_density;
+ param->max_bssid_rx_filters = ev->max_bssid_rx_filters;
+ memcpy(&param->ppet, &ev->ppet, sizeof(param->ppet));
+
+ return 0;
+}
+
+static int
+ath11k_pull_mac_phy_cap_svc_ready_ext(struct ath11k_pdev_wmi *wmi_handle,
+ struct wmi_soc_mac_phy_hw_mode_caps *hw_caps,
+ struct wmi_hw_mode_capabilities *wmi_hw_mode_caps,
+ struct wmi_soc_hal_reg_capabilities *hal_reg_caps,
+ struct wmi_mac_phy_capabilities *wmi_mac_phy_caps,
+ u8 hw_mode_id, u8 phy_id,
+ struct ath11k_pdev *pdev)
+{
+ struct wmi_mac_phy_capabilities *mac_phy_caps;
+ struct ath11k_base *ab = wmi_handle->wmi_ab->ab;
+ struct ath11k_band_cap *cap_band;
+ struct ath11k_pdev_cap *pdev_cap = &pdev->cap;
+ u32 phy_map;
+ u32 hw_idx, phy_idx = 0;
+
+ if (!hw_caps || !wmi_hw_mode_caps || !hal_reg_caps)
+ return -EINVAL;
+
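+ /* Locate the requested hw mode; phy_idx accumulates the number of
+ * phys belonging to the preceding hw modes, so that phy_idx + phy_id
+ * indexes this mode's entry in the flat wmi_mac_phy_caps array.
+ */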
+ for (hw_idx = 0; hw_idx < hw_caps->num_hw_modes; hw_idx++) {
+ if (hw_mode_id == wmi_hw_mode_caps[hw_idx].hw_mode_id)
+ break;
+
+ phy_map = wmi_hw_mode_caps[hw_idx].phy_id_map;
+ while (phy_map) {
+ phy_map >>= 1;
+ phy_idx++;
+ }
+ }
+
+ if (hw_idx == hw_caps->num_hw_modes)
+ return -EINVAL;
+
+ phy_idx += phy_id;
+ if (phy_id >= hal_reg_caps->num_phy)
+ return -EINVAL;
+
+ mac_phy_caps = wmi_mac_phy_caps + phy_idx;
+
+ pdev->pdev_id = mac_phy_caps->pdev_id;
+ pdev_cap->supported_bands |= mac_phy_caps->supported_bands;
+ pdev_cap->ampdu_density = mac_phy_caps->ampdu_density;
+ ab->target_pdev_ids[ab->target_pdev_count].supported_bands =
+ mac_phy_caps->supported_bands;
+ ab->target_pdev_ids[ab->target_pdev_count].pdev_id = mac_phy_caps->pdev_id;
+ ab->target_pdev_count++;
+
+ if (!(mac_phy_caps->supported_bands & WMI_HOST_WLAN_2G_CAP) &&
+ !(mac_phy_caps->supported_bands & WMI_HOST_WLAN_5G_CAP))
+ return -EINVAL;
+
+ /* Take the non-zero tx/rx chainmask. If the tx/rx chainmask differs
+ * from band to band for a single radio, we still need to work out how
+ * that case should be handled.
+ */
+ if (mac_phy_caps->supported_bands & WMI_HOST_WLAN_2G_CAP) {
+ pdev_cap->tx_chain_mask = mac_phy_caps->tx_chain_mask_2g;
+ pdev_cap->rx_chain_mask = mac_phy_caps->rx_chain_mask_2g;
+ }
+
+ if (mac_phy_caps->supported_bands & WMI_HOST_WLAN_5G_CAP) {
+ pdev_cap->vht_cap = mac_phy_caps->vht_cap_info_5g;
+ pdev_cap->vht_mcs = mac_phy_caps->vht_supp_mcs_5g;
+ pdev_cap->he_mcs = mac_phy_caps->he_supp_mcs_5g;
+ pdev_cap->tx_chain_mask = mac_phy_caps->tx_chain_mask_5g;
+ pdev_cap->rx_chain_mask = mac_phy_caps->rx_chain_mask_5g;
+ pdev_cap->nss_ratio_enabled =
+ WMI_NSS_RATIO_ENABLE_DISABLE_GET(mac_phy_caps->nss_ratio);
+ pdev_cap->nss_ratio_info =
+ WMI_NSS_RATIO_INFO_GET(mac_phy_caps->nss_ratio);
+ }
+
+ /* tx/rx chainmask reported from fw depends on the actual hw chains used.
+ * For example, for 4x4 capable macphys, the first 4 chains can be used for
+ * the first mac and the remaining 4 chains for the second mac, or
+ * vice-versa. In this case, tx/rx chainmask 0xf will be advertised for the
+ * first mac and 0xf0 for the second mac, or vice-versa. Compute the shift
+ * value for the tx/rx chainmask which will be used to advertise the
+ * supported ht/vht rates to mac80211.
+ */
+ pdev_cap->tx_chain_mask_shift =
+ find_first_bit((unsigned long *)&pdev_cap->tx_chain_mask, 32);
+ pdev_cap->rx_chain_mask_shift =
+ find_first_bit((unsigned long *)&pdev_cap->rx_chain_mask, 32);
+
+ if (mac_phy_caps->supported_bands & WMI_HOST_WLAN_2G_CAP) {
+ cap_band = &pdev_cap->band[NL80211_BAND_2GHZ];
+ cap_band->phy_id = mac_phy_caps->phy_id;
+ cap_band->max_bw_supported = mac_phy_caps->max_bw_supported_2g;
+ cap_band->ht_cap_info = mac_phy_caps->ht_cap_info_2g;
+ cap_band->he_cap_info[0] = mac_phy_caps->he_cap_info_2g;
+ cap_band->he_cap_info[1] = mac_phy_caps->he_cap_info_2g_ext;
+ cap_band->he_mcs = mac_phy_caps->he_supp_mcs_2g;
+ memcpy(cap_band->he_cap_phy_info, &mac_phy_caps->he_cap_phy_info_2g,
+ sizeof(u32) * PSOC_HOST_MAX_PHY_SIZE);
+ memcpy(&cap_band->he_ppet, &mac_phy_caps->he_ppet2g,
+ sizeof(struct ath11k_ppe_threshold));
+ }
+
+ if (mac_phy_caps->supported_bands & WMI_HOST_WLAN_5G_CAP) {
+ cap_band = &pdev_cap->band[NL80211_BAND_5GHZ];
+ cap_band->phy_id = mac_phy_caps->phy_id;
+ cap_band->max_bw_supported = mac_phy_caps->max_bw_supported_5g;
+ cap_band->ht_cap_info = mac_phy_caps->ht_cap_info_5g;
+ cap_band->he_cap_info[0] = mac_phy_caps->he_cap_info_5g;
+ cap_band->he_cap_info[1] = mac_phy_caps->he_cap_info_5g_ext;
+ cap_band->he_mcs = mac_phy_caps->he_supp_mcs_5g;
+ memcpy(cap_band->he_cap_phy_info, &mac_phy_caps->he_cap_phy_info_5g,
+ sizeof(u32) * PSOC_HOST_MAX_PHY_SIZE);
+ memcpy(&cap_band->he_ppet, &mac_phy_caps->he_ppet5g,
+ sizeof(struct ath11k_ppe_threshold));
+
+ cap_band = &pdev_cap->band[NL80211_BAND_6GHZ];
+ cap_band->max_bw_supported = mac_phy_caps->max_bw_supported_5g;
+ cap_band->ht_cap_info = mac_phy_caps->ht_cap_info_5g;
+ cap_band->he_cap_info[0] = mac_phy_caps->he_cap_info_5g;
+ cap_band->he_cap_info[1] = mac_phy_caps->he_cap_info_5g_ext;
+ cap_band->he_mcs = mac_phy_caps->he_supp_mcs_5g;
+ memcpy(cap_band->he_cap_phy_info, &mac_phy_caps->he_cap_phy_info_5g,
+ sizeof(u32) * PSOC_HOST_MAX_PHY_SIZE);
+ memcpy(&cap_band->he_ppet, &mac_phy_caps->he_ppet5g,
+ sizeof(struct ath11k_ppe_threshold));
+ }
+
+ return 0;
+}
+
+static int
+ath11k_pull_reg_cap_svc_rdy_ext(struct ath11k_pdev_wmi *wmi_handle,
+ struct wmi_soc_hal_reg_capabilities *reg_caps,
+ struct wmi_hal_reg_capabilities_ext *wmi_ext_reg_cap,
+ u8 phy_idx,
+ struct ath11k_hal_reg_capabilities_ext *param)
+{
+ struct wmi_hal_reg_capabilities_ext *ext_reg_cap;
+
+ if (!reg_caps || !wmi_ext_reg_cap)
+ return -EINVAL;
+
+ if (phy_idx >= reg_caps->num_phy)
+ return -EINVAL;
+
+ ext_reg_cap = &wmi_ext_reg_cap[phy_idx];
+
+ param->phy_id = ext_reg_cap->phy_id;
+ param->eeprom_reg_domain = ext_reg_cap->eeprom_reg_domain;
+ param->eeprom_reg_domain_ext =
+ ext_reg_cap->eeprom_reg_domain_ext;
+ param->regcap1 = ext_reg_cap->regcap1;
+ param->regcap2 = ext_reg_cap->regcap2;
+ /* check if param->wireless_mode is needed */
+ param->low_2ghz_chan = ext_reg_cap->low_2ghz_chan;
+ param->high_2ghz_chan = ext_reg_cap->high_2ghz_chan;
+ param->low_5ghz_chan = ext_reg_cap->low_5ghz_chan;
+ param->high_5ghz_chan = ext_reg_cap->high_5ghz_chan;
+
+ return 0;
+}
+
+static int ath11k_pull_service_ready_tlv(struct ath11k_base *ab,
+ const void *evt_buf,
+ struct ath11k_targ_cap *cap)
+{
+ const struct wmi_service_ready_event *ev = evt_buf;
+
+ if (!ev) {
+ ath11k_err(ab, "%s: failed by NULL param\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ cap->phy_capability = ev->phy_capability;
+ cap->max_frag_entry = ev->max_frag_entry;
+ cap->num_rf_chains = ev->num_rf_chains;
+ cap->ht_cap_info = ev->ht_cap_info;
+ cap->vht_cap_info = ev->vht_cap_info;
+ cap->vht_supp_mcs = ev->vht_supp_mcs;
+ cap->hw_min_tx_power = ev->hw_min_tx_power;
+ cap->hw_max_tx_power = ev->hw_max_tx_power;
+ cap->sys_cap_info = ev->sys_cap_info;
+ cap->min_pkt_size_enable = ev->min_pkt_size_enable;
+ cap->max_bcn_ie_size = ev->max_bcn_ie_size;
+ cap->max_num_scan_channels = ev->max_num_scan_channels;
+ cap->max_supported_macs = ev->max_supported_macs;
+ cap->wmi_fw_sub_feat_caps = ev->wmi_fw_sub_feat_caps;
+ cap->txrx_chainmask = ev->txrx_chainmask;
+ cap->default_dbs_hw_mode_index = ev->default_dbs_hw_mode_index;
+ cap->num_msdu_desc = ev->num_msdu_desc;
+
+ return 0;
+}
+
+/* Save the wmi_service_bitmap into a linear bitmap. The wmi_services in
+ * wmi_service ready event are advertised in b0-b3 (LSB 4-bits) of each
+ * 4-byte word.
+ */
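+/* Illustrative layout, assuming WMI_SERVICE_BITS_IN_SIZE32 is 4 (per the
+ * comment above): word 0 carries services 0-3 in bits 0-3, word 1 carries
+ * services 4-7 in bits 0-3, and so on. The inner do/while advances j
+ * through one word's worth of service indices per outer iteration.
+ */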
+static void ath11k_wmi_service_bitmap_copy(struct ath11k_pdev_wmi *wmi,
+ const u32 *wmi_svc_bm)
+{
+ int i, j;
+
+ for (i = 0, j = 0; i < WMI_SERVICE_BM_SIZE && j < WMI_MAX_SERVICE; i++) {
+ do {
+ if (wmi_svc_bm[i] & BIT(j % WMI_SERVICE_BITS_IN_SIZE32))
+ set_bit(j, wmi->wmi_ab->svc_map);
+ } while (++j % WMI_SERVICE_BITS_IN_SIZE32);
+ }
+}
+
+static int ath11k_wmi_tlv_svc_rdy_parse(struct ath11k_base *ab, u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct wmi_tlv_svc_ready_parse *svc_ready = data;
+ struct ath11k_pdev_wmi *wmi_handle = &ab->wmi_ab.wmi[0];
+ u16 expect_len;
+
+ switch (tag) {
+ case WMI_TAG_SERVICE_READY_EVENT:
+ if (ath11k_pull_service_ready_tlv(ab, ptr, &ab->target_caps))
+ return -EINVAL;
+ break;
+
+ case WMI_TAG_ARRAY_UINT32:
+ if (!svc_ready->wmi_svc_bitmap_done) {
+ expect_len = WMI_SERVICE_BM_SIZE * sizeof(u32);
+ if (len < expect_len) {
+ ath11k_warn(ab, "invalid len %d for the tag 0x%x\n",
+ len, tag);
+ return -EINVAL;
+ }
+
+ ath11k_wmi_service_bitmap_copy(wmi_handle, ptr);
+
+ svc_ready->wmi_svc_bitmap_done = true;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int ath11k_service_ready_event(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ struct wmi_tlv_svc_ready_parse svc_ready = { };
+ int ret;
+
+ ret = ath11k_wmi_tlv_iter(ab, skb->data, skb->len,
+ ath11k_wmi_tlv_svc_rdy_parse,
+ &svc_ready);
+ if (ret) {
+ ath11k_warn(ab, "failed to parse tlv %d\n", ret);
+ return ret;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "event service ready");
+
+ return 0;
+}
+
+struct sk_buff *ath11k_wmi_alloc_skb(struct ath11k_wmi_base *wmi_ab, u32 len)
+{
+ struct sk_buff *skb;
+ struct ath11k_base *ab = wmi_ab->ab;
+ u32 round_len = roundup(len, 4);
+
+ skb = ath11k_htc_alloc_skb(ab, WMI_SKB_HEADROOM + round_len);
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, WMI_SKB_HEADROOM);
+ if (!IS_ALIGNED((unsigned long)skb->data, 4))
+ ath11k_warn(ab, "unaligned WMI skb data\n");
+
+ skb_put(skb, round_len);
+ memset(skb->data, 0, round_len);
+
+ return skb;
+}
+
+static u32 ath11k_wmi_mgmt_get_freq(struct ath11k *ar,
+ struct ieee80211_tx_info *info)
+{
+ struct ath11k_base *ab = ar->ab;
+ u32 freq = 0;
+
+ if (ab->hw_params.support_off_channel_tx &&
+ ar->scan.is_roc &&
+ (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN))
+ freq = ar->scan.roc_freq;
+
+ return freq;
+}
+
+int ath11k_wmi_mgmt_send(struct ath11k *ar, u32 vdev_id, u32 buf_id,
+ struct sk_buff *frame)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(frame);
+ struct wmi_mgmt_send_cmd *cmd;
+ struct wmi_tlv *frame_tlv;
+ struct sk_buff *skb;
+ u32 buf_len;
+ int ret, len;
+
+ buf_len = frame->len < WMI_MGMT_SEND_DOWNLD_LEN ?
+ frame->len : WMI_MGMT_SEND_DOWNLD_LEN;
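+
+ /* Only up to WMI_MGMT_SEND_DOWNLD_LEN bytes are copied inline into
+ * the TLV; the firmware is handed the full frame via its DMA address
+ * (paddr_lo/paddr_hi below), with frame_len giving the true length.
+ */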
+
+ len = sizeof(*cmd) + sizeof(*frame_tlv) + roundup(buf_len, 4);
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_mgmt_send_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_MGMT_TX_SEND_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->vdev_id = vdev_id;
+ cmd->desc_id = buf_id;
+ cmd->chanfreq = ath11k_wmi_mgmt_get_freq(ar, info);
+ cmd->paddr_lo = lower_32_bits(ATH11K_SKB_CB(frame)->paddr);
+ cmd->paddr_hi = upper_32_bits(ATH11K_SKB_CB(frame)->paddr);
+ cmd->frame_len = frame->len;
+ cmd->buf_len = buf_len;
+ cmd->tx_params_valid = 0;
+
+ frame_tlv = (struct wmi_tlv *)(skb->data + sizeof(*cmd));
+ frame_tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
+ FIELD_PREP(WMI_TLV_LEN, buf_len);
+
+ memcpy(frame_tlv->value, frame->data, buf_len);
+
+ ath11k_ce_byte_swap(frame_tlv->value, buf_len);
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_MGMT_TX_SEND_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to submit WMI_MGMT_TX_SEND_CMDID cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd mgmt tx send");
+
+ return ret;
+}
+
+int ath11k_wmi_vdev_create(struct ath11k *ar, u8 *macaddr,
+ struct vdev_create_params *param)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_vdev_create_cmd *cmd;
+ struct sk_buff *skb;
+ struct wmi_vdev_txrx_streams *txrx_streams;
+ struct wmi_tlv *tlv;
+ int ret, len;
+ void *ptr;
+
+ /* It can be optimized by sending tx/rx chain configuration
+ * only for supported bands instead of always sending it for
+ * both the bands.
+ */
+ len = sizeof(*cmd) + TLV_HDR_SIZE +
+ (WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams));
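+
+ /* Frame layout: [wmi_vdev_create_cmd][ARRAY_STRUCT tlv header]
+ * [txrx_streams for 2G][txrx_streams for 5G]
+ */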
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_vdev_create_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_CREATE_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->vdev_id = param->if_id;
+ cmd->vdev_type = param->type;
+ cmd->vdev_subtype = param->subtype;
+ cmd->num_cfg_txrx_streams = WMI_NUM_SUPPORTED_BAND_MAX;
+ cmd->pdev_id = param->pdev_id;
+ cmd->mbssid_flags = param->mbssid_flags;
+ cmd->mbssid_tx_vdev_id = param->mbssid_tx_vdev_id;
+
+ ether_addr_copy(cmd->vdev_macaddr.addr, macaddr);
+
+ ptr = skb->data + sizeof(*cmd);
+ len = WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams);
+
+ tlv = ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
+ FIELD_PREP(WMI_TLV_LEN, len);
+
+ ptr += TLV_HDR_SIZE;
+ txrx_streams = ptr;
+ len = sizeof(*txrx_streams);
+ txrx_streams->tlv_header =
+ FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_TXRX_STREAMS) |
+ FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+ txrx_streams->band = WMI_TPC_CHAINMASK_CONFIG_BAND_2G;
+ txrx_streams->supported_tx_streams =
+ param->chains[NL80211_BAND_2GHZ].tx;
+ txrx_streams->supported_rx_streams =
+ param->chains[NL80211_BAND_2GHZ].rx;
+
+ txrx_streams++;
+ txrx_streams->tlv_header =
+ FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_TXRX_STREAMS) |
+ FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+ txrx_streams->band = WMI_TPC_CHAINMASK_CONFIG_BAND_5G;
+ txrx_streams->supported_tx_streams =
+ param->chains[NL80211_BAND_5GHZ].tx;
+ txrx_streams->supported_rx_streams =
+ param->chains[NL80211_BAND_5GHZ].rx;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_CREATE_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to submit WMI_VDEV_CREATE_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "cmd vdev create id %d type %d subtype %d macaddr %pM pdevid %d\n",
+ param->if_id, param->type, param->subtype,
+ macaddr, param->pdev_id);
+
+ return ret;
+}
+
+int ath11k_wmi_vdev_delete(struct ath11k *ar, u8 vdev_id)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_vdev_delete_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_vdev_delete_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_DELETE_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->vdev_id = vdev_id;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_DELETE_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to submit WMI_VDEV_DELETE_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd vdev delete id %d\n", vdev_id);
+
+ return ret;
+}
+
+int ath11k_wmi_vdev_stop(struct ath11k *ar, u8 vdev_id)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_vdev_stop_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_vdev_stop_cmd *)skb->data;
+
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_STOP_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->vdev_id = vdev_id;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_STOP_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to submit WMI_VDEV_STOP cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd vdev stop id 0x%x\n", vdev_id);
+
+ return ret;
+}
+
+int ath11k_wmi_vdev_down(struct ath11k *ar, u8 vdev_id)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_vdev_down_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_vdev_down_cmd *)skb->data;
+
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_DOWN_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->vdev_id = vdev_id;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_DOWN_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to submit WMI_VDEV_DOWN cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd vdev down id 0x%x\n", vdev_id);
+
+ return ret;
+}
+
+static void ath11k_wmi_put_wmi_channel(struct wmi_channel *chan,
+ struct wmi_vdev_start_req_arg *arg)
+{
+ u32 center_freq1 = arg->channel.band_center_freq1;
+
+ memset(chan, 0, sizeof(*chan));
+
+ chan->mhz = arg->channel.freq;
+ chan->band_center_freq1 = arg->channel.band_center_freq1;
+
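+ /* For 160 MHz (HE160), the firmware interface takes band_center_freq1
+ * as the centre of the 80 MHz segment containing the primary channel
+ * (hence the +/-40 MHz shift below), with band_center_freq2 carrying
+ * the centre of the full 160 MHz span.
+ */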
+ if (arg->channel.mode == MODE_11AX_HE160) {
+ if (arg->channel.freq > arg->channel.band_center_freq1)
+ chan->band_center_freq1 = center_freq1 + 40;
+ else
+ chan->band_center_freq1 = center_freq1 - 40;
+
+ chan->band_center_freq2 = arg->channel.band_center_freq1;
+
+ } else if ((arg->channel.mode == MODE_11AC_VHT80_80) ||
+ (arg->channel.mode == MODE_11AX_HE80_80)) {
+ chan->band_center_freq2 = arg->channel.band_center_freq2;
+ } else {
+ chan->band_center_freq2 = 0;
+ }
+
+ chan->info |= FIELD_PREP(WMI_CHAN_INFO_MODE, arg->channel.mode);
+ if (arg->channel.passive)
+ chan->info |= WMI_CHAN_INFO_PASSIVE;
+ if (arg->channel.allow_ibss)
+ chan->info |= WMI_CHAN_INFO_ADHOC_ALLOWED;
+ if (arg->channel.allow_ht)
+ chan->info |= WMI_CHAN_INFO_ALLOW_HT;
+ if (arg->channel.allow_vht)
+ chan->info |= WMI_CHAN_INFO_ALLOW_VHT;
+ if (arg->channel.allow_he)
+ chan->info |= WMI_CHAN_INFO_ALLOW_HE;
+ if (arg->channel.ht40plus)
+ chan->info |= WMI_CHAN_INFO_HT40_PLUS;
+ if (arg->channel.chan_radar)
+ chan->info |= WMI_CHAN_INFO_DFS;
+ if (arg->channel.freq2_radar)
+ chan->info |= WMI_CHAN_INFO_DFS_FREQ2;
+
+ chan->reg_info_1 = FIELD_PREP(WMI_CHAN_REG_INFO1_MAX_PWR,
+ arg->channel.max_power) |
+ FIELD_PREP(WMI_CHAN_REG_INFO1_MAX_REG_PWR,
+ arg->channel.max_reg_power);
+
+ chan->reg_info_2 = FIELD_PREP(WMI_CHAN_REG_INFO2_ANT_MAX,
+ arg->channel.max_antenna_gain) |
+ FIELD_PREP(WMI_CHAN_REG_INFO2_MAX_TX_PWR,
+ arg->channel.max_power);
+}
+
+int ath11k_wmi_vdev_start(struct ath11k *ar, struct wmi_vdev_start_req_arg *arg,
+ bool restart)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_vdev_start_request_cmd *cmd;
+ struct sk_buff *skb;
+ struct wmi_channel *chan;
+ struct wmi_tlv *tlv;
+ void *ptr;
+ int ret, len;
+
+ if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
+ return -EINVAL;
+
+ len = sizeof(*cmd) + sizeof(*chan) + TLV_HDR_SIZE;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_vdev_start_request_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_VDEV_START_REQUEST_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->vdev_id = arg->vdev_id;
+ cmd->beacon_interval = arg->bcn_intval;
+ cmd->bcn_tx_rate = arg->bcn_tx_rate;
+ cmd->dtim_period = arg->dtim_period;
+ cmd->num_noa_descriptors = arg->num_noa_descriptors;
+ cmd->preferred_rx_streams = arg->pref_rx_streams;
+ cmd->preferred_tx_streams = arg->pref_tx_streams;
+ cmd->cac_duration_ms = arg->cac_duration_ms;
+ cmd->regdomain = arg->regdomain;
+ cmd->he_ops = arg->he_ops;
+ cmd->mbssid_flags = arg->mbssid_flags;
+ cmd->mbssid_tx_vdev_id = arg->mbssid_tx_vdev_id;
+
+ if (!restart) {
+ if (arg->ssid) {
+ cmd->ssid.ssid_len = arg->ssid_len;
+ memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len);
+ }
+ if (arg->hidden_ssid)
+ cmd->flags |= WMI_VDEV_START_HIDDEN_SSID;
+ if (arg->pmf_enabled)
+ cmd->flags |= WMI_VDEV_START_PMF_ENABLED;
+ }
+
+ cmd->flags |= WMI_VDEV_START_LDPC_RX_ENABLED;
+ if (test_bit(ATH11K_FLAG_HW_CRYPTO_DISABLED, &ar->ab->dev_flags))
+ cmd->flags |= WMI_VDEV_START_HW_ENCRYPTION_DISABLED;
+
+ ptr = skb->data + sizeof(*cmd);
+ chan = ptr;
+
+ ath11k_wmi_put_wmi_channel(chan, arg);
+
+ chan->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_CHANNEL) |
+ FIELD_PREP(WMI_TLV_LEN,
+ sizeof(*chan) - TLV_HDR_SIZE);
+ ptr += sizeof(*chan);
+
+ tlv = ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
+ FIELD_PREP(WMI_TLV_LEN, 0);
+
+ /* Note: This is a nested TLV containing:
+ * [wmi_tlv][ath11k_wmi_p2p_noa_descriptor][wmi_tlv]..
+ */
+
+ ptr += sizeof(*tlv);
+
+ if (restart)
+ ret = ath11k_wmi_cmd_send(wmi, skb,
+ WMI_VDEV_RESTART_REQUEST_CMDID);
+ else
+ ret = ath11k_wmi_cmd_send(wmi, skb,
+ WMI_VDEV_START_REQUEST_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to submit vdev_%s cmd\n",
+ restart ? "restart" : "start");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd vdev %s id 0x%x freq 0x%x mode 0x%x\n",
+ restart ? "restart" : "start", arg->vdev_id,
+ arg->channel.freq, arg->channel.mode);
+
+ return ret;
+}
+
+int ath11k_wmi_vdev_up(struct ath11k *ar, u32 vdev_id, u32 aid, const u8 *bssid,
+ u8 *tx_bssid, u32 nontx_profile_idx, u32 nontx_profile_cnt)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_vdev_up_cmd *cmd;
+ struct ieee80211_bss_conf *bss_conf;
+ struct ath11k_vif *arvif;
+ struct sk_buff *skb;
+ int ret;
+
+ arvif = ath11k_mac_get_arvif(ar, vdev_id);
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_vdev_up_cmd *)skb->data;
+
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_UP_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->vdev_id = vdev_id;
+ cmd->vdev_assoc_id = aid;
+
+ ether_addr_copy(cmd->vdev_bssid.addr, bssid);
+
+ cmd->nontx_profile_idx = nontx_profile_idx;
+ cmd->nontx_profile_cnt = nontx_profile_cnt;
+ if (tx_bssid)
+ ether_addr_copy(cmd->tx_vdev_bssid.addr, tx_bssid);
+
+ if (arvif && arvif->vif->type == NL80211_IFTYPE_STATION) {
+ bss_conf = &arvif->vif->bss_conf;
+
+ if (bss_conf->nontransmitted) {
+ ether_addr_copy(cmd->tx_vdev_bssid.addr,
+ bss_conf->transmitter_bssid);
+ cmd->nontx_profile_idx = bss_conf->bssid_index;
+ cmd->nontx_profile_cnt = bss_conf->bssid_indicator;
+ }
+ }
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_UP_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to submit WMI_VDEV_UP cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "cmd vdev up id 0x%x assoc id %d bssid %pM\n",
+ vdev_id, aid, bssid);
+
+ return ret;
+}
+
+int ath11k_wmi_send_peer_create_cmd(struct ath11k *ar,
+ struct peer_create_params *param)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_peer_create_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_peer_create_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PEER_CREATE_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ ether_addr_copy(cmd->peer_macaddr.addr, param->peer_addr);
+ cmd->peer_type = param->peer_type;
+ cmd->vdev_id = param->vdev_id;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PEER_CREATE_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to submit WMI_PEER_CREATE cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "cmd peer create vdev_id %d peer_addr %pM\n",
+ param->vdev_id, param->peer_addr);
+
+ return ret;
+}
+
+int ath11k_wmi_send_peer_delete_cmd(struct ath11k *ar,
+ const u8 *peer_addr, u8 vdev_id)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_peer_delete_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_peer_delete_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PEER_DELETE_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
+ cmd->vdev_id = vdev_id;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PEER_DELETE_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send WMI_PEER_DELETE cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "cmd peer delete vdev_id %d peer_addr %pM\n",
+ vdev_id, peer_addr);
+
+ return ret;
+}
+
+int ath11k_wmi_send_pdev_set_regdomain(struct ath11k *ar,
+ struct pdev_set_regdomain_params *param)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_pdev_set_regdomain_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pdev_set_regdomain_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_PDEV_SET_REGDOMAIN_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->reg_domain = param->current_rd_in_use;
+ cmd->reg_domain_2g = param->current_rd_2g;
+ cmd->reg_domain_5g = param->current_rd_5g;
+ cmd->conformance_test_limit_2g = param->ctl_2g;
+ cmd->conformance_test_limit_5g = param->ctl_5g;
+ cmd->dfs_domain = param->dfs_domain;
+ cmd->pdev_id = param->pdev_id;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_REGDOMAIN_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send WMI_PDEV_SET_REGDOMAIN cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "cmd pdev regd rd %d rd2g %d rd5g %d domain %d pdev id %d\n",
+ param->current_rd_in_use, param->current_rd_2g,
+ param->current_rd_5g, param->dfs_domain, param->pdev_id);
+
+ return ret;
+}
+
+int ath11k_wmi_set_peer_param(struct ath11k *ar, const u8 *peer_addr,
+ u32 vdev_id, u32 param_id, u32 param_val)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_peer_set_param_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_peer_set_param_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PEER_SET_PARAM_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
+ cmd->vdev_id = vdev_id;
+ cmd->param_id = param_id;
+ cmd->param_value = param_val;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PEER_SET_PARAM_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send WMI_PEER_SET_PARAM cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "cmd peer set param vdev %d peer 0x%pM set param %d value %d\n",
+ vdev_id, peer_addr, param_id, param_val);
+
+ return ret;
+}
+
+int ath11k_wmi_send_peer_flush_tids_cmd(struct ath11k *ar,
+ u8 peer_addr[ETH_ALEN],
+ struct peer_flush_params *param)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_peer_flush_tids_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_peer_flush_tids_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PEER_FLUSH_TIDS_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
+ cmd->peer_tid_bitmap = param->peer_tid_bitmap;
+ cmd->vdev_id = param->vdev_id;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PEER_FLUSH_TIDS_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send WMI_PEER_FLUSH_TIDS cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "cmd peer flush tids vdev_id %d peer_addr %pM tids %08x\n",
+ param->vdev_id, peer_addr, param->peer_tid_bitmap);
+
+ return ret;
+}
+
+int ath11k_wmi_peer_rx_reorder_queue_setup(struct ath11k *ar,
+ int vdev_id, const u8 *addr,
+ dma_addr_t paddr, u8 tid,
+ u8 ba_window_size_valid,
+ u32 ba_window_size)
+{
+ struct wmi_peer_reorder_queue_setup_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_peer_reorder_queue_setup_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_REORDER_QUEUE_SETUP_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ ether_addr_copy(cmd->peer_macaddr.addr, addr);
+ cmd->vdev_id = vdev_id;
+ cmd->tid = tid;
+ cmd->queue_ptr_lo = lower_32_bits(paddr);
+ cmd->queue_ptr_hi = upper_32_bits(paddr);
+ cmd->queue_no = tid;
+ cmd->ba_window_size_valid = ba_window_size_valid;
+ cmd->ba_window_size = ba_window_size;
+
+ ret = ath11k_wmi_cmd_send(ar->wmi, skb,
+ WMI_PEER_REORDER_QUEUE_SETUP_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send WMI_PEER_REORDER_QUEUE_SETUP\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "cmd peer reorder queue setup addr %pM vdev_id %d tid %d\n",
+ addr, vdev_id, tid);
+
+ return ret;
+}
+
+int
+ath11k_wmi_rx_reord_queue_remove(struct ath11k *ar,
+ struct rx_reorder_queue_remove_params *param)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_peer_reorder_queue_remove_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_peer_reorder_queue_remove_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_REORDER_QUEUE_REMOVE_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ ether_addr_copy(cmd->peer_macaddr.addr, param->peer_macaddr);
+ cmd->vdev_id = param->vdev_id;
+ cmd->tid_mask = param->peer_tid_bitmap;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb,
+ WMI_PEER_REORDER_QUEUE_REMOVE_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send WMI_PEER_REORDER_QUEUE_REMOVE_CMDID");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "cmd peer reorder queue remove peer_macaddr %pM vdev_id %d tid_map %d",
+ param->peer_macaddr, param->vdev_id, param->peer_tid_bitmap);
+
+ return ret;
+}
+
+int ath11k_wmi_pdev_set_param(struct ath11k *ar, u32 param_id,
+ u32 param_value, u8 pdev_id)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_pdev_set_param_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pdev_set_param_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_SET_PARAM_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->pdev_id = pdev_id;
+ cmd->param_id = param_id;
+ cmd->param_value = param_value;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_PARAM_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send WMI_PDEV_SET_PARAM cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "cmd pdev set param %d pdev id %d value %d\n",
+ param_id, pdev_id, param_value);
+
+ return ret;
+}
+
+int ath11k_wmi_pdev_set_ps_mode(struct ath11k *ar, int vdev_id,
+ enum wmi_sta_ps_mode psmode)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_pdev_set_ps_mode_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pdev_set_ps_mode_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_STA_POWERSAVE_MODE_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->vdev_id = vdev_id;
+ cmd->sta_ps_mode = psmode;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_STA_POWERSAVE_MODE_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send WMI_PDEV_SET_PARAM cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "cmd sta powersave mode psmode %d vdev id %d\n",
+ psmode, vdev_id);
+
+ return ret;
+}
+
+int ath11k_wmi_pdev_suspend(struct ath11k *ar, u32 suspend_opt,
+ u32 pdev_id)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_pdev_suspend_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pdev_suspend_cmd *)skb->data;
+
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_SUSPEND_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->suspend_opt = suspend_opt;
+ cmd->pdev_id = pdev_id;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_SUSPEND_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send WMI_PDEV_SUSPEND cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "cmd pdev suspend pdev_id %d\n", pdev_id);
+
+ return ret;
+}
+
+int ath11k_wmi_pdev_resume(struct ath11k *ar, u32 pdev_id)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_pdev_resume_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pdev_resume_cmd *)skb->data;
+
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_RESUME_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->pdev_id = pdev_id;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_RESUME_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send WMI_PDEV_RESUME cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "cmd pdev resume pdev id %d\n", pdev_id);
+
+ return ret;
+}
+
+/* TODO FW Support for the cmd is not available yet.
+ * Can be tested once the command and corresponding
+ * event is implemented in FW
+ */
+int ath11k_wmi_pdev_bss_chan_info_request(struct ath11k *ar,
+ enum wmi_bss_chan_info_req_type type)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_pdev_bss_chan_info_req_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pdev_bss_chan_info_req_cmd *)skb->data;
+
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_PDEV_BSS_CHAN_INFO_REQUEST) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->req_type = type;
+ cmd->pdev_id = ar->pdev->pdev_id;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb,
+ WMI_PDEV_BSS_CHAN_INFO_REQUEST_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send WMI_PDEV_BSS_CHAN_INFO_REQUEST cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "cmd pdev bss chan info request type %d\n", type);
+
+ return ret;
+}
+
+int ath11k_wmi_send_set_ap_ps_param_cmd(struct ath11k *ar, u8 *peer_addr,
+ struct ap_ps_params *param)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_ap_ps_peer_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_ap_ps_peer_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_AP_PS_PEER_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->vdev_id = param->vdev_id;
+ ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
+ cmd->param = param->param;
+ cmd->value = param->value;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_AP_PS_PEER_PARAM_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send WMI_AP_PS_PEER_PARAM_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "cmd ap ps peer param vdev id %d peer %pM param %d value %d\n",
+ param->vdev_id, peer_addr, param->param, param->value);
+
+ return ret;
+}
+
+int ath11k_wmi_set_sta_ps_param(struct ath11k *ar, u32 vdev_id,
+ u32 param, u32 param_value)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_sta_powersave_param_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_sta_powersave_param_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_STA_POWERSAVE_PARAM_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->vdev_id = vdev_id;
+ cmd->param = param;
+ cmd->value = param_value;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_STA_POWERSAVE_PARAM_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send WMI_STA_POWERSAVE_PARAM_CMDID");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "cmd set powersave param vdev_id %d param %d value %d\n",
+ vdev_id, param, param_value);
+
+ return ret;
+}
+
+int ath11k_wmi_force_fw_hang_cmd(struct ath11k *ar, u32 type, u32 delay_time_ms)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_force_fw_hang_cmd *cmd;
+ struct sk_buff *skb;
+ int ret, len;
+
+ len = sizeof(*cmd);
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_force_fw_hang_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_FORCE_FW_HANG_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+
+ cmd->type = type;
+ cmd->delay_time_ms = delay_time_ms;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_FORCE_FW_HANG_CMDID);
+
+ if (ret) {
+ ath11k_warn(ar->ab, "Failed to send WMI_FORCE_FW_HANG_CMDID");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd force fw hang");
+
+ return ret;
+}
+
+int ath11k_wmi_vdev_set_param_cmd(struct ath11k *ar, u32 vdev_id,
+ u32 param_id, u32 param_value)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_vdev_set_param_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_vdev_set_param_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_SET_PARAM_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->vdev_id = vdev_id;
+ cmd->param_id = param_id;
+ cmd->param_value = param_value;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_SET_PARAM_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send WMI_VDEV_SET_PARAM_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "cmd vdev set param vdev 0x%x param %d value %d\n",
+ vdev_id, param_id, param_value);
+
+ return ret;
+}
+
+int ath11k_wmi_send_stats_request_cmd(struct ath11k *ar,
+ struct stats_request_params *param)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_request_stats_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_request_stats_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_REQUEST_STATS_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->stats_id = param->stats_id;
+ cmd->vdev_id = param->vdev_id;
+ cmd->pdev_id = param->pdev_id;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_REQUEST_STATS_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send WMI_REQUEST_STATS cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "cmd request stats 0x%x vdev id %d pdev id %d\n",
+ param->stats_id, param->vdev_id, param->pdev_id);
+
+ return ret;
+}
+
+int ath11k_wmi_send_pdev_temperature_cmd(struct ath11k *ar)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_get_pdev_temperature_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_get_pdev_temperature_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_GET_TEMPERATURE_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->pdev_id = ar->pdev->pdev_id;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_GET_TEMPERATURE_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send WMI_PDEV_GET_TEMPERATURE cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "cmd pdev get temperature for pdev_id %d\n", ar->pdev->pdev_id);
+
+ return ret;
+}
+
+int ath11k_wmi_send_bcn_offload_control_cmd(struct ath11k *ar,
+ u32 vdev_id, u32 bcn_ctrl_op)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_bcn_offload_ctrl_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_bcn_offload_ctrl_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_BCN_OFFLOAD_CTRL_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->vdev_id = vdev_id;
+ cmd->bcn_ctrl_op = bcn_ctrl_op;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_BCN_OFFLOAD_CTRL_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send WMI_BCN_OFFLOAD_CTRL_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "cmd bcn offload ctrl vdev id %d ctrl_op %d\n",
+ vdev_id, bcn_ctrl_op);
+
+ return ret;
+}
+
+int ath11k_wmi_p2p_go_bcn_ie(struct ath11k *ar, u32 vdev_id,
+ const u8 *p2p_ie)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_p2p_go_set_beacon_ie_cmd *cmd;
+ size_t p2p_ie_len, aligned_len;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ int ret, len;
+
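+ /* p2p_ie points at a raw 802.11 element: byte 0 is the element ID
+ * and byte 1 the payload length, so the whole element spans
+ * p2p_ie[1] + 2 bytes. TLV byte arrays are padded to a 4-byte
+ * boundary.
+ */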
+ p2p_ie_len = p2p_ie[1] + 2;
+ aligned_len = roundup(p2p_ie_len, 4);
+
+ len = sizeof(*cmd) + TLV_HDR_SIZE + aligned_len;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_p2p_go_set_beacon_ie_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_P2P_GO_SET_BEACON_IE) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->vdev_id = vdev_id;
+ cmd->ie_buf_len = p2p_ie_len;
+
+ tlv = (struct wmi_tlv *)cmd->tlv;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
+ FIELD_PREP(WMI_TLV_LEN, aligned_len);
+ memcpy(tlv->value, p2p_ie, p2p_ie_len);
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_P2P_GO_SET_BEACON_IE);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send WMI_P2P_GO_SET_BEACON_IE\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath11k_wmi_bcn_tmpl(struct ath11k *ar, u32 vdev_id,
+ struct ieee80211_mutable_offsets *offs,
+ struct sk_buff *bcn, u32 ema_params)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_bcn_tmpl_cmd *cmd;
+ struct wmi_bcn_prb_info *bcn_prb_info;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ void *ptr;
+ int ret, len;
+ size_t aligned_len = roundup(bcn->len, 4);
+ struct ieee80211_vif *vif;
+ struct ath11k_vif *arvif = ath11k_mac_get_arvif(ar, vdev_id);
+
+ if (!arvif) {
+ ath11k_warn(ar->ab, "failed to find arvif with vdev id %d\n", vdev_id);
+ return -EINVAL;
+ }
+
+ vif = arvif->vif;
+
+ len = sizeof(*cmd) + sizeof(*bcn_prb_info) + TLV_HDR_SIZE + aligned_len;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_bcn_tmpl_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_BCN_TMPL_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->vdev_id = vdev_id;
+ cmd->tim_ie_offset = offs->tim_offset;
+
+ if (vif->bss_conf.csa_active) {
+ cmd->csa_switch_count_offset = offs->cntdwn_counter_offs[0];
+ cmd->ext_csa_switch_count_offset = offs->cntdwn_counter_offs[1];
+ }
+
+ cmd->buf_len = bcn->len;
+ cmd->mbssid_ie_offset = offs->mbssid_off;
+ cmd->ema_params = ema_params;
+
+ ptr = skb->data + sizeof(*cmd);
+
+ bcn_prb_info = ptr;
+ len = sizeof(*bcn_prb_info);
+ bcn_prb_info->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_BCN_PRB_INFO) |
+ FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+ bcn_prb_info->caps = 0;
+ bcn_prb_info->erp = 0;
+
+ ptr += sizeof(*bcn_prb_info);
+
+ tlv = ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
+ FIELD_PREP(WMI_TLV_LEN, aligned_len);
+ memcpy(tlv->value, bcn->data, bcn->len);
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_BCN_TMPL_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send WMI_BCN_TMPL_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd bcn tmpl");
+
+ return ret;
+}
+
+int ath11k_wmi_vdev_install_key(struct ath11k *ar,
+ struct wmi_vdev_install_key_arg *arg)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_vdev_install_key_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ int ret, len;
+ int key_len_aligned = roundup(arg->key_len, sizeof(u32));
+
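+ /* The key material travels in a WMI_TAG_ARRAY_BYTE TLV, so reserve
+ * the fixed command, one TLV header and the key length rounded up
+ * to a 4-byte boundary.
+ */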
+ len = sizeof(*cmd) + TLV_HDR_SIZE + key_len_aligned;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_vdev_install_key_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_INSTALL_KEY_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->vdev_id = arg->vdev_id;
+ ether_addr_copy(cmd->peer_macaddr.addr, arg->macaddr);
+ cmd->key_idx = arg->key_idx;
+ cmd->key_flags = arg->key_flags;
+ cmd->key_cipher = arg->key_cipher;
+ cmd->key_len = arg->key_len;
+ cmd->key_txmic_len = arg->key_txmic_len;
+ cmd->key_rxmic_len = arg->key_rxmic_len;
+
+ if (arg->key_rsc_counter)
+ memcpy(&cmd->key_rsc_counter, &arg->key_rsc_counter,
+ sizeof(struct wmi_key_seq_counter));
+
+ tlv = (struct wmi_tlv *)(skb->data + sizeof(*cmd));
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
+ FIELD_PREP(WMI_TLV_LEN, key_len_aligned);
+ if (arg->key_data)
+ memcpy(tlv->value, (u8 *)arg->key_data, key_len_aligned);
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_INSTALL_KEY_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send WMI_VDEV_INSTALL_KEY cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "cmd vdev install key idx %d cipher %d len %d\n",
+ arg->key_idx, arg->key_cipher, arg->key_len);
+
+ return ret;
+}
+
+static inline void
+ath11k_wmi_copy_peer_flags(struct wmi_peer_assoc_complete_cmd *cmd,
+ struct peer_assoc_params *param,
+ bool hw_crypto_disabled)
+{
+ cmd->peer_flags = 0;
+
+ if (param->is_wme_set) {
+ if (param->qos_flag)
+ cmd->peer_flags |= WMI_PEER_QOS;
+ if (param->apsd_flag)
+ cmd->peer_flags |= WMI_PEER_APSD;
+ if (param->ht_flag)
+ cmd->peer_flags |= WMI_PEER_HT;
+ if (param->bw_40)
+ cmd->peer_flags |= WMI_PEER_40MHZ;
+ if (param->bw_80)
+ cmd->peer_flags |= WMI_PEER_80MHZ;
+ if (param->bw_160)
+ cmd->peer_flags |= WMI_PEER_160MHZ;
+
+ /* Typically if STBC is enabled for VHT it should be enabled
+ * for HT as well
+ */
+ if (param->stbc_flag)
+ cmd->peer_flags |= WMI_PEER_STBC;
+
+ /* Typically if LDPC is enabled for VHT it should be enabled
+ * for HT as well
+ */
+ if (param->ldpc_flag)
+ cmd->peer_flags |= WMI_PEER_LDPC;
+
+ if (param->static_mimops_flag)
+ cmd->peer_flags |= WMI_PEER_STATIC_MIMOPS;
+ if (param->dynamic_mimops_flag)
+ cmd->peer_flags |= WMI_PEER_DYN_MIMOPS;
+ if (param->spatial_mux_flag)
+ cmd->peer_flags |= WMI_PEER_SPATIAL_MUX;
+ if (param->vht_flag)
+ cmd->peer_flags |= WMI_PEER_VHT;
+ if (param->he_flag)
+ cmd->peer_flags |= WMI_PEER_HE;
+ if (param->twt_requester)
+ cmd->peer_flags |= WMI_PEER_TWT_REQ;
+ if (param->twt_responder)
+ cmd->peer_flags |= WMI_PEER_TWT_RESP;
+ }
+
+ /* Suppress authorization for all AUTH modes that need 4-way handshake
+ * (during re-association).
+ * Authorization will be done for these modes on key installation.
+ */
+ if (param->auth_flag)
+ cmd->peer_flags |= WMI_PEER_AUTH;
+ if (param->need_ptk_4_way) {
+ cmd->peer_flags |= WMI_PEER_NEED_PTK_4_WAY;
+ if (!hw_crypto_disabled && param->is_assoc)
+ cmd->peer_flags &= ~WMI_PEER_AUTH;
+ }
+ if (param->need_gtk_2_way)
+ cmd->peer_flags |= WMI_PEER_NEED_GTK_2_WAY;
+ /* safe mode bypass the 4-way handshake */
+ if (param->safe_mode_enabled)
+ cmd->peer_flags &= ~(WMI_PEER_NEED_PTK_4_WAY |
+ WMI_PEER_NEED_GTK_2_WAY);
+
+ if (param->is_pmf_enabled)
+ cmd->peer_flags |= WMI_PEER_PMF;
+
+ /* Disable A-MSDU for station transmit, if the user configures it.
+ * Disable A-MSDU for AP transmit to 11n stations, if the user
+ * configures it.
+ * if (param->amsdu_disable) - add after FW support
+ */
+
+ /* Target asserts if node is marked HT and all MCS rates are set
+ * to 0. Mark the node as non-HT if all the MCS rates are disabled
+ * through iwpriv.
+ */
+ if (param->peer_ht_rates.num_rates == 0)
+ cmd->peer_flags &= ~WMI_PEER_HT;
+}
+
+int ath11k_wmi_send_peer_assoc_cmd(struct ath11k *ar,
+ struct peer_assoc_params *param)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_peer_assoc_complete_cmd *cmd;
+ struct wmi_vht_rate_set *mcs;
+ struct wmi_he_rate_set *he_mcs;
+ struct sk_buff *skb;
+ struct wmi_tlv *tlv;
+ void *ptr;
+ u32 peer_legacy_rates_align;
+ u32 peer_ht_rates_align;
+ int i, ret, len;
+
+ peer_legacy_rates_align = roundup(param->peer_legacy_rates.num_rates,
+ sizeof(u32));
+ peer_ht_rates_align = roundup(param->peer_ht_rates.num_rates,
+ sizeof(u32));
+
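+ /* Buffer layout: the fixed peer assoc command, a byte-array TLV
+ * carrying the legacy rates, a byte-array TLV carrying the HT
+ * rates, one VHT rate set struct and an array-struct TLV holding
+ * one HE rate set per peer_he_mcs_count.
+ */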
+ len = sizeof(*cmd) +
+ TLV_HDR_SIZE + (peer_legacy_rates_align * sizeof(u8)) +
+ TLV_HDR_SIZE + (peer_ht_rates_align * sizeof(u8)) +
+ sizeof(*mcs) + TLV_HDR_SIZE +
+ (sizeof(*he_mcs) * param->peer_he_mcs_count);
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ ptr = skb->data;
+
+ cmd = ptr;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_PEER_ASSOC_COMPLETE_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->vdev_id = param->vdev_id;
+
+ cmd->peer_new_assoc = param->peer_new_assoc;
+ cmd->peer_associd = param->peer_associd;
+
+ ath11k_wmi_copy_peer_flags(cmd, param,
+ test_bit(ATH11K_FLAG_HW_CRYPTO_DISABLED,
+ &ar->ab->dev_flags));
+
+ ether_addr_copy(cmd->peer_macaddr.addr, param->peer_mac);
+
+ cmd->peer_rate_caps = param->peer_rate_caps;
+ cmd->peer_caps = param->peer_caps;
+ cmd->peer_listen_intval = param->peer_listen_intval;
+ cmd->peer_ht_caps = param->peer_ht_caps;
+ cmd->peer_max_mpdu = param->peer_max_mpdu;
+ cmd->peer_mpdu_density = param->peer_mpdu_density;
+ cmd->peer_vht_caps = param->peer_vht_caps;
+ cmd->peer_phymode = param->peer_phymode;
+
+ /* Update 11ax capabilities */
+ cmd->peer_he_cap_info = param->peer_he_cap_macinfo[0];
+ cmd->peer_he_cap_info_ext = param->peer_he_cap_macinfo[1];
+ cmd->peer_he_cap_info_internal = param->peer_he_cap_macinfo_internal;
+ cmd->peer_he_caps_6ghz = param->peer_he_caps_6ghz;
+ cmd->peer_he_ops = param->peer_he_ops;
+ memcpy(&cmd->peer_he_cap_phy, &param->peer_he_cap_phyinfo,
+ sizeof(param->peer_he_cap_phyinfo));
+ memcpy(&cmd->peer_ppet, &param->peer_ppet,
+ sizeof(param->peer_ppet));
+
+ /* Update peer legacy rate information */
+ ptr += sizeof(*cmd);
+
+ tlv = ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
+ FIELD_PREP(WMI_TLV_LEN, peer_legacy_rates_align);
+
+ ptr += TLV_HDR_SIZE;
+
+ cmd->num_peer_legacy_rates = param->peer_legacy_rates.num_rates;
+ memcpy(ptr, param->peer_legacy_rates.rates,
+ param->peer_legacy_rates.num_rates);
+
+ /* Update peer HT rate information */
+ ptr += peer_legacy_rates_align;
+
+ tlv = ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
+ FIELD_PREP(WMI_TLV_LEN, peer_ht_rates_align);
+ ptr += TLV_HDR_SIZE;
+ cmd->num_peer_ht_rates = param->peer_ht_rates.num_rates;
+ memcpy(ptr, param->peer_ht_rates.rates,
+ param->peer_ht_rates.num_rates);
+
+ /* VHT Rates */
+ ptr += peer_ht_rates_align;
+
+ mcs = ptr;
+
+ mcs->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VHT_RATE_SET) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*mcs) - TLV_HDR_SIZE);
+
+ cmd->peer_nss = param->peer_nss;
+
+ /* Update bandwidth-NSS mapping */
+ cmd->peer_bw_rxnss_override = 0;
+ cmd->peer_bw_rxnss_override |= param->peer_bw_rxnss_override;
+
+ if (param->vht_capable) {
+ /* firmware interprets mcs->tx_mcs_set field as peer's
+ * RX capability
+ */
+ mcs->tx_max_rate = param->rx_max_rate;
+ mcs->tx_mcs_set = param->rx_mcs_set;
+ mcs->rx_max_rate = param->tx_max_rate;
+ mcs->rx_mcs_set = param->tx_mcs_set;
+ }
+
+ /* HE Rates */
+ cmd->peer_he_mcs = param->peer_he_mcs_count;
+ cmd->min_data_rate = param->min_data_rate;
+
+ ptr += sizeof(*mcs);
+
+ len = param->peer_he_mcs_count * sizeof(*he_mcs);
+
+ tlv = ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
+ FIELD_PREP(WMI_TLV_LEN, len);
+ ptr += TLV_HDR_SIZE;
+
+ /* Loop through the HE rate set */
+ for (i = 0; i < param->peer_he_mcs_count; i++) {
+ he_mcs = ptr;
+ he_mcs->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_HE_RATE_SET) |
+ FIELD_PREP(WMI_TLV_LEN,
+ sizeof(*he_mcs) - TLV_HDR_SIZE);
+
+ /* firmware interprets mcs->rx_mcs_set field as peer's
+ * RX capability
+ */
+ he_mcs->rx_mcs_set = param->peer_he_rx_mcs_set[i];
+ he_mcs->tx_mcs_set = param->peer_he_tx_mcs_set[i];
+ ptr += sizeof(*he_mcs);
+ }
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PEER_ASSOC_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send WMI_PEER_ASSOC_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "cmd peer assoc vdev id %d assoc id %d peer mac %pM peer_flags %x rate_caps %x peer_caps %x listen_intval %d ht_caps %x max_mpdu %d nss %d phymode %d peer_mpdu_density %d vht_caps %x he cap_info %x he ops %x he cap_info_ext %x he phy %x %x %x peer_bw_rxnss_override %x\n",
+ cmd->vdev_id, cmd->peer_associd, param->peer_mac,
+ cmd->peer_flags, cmd->peer_rate_caps, cmd->peer_caps,
+ cmd->peer_listen_intval, cmd->peer_ht_caps,
+ cmd->peer_max_mpdu, cmd->peer_nss, cmd->peer_phymode,
+ cmd->peer_mpdu_density,
+ cmd->peer_vht_caps, cmd->peer_he_cap_info,
+ cmd->peer_he_ops, cmd->peer_he_cap_info_ext,
+ cmd->peer_he_cap_phy[0], cmd->peer_he_cap_phy[1],
+ cmd->peer_he_cap_phy[2],
+ cmd->peer_bw_rxnss_override);
+
+ return ret;
+}
+
+void ath11k_wmi_start_scan_init(struct ath11k *ar,
+ struct scan_req_params *arg)
+{
+ /* setup commonly used values */
+ arg->scan_req_id = 1;
+ if (ar->state_11d == ATH11K_11D_PREPARING)
+ arg->scan_priority = WMI_SCAN_PRIORITY_MEDIUM;
+ else
+ arg->scan_priority = WMI_SCAN_PRIORITY_LOW;
+ arg->dwell_time_active = 50;
+ arg->dwell_time_active_2g = 0;
+ arg->dwell_time_passive = 150;
+ arg->dwell_time_active_6g = 40;
+ arg->dwell_time_passive_6g = 30;
+ arg->min_rest_time = 50;
+ arg->max_rest_time = 500;
+ arg->repeat_probe_time = 0;
+ arg->probe_spacing_time = 0;
+ arg->idle_time = 0;
+ arg->max_scan_time = 20000;
+ arg->probe_delay = 5;
+ arg->notify_scan_events = WMI_SCAN_EVENT_STARTED |
+ WMI_SCAN_EVENT_COMPLETED |
+ WMI_SCAN_EVENT_BSS_CHANNEL |
+ WMI_SCAN_EVENT_FOREIGN_CHAN |
+ WMI_SCAN_EVENT_DEQUEUED;
+ arg->scan_f_chan_stat_evnt = 1;
+
+ if (test_bit(WMI_TLV_SERVICE_PASSIVE_SCAN_START_TIME_ENHANCE,
+ ar->ab->wmi_ab.svc_map))
+ arg->scan_ctrl_flags_ext |=
+ WMI_SCAN_FLAG_EXT_PASSIVE_SCAN_START_TIME_ENHANCE;
+
+ arg->num_bssid = 1;
+
+ /* fill bssid_list[0] with 0xff, otherwise bssid and RA will be
+ * ZEROs in probe request
+ */
+ eth_broadcast_addr(arg->bssid_list[0].addr);
+}
+
+static inline void
+ath11k_wmi_copy_scan_event_cntrl_flags(struct wmi_start_scan_cmd *cmd,
+ struct scan_req_params *param)
+{
+ /* Scan events subscription */
+ if (param->scan_ev_started)
+ cmd->notify_scan_events |= WMI_SCAN_EVENT_STARTED;
+ if (param->scan_ev_completed)
+ cmd->notify_scan_events |= WMI_SCAN_EVENT_COMPLETED;
+ if (param->scan_ev_bss_chan)
+ cmd->notify_scan_events |= WMI_SCAN_EVENT_BSS_CHANNEL;
+ if (param->scan_ev_foreign_chan)
+ cmd->notify_scan_events |= WMI_SCAN_EVENT_FOREIGN_CHAN;
+ if (param->scan_ev_dequeued)
+ cmd->notify_scan_events |= WMI_SCAN_EVENT_DEQUEUED;
+ if (param->scan_ev_preempted)
+ cmd->notify_scan_events |= WMI_SCAN_EVENT_PREEMPTED;
+ if (param->scan_ev_start_failed)
+ cmd->notify_scan_events |= WMI_SCAN_EVENT_START_FAILED;
+ if (param->scan_ev_restarted)
+ cmd->notify_scan_events |= WMI_SCAN_EVENT_RESTARTED;
+ if (param->scan_ev_foreign_chn_exit)
+ cmd->notify_scan_events |= WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT;
+ if (param->scan_ev_suspended)
+ cmd->notify_scan_events |= WMI_SCAN_EVENT_SUSPENDED;
+ if (param->scan_ev_resumed)
+ cmd->notify_scan_events |= WMI_SCAN_EVENT_RESUMED;
+
+ /* Set scan control flags */
+ cmd->scan_ctrl_flags = 0;
+ if (param->scan_f_passive)
+ cmd->scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE;
+ if (param->scan_f_strict_passive_pch)
+ cmd->scan_ctrl_flags |= WMI_SCAN_FLAG_STRICT_PASSIVE_ON_PCHN;
+ if (param->scan_f_promisc_mode)
+ cmd->scan_ctrl_flags |= WMI_SCAN_FILTER_PROMISCUOS;
+ if (param->scan_f_capture_phy_err)
+ cmd->scan_ctrl_flags |= WMI_SCAN_CAPTURE_PHY_ERROR;
+ if (param->scan_f_half_rate)
+ cmd->scan_ctrl_flags |= WMI_SCAN_FLAG_HALF_RATE_SUPPORT;
+ if (param->scan_f_quarter_rate)
+ cmd->scan_ctrl_flags |= WMI_SCAN_FLAG_QUARTER_RATE_SUPPORT;
+ if (param->scan_f_cck_rates)
+ cmd->scan_ctrl_flags |= WMI_SCAN_ADD_CCK_RATES;
+ if (param->scan_f_ofdm_rates)
+ cmd->scan_ctrl_flags |= WMI_SCAN_ADD_OFDM_RATES;
+ if (param->scan_f_chan_stat_evnt)
+ cmd->scan_ctrl_flags |= WMI_SCAN_CHAN_STAT_EVENT;
+ if (param->scan_f_filter_prb_req)
+ cmd->scan_ctrl_flags |= WMI_SCAN_FILTER_PROBE_REQ;
+ if (param->scan_f_bcast_probe)
+ cmd->scan_ctrl_flags |= WMI_SCAN_ADD_BCAST_PROBE_REQ;
+ if (param->scan_f_offchan_mgmt_tx)
+ cmd->scan_ctrl_flags |= WMI_SCAN_OFFCHAN_MGMT_TX;
+ if (param->scan_f_offchan_data_tx)
+ cmd->scan_ctrl_flags |= WMI_SCAN_OFFCHAN_DATA_TX;
+ if (param->scan_f_force_active_dfs_chn)
+ cmd->scan_ctrl_flags |= WMI_SCAN_FLAG_FORCE_ACTIVE_ON_DFS;
+ if (param->scan_f_add_tpc_ie_in_probe)
+ cmd->scan_ctrl_flags |= WMI_SCAN_ADD_TPC_IE_IN_PROBE_REQ;
+ if (param->scan_f_add_ds_ie_in_probe)
+ cmd->scan_ctrl_flags |= WMI_SCAN_ADD_DS_IE_IN_PROBE_REQ;
+ if (param->scan_f_add_spoofed_mac_in_probe)
+ cmd->scan_ctrl_flags |= WMI_SCAN_ADD_SPOOF_MAC_IN_PROBE_REQ;
+ if (param->scan_f_add_rand_seq_in_probe)
+ cmd->scan_ctrl_flags |= WMI_SCAN_RANDOM_SEQ_NO_IN_PROBE_REQ;
+ if (param->scan_f_en_ie_whitelist_in_probe)
+ cmd->scan_ctrl_flags |=
+ WMI_SCAN_ENABLE_IE_WHTELIST_IN_PROBE_REQ;
+
+ /* for adaptive scan mode using 3 bits (bits 21 - 23) */
+ WMI_SCAN_SET_DWELL_MODE(cmd->scan_ctrl_flags,
+ param->adaptive_dwell_time_mode);
+
+ cmd->scan_ctrl_flags_ext = param->scan_ctrl_flags_ext;
+}
+
+int ath11k_wmi_send_scan_start_cmd(struct ath11k *ar,
+ struct scan_req_params *params)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_start_scan_cmd *cmd;
+ struct wmi_ssid *ssid = NULL;
+ struct wmi_mac_addr *bssid;
+ struct sk_buff *skb;
+ struct wmi_tlv *tlv;
+ void *ptr;
+ int i, ret, len;
+ u32 *tmp_ptr;
+ u16 extraie_len_with_pad = 0;
+ struct hint_short_ssid *s_ssid = NULL;
+ struct hint_bssid *hint_bssid = NULL;
+
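+ /* A TLV header is reserved for each array below even when the
+ * array is empty, so the parser always sees the tags in the
+ * expected order.
+ */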
+ len = sizeof(*cmd);
+
+ len += TLV_HDR_SIZE;
+ if (params->num_chan)
+ len += params->num_chan * sizeof(u32);
+
+ len += TLV_HDR_SIZE;
+ if (params->num_ssids)
+ len += params->num_ssids * sizeof(*ssid);
+
+ len += TLV_HDR_SIZE;
+ if (params->num_bssid)
+ len += sizeof(*bssid) * params->num_bssid;
+
+ len += TLV_HDR_SIZE;
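+ /* Extra IEs are carried only when their length fits the 16-bit TLV
+ * length field; larger IEs are dropped.
+ */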
+ if (params->extraie.len && params->extraie.len <= 0xFFFF)
+ extraie_len_with_pad =
+ roundup(params->extraie.len, sizeof(u32));
+ len += extraie_len_with_pad;
+
+ if (params->num_hint_bssid)
+ len += TLV_HDR_SIZE +
+ params->num_hint_bssid * sizeof(struct hint_bssid);
+
+ if (params->num_hint_s_ssid)
+ len += TLV_HDR_SIZE +
+ params->num_hint_s_ssid * sizeof(struct hint_short_ssid);
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ ptr = skb->data;
+
+ cmd = ptr;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_START_SCAN_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->scan_id = params->scan_id;
+ cmd->scan_req_id = params->scan_req_id;
+ cmd->vdev_id = params->vdev_id;
+ cmd->scan_priority = params->scan_priority;
+ cmd->notify_scan_events = params->notify_scan_events;
+
+ ath11k_wmi_copy_scan_event_cntrl_flags(cmd, params);
+
+ cmd->dwell_time_active = params->dwell_time_active;
+ cmd->dwell_time_active_2g = params->dwell_time_active_2g;
+ cmd->dwell_time_passive = params->dwell_time_passive;
+ cmd->dwell_time_active_6g = params->dwell_time_active_6g;
+ cmd->dwell_time_passive_6g = params->dwell_time_passive_6g;
+ cmd->min_rest_time = params->min_rest_time;
+ cmd->max_rest_time = params->max_rest_time;
+ cmd->repeat_probe_time = params->repeat_probe_time;
+ cmd->probe_spacing_time = params->probe_spacing_time;
+ cmd->idle_time = params->idle_time;
+ cmd->max_scan_time = params->max_scan_time;
+ cmd->probe_delay = params->probe_delay;
+ cmd->burst_duration = params->burst_duration;
+ cmd->num_chan = params->num_chan;
+ cmd->num_bssid = params->num_bssid;
+ cmd->num_ssids = params->num_ssids;
+ cmd->ie_len = params->extraie.len;
+ cmd->n_probes = params->n_probes;
+ ether_addr_copy(cmd->mac_addr.addr, params->mac_addr.addr);
+ ether_addr_copy(cmd->mac_mask.addr, params->mac_mask.addr);
+
+ ptr += sizeof(*cmd);
+
+ len = params->num_chan * sizeof(u32);
+
+ tlv = ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_UINT32) |
+ FIELD_PREP(WMI_TLV_LEN, len);
+ ptr += TLV_HDR_SIZE;
+ tmp_ptr = ptr;
+
+ for (i = 0; i < params->num_chan; ++i)
+ tmp_ptr[i] = params->chan_list[i];
+
+ ptr += len;
+
+ len = params->num_ssids * sizeof(*ssid);
+ tlv = ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_FIXED_STRUCT) |
+ FIELD_PREP(WMI_TLV_LEN, len);
+
+ ptr += TLV_HDR_SIZE;
+
+ if (params->num_ssids) {
+ ssid = ptr;
+ for (i = 0; i < params->num_ssids; ++i) {
+ ssid->ssid_len = params->ssid[i].length;
+ memcpy(ssid->ssid, params->ssid[i].ssid,
+ params->ssid[i].length);
+ ssid++;
+ }
+ }
+
+ ptr += (params->num_ssids * sizeof(*ssid));
+ len = params->num_bssid * sizeof(*bssid);
+ tlv = ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_FIXED_STRUCT) |
+ FIELD_PREP(WMI_TLV_LEN, len);
+
+ ptr += TLV_HDR_SIZE;
+ bssid = ptr;
+
+ if (params->num_bssid) {
+ for (i = 0; i < params->num_bssid; ++i) {
+ ether_addr_copy(bssid->addr,
+ params->bssid_list[i].addr);
+ bssid++;
+ }
+ }
+
+ ptr += params->num_bssid * sizeof(*bssid);
+
+ len = extraie_len_with_pad;
+ tlv = ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
+ FIELD_PREP(WMI_TLV_LEN, len);
+ ptr += TLV_HDR_SIZE;
+
+ if (extraie_len_with_pad)
+ memcpy(ptr, params->extraie.ptr,
+ params->extraie.len);
+
+ ptr += extraie_len_with_pad;
+
+ if (params->num_hint_s_ssid) {
+ len = params->num_hint_s_ssid * sizeof(struct hint_short_ssid);
+ tlv = ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_FIXED_STRUCT) |
+ FIELD_PREP(WMI_TLV_LEN, len);
+ ptr += TLV_HDR_SIZE;
+ s_ssid = ptr;
+ for (i = 0; i < params->num_hint_s_ssid; ++i) {
+ s_ssid->freq_flags = params->hint_s_ssid[i].freq_flags;
+ s_ssid->short_ssid = params->hint_s_ssid[i].short_ssid;
+ s_ssid++;
+ }
+ ptr += len;
+ }
+
+ if (params->num_hint_bssid) {
+ len = params->num_hint_bssid * sizeof(struct hint_bssid);
+ tlv = ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_FIXED_STRUCT) |
+ FIELD_PREP(WMI_TLV_LEN, len);
+ ptr += TLV_HDR_SIZE;
+ hint_bssid = ptr;
+ for (i = 0; i < params->num_hint_bssid; ++i) {
+ hint_bssid->freq_flags =
+ params->hint_bssid[i].freq_flags;
+ ether_addr_copy(&params->hint_bssid[i].bssid.addr[0],
+ &hint_bssid->bssid.addr[0]);
+ hint_bssid++;
+ }
+ }
+
+ ret = ath11k_wmi_cmd_send(wmi, skb,
+ WMI_START_SCAN_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send WMI_START_SCAN_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd start scan");
+
+ return ret;
+}
+
+int ath11k_wmi_send_vdev_set_tpc_power(struct ath11k *ar,
+ u32 vdev_id,
+ struct ath11k_reg_tpc_power_info *param)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_vdev_set_tpc_power_cmd *cmd;
+ struct wmi_vdev_ch_power_info *ch;
+ struct sk_buff *skb;
+ struct wmi_tlv *tlv;
+ u8 *ptr;
+ int i, ret, len, array_len;
+
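+ /* One wmi_vdev_ch_power_info entry follows the fixed command for
+ * each advertised power level.
+ */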
+ array_len = sizeof(*ch) * param->num_pwr_levels;
+ len = sizeof(*cmd) + TLV_HDR_SIZE + array_len;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ ptr = skb->data;
+
+ cmd = (struct wmi_vdev_set_tpc_power_cmd *)ptr;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_SET_TPC_POWER_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->vdev_id = vdev_id;
+ cmd->psd_power = param->is_psd_power;
+ cmd->eirp_power = param->eirp_power;
+ cmd->power_type_6ghz = param->ap_power_type;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "tpc vdev id %d is psd power %d eirp power %d 6 ghz power type %d\n",
+ vdev_id, param->is_psd_power, param->eirp_power, param->ap_power_type);
+
+ ptr += sizeof(*cmd);
+ tlv = (struct wmi_tlv *)ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
+ FIELD_PREP(WMI_TLV_LEN, array_len);
+
+ ptr += TLV_HDR_SIZE;
+ ch = (struct wmi_vdev_ch_power_info *)ptr;
+
+ for (i = 0; i < param->num_pwr_levels; i++, ch++) {
+ ch->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_VDEV_CH_POWER_INFO) |
+ FIELD_PREP(WMI_TLV_LEN,
+ sizeof(*ch) - TLV_HDR_SIZE);
+
+ ch->chan_cfreq = param->chan_power_info[i].chan_cfreq;
+ ch->tx_power = param->chan_power_info[i].tx_power;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "tpc chan freq %d TX power %d\n",
+ ch->chan_cfreq, ch->tx_power);
+ }
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_SET_TPC_POWER_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send WMI_VDEV_SET_TPC_POWER_CMDID\n");
+ dev_kfree_skb(skb);
+ return ret;
+ }
+
+ return 0;
+}
+
+int ath11k_wmi_send_scan_stop_cmd(struct ath11k *ar,
+ struct scan_cancel_param *param)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_stop_scan_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_stop_scan_cmd *)skb->data;
+
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_STOP_SCAN_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->vdev_id = param->vdev_id;
+ cmd->requestor = param->requester;
+ cmd->scan_id = param->scan_id;
+ cmd->pdev_id = param->pdev_id;
+ /* pick the firmware stop type matching the cancel request */
+ if (param->req_type == WLAN_SCAN_CANCEL_PDEV_ALL) {
+ /* Cancelling all scans */
+ cmd->req_type = WMI_SCAN_STOP_ALL;
+ } else if (param->req_type == WLAN_SCAN_CANCEL_VDEV_ALL) {
+ /* Cancelling VAP scans */
+ cmd->req_type = WMI_SCN_STOP_VAP_ALL;
+ } else if (param->req_type == WLAN_SCAN_CANCEL_SINGLE) {
+ /* Cancelling specific scan */
+ cmd->req_type = WMI_SCAN_STOP_ONE;
+ } else {
+ ath11k_warn(ar->ab, "invalid scan cancel param %d",
+ param->req_type);
+ dev_kfree_skb(skb);
+ return -EINVAL;
+ }
+
+ ret = ath11k_wmi_cmd_send(wmi, skb,
+ WMI_STOP_SCAN_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send WMI_STOP_SCAN_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd stop scan");
+
+ return ret;
+}
+
+int ath11k_wmi_send_scan_chan_list_cmd(struct ath11k *ar,
+ struct scan_chan_list_params *chan_list)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_scan_chan_list_cmd *cmd;
+ struct sk_buff *skb;
+ struct wmi_channel *chan_info;
+ struct channel_param *tchan_info;
+ struct wmi_tlv *tlv;
+ void *ptr;
+ int i, ret, len;
+ u16 num_send_chans, num_sends = 0, max_chan_limit = 0;
+ u32 *reg1, *reg2;
+
+ tchan_info = chan_list->ch_param;
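+ /* The channel list may not fit in a single WMI message, so send it
+ * in chunks bounded by max_msg_len; every send after the first sets
+ * the append flag so firmware extends the list instead of replacing
+ * it.
+ */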
+ while (chan_list->nallchans) {
+ len = sizeof(*cmd) + TLV_HDR_SIZE;
+ max_chan_limit = (wmi->wmi_ab->max_msg_len[ar->pdev_idx] - len) /
+ sizeof(*chan_info);
+
+ if (chan_list->nallchans > max_chan_limit)
+ num_send_chans = max_chan_limit;
+ else
+ num_send_chans = chan_list->nallchans;
+
+ chan_list->nallchans -= num_send_chans;
+ len += sizeof(*chan_info) * num_send_chans;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_scan_chan_list_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_SCAN_CHAN_LIST_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->pdev_id = chan_list->pdev_id;
+ cmd->num_scan_chans = num_send_chans;
+ if (num_sends)
+ cmd->flags |= WMI_APPEND_TO_EXISTING_CHAN_LIST_FLAG;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "no.of chan = %d len = %d pdev_id = %d num_sends = %d\n",
+ num_send_chans, len, cmd->pdev_id, num_sends);
+
+ ptr = skb->data + sizeof(*cmd);
+
+ len = sizeof(*chan_info) * num_send_chans;
+ tlv = ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
+ FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+ ptr += TLV_HDR_SIZE;
+
+ for (i = 0; i < num_send_chans; ++i) {
+ chan_info = ptr;
+ memset(chan_info, 0, sizeof(*chan_info));
+ len = sizeof(*chan_info);
+ chan_info->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_CHANNEL) |
+ FIELD_PREP(WMI_TLV_LEN,
+ len - TLV_HDR_SIZE);
+
+ reg1 = &chan_info->reg_info_1;
+ reg2 = &chan_info->reg_info_2;
+ chan_info->mhz = tchan_info->mhz;
+ chan_info->band_center_freq1 = tchan_info->cfreq1;
+ chan_info->band_center_freq2 = tchan_info->cfreq2;
+
+ if (tchan_info->is_chan_passive)
+ chan_info->info |= WMI_CHAN_INFO_PASSIVE;
+ if (tchan_info->allow_he)
+ chan_info->info |= WMI_CHAN_INFO_ALLOW_HE;
+ else if (tchan_info->allow_vht)
+ chan_info->info |= WMI_CHAN_INFO_ALLOW_VHT;
+ else if (tchan_info->allow_ht)
+ chan_info->info |= WMI_CHAN_INFO_ALLOW_HT;
+ if (tchan_info->half_rate)
+ chan_info->info |= WMI_CHAN_INFO_HALF_RATE;
+ if (tchan_info->quarter_rate)
+ chan_info->info |= WMI_CHAN_INFO_QUARTER_RATE;
+ if (tchan_info->psc_channel)
+ chan_info->info |= WMI_CHAN_INFO_PSC;
+ if (tchan_info->dfs_set)
+ chan_info->info |= WMI_CHAN_INFO_DFS;
+
+ chan_info->info |= FIELD_PREP(WMI_CHAN_INFO_MODE,
+ tchan_info->phy_mode);
+ *reg1 |= FIELD_PREP(WMI_CHAN_REG_INFO1_MIN_PWR,
+ tchan_info->minpower);
+ *reg1 |= FIELD_PREP(WMI_CHAN_REG_INFO1_MAX_PWR,
+ tchan_info->maxpower);
+ *reg1 |= FIELD_PREP(WMI_CHAN_REG_INFO1_MAX_REG_PWR,
+ tchan_info->maxregpower);
+ *reg1 |= FIELD_PREP(WMI_CHAN_REG_INFO1_REG_CLS,
+ tchan_info->reg_class_id);
+ *reg2 |= FIELD_PREP(WMI_CHAN_REG_INFO2_ANT_MAX,
+ tchan_info->antennamax);
+ *reg2 |= FIELD_PREP(WMI_CHAN_REG_INFO2_MAX_TX_PWR,
+ tchan_info->maxregpower);
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "chan scan list chan[%d] = %u, chan_info->info %8x\n",
+ i, chan_info->mhz, chan_info->info);
+
+ ptr += sizeof(*chan_info);
+
+ tchan_info++;
+ }
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_SCAN_CHAN_LIST_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send WMI_SCAN_CHAN_LIST cmd\n");
+ dev_kfree_skb(skb);
+ return ret;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd scan chan list channels %d",
+ num_send_chans);
+
+ num_sends++;
+ }
+
+ return 0;
+}
+
+int ath11k_wmi_send_wmm_update_cmd_tlv(struct ath11k *ar, u32 vdev_id,
+ struct wmi_wmm_params_all_arg *param,
+ enum wmi_wmm_params_type wmm_param_type)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_vdev_set_wmm_params_cmd *cmd;
+ struct wmi_wmm_params *wmm_param;
+ struct wmi_wmm_params_arg *wmi_wmm_arg;
+ struct sk_buff *skb;
+ int ret, ac;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_vdev_set_wmm_params_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_VDEV_SET_WMM_PARAMS_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->vdev_id = vdev_id;
+ cmd->wmm_param_type = wmm_param_type;
+
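+ /* Walk the four WME access categories (BE, BK, VI, VO); each gets
+ * its own embedded wmi_wmm_params TLV inside the command.
+ */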
+ for (ac = 0; ac < WME_NUM_AC; ac++) {
+ switch (ac) {
+ case WME_AC_BE:
+ wmi_wmm_arg = &param->ac_be;
+ break;
+ case WME_AC_BK:
+ wmi_wmm_arg = &param->ac_bk;
+ break;
+ case WME_AC_VI:
+ wmi_wmm_arg = &param->ac_vi;
+ break;
+ case WME_AC_VO:
+ wmi_wmm_arg = &param->ac_vo;
+ break;
+ }
+
+ wmm_param = (struct wmi_wmm_params *)&cmd->wmm_params[ac];
+ wmm_param->tlv_header =
+ FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_VDEV_SET_WMM_PARAMS_CMD) |
+ FIELD_PREP(WMI_TLV_LEN,
+ sizeof(*wmm_param) - TLV_HDR_SIZE);
+
+ wmm_param->aifs = wmi_wmm_arg->aifs;
+ wmm_param->cwmin = wmi_wmm_arg->cwmin;
+ wmm_param->cwmax = wmi_wmm_arg->cwmax;
+ wmm_param->txoplimit = wmi_wmm_arg->txop;
+ wmm_param->acm = wmi_wmm_arg->acm;
+ wmm_param->no_ack = wmi_wmm_arg->no_ack;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "wmm set type %d ac %d aifs %d cwmin %d cwmax %d txop %d acm %d no_ack %d\n",
+ wmm_param_type, ac, wmm_param->aifs, wmm_param->cwmin,
+ wmm_param->cwmax, wmm_param->txoplimit,
+ wmm_param->acm, wmm_param->no_ack);
+ }
+ ret = ath11k_wmi_cmd_send(wmi, skb,
+ WMI_VDEV_SET_WMM_PARAMS_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send WMI_VDEV_SET_WMM_PARAMS_CMDID");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd vdev set wmm params");
+
+ return ret;
+}
+
+int ath11k_wmi_send_dfs_phyerr_offload_enable_cmd(struct ath11k *ar,
+ u32 pdev_id)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_dfs_phyerr_offload_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_dfs_phyerr_offload_cmd *)skb->data;
+ cmd->tlv_header =
+ FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->pdev_id = pdev_id;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb,
+ WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "cmd pdev dfs phyerr offload enable pdev id %d\n", pdev_id);
+
+ return ret;
+}
+
+int ath11k_wmi_delba_send(struct ath11k *ar, u32 vdev_id, const u8 *mac,
+ u32 tid, u32 initiator, u32 reason)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_delba_send_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_delba_send_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_DELBA_SEND_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->vdev_id = vdev_id;
+ ether_addr_copy(cmd->peer_macaddr.addr, mac);
+ cmd->tid = tid;
+ cmd->initiator = initiator;
+ cmd->reasoncode = reason;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_DELBA_SEND_CMDID);
+
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send WMI_DELBA_SEND_CMDID cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "cmd delba send vdev_id 0x%X mac_addr %pM tid %u initiator %u reason %u\n",
+ vdev_id, mac, tid, initiator, reason);
+
+ return ret;
+}
+
+int ath11k_wmi_addba_set_resp(struct ath11k *ar, u32 vdev_id, const u8 *mac,
+ u32 tid, u32 status)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_addba_setresponse_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_addba_setresponse_cmd *)skb->data;
+ cmd->tlv_header =
+ FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ADDBA_SETRESPONSE_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->vdev_id = vdev_id;
+ ether_addr_copy(cmd->peer_macaddr.addr, mac);
+ cmd->tid = tid;
+ cmd->statuscode = status;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_ADDBA_SET_RESP_CMDID);
+
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send WMI_ADDBA_SET_RESP_CMDID cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "cmd addba set resp vdev_id 0x%X mac_addr %pM tid %u status %u\n",
+ vdev_id, mac, tid, status);
+
+ return ret;
+}
+
+int ath11k_wmi_addba_send(struct ath11k *ar, u32 vdev_id, const u8 *mac,
+ u32 tid, u32 buf_size)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_addba_send_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_addba_send_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ADDBA_SEND_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->vdev_id = vdev_id;
+ ether_addr_copy(cmd->peer_macaddr.addr, mac);
+ cmd->tid = tid;
+ cmd->buffersize = buf_size;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_ADDBA_SEND_CMDID);
+
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send WMI_ADDBA_SEND_CMDID cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "cmd addba send vdev_id 0x%X mac_addr %pM tid %u bufsize %u\n",
+ vdev_id, mac, tid, buf_size);
+
+ return ret;
+}
+
+int ath11k_wmi_addba_clear_resp(struct ath11k *ar, u32 vdev_id, const u8 *mac)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_addba_clear_resp_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_addba_clear_resp_cmd *)skb->data;
+ cmd->tlv_header =
+ FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ADDBA_CLEAR_RESP_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->vdev_id = vdev_id;
+ ether_addr_copy(cmd->peer_macaddr.addr, mac);
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_ADDBA_CLEAR_RESP_CMDID);
+
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send WMI_ADDBA_CLEAR_RESP_CMDID cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "cmd addba clear resp vdev_id 0x%X mac_addr %pM\n",
+ vdev_id, mac);
+
+ return ret;
+}
+
+int ath11k_wmi_pdev_peer_pktlog_filter(struct ath11k *ar, u8 *addr, u8 enable)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_pdev_pktlog_filter_cmd *cmd;
+ struct wmi_pdev_pktlog_filter_info *info;
+ struct sk_buff *skb;
+ struct wmi_tlv *tlv;
+ void *ptr;
+ int ret, len;
+
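+ /* The filter command carries exactly one peer info entry inside an
+ * array-struct TLV, hence the fixed-size allocation.
+ */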
+ len = sizeof(*cmd) + sizeof(*info) + TLV_HDR_SIZE;
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pdev_pktlog_filter_cmd *)skb->data;
+
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_PEER_PKTLOG_FILTER_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->pdev_id = DP_HW2SW_MACID(ar->pdev->pdev_id);
+ cmd->num_mac = 1;
+ cmd->enable = enable;
+
+ ptr = skb->data + sizeof(*cmd);
+
+ tlv = ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*info));
+
+ ptr += TLV_HDR_SIZE;
+ info = ptr;
+
+ ether_addr_copy(info->peer_macaddr.addr, addr);
+ info->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_PEER_PKTLOG_FILTER_INFO) |
+ FIELD_PREP(WMI_TLV_LEN,
+ sizeof(*info) - TLV_HDR_SIZE);
+
+ ret = ath11k_wmi_cmd_send(wmi, skb,
+ WMI_PDEV_PKTLOG_FILTER_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send WMI_PDEV_PKTLOG_ENABLE_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd pdev pktlog filter");
+
+ return ret;
+}
+
+int
+ath11k_wmi_send_init_country_cmd(struct ath11k *ar,
+ struct wmi_init_country_params init_cc_params)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_init_country_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_init_country_cmd *)skb->data;
+ cmd->tlv_header =
+ FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_SET_INIT_COUNTRY_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->pdev_id = ar->pdev->pdev_id;
+
+ switch (init_cc_params.flags) {
+ case ALPHA_IS_SET:
+ cmd->init_cc_type = WMI_COUNTRY_INFO_TYPE_ALPHA;
+ memcpy((u8 *)&cmd->cc_info.alpha2,
+ init_cc_params.cc_info.alpha2, 3);
+ break;
+ case CC_IS_SET:
+ cmd->init_cc_type = WMI_COUNTRY_INFO_TYPE_COUNTRY_CODE;
+ cmd->cc_info.country_code = init_cc_params.cc_info.country_code;
+ break;
+ case REGDMN_IS_SET:
+ cmd->init_cc_type = WMI_COUNTRY_INFO_TYPE_REGDOMAIN;
+ cmd->cc_info.regdom_id = init_cc_params.cc_info.regdom_id;
+ break;
+ default:
+ ath11k_warn(ar->ab, "unknown cc params flags: 0x%x",
+ init_cc_params.flags);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ ret = ath11k_wmi_cmd_send(wmi, skb,
+ WMI_SET_INIT_COUNTRY_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send WMI_SET_INIT_COUNTRY CMD :%d\n",
+ ret);
+ goto err;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd set init country");
+
+ return 0;
+
+err:
+ dev_kfree_skb(skb);
+ return ret;
+}
+
+int ath11k_wmi_send_set_current_country_cmd(struct ath11k *ar,
+ struct wmi_set_current_country_params *param)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_set_current_country_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_set_current_country_cmd *)skb->data;
+ cmd->tlv_header =
+ FIELD_PREP(WMI_TLV_TAG, WMI_TAG_SET_CURRENT_COUNTRY_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->pdev_id = ar->pdev->pdev_id;
+ memcpy(&cmd->new_alpha2, &param->alpha2, 3);
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_SET_CURRENT_COUNTRY_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send WMI_SET_CURRENT_COUNTRY_CMDID: %d\n", ret);
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "cmd set current country pdev id %d alpha2 %c%c\n",
+ ar->pdev->pdev_id,
+ param->alpha2[0],
+ param->alpha2[1]);
+
+ return ret;
+}
+
+int
+ath11k_wmi_send_thermal_mitigation_param_cmd(struct ath11k *ar,
+ struct thermal_mitigation_params *param)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_therm_throt_config_request_cmd *cmd;
+ struct wmi_therm_throt_level_config_info *lvl_conf;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ int i, ret, len;
+
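+ /* THERMAL_LEVELS level-config entries follow the fixed command in
+ * a single array-struct TLV.
+ */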
+ len = sizeof(*cmd) + TLV_HDR_SIZE +
+ THERMAL_LEVELS * sizeof(struct wmi_therm_throt_level_config_info);
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_therm_throt_config_request_cmd *)skb->data;
+
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_THERM_THROT_CONFIG_REQUEST) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->pdev_id = ar->pdev->pdev_id;
+ cmd->enable = param->enable;
+ cmd->dc = param->dc;
+ cmd->dc_per_event = param->dc_per_event;
+ cmd->therm_throt_levels = THERMAL_LEVELS;
+
+ tlv = (struct wmi_tlv *)(skb->data + sizeof(*cmd));
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
+ FIELD_PREP(WMI_TLV_LEN,
+ (THERMAL_LEVELS *
+ sizeof(struct wmi_therm_throt_level_config_info)));
+
+ lvl_conf = (struct wmi_therm_throt_level_config_info *)(skb->data +
+ sizeof(*cmd) +
+ TLV_HDR_SIZE);
+ for (i = 0; i < THERMAL_LEVELS; i++) {
+ lvl_conf->tlv_header =
+ FIELD_PREP(WMI_TLV_TAG, WMI_TAG_THERM_THROT_LEVEL_CONFIG_INFO) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*lvl_conf) - TLV_HDR_SIZE);
+
+ lvl_conf->temp_lwm = param->levelconf[i].tmplwm;
+ lvl_conf->temp_hwm = param->levelconf[i].tmphwm;
+ lvl_conf->dc_off_percent = param->levelconf[i].dcoffpercent;
+ lvl_conf->prio = param->levelconf[i].priority;
+ lvl_conf++;
+ }
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_THERM_THROT_SET_CONF_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send THERM_THROT_SET_CONF cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "cmd therm throt set conf pdev_id %d enable %d dc %d dc_per_event %x levels %d\n",
+ ar->pdev->pdev_id, param->enable, param->dc,
+ param->dc_per_event, THERMAL_LEVELS);
+
+ return ret;
+}
+
+int ath11k_wmi_send_11d_scan_start_cmd(struct ath11k *ar,
+ struct wmi_11d_scan_start_params *param)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_11d_scan_start_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_11d_scan_start_cmd *)skb->data;
+ cmd->tlv_header =
+ FIELD_PREP(WMI_TLV_TAG, WMI_TAG_11D_SCAN_START_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->vdev_id = param->vdev_id;
+ cmd->scan_period_msec = param->scan_period_msec;
+ cmd->start_interval_msec = param->start_interval_msec;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_11D_SCAN_START_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send WMI_11D_SCAN_START_CMDID: %d\n", ret);
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "cmd 11d scan start vdev id %d period %d ms internal %d ms\n",
+ cmd->vdev_id,
+ cmd->scan_period_msec,
+ cmd->start_interval_msec);
+
+ return ret;
+}
+
+int ath11k_wmi_send_11d_scan_stop_cmd(struct ath11k *ar, u32 vdev_id)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_11d_scan_stop_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_11d_scan_stop_cmd *)skb->data;
+ cmd->tlv_header =
+ FIELD_PREP(WMI_TLV_TAG, WMI_TAG_11D_SCAN_STOP_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->vdev_id = vdev_id;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_11D_SCAN_STOP_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send WMI_11D_SCAN_STOP_CMDID: %d\n", ret);
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "cmd 11d scan stop vdev id %d\n",
+ cmd->vdev_id);
+
+ return ret;
+}
+
+int ath11k_wmi_pdev_pktlog_enable(struct ath11k *ar, u32 pktlog_filter)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_pktlog_enable_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pktlog_enable_cmd *)skb->data;
+
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_PKTLOG_ENABLE_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->pdev_id = DP_HW2SW_MACID(ar->pdev->pdev_id);
+ cmd->evlist = pktlog_filter;
+ cmd->enable = ATH11K_WMI_PKTLOG_ENABLE_FORCE;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb,
+ WMI_PDEV_PKTLOG_ENABLE_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send WMI_PDEV_PKTLOG_ENABLE_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd pdev pktlog enable");
+
+ return ret;
+}
+
+int ath11k_wmi_pdev_pktlog_disable(struct ath11k *ar)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_pktlog_disable_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pktlog_disable_cmd *)skb->data;
+
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_PKTLOG_DISABLE_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->pdev_id = DP_HW2SW_MACID(ar->pdev->pdev_id);
+
+ ret = ath11k_wmi_cmd_send(wmi, skb,
+ WMI_PDEV_PKTLOG_DISABLE_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send WMI_PDEV_PKTLOG_ENABLE_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd pdev pktlog disable");
+
+ return ret;
+}
+
+void ath11k_wmi_fill_default_twt_params(struct wmi_twt_enable_params *twt_params)
+{
+ twt_params->sta_cong_timer_ms = ATH11K_TWT_DEF_STA_CONG_TIMER_MS;
+ twt_params->default_slot_size = ATH11K_TWT_DEF_DEFAULT_SLOT_SIZE;
+ twt_params->congestion_thresh_setup = ATH11K_TWT_DEF_CONGESTION_THRESH_SETUP;
+ twt_params->congestion_thresh_teardown =
+ ATH11K_TWT_DEF_CONGESTION_THRESH_TEARDOWN;
+ twt_params->congestion_thresh_critical =
+ ATH11K_TWT_DEF_CONGESTION_THRESH_CRITICAL;
+ twt_params->interference_thresh_teardown =
+ ATH11K_TWT_DEF_INTERFERENCE_THRESH_TEARDOWN;
+ twt_params->interference_thresh_setup =
+ ATH11K_TWT_DEF_INTERFERENCE_THRESH_SETUP;
+ twt_params->min_no_sta_setup = ATH11K_TWT_DEF_MIN_NO_STA_SETUP;
+ twt_params->min_no_sta_teardown = ATH11K_TWT_DEF_MIN_NO_STA_TEARDOWN;
+ twt_params->no_of_bcast_mcast_slots = ATH11K_TWT_DEF_NO_OF_BCAST_MCAST_SLOTS;
+ twt_params->min_no_twt_slots = ATH11K_TWT_DEF_MIN_NO_TWT_SLOTS;
+ twt_params->max_no_sta_twt = ATH11K_TWT_DEF_MAX_NO_STA_TWT;
+ twt_params->mode_check_interval = ATH11K_TWT_DEF_MODE_CHECK_INTERVAL;
+ twt_params->add_sta_slot_interval = ATH11K_TWT_DEF_ADD_STA_SLOT_INTERVAL;
+ twt_params->remove_sta_slot_interval =
+ ATH11K_TWT_DEF_REMOVE_STA_SLOT_INTERVAL;
+ /* TODO add MBSSID support */
+ twt_params->mbss_support = 0;
+}
+
+int ath11k_wmi_send_twt_enable_cmd(struct ath11k *ar, u32 pdev_id,
+ struct wmi_twt_enable_params *params)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct ath11k_base *ab = wmi->wmi_ab->ab;
+ struct wmi_twt_enable_params_cmd *cmd;
+ struct sk_buff *skb;
+ int ret, len;
+
+ len = sizeof(*cmd);
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_twt_enable_params_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_TWT_ENABLE_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+ cmd->pdev_id = pdev_id;
+ cmd->sta_cong_timer_ms = params->sta_cong_timer_ms;
+ cmd->default_slot_size = params->default_slot_size;
+ cmd->congestion_thresh_setup = params->congestion_thresh_setup;
+ cmd->congestion_thresh_teardown = params->congestion_thresh_teardown;
+ cmd->congestion_thresh_critical = params->congestion_thresh_critical;
+ cmd->interference_thresh_teardown = params->interference_thresh_teardown;
+ cmd->interference_thresh_setup = params->interference_thresh_setup;
+ cmd->min_no_sta_setup = params->min_no_sta_setup;
+ cmd->min_no_sta_teardown = params->min_no_sta_teardown;
+ cmd->no_of_bcast_mcast_slots = params->no_of_bcast_mcast_slots;
+ cmd->min_no_twt_slots = params->min_no_twt_slots;
+ cmd->max_no_sta_twt = params->max_no_sta_twt;
+ cmd->mode_check_interval = params->mode_check_interval;
+ cmd->add_sta_slot_interval = params->add_sta_slot_interval;
+ cmd->remove_sta_slot_interval = params->remove_sta_slot_interval;
+ cmd->mbss_support = params->mbss_support;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_TWT_ENABLE_CMDID);
+ if (ret) {
+ ath11k_warn(ab, "Failed to send WMI_TWT_ENABLE_CMDID");
+ dev_kfree_skb(skb);
+ return ret;
+ }
+
+ ar->twt_enabled = 1;
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "cmd twt enable");
+
+ return 0;
+}
+
+int
+ath11k_wmi_send_twt_disable_cmd(struct ath11k *ar, u32 pdev_id)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct ath11k_base *ab = wmi->wmi_ab->ab;
+ struct wmi_twt_disable_params_cmd *cmd;
+ struct sk_buff *skb;
+ int ret, len;
+
+ len = sizeof(*cmd);
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_twt_disable_params_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_TWT_DISABLE_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+ cmd->pdev_id = pdev_id;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_TWT_DISABLE_CMDID);
+ if (ret) {
+ ath11k_warn(ab, "Failed to send WMI_TWT_DISABLE_CMDID");
+ dev_kfree_skb(skb);
+ return ret;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "cmd twt disable");
+
+ ar->twt_enabled = 0;
+
+ return 0;
+}
+
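+/* Build and send a TWT add-dialog request. The command's flags field
+ * packs the TWT command type from params->twt_cmd together with the
+ * optional broadcast, trigger, flow-type and protection flag bits.
+ */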
+int ath11k_wmi_send_twt_add_dialog_cmd(struct ath11k *ar,
+ struct wmi_twt_add_dialog_params *params)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct ath11k_base *ab = wmi->wmi_ab->ab;
+ struct wmi_twt_add_dialog_params_cmd *cmd;
+ struct sk_buff *skb;
+ int ret, len;
+
+ len = sizeof(*cmd);
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_twt_add_dialog_params_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_TWT_ADD_DIALOG_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+
+ cmd->vdev_id = params->vdev_id;
+ ether_addr_copy(cmd->peer_macaddr.addr, params->peer_macaddr);
+ cmd->dialog_id = params->dialog_id;
+ cmd->wake_intvl_us = params->wake_intvl_us;
+ cmd->wake_intvl_mantis = params->wake_intvl_mantis;
+ cmd->wake_dura_us = params->wake_dura_us;
+ cmd->sp_offset_us = params->sp_offset_us;
+ cmd->flags = params->twt_cmd;
+ if (params->flag_bcast)
+ cmd->flags |= WMI_TWT_ADD_DIALOG_FLAG_BCAST;
+ if (params->flag_trigger)
+ cmd->flags |= WMI_TWT_ADD_DIALOG_FLAG_TRIGGER;
+ if (params->flag_flow_type)
+ cmd->flags |= WMI_TWT_ADD_DIALOG_FLAG_FLOW_TYPE;
+ if (params->flag_protection)
+ cmd->flags |= WMI_TWT_ADD_DIALOG_FLAG_PROTECTION;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_TWT_ADD_DIALOG_CMDID);
+ if (ret) {
+ ath11k_warn(ab,
+ "failed to send wmi command to add twt dialog: %d",
+ ret);
+ dev_kfree_skb(skb);
+ return ret;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "cmd twt add dialog vdev %u dialog id %u wake interval %u mantissa %u wake duration %u service period offset %u flags 0x%x\n",
+ cmd->vdev_id, cmd->dialog_id, cmd->wake_intvl_us,
+ cmd->wake_intvl_mantis, cmd->wake_dura_us, cmd->sp_offset_us,
+ cmd->flags);
+
+ return 0;
+}
+
+int ath11k_wmi_send_twt_del_dialog_cmd(struct ath11k *ar,
+ struct wmi_twt_del_dialog_params *params)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct ath11k_base *ab = wmi->wmi_ab->ab;
+ struct wmi_twt_del_dialog_params_cmd *cmd;
+ struct sk_buff *skb;
+ int ret, len;
+
+ len = sizeof(*cmd);
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_twt_del_dialog_params_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_TWT_DEL_DIALOG_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+
+ cmd->vdev_id = params->vdev_id;
+ ether_addr_copy(cmd->peer_macaddr.addr, params->peer_macaddr);
+ cmd->dialog_id = params->dialog_id;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_TWT_DEL_DIALOG_CMDID);
+ if (ret) {
+ ath11k_warn(ab,
+ "failed to send wmi command to delete twt dialog: %d",
+ ret);
+ dev_kfree_skb(skb);
+ return ret;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "cmd twt del dialog vdev %u dialog id %u\n",
+ cmd->vdev_id, cmd->dialog_id);
+
+ return 0;
+}
+
+int ath11k_wmi_send_twt_pause_dialog_cmd(struct ath11k *ar,
+ struct wmi_twt_pause_dialog_params *params)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct ath11k_base *ab = wmi->wmi_ab->ab;
+ struct wmi_twt_pause_dialog_params_cmd *cmd;
+ struct sk_buff *skb;
+ int ret, len;
+
+ len = sizeof(*cmd);
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_twt_pause_dialog_params_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_TWT_PAUSE_DIALOG_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+
+ cmd->vdev_id = params->vdev_id;
+ ether_addr_copy(cmd->peer_macaddr.addr, params->peer_macaddr);
+ cmd->dialog_id = params->dialog_id;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_TWT_PAUSE_DIALOG_CMDID);
+ if (ret) {
+ ath11k_warn(ab,
+ "failed to send wmi command to pause twt dialog: %d",
+ ret);
+ dev_kfree_skb(skb);
+ return ret;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "cmd twt pause dialog vdev %u dialog id %u\n",
+ cmd->vdev_id, cmd->dialog_id);
+
+ return 0;
+}
+
+int ath11k_wmi_send_twt_resume_dialog_cmd(struct ath11k *ar,
+ struct wmi_twt_resume_dialog_params *params)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct ath11k_base *ab = wmi->wmi_ab->ab;
+ struct wmi_twt_resume_dialog_params_cmd *cmd;
+ struct sk_buff *skb;
+ int ret, len;
+
+ len = sizeof(*cmd);
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_twt_resume_dialog_params_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_TWT_RESUME_DIALOG_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+
+ cmd->vdev_id = params->vdev_id;
+ ether_addr_copy(cmd->peer_macaddr.addr, params->peer_macaddr);
+ cmd->dialog_id = params->dialog_id;
+ cmd->sp_offset_us = params->sp_offset_us;
+ cmd->next_twt_size = params->next_twt_size;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_TWT_RESUME_DIALOG_CMDID);
+ if (ret) {
+ ath11k_warn(ab,
+ "failed to send wmi command to resume twt dialog: %d",
+ ret);
+ dev_kfree_skb(skb);
+ return ret;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "cmd twt resume dialog vdev %u dialog id %u service period offset %u next twt subfield size %u\n",
+ cmd->vdev_id, cmd->dialog_id, cmd->sp_offset_us,
+ cmd->next_twt_size);
+
+ return 0;
+}
+
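+/* Program the OBSS packet-detect (spatial reuse) thresholds for a vdev
+ * from the mac80211 ieee80211_he_obss_pd settings.
+ */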
+int
+ath11k_wmi_send_obss_spr_cmd(struct ath11k *ar, u32 vdev_id,
+ struct ieee80211_he_obss_pd *he_obss_pd)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct ath11k_base *ab = wmi->wmi_ab->ab;
+ struct wmi_obss_spatial_reuse_params_cmd *cmd;
+ struct sk_buff *skb;
+ int ret, len;
+
+ len = sizeof(*cmd);
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_obss_spatial_reuse_params_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_OBSS_SPATIAL_REUSE_SET_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+ cmd->vdev_id = vdev_id;
+ cmd->enable = he_obss_pd->enable;
+ cmd->obss_min = he_obss_pd->min_offset;
+ cmd->obss_max = he_obss_pd->max_offset;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb,
+ WMI_PDEV_OBSS_PD_SPATIAL_REUSE_CMDID);
+ if (ret) {
+ ath11k_warn(ab,
+ "Failed to send WMI_PDEV_OBSS_PD_SPATIAL_REUSE_CMDID");
+ dev_kfree_skb(skb);
+ return ret;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "cmd pdev obss pd spatial reuse");
+
+ return 0;
+}
+
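+/* The following helpers all share struct wmi_pdev_obss_pd_bitmap_cmd
+ * and differ only in the TLV tag and command ID used: they program the
+ * SRG and non-SRG BSS color / partial BSSID enable bitmaps for a pdev.
+ */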
+int
+ath11k_wmi_pdev_set_srg_bss_color_bitmap(struct ath11k *ar, u32 *bitmap)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct ath11k_base *ab = wmi->wmi_ab->ab;
+ struct wmi_pdev_obss_pd_bitmap_cmd *cmd;
+ struct sk_buff *skb;
+ int ret, len;
+
+ len = sizeof(*cmd);
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pdev_obss_pd_bitmap_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_PDEV_SRG_BSS_COLOR_BITMAP_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+ cmd->pdev_id = ar->pdev->pdev_id;
+ memcpy(cmd->bitmap, bitmap, sizeof(cmd->bitmap));
+
+ ret = ath11k_wmi_cmd_send(wmi, skb,
+ WMI_PDEV_SET_SRG_BSS_COLOR_BITMAP_CMDID);
+ if (ret) {
+ ath11k_warn(ab,
+ "failed to send WMI_PDEV_SET_SRG_BSS_COLOR_BITMAP_CMDID");
+ dev_kfree_skb(skb);
+ return ret;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "cmd pdev set srg bss color bitmap pdev_id %d bss color bitmap %08x %08x\n",
+ cmd->pdev_id, cmd->bitmap[0], cmd->bitmap[1]);
+
+ return 0;
+}
+
+int
+ath11k_wmi_pdev_set_srg_patial_bssid_bitmap(struct ath11k *ar, u32 *bitmap)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct ath11k_base *ab = wmi->wmi_ab->ab;
+ struct wmi_pdev_obss_pd_bitmap_cmd *cmd;
+ struct sk_buff *skb;
+ int ret, len;
+
+ len = sizeof(*cmd);
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pdev_obss_pd_bitmap_cmd *)skb->data;
+ cmd->tlv_header =
+ FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_PDEV_SRG_PARTIAL_BSSID_BITMAP_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+ cmd->pdev_id = ar->pdev->pdev_id;
+ memcpy(cmd->bitmap, bitmap, sizeof(cmd->bitmap));
+
+ ret = ath11k_wmi_cmd_send(wmi, skb,
+ WMI_PDEV_SET_SRG_PARTIAL_BSSID_BITMAP_CMDID);
+ if (ret) {
+ ath11k_warn(ab,
+ "failed to send WMI_PDEV_SET_SRG_PARTIAL_BSSID_BITMAP_CMDID");
+ dev_kfree_skb(skb);
+ return ret;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "cmd pdev set srg partial bssid bitmap pdev_id %d partial bssid bitmap %08x %08x\n",
+ cmd->pdev_id, cmd->bitmap[0], cmd->bitmap[1]);
+
+ return 0;
+}
+
+int
+ath11k_wmi_pdev_srg_obss_color_enable_bitmap(struct ath11k *ar, u32 *bitmap)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct ath11k_base *ab = wmi->wmi_ab->ab;
+ struct wmi_pdev_obss_pd_bitmap_cmd *cmd;
+ struct sk_buff *skb;
+ int ret, len;
+
+ len = sizeof(*cmd);
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pdev_obss_pd_bitmap_cmd *)skb->data;
+ cmd->tlv_header =
+ FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_PDEV_SRG_OBSS_COLOR_ENABLE_BITMAP_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+ cmd->pdev_id = ar->pdev->pdev_id;
+ memcpy(cmd->bitmap, bitmap, sizeof(cmd->bitmap));
+
+ ret = ath11k_wmi_cmd_send(wmi, skb,
+ WMI_PDEV_SET_SRG_OBSS_COLOR_ENABLE_BITMAP_CMDID);
+ if (ret) {
+ ath11k_warn(ab,
+ "failed to send WMI_PDEV_SET_SRG_OBSS_COLOR_ENABLE_BITMAP_CMDID");
+ dev_kfree_skb(skb);
+ return ret;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "cmd pdev set srg obsscolor enable pdev_id %d bss color enable bitmap %08x %08x\n",
+ cmd->pdev_id, cmd->bitmap[0], cmd->bitmap[1]);
+
+ return 0;
+}
+
+int
+ath11k_wmi_pdev_srg_obss_bssid_enable_bitmap(struct ath11k *ar, u32 *bitmap)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct ath11k_base *ab = wmi->wmi_ab->ab;
+ struct wmi_pdev_obss_pd_bitmap_cmd *cmd;
+ struct sk_buff *skb;
+ int ret, len;
+
+ len = sizeof(*cmd);
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pdev_obss_pd_bitmap_cmd *)skb->data;
+ cmd->tlv_header =
+ FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_PDEV_SRG_OBSS_BSSID_ENABLE_BITMAP_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+ cmd->pdev_id = ar->pdev->pdev_id;
+ memcpy(cmd->bitmap, bitmap, sizeof(cmd->bitmap));
+
+ ret = ath11k_wmi_cmd_send(wmi, skb,
+ WMI_PDEV_SET_SRG_OBSS_BSSID_ENABLE_BITMAP_CMDID);
+ if (ret) {
+ ath11k_warn(ab,
+ "failed to send WMI_PDEV_SET_SRG_OBSS_BSSID_ENABLE_BITMAP_CMDID");
+ dev_kfree_skb(skb);
+ return ret;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "cmd pdev set srg obss bssid enable bitmap pdev_id %d bssid enable bitmap %08x %08x\n",
+ cmd->pdev_id, cmd->bitmap[0], cmd->bitmap[1]);
+
+ return 0;
+}
+
+int
+ath11k_wmi_pdev_non_srg_obss_color_enable_bitmap(struct ath11k *ar, u32 *bitmap)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct ath11k_base *ab = wmi->wmi_ab->ab;
+ struct wmi_pdev_obss_pd_bitmap_cmd *cmd;
+ struct sk_buff *skb;
+ int ret, len;
+
+ len = sizeof(*cmd);
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pdev_obss_pd_bitmap_cmd *)skb->data;
+ cmd->tlv_header =
+ FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_PDEV_NON_SRG_OBSS_COLOR_ENABLE_BITMAP_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+ cmd->pdev_id = ar->pdev->pdev_id;
+ memcpy(cmd->bitmap, bitmap, sizeof(cmd->bitmap));
+
+ ret = ath11k_wmi_cmd_send(wmi, skb,
+ WMI_PDEV_SET_NON_SRG_OBSS_COLOR_ENABLE_BITMAP_CMDID);
+ if (ret) {
+ ath11k_warn(ab,
+ "failed to send WMI_PDEV_SET_NON_SRG_OBSS_COLOR_ENABLE_BITMAP_CMDID");
+ dev_kfree_skb(skb);
+ return ret;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "cmd pdev set non srg obss color enable bitmap pdev_id %d bss color enable bitmap %08x %08x\n",
+ cmd->pdev_id, cmd->bitmap[0], cmd->bitmap[1]);
+
+ return 0;
+}
+
+int
+ath11k_wmi_pdev_non_srg_obss_bssid_enable_bitmap(struct ath11k *ar, u32 *bitmap)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct ath11k_base *ab = wmi->wmi_ab->ab;
+ struct wmi_pdev_obss_pd_bitmap_cmd *cmd;
+ struct sk_buff *skb;
+ int ret, len;
+
+ len = sizeof(*cmd);
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pdev_obss_pd_bitmap_cmd *)skb->data;
+ cmd->tlv_header =
+ FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_PDEV_NON_SRG_OBSS_BSSID_ENABLE_BITMAP_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+ cmd->pdev_id = ar->pdev->pdev_id;
+ memcpy(cmd->bitmap, bitmap, sizeof(cmd->bitmap));
+
+ ret = ath11k_wmi_cmd_send(wmi, skb,
+ WMI_PDEV_SET_NON_SRG_OBSS_BSSID_ENABLE_BITMAP_CMDID);
+ if (ret) {
+ ath11k_warn(ab,
+ "failed to send WMI_PDEV_SET_NON_SRG_OBSS_BSSID_ENABLE_BITMAP_CMDID");
+ dev_kfree_skb(skb);
+ return ret;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "cmd pdev set non srg obss bssid enable bitmap pdev_id %d bssid enable bitmap %08x %08x\n",
+ cmd->pdev_id, cmd->bitmap[0], cmd->bitmap[1]);
+
+ return 0;
+}
+
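+/* Configure BSS color collision detection for a vdev; @enable selects
+ * between the detection and disable event types.
+ */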
+int
+ath11k_wmi_send_obss_color_collision_cfg_cmd(struct ath11k *ar, u32 vdev_id,
+ u8 bss_color, u32 period,
+ bool enable)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct ath11k_base *ab = wmi->wmi_ab->ab;
+ struct wmi_obss_color_collision_cfg_params_cmd *cmd;
+ struct sk_buff *skb;
+ int ret, len;
+
+ len = sizeof(*cmd);
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_obss_color_collision_cfg_params_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_OBSS_COLOR_COLLISION_DET_CONFIG) |
+ FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+ cmd->vdev_id = vdev_id;
+ cmd->evt_type = enable ? ATH11K_OBSS_COLOR_COLLISION_DETECTION :
+ ATH11K_OBSS_COLOR_COLLISION_DETECTION_DISABLE;
+ cmd->current_bss_color = bss_color;
+ cmd->detection_period_ms = period;
+ cmd->scan_period_ms = ATH11K_BSS_COLOR_COLLISION_SCAN_PERIOD_MS;
+ cmd->free_slot_expiry_time_ms = 0;
+ cmd->flags = 0;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb,
+ WMI_OBSS_COLOR_COLLISION_DET_CONFIG_CMDID);
+ if (ret) {
+ ath11k_warn(ab, "Failed to send WMI_OBSS_COLOR_COLLISION_DET_CONFIG_CMDID");
+ dev_kfree_skb(skb);
+ return ret;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "cmd obss color collision det config id %d type %d bss_color %d detect_period %d scan_period %d\n",
+ cmd->vdev_id, cmd->evt_type, cmd->current_bss_color,
+ cmd->detection_period_ms, cmd->scan_period_ms);
+
+ return 0;
+}
+
+int ath11k_wmi_send_bss_color_change_enable_cmd(struct ath11k *ar, u32 vdev_id,
+ bool enable)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct ath11k_base *ab = wmi->wmi_ab->ab;
+ struct wmi_bss_color_change_enable_params_cmd *cmd;
+ struct sk_buff *skb;
+ int ret, len;
+
+ len = sizeof(*cmd);
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_bss_color_change_enable_params_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_BSS_COLOR_CHANGE_ENABLE) |
+ FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+ cmd->vdev_id = vdev_id;
+ cmd->enable = enable ? 1 : 0;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb,
+ WMI_BSS_COLOR_CHANGE_ENABLE_CMDID);
+ if (ret) {
+ ath11k_warn(ab, "Failed to send WMI_BSS_COLOR_CHANGE_ENABLE_CMDID");
+ dev_kfree_skb(skb);
+ return ret;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "cmd bss color change enable id %d enable %d\n",
+ cmd->vdev_id, cmd->enable);
+
+ return 0;
+}
+
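+/* Send a FILS discovery frame template. The buffer layout is the fixed
+ * command TLV followed by a byte-array TLV holding the template, padded
+ * to a 4-byte boundary.
+ */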
+int ath11k_wmi_fils_discovery_tmpl(struct ath11k *ar, u32 vdev_id,
+ struct sk_buff *tmpl)
+{
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ void *ptr;
+ int ret, len;
+ size_t aligned_len;
+ struct wmi_fils_discovery_tmpl_cmd *cmd;
+
+ aligned_len = roundup(tmpl->len, 4);
+ len = sizeof(*cmd) + TLV_HDR_SIZE + aligned_len;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "vdev %i set FILS discovery template\n", vdev_id);
+
+ skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_fils_discovery_tmpl_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_FILS_DISCOVERY_TMPL_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->vdev_id = vdev_id;
+ cmd->buf_len = tmpl->len;
+ ptr = skb->data + sizeof(*cmd);
+
+ tlv = ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
+ FIELD_PREP(WMI_TLV_LEN, aligned_len);
+ memcpy(tlv->value, tmpl->data, tmpl->len);
+
+ ret = ath11k_wmi_cmd_send(ar->wmi, skb, WMI_FILS_DISCOVERY_TMPL_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "WMI vdev %i failed to send FILS discovery template command\n",
+ vdev_id);
+ dev_kfree_skb(skb);
+ return ret;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd fils discovery tmpl");
+
+ return 0;
+}
+
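+/* Send a probe response frame template. The buffer layout is the fixed
+ * command TLV, a wmi_bcn_prb_info TLV and a byte-array TLV holding the
+ * 4-byte aligned template.
+ */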
+int ath11k_wmi_probe_resp_tmpl(struct ath11k *ar, u32 vdev_id,
+ struct sk_buff *tmpl)
+{
+ struct wmi_probe_tmpl_cmd *cmd;
+ struct wmi_bcn_prb_info *probe_info;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ void *ptr;
+ int ret, len;
+ size_t aligned_len = roundup(tmpl->len, 4);
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "vdev %i set probe response template\n", vdev_id);
+
+ len = sizeof(*cmd) + sizeof(*probe_info) + TLV_HDR_SIZE + aligned_len;
+
+ skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_probe_tmpl_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PRB_TMPL_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->vdev_id = vdev_id;
+ cmd->buf_len = tmpl->len;
+
+ ptr = skb->data + sizeof(*cmd);
+
+ probe_info = ptr;
+ len = sizeof(*probe_info);
+ probe_info->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_BCN_PRB_INFO) |
+ FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+ probe_info->caps = 0;
+ probe_info->erp = 0;
+
+ ptr += sizeof(*probe_info);
+
+ tlv = ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
+ FIELD_PREP(WMI_TLV_LEN, aligned_len);
+ memcpy(tlv->value, tmpl->data, tmpl->len);
+
+ ret = ath11k_wmi_cmd_send(ar->wmi, skb, WMI_PRB_TMPL_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "WMI vdev %i failed to send probe response template command\n",
+ vdev_id);
+ dev_kfree_skb(skb);
+ return ret;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd prb tmpl");
+
+ return 0;
+}
+
+int ath11k_wmi_fils_discovery(struct ath11k *ar, u32 vdev_id, u32 interval,
+ bool unsol_bcast_probe_resp_enabled)
+{
+ struct sk_buff *skb;
+ int ret, len;
+ struct wmi_fils_discovery_cmd *cmd;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "vdev %i set %s interval to %u TU\n",
+ vdev_id, unsol_bcast_probe_resp_enabled ?
+ "unsolicited broadcast probe response" : "FILS discovery",
+ interval);
+
+ len = sizeof(*cmd);
+ skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_fils_discovery_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ENABLE_FILS_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+ cmd->vdev_id = vdev_id;
+ cmd->interval = interval;
+ cmd->config = unsol_bcast_probe_resp_enabled;
+
+ ret = ath11k_wmi_cmd_send(ar->wmi, skb, WMI_ENABLE_FILS_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "WMI vdev %i failed to send FILS discovery enable/disable command\n",
+ vdev_id);
+ dev_kfree_skb(skb);
+ return ret;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd enable fils");
+
+ return 0;
+}
+
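+/* Handle the OBSS color collision event: look up the reporting vdev
+ * under RCU and forward detected collisions to mac80211.
+ */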
+static void
+ath11k_wmi_obss_color_collision_event(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ const void **tb;
+ const struct wmi_obss_color_collision_event *ev;
+ struct ath11k_vif *arvif;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "event obss color collision");
+
+ rcu_read_lock();
+
+ ev = tb[WMI_TAG_OBSS_COLOR_COLLISION_EVT];
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch obss color collision ev");
+ goto exit;
+ }
+
+ arvif = ath11k_mac_get_arvif_by_vdev_id(ab, ev->vdev_id);
+ if (!arvif) {
+ ath11k_warn(ab, "failed to find arvif with vedv id %d in obss_color_collision_event\n",
+ ev->vdev_id);
+ goto exit;
+ }
+
+ switch (ev->evt_type) {
+ case WMI_BSS_COLOR_COLLISION_DETECTION:
+ ieee80211_obss_color_collision_notify(arvif->vif, ev->obss_color_bitmap,
+ 0);
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "OBSS color collision detected vdev:%d, event:%d, bitmap:%08llx\n",
+ ev->vdev_id, ev->evt_type, ev->obss_color_bitmap);
+ break;
+ case WMI_BSS_COLOR_COLLISION_DISABLE:
+ case WMI_BSS_COLOR_FREE_SLOT_TIMER_EXPIRY:
+ case WMI_BSS_COLOR_FREE_SLOT_AVAILABLE:
+ break;
+ default:
+ ath11k_warn(ab, "received unknown obss color collision detection event\n");
+ }
+
+exit:
+ kfree(tb);
+ rcu_read_unlock();
+}
+
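+/* Derive the per-pdev band-to-mac frequency ranges from the HAL
+ * register capabilities, based on each pdev's supported bands.
+ */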
+static void
+ath11k_fill_band_to_mac_param(struct ath11k_base *soc,
+ struct wmi_host_pdev_band_to_mac *band_to_mac)
+{
+ u8 i;
+ struct ath11k_hal_reg_capabilities_ext *hal_reg_cap;
+ struct ath11k_pdev *pdev;
+
+ for (i = 0; i < soc->num_radios; i++) {
+ pdev = &soc->pdevs[i];
+ hal_reg_cap = &soc->hal_reg_cap[i];
+ band_to_mac[i].pdev_id = pdev->pdev_id;
+
+ switch (pdev->cap.supported_bands) {
+ case WMI_HOST_WLAN_2G_5G_CAP:
+ band_to_mac[i].start_freq = hal_reg_cap->low_2ghz_chan;
+ band_to_mac[i].end_freq = hal_reg_cap->high_5ghz_chan;
+ break;
+ case WMI_HOST_WLAN_2G_CAP:
+ band_to_mac[i].start_freq = hal_reg_cap->low_2ghz_chan;
+ band_to_mac[i].end_freq = hal_reg_cap->high_2ghz_chan;
+ break;
+ case WMI_HOST_WLAN_5G_CAP:
+ band_to_mac[i].start_freq = hal_reg_cap->low_5ghz_chan;
+ band_to_mac[i].end_freq = hal_reg_cap->high_5ghz_chan;
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+static void
+ath11k_wmi_copy_resource_config(struct wmi_resource_config *wmi_cfg,
+ struct target_resource_config *tg_cfg)
+{
+ wmi_cfg->num_vdevs = tg_cfg->num_vdevs;
+ wmi_cfg->num_peers = tg_cfg->num_peers;
+ wmi_cfg->num_offload_peers = tg_cfg->num_offload_peers;
+ wmi_cfg->num_offload_reorder_buffs = tg_cfg->num_offload_reorder_buffs;
+ wmi_cfg->num_peer_keys = tg_cfg->num_peer_keys;
+ wmi_cfg->num_tids = tg_cfg->num_tids;
+ wmi_cfg->ast_skid_limit = tg_cfg->ast_skid_limit;
+ wmi_cfg->tx_chain_mask = tg_cfg->tx_chain_mask;
+ wmi_cfg->rx_chain_mask = tg_cfg->rx_chain_mask;
+ wmi_cfg->rx_timeout_pri[0] = tg_cfg->rx_timeout_pri[0];
+ wmi_cfg->rx_timeout_pri[1] = tg_cfg->rx_timeout_pri[1];
+ wmi_cfg->rx_timeout_pri[2] = tg_cfg->rx_timeout_pri[2];
+ wmi_cfg->rx_timeout_pri[3] = tg_cfg->rx_timeout_pri[3];
+ wmi_cfg->rx_decap_mode = tg_cfg->rx_decap_mode;
+ wmi_cfg->scan_max_pending_req = tg_cfg->scan_max_pending_req;
+ wmi_cfg->bmiss_offload_max_vdev = tg_cfg->bmiss_offload_max_vdev;
+ wmi_cfg->roam_offload_max_vdev = tg_cfg->roam_offload_max_vdev;
+ wmi_cfg->roam_offload_max_ap_profiles =
+ tg_cfg->roam_offload_max_ap_profiles;
+ wmi_cfg->num_mcast_groups = tg_cfg->num_mcast_groups;
+ wmi_cfg->num_mcast_table_elems = tg_cfg->num_mcast_table_elems;
+ wmi_cfg->mcast2ucast_mode = tg_cfg->mcast2ucast_mode;
+ wmi_cfg->tx_dbg_log_size = tg_cfg->tx_dbg_log_size;
+ wmi_cfg->num_wds_entries = tg_cfg->num_wds_entries;
+ wmi_cfg->dma_burst_size = tg_cfg->dma_burst_size;
+ wmi_cfg->mac_aggr_delim = tg_cfg->mac_aggr_delim;
+ wmi_cfg->rx_skip_defrag_timeout_dup_detection_check =
+ tg_cfg->rx_skip_defrag_timeout_dup_detection_check;
+ wmi_cfg->vow_config = tg_cfg->vow_config;
+ wmi_cfg->gtk_offload_max_vdev = tg_cfg->gtk_offload_max_vdev;
+ wmi_cfg->num_msdu_desc = tg_cfg->num_msdu_desc;
+ wmi_cfg->max_frag_entries = tg_cfg->max_frag_entries;
+ wmi_cfg->num_tdls_vdevs = tg_cfg->num_tdls_vdevs;
+ wmi_cfg->num_tdls_conn_table_entries =
+ tg_cfg->num_tdls_conn_table_entries;
+ wmi_cfg->beacon_tx_offload_max_vdev =
+ tg_cfg->beacon_tx_offload_max_vdev;
+ wmi_cfg->num_multicast_filter_entries =
+ tg_cfg->num_multicast_filter_entries;
+ wmi_cfg->num_wow_filters = tg_cfg->num_wow_filters;
+ wmi_cfg->num_keep_alive_pattern = tg_cfg->num_keep_alive_pattern;
+ wmi_cfg->keep_alive_pattern_size = tg_cfg->keep_alive_pattern_size;
+ wmi_cfg->max_tdls_concurrent_sleep_sta =
+ tg_cfg->max_tdls_concurrent_sleep_sta;
+ wmi_cfg->max_tdls_concurrent_buffer_sta =
+ tg_cfg->max_tdls_concurrent_buffer_sta;
+ wmi_cfg->wmi_send_separate = tg_cfg->wmi_send_separate;
+ wmi_cfg->num_ocb_vdevs = tg_cfg->num_ocb_vdevs;
+ wmi_cfg->num_ocb_channels = tg_cfg->num_ocb_channels;
+ wmi_cfg->num_ocb_schedules = tg_cfg->num_ocb_schedules;
+ wmi_cfg->bpf_instruction_size = tg_cfg->bpf_instruction_size;
+ wmi_cfg->max_bssid_rx_filters = tg_cfg->max_bssid_rx_filters;
+ wmi_cfg->use_pdev_id = tg_cfg->use_pdev_id;
+ wmi_cfg->flag1 = tg_cfg->flag1;
+ wmi_cfg->peer_map_unmap_v2_support = tg_cfg->peer_map_unmap_v2_support;
+ wmi_cfg->sched_params = tg_cfg->sched_params;
+ wmi_cfg->twt_ap_pdev_count = tg_cfg->twt_ap_pdev_count;
+ wmi_cfg->twt_ap_sta_count = tg_cfg->twt_ap_sta_count;
+ wmi_cfg->host_service_flags &=
+ ~(1 << WMI_CFG_HOST_SERVICE_FLAG_REG_CC_EXT);
+ wmi_cfg->host_service_flags |= (tg_cfg->is_reg_cc_ext_event_supported <<
+ WMI_CFG_HOST_SERVICE_FLAG_REG_CC_EXT);
+ wmi_cfg->flags2 = WMI_RSRC_CFG_FLAG2_CALC_NEXT_DTIM_COUNT_SET;
+ wmi_cfg->ema_max_vap_cnt = tg_cfg->ema_max_vap_cnt;
+ wmi_cfg->ema_max_profile_period = tg_cfg->ema_max_profile_period;
+}
+
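+/* Build and send WMI_INIT. The command carries, in order: the fixed
+ * init TLV, the resource config TLV, an array-of-struct TLV with the
+ * host memory chunks (the array header is emitted even when there are
+ * no chunks) and, if a hw mode is preferred, a set-hw-mode TLV followed
+ * by a band-to-mac array TLV.
+ */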
+static int ath11k_init_cmd_send(struct ath11k_pdev_wmi *wmi,
+ struct wmi_init_cmd_param *param)
+{
+ struct ath11k_base *ab = wmi->wmi_ab->ab;
+ struct sk_buff *skb;
+ struct wmi_init_cmd *cmd;
+ struct wmi_resource_config *cfg;
+ struct wmi_pdev_set_hw_mode_cmd_param *hw_mode;
+ struct wmi_pdev_band_to_mac *band_to_mac;
+ struct wlan_host_mem_chunk *host_mem_chunks;
+ struct wmi_tlv *tlv;
+ int ret;
+ size_t len;
+ void *ptr;
+ u32 hw_mode_len = 0;
+ u16 idx;
+
+ if (param->hw_mode_id != WMI_HOST_HW_MODE_MAX)
+ hw_mode_len = sizeof(*hw_mode) + TLV_HDR_SIZE +
+ (param->num_band_to_mac * sizeof(*band_to_mac));
+
+ len = sizeof(*cmd) + TLV_HDR_SIZE + sizeof(*cfg) + hw_mode_len +
+ (param->num_mem_chunks ? (sizeof(*host_mem_chunks) * WMI_MAX_MEM_REQS) : 0);
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_init_cmd *)skb->data;
+
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_INIT_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ ptr = skb->data + sizeof(*cmd);
+ cfg = ptr;
+
+ ath11k_wmi_copy_resource_config(cfg, param->res_cfg);
+
+ cfg->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_RESOURCE_CONFIG) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cfg) - TLV_HDR_SIZE);
+
+ ptr += sizeof(*cfg);
+ host_mem_chunks = ptr + TLV_HDR_SIZE;
+ len = sizeof(struct wlan_host_mem_chunk);
+
+ for (idx = 0; idx < param->num_mem_chunks; ++idx) {
+ host_mem_chunks[idx].tlv_header =
+ FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_WLAN_HOST_MEMORY_CHUNK) |
+ FIELD_PREP(WMI_TLV_LEN, len);
+
+ host_mem_chunks[idx].ptr = param->mem_chunks[idx].paddr;
+ host_mem_chunks[idx].size = param->mem_chunks[idx].len;
+ host_mem_chunks[idx].req_id = param->mem_chunks[idx].req_id;
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "host mem chunk req_id %d paddr 0x%llx len %d\n",
+ param->mem_chunks[idx].req_id,
+ (u64)param->mem_chunks[idx].paddr,
+ param->mem_chunks[idx].len);
+ }
+ cmd->num_host_mem_chunks = param->num_mem_chunks;
+ len = sizeof(struct wlan_host_mem_chunk) * param->num_mem_chunks;
+
+ /* the chunk array TLV header is written even when num_mem_chunks is zero */
+ tlv = ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
+ FIELD_PREP(WMI_TLV_LEN, len);
+ ptr += TLV_HDR_SIZE + len;
+
+ if (param->hw_mode_id != WMI_HOST_HW_MODE_MAX) {
+ hw_mode = ptr;
+ hw_mode->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_PDEV_SET_HW_MODE_CMD) |
+ FIELD_PREP(WMI_TLV_LEN,
+ sizeof(*hw_mode) - TLV_HDR_SIZE);
+
+ hw_mode->hw_mode_index = param->hw_mode_id;
+ hw_mode->num_band_to_mac = param->num_band_to_mac;
+
+ ptr += sizeof(*hw_mode);
+
+ len = param->num_band_to_mac * sizeof(*band_to_mac);
+ tlv = ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
+ FIELD_PREP(WMI_TLV_LEN, len);
+
+ ptr += TLV_HDR_SIZE;
+ len = sizeof(*band_to_mac);
+
+ for (idx = 0; idx < param->num_band_to_mac; idx++) {
+ band_to_mac = ptr;
+
+ band_to_mac->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_PDEV_BAND_TO_MAC) |
+ FIELD_PREP(WMI_TLV_LEN,
+ len - TLV_HDR_SIZE);
+ band_to_mac->pdev_id = param->band_to_mac[idx].pdev_id;
+ band_to_mac->start_freq =
+ param->band_to_mac[idx].start_freq;
+ band_to_mac->end_freq =
+ param->band_to_mac[idx].end_freq;
+ ptr += sizeof(*band_to_mac);
+ }
+ }
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_INIT_CMDID);
+ if (ret) {
+ ath11k_warn(ab, "failed to send WMI_INIT_CMDID\n");
+ dev_kfree_skb(skb);
+ return ret;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "cmd wmi init");
+
+ return 0;
+}
+
+int ath11k_wmi_pdev_lro_cfg(struct ath11k *ar,
+ int pdev_id)
+{
+ struct ath11k_wmi_pdev_lro_config_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct ath11k_wmi_pdev_lro_config_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_LRO_INFO_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ get_random_bytes(cmd->th_4, sizeof(uint32_t) * ATH11K_IPV4_TH_SEED_SIZE);
+ get_random_bytes(cmd->th_6, sizeof(uint32_t) * ATH11K_IPV6_TH_SEED_SIZE);
+
+ cmd->pdev_id = pdev_id;
+
+ ret = ath11k_wmi_cmd_send(ar->wmi, skb, WMI_LRO_CONFIG_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send lro cfg req wmi cmd\n");
+ goto err;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "cmd lro config pdev_id 0x%x\n", pdev_id);
+ return 0;
+err:
+ dev_kfree_skb(skb);
+ return ret;
+}
+
+int ath11k_wmi_wait_for_service_ready(struct ath11k_base *ab)
+{
+ unsigned long time_left;
+
+ time_left = wait_for_completion_timeout(&ab->wmi_ab.service_ready,
+ WMI_SERVICE_READY_TIMEOUT_HZ);
+ if (!time_left)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+int ath11k_wmi_wait_for_unified_ready(struct ath11k_base *ab)
+{
+ unsigned long time_left;
+
+ time_left = wait_for_completion_timeout(&ab->wmi_ab.unified_ready,
+ WMI_SERVICE_READY_TIMEOUT_HZ);
+ if (!time_left)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+int ath11k_wmi_set_hw_mode(struct ath11k_base *ab,
+ enum wmi_host_hw_mode_config_type mode)
+{
+ struct wmi_pdev_set_hw_mode_cmd_param *cmd;
+ struct sk_buff *skb;
+ struct ath11k_wmi_base *wmi_ab = &ab->wmi_ab;
+ int len;
+ int ret;
+
+ len = sizeof(*cmd);
+
+ skb = ath11k_wmi_alloc_skb(wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pdev_set_hw_mode_cmd_param *)skb->data;
+
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_SET_HW_MODE_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->pdev_id = WMI_PDEV_ID_SOC;
+ cmd->hw_mode_index = mode;
+
+ ret = ath11k_wmi_cmd_send(&wmi_ab->wmi[0], skb, WMI_PDEV_SET_HW_MODE_CMDID);
+ if (ret) {
+ ath11k_warn(ab, "failed to send WMI_PDEV_SET_HW_MODE_CMDID\n");
+ dev_kfree_skb(skb);
+ return ret;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "cmd pdev set hw mode %d", cmd->hw_mode_index);
+
+ return 0;
+}
+
+int ath11k_wmi_cmd_init(struct ath11k_base *ab)
+{
+ struct ath11k_wmi_base *wmi_ab = &ab->wmi_ab;
+ struct wmi_init_cmd_param init_param;
+ struct target_resource_config config;
+
+ memset(&init_param, 0, sizeof(init_param));
+ memset(&config, 0, sizeof(config));
+
+ ab->hw_params.hw_ops->wmi_init_config(ab, &config);
+
+ if (test_bit(WMI_TLV_SERVICE_REG_CC_EXT_EVENT_SUPPORT,
+ ab->wmi_ab.svc_map))
+ config.is_reg_cc_ext_event_supported = 1;
+
+ memcpy(&wmi_ab->wlan_resource_config, &config, sizeof(config));
+
+ init_param.res_cfg = &wmi_ab->wlan_resource_config;
+ init_param.num_mem_chunks = wmi_ab->num_mem_chunks;
+ init_param.hw_mode_id = wmi_ab->preferred_hw_mode;
+ init_param.mem_chunks = wmi_ab->mem_chunks;
+
+ if (ab->hw_params.single_pdev_only)
+ init_param.hw_mode_id = WMI_HOST_HW_MODE_MAX;
+
+ init_param.num_band_to_mac = ab->num_radios;
+ ath11k_fill_band_to_mac_param(ab, init_param.band_to_mac);
+
+ return ath11k_init_cmd_send(&wmi_ab->wmi[0], &init_param);
+}
+
+int ath11k_wmi_vdev_spectral_conf(struct ath11k *ar,
+ struct ath11k_wmi_vdev_spectral_conf_param *param)
+{
+ struct ath11k_wmi_vdev_spectral_conf_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct ath11k_wmi_vdev_spectral_conf_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_VDEV_SPECTRAL_CONFIGURE_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ memcpy(&cmd->param, param, sizeof(*param));
+
+ ret = ath11k_wmi_cmd_send(ar->wmi, skb,
+ WMI_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send spectral scan config wmi cmd\n");
+ goto err;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "cmd vdev spectral scan configure vdev_id 0x%x\n",
+ param->vdev_id);
+
+ return 0;
+err:
+ dev_kfree_skb(skb);
+ return ret;
+}
+
+int ath11k_wmi_vdev_spectral_enable(struct ath11k *ar, u32 vdev_id,
+ u32 trigger, u32 enable)
+{
+ struct ath11k_wmi_vdev_spectral_enable_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct ath11k_wmi_vdev_spectral_enable_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_VDEV_SPECTRAL_ENABLE_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->vdev_id = vdev_id;
+ cmd->trigger_cmd = trigger;
+ cmd->enable_cmd = enable;
+
+ ret = ath11k_wmi_cmd_send(ar->wmi, skb,
+ WMI_VDEV_SPECTRAL_SCAN_ENABLE_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send spectral enable wmi cmd\n");
+ goto err;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "cmd vdev spectral scan enable vdev id 0x%x\n",
+ vdev_id);
+
+ return 0;
+err:
+ dev_kfree_skb(skb);
+ return ret;
+}
+
+int ath11k_wmi_pdev_dma_ring_cfg(struct ath11k *ar,
+ struct ath11k_wmi_pdev_dma_ring_cfg_req_cmd *param)
+{
+ struct ath11k_wmi_pdev_dma_ring_cfg_req_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct ath11k_wmi_pdev_dma_ring_cfg_req_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_DMA_RING_CFG_REQ) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->pdev_id = param->pdev_id;
+ cmd->module_id = param->module_id;
+ cmd->base_paddr_lo = param->base_paddr_lo;
+ cmd->base_paddr_hi = param->base_paddr_hi;
+ cmd->head_idx_paddr_lo = param->head_idx_paddr_lo;
+ cmd->head_idx_paddr_hi = param->head_idx_paddr_hi;
+ cmd->tail_idx_paddr_lo = param->tail_idx_paddr_lo;
+ cmd->tail_idx_paddr_hi = param->tail_idx_paddr_hi;
+ cmd->num_elems = param->num_elems;
+ cmd->buf_size = param->buf_size;
+ cmd->num_resp_per_event = param->num_resp_per_event;
+ cmd->event_timeout_ms = param->event_timeout_ms;
+
+ ret = ath11k_wmi_cmd_send(ar->wmi, skb,
+ WMI_PDEV_DMA_RING_CFG_REQ_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send dma ring cfg req wmi cmd\n");
+ goto err;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "cmd pdev dma ring cfg req pdev_id 0x%x\n",
+ param->pdev_id);
+
+ return 0;
+err:
+ dev_kfree_skb(skb);
+ return ret;
+}
+
+static int ath11k_wmi_tlv_dma_buf_entry_parse(struct ath11k_base *soc,
+ u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct wmi_tlv_dma_buf_release_parse *parse = data;
+
+ if (tag != WMI_TAG_DMA_BUF_RELEASE_ENTRY)
+ return -EPROTO;
+
+ if (parse->num_buf_entry >= parse->fixed.num_buf_release_entry)
+ return -ENOBUFS;
+
+ parse->num_buf_entry++;
+ return 0;
+}
+
+static int ath11k_wmi_tlv_dma_buf_meta_parse(struct ath11k_base *soc,
+ u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct wmi_tlv_dma_buf_release_parse *parse = data;
+
+ if (tag != WMI_TAG_DMA_BUF_RELEASE_SPECTRAL_META_DATA)
+ return -EPROTO;
+
+ if (parse->num_meta >= parse->fixed.num_meta_data_entry)
+ return -ENOBUFS;
+
+ parse->num_meta++;
+ return 0;
+}
+
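+/* Top-level parser for the DMA buffer release event. Two anonymous
+ * WMI_TAG_ARRAY_STRUCT TLVs are expected after the fixed param: the
+ * buffer entries first, then the spectral metadata; the *_done flags
+ * track which array is currently being parsed.
+ */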
+static int ath11k_wmi_tlv_dma_buf_parse(struct ath11k_base *ab,
+ u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct wmi_tlv_dma_buf_release_parse *parse = data;
+ int ret;
+
+ switch (tag) {
+ case WMI_TAG_DMA_BUF_RELEASE:
+ memcpy(&parse->fixed, ptr,
+ sizeof(struct ath11k_wmi_dma_buf_release_fixed_param));
+ parse->fixed.pdev_id = DP_HW2SW_MACID(parse->fixed.pdev_id);
+ break;
+ case WMI_TAG_ARRAY_STRUCT:
+ if (!parse->buf_entry_done) {
+ parse->num_buf_entry = 0;
+ parse->buf_entry = (struct wmi_dma_buf_release_entry *)ptr;
+
+ ret = ath11k_wmi_tlv_iter(ab, ptr, len,
+ ath11k_wmi_tlv_dma_buf_entry_parse,
+ parse);
+ if (ret) {
+ ath11k_warn(ab, "failed to parse dma buf entry tlv %d\n",
+ ret);
+ return ret;
+ }
+
+ parse->buf_entry_done = true;
+ } else if (!parse->meta_data_done) {
+ parse->num_meta = 0;
+ parse->meta_data = (struct wmi_dma_buf_release_meta_data *)ptr;
+
+ ret = ath11k_wmi_tlv_iter(ab, ptr, len,
+ ath11k_wmi_tlv_dma_buf_meta_parse,
+ parse);
+ if (ret) {
+ ath11k_warn(ab, "failed to parse dma buf meta tlv %d\n",
+ ret);
+ return ret;
+ }
+
+ parse->meta_data_done = true;
+ }
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static void ath11k_wmi_pdev_dma_ring_buf_release_event(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ struct wmi_tlv_dma_buf_release_parse parse = { };
+ struct ath11k_dbring_buf_release_event param;
+ int ret;
+
+ ret = ath11k_wmi_tlv_iter(ab, skb->data, skb->len,
+ ath11k_wmi_tlv_dma_buf_parse,
+ &parse);
+ if (ret) {
+ ath11k_warn(ab, "failed to parse dma buf release tlv %d\n", ret);
+ return;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "event pdev dma ring buf release");
+
+ param.fixed = parse.fixed;
+ param.buf_entry = parse.buf_entry;
+ param.num_buf_entry = parse.num_buf_entry;
+ param.meta_data = parse.meta_data;
+ param.num_meta = parse.num_meta;
+
+ ret = ath11k_dbring_buffer_release_event(ab, &param);
+ if (ret) {
+ ath11k_warn(ab, "failed to handle dma buf release event %d\n", ret);
+ return;
+ }
+}
+
+static int ath11k_wmi_tlv_hw_mode_caps_parse(struct ath11k_base *soc,
+ u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;
+ struct wmi_hw_mode_capabilities *hw_mode_cap;
+ u32 phy_map = 0;
+
+ if (tag != WMI_TAG_HW_MODE_CAPABILITIES)
+ return -EPROTO;
+
+ if (svc_rdy_ext->n_hw_mode_caps >= svc_rdy_ext->param.num_hw_modes)
+ return -ENOBUFS;
+
+ hw_mode_cap = container_of(ptr, struct wmi_hw_mode_capabilities,
+ hw_mode_id);
+ svc_rdy_ext->n_hw_mode_caps++;
+
+ phy_map = hw_mode_cap->phy_id_map;
+ while (phy_map) {
+ svc_rdy_ext->tot_phy_id++;
+ phy_map = phy_map >> 1;
+ }
+
+ return 0;
+}
+
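+/* Walk the advertised hw mode capabilities and remember the mode with
+ * the best (numerically lowest) priority in ath11k_hw_mode_pri_map as
+ * the preferred hw mode.
+ */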
+static int ath11k_wmi_tlv_hw_mode_caps(struct ath11k_base *soc,
+ u16 len, const void *ptr, void *data)
+{
+ struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;
+ struct wmi_hw_mode_capabilities *hw_mode_caps;
+ enum wmi_host_hw_mode_config_type mode, pref;
+ u32 i;
+ int ret;
+
+ svc_rdy_ext->n_hw_mode_caps = 0;
+ svc_rdy_ext->hw_mode_caps = (struct wmi_hw_mode_capabilities *)ptr;
+
+ ret = ath11k_wmi_tlv_iter(soc, ptr, len,
+ ath11k_wmi_tlv_hw_mode_caps_parse,
+ svc_rdy_ext);
+ if (ret) {
+ ath11k_warn(soc, "failed to parse tlv %d\n", ret);
+ return ret;
+ }
+
+ i = 0;
+ while (i < svc_rdy_ext->n_hw_mode_caps) {
+ hw_mode_caps = &svc_rdy_ext->hw_mode_caps[i];
+ mode = hw_mode_caps->hw_mode_id;
+ pref = soc->wmi_ab.preferred_hw_mode;
+
+ if (ath11k_hw_mode_pri_map[mode] < ath11k_hw_mode_pri_map[pref]) {
+ svc_rdy_ext->pref_hw_mode_caps = *hw_mode_caps;
+ soc->wmi_ab.preferred_hw_mode = mode;
+ }
+ i++;
+ }
+
+ ath11k_dbg(soc, ATH11K_DBG_WMI, "preferred_hw_mode:%d\n",
+ soc->wmi_ab.preferred_hw_mode);
+ if (soc->wmi_ab.preferred_hw_mode == WMI_HOST_HW_MODE_MAX)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int ath11k_wmi_tlv_mac_phy_caps_parse(struct ath11k_base *soc,
+ u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;
+
+ if (tag != WMI_TAG_MAC_PHY_CAPABILITIES)
+ return -EPROTO;
+
+ if (svc_rdy_ext->n_mac_phy_caps >= svc_rdy_ext->tot_phy_id)
+ return -ENOBUFS;
+
+ len = min_t(u16, len, sizeof(struct wmi_mac_phy_capabilities));
+ if (!svc_rdy_ext->n_mac_phy_caps) {
+ /* entries are indexed by struct size below, so size the
+ * array by the full struct even if this TLV is shorter
+ */
+ svc_rdy_ext->mac_phy_caps =
+ kcalloc(svc_rdy_ext->tot_phy_id,
+ sizeof(struct wmi_mac_phy_capabilities),
+ GFP_ATOMIC);
+ if (!svc_rdy_ext->mac_phy_caps)
+ return -ENOMEM;
+ }
+
+ memcpy(svc_rdy_ext->mac_phy_caps + svc_rdy_ext->n_mac_phy_caps, ptr, len);
+ svc_rdy_ext->n_mac_phy_caps++;
+ return 0;
+}
+
+static int ath11k_wmi_tlv_ext_hal_reg_caps_parse(struct ath11k_base *soc,
+ u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;
+
+ if (tag != WMI_TAG_HAL_REG_CAPABILITIES_EXT)
+ return -EPROTO;
+
+ if (svc_rdy_ext->n_ext_hal_reg_caps >= svc_rdy_ext->param.num_phy)
+ return -ENOBUFS;
+
+ svc_rdy_ext->n_ext_hal_reg_caps++;
+ return 0;
+}
+
+static int ath11k_wmi_tlv_ext_hal_reg_caps(struct ath11k_base *soc,
+ u16 len, const void *ptr, void *data)
+{
+ struct ath11k_pdev_wmi *wmi_handle = &soc->wmi_ab.wmi[0];
+ struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;
+ struct ath11k_hal_reg_capabilities_ext reg_cap;
+ int ret;
+ u32 i;
+
+ svc_rdy_ext->n_ext_hal_reg_caps = 0;
+ svc_rdy_ext->ext_hal_reg_caps = (struct wmi_hal_reg_capabilities_ext *)ptr;
+ ret = ath11k_wmi_tlv_iter(soc, ptr, len,
+ ath11k_wmi_tlv_ext_hal_reg_caps_parse,
+ svc_rdy_ext);
+ if (ret) {
+ ath11k_warn(soc, "failed to parse tlv %d\n", ret);
+ return ret;
+ }
+
+ for (i = 0; i < svc_rdy_ext->param.num_phy; i++) {
+ ret = ath11k_pull_reg_cap_svc_rdy_ext(wmi_handle,
+ svc_rdy_ext->soc_hal_reg_caps,
+ svc_rdy_ext->ext_hal_reg_caps, i,
+ &reg_cap);
+ if (ret) {
+ ath11k_warn(soc, "failed to extract reg cap %d\n", i);
+ return ret;
+ }
+
+ memcpy(&soc->hal_reg_cap[reg_cap.phy_id],
+ &reg_cap, sizeof(reg_cap));
+ }
+ return 0;
+}
+
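+/* Each bit set in the preferred hw mode's phy_id_map corresponds to one
+ * radio; pull the per-MAC capabilities for each and populate the pdevs.
+ */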
+static int ath11k_wmi_tlv_ext_soc_hal_reg_caps_parse(struct ath11k_base *soc,
+ u16 len, const void *ptr,
+ void *data)
+{
+ struct ath11k_pdev_wmi *wmi_handle = &soc->wmi_ab.wmi[0];
+ struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;
+ u8 hw_mode_id = svc_rdy_ext->pref_hw_mode_caps.hw_mode_id;
+ u32 phy_id_map;
+ int pdev_index = 0;
+ int ret;
+
+ svc_rdy_ext->soc_hal_reg_caps = (struct wmi_soc_hal_reg_capabilities *)ptr;
+ svc_rdy_ext->param.num_phy = svc_rdy_ext->soc_hal_reg_caps->num_phy;
+
+ soc->num_radios = 0;
+ soc->target_pdev_count = 0;
+ phy_id_map = svc_rdy_ext->pref_hw_mode_caps.phy_id_map;
+
+ while (phy_id_map && soc->num_radios < MAX_RADIOS) {
+ ret = ath11k_pull_mac_phy_cap_svc_ready_ext(wmi_handle,
+ svc_rdy_ext->hw_caps,
+ svc_rdy_ext->hw_mode_caps,
+ svc_rdy_ext->soc_hal_reg_caps,
+ svc_rdy_ext->mac_phy_caps,
+ hw_mode_id, soc->num_radios,
+ &soc->pdevs[pdev_index]);
+ if (ret) {
+ ath11k_warn(soc, "failed to extract mac caps, idx :%d\n",
+ soc->num_radios);
+ return ret;
+ }
+
+ soc->num_radios++;
+
+ /* For QCA6390, save mac_phy capability in the same pdev */
+ if (soc->hw_params.single_pdev_only)
+ pdev_index = 0;
+ else
+ pdev_index = soc->num_radios;
+
+ /* TODO: mac_phy_cap prints */
+ phy_id_map >>= 1;
+ }
+
+ /* For QCA6390, set num_radios to 1 because the host manages
+ * both the 2 GHz and 5 GHz radios in a single pdev.
+ * pdev_id 0 denotes the SoC level.
+ */
+ if (soc->hw_params.single_pdev_only) {
+ soc->num_radios = 1;
+ soc->pdevs[0].pdev_id = 0;
+ }
+
+ if (!soc->reg_info_store) {
+ soc->reg_info_store = kcalloc(soc->num_radios,
+ sizeof(*soc->reg_info_store),
+ GFP_ATOMIC);
+ if (!soc->reg_info_store)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int ath11k_wmi_tlv_dma_ring_caps_parse(struct ath11k_base *soc,
+ u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct wmi_tlv_dma_ring_caps_parse *parse = data;
+
+ if (tag != WMI_TAG_DMA_RING_CAPABILITIES)
+ return -EPROTO;
+
+ parse->n_dma_ring_caps++;
+ return 0;
+}
+
+static int ath11k_wmi_alloc_dbring_caps(struct ath11k_base *ab,
+ u32 num_cap)
+{
+ size_t sz;
+ void *ptr;
+
+ sz = num_cap * sizeof(struct ath11k_dbring_cap);
+ ptr = kzalloc(sz, GFP_ATOMIC);
+ if (!ptr)
+ return -ENOMEM;
+
+ ab->db_caps = ptr;
+ ab->num_db_cap = num_cap;
+
+ return 0;
+}
+
+static void ath11k_wmi_free_dbring_caps(struct ath11k_base *ab)
+{
+ kfree(ab->db_caps);
+ ab->db_caps = NULL;
+ ab->num_db_cap = 0;
+}
+
+static int ath11k_wmi_tlv_dma_ring_caps(struct ath11k_base *ab,
+ u16 len, const void *ptr, void *data)
+{
+ struct wmi_tlv_dma_ring_caps_parse *dma_caps_parse = data;
+ struct wmi_dma_ring_capabilities *dma_caps;
+ struct ath11k_dbring_cap *dir_buff_caps;
+ int ret;
+ u32 i;
+
+ dma_caps_parse->n_dma_ring_caps = 0;
+ dma_caps = (struct wmi_dma_ring_capabilities *)ptr;
+ ret = ath11k_wmi_tlv_iter(ab, ptr, len,
+ ath11k_wmi_tlv_dma_ring_caps_parse,
+ dma_caps_parse);
+ if (ret) {
+ ath11k_warn(ab, "failed to parse dma ring caps tlv %d\n", ret);
+ return ret;
+ }
+
+ if (!dma_caps_parse->n_dma_ring_caps)
+ return 0;
+
+ if (ab->num_db_cap) {
+ ath11k_warn(ab, "Already processed, so ignoring dma ring caps\n");
+ return 0;
+ }
+
+ ret = ath11k_wmi_alloc_dbring_caps(ab, dma_caps_parse->n_dma_ring_caps);
+ if (ret)
+ return ret;
+
+ dir_buff_caps = ab->db_caps;
+ for (i = 0; i < dma_caps_parse->n_dma_ring_caps; i++) {
+ if (dma_caps[i].module_id >= WMI_DIRECT_BUF_MAX) {
+ ath11k_warn(ab, "Invalid module id %d\n", dma_caps[i].module_id);
+ ret = -EINVAL;
+ goto free_dir_buff;
+ }
+
+ dir_buff_caps[i].id = dma_caps[i].module_id;
+ dir_buff_caps[i].pdev_id = DP_HW2SW_MACID(dma_caps[i].pdev_id);
+ dir_buff_caps[i].min_elem = dma_caps[i].min_elem;
+ dir_buff_caps[i].min_buf_sz = dma_caps[i].min_buf_sz;
+ dir_buff_caps[i].min_buf_align = dma_caps[i].min_buf_align;
+ }
+
+ return 0;
+
+free_dir_buff:
+ ath11k_wmi_free_dbring_caps(ab);
+ return ret;
+}
+
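+/* Dispatcher for the extended service-ready event. The event contains
+ * several anonymous WMI_TAG_ARRAY_STRUCT TLVs whose meaning is purely
+ * positional, so the *_done flags consume them in the order the
+ * firmware emits them: hw mode caps, mac/phy caps, ext HAL reg caps,
+ * chainmask combo/caps, OEM DMA ring caps, then DMA ring caps.
+ */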
+static int ath11k_wmi_tlv_svc_rdy_ext_parse(struct ath11k_base *ab,
+ u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct ath11k_pdev_wmi *wmi_handle = &ab->wmi_ab.wmi[0];
+ struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;
+ int ret;
+
+ switch (tag) {
+ case WMI_TAG_SERVICE_READY_EXT_EVENT:
+ ret = ath11k_pull_svc_ready_ext(wmi_handle, ptr,
+ &svc_rdy_ext->param);
+ if (ret) {
+ ath11k_warn(ab, "unable to extract ext params\n");
+ return ret;
+ }
+ break;
+
+ case WMI_TAG_SOC_MAC_PHY_HW_MODE_CAPS:
+ svc_rdy_ext->hw_caps = (struct wmi_soc_mac_phy_hw_mode_caps *)ptr;
+ svc_rdy_ext->param.num_hw_modes = svc_rdy_ext->hw_caps->num_hw_modes;
+ break;
+
+ case WMI_TAG_SOC_HAL_REG_CAPABILITIES:
+ ret = ath11k_wmi_tlv_ext_soc_hal_reg_caps_parse(ab, len, ptr,
+ svc_rdy_ext);
+ if (ret)
+ return ret;
+ break;
+
+ case WMI_TAG_ARRAY_STRUCT:
+ if (!svc_rdy_ext->hw_mode_done) {
+ ret = ath11k_wmi_tlv_hw_mode_caps(ab, len, ptr,
+ svc_rdy_ext);
+ if (ret)
+ return ret;
+
+ svc_rdy_ext->hw_mode_done = true;
+ } else if (!svc_rdy_ext->mac_phy_done) {
+ svc_rdy_ext->n_mac_phy_caps = 0;
+ ret = ath11k_wmi_tlv_iter(ab, ptr, len,
+ ath11k_wmi_tlv_mac_phy_caps_parse,
+ svc_rdy_ext);
+ if (ret) {
+ ath11k_warn(ab, "failed to parse tlv %d\n", ret);
+ return ret;
+ }
+
+ svc_rdy_ext->mac_phy_done = true;
+ } else if (!svc_rdy_ext->ext_hal_reg_done) {
+ ret = ath11k_wmi_tlv_ext_hal_reg_caps(ab, len, ptr,
+ svc_rdy_ext);
+ if (ret)
+ return ret;
+
+ svc_rdy_ext->ext_hal_reg_done = true;
+ } else if (!svc_rdy_ext->mac_phy_chainmask_combo_done) {
+ svc_rdy_ext->mac_phy_chainmask_combo_done = true;
+ } else if (!svc_rdy_ext->mac_phy_chainmask_cap_done) {
+ svc_rdy_ext->mac_phy_chainmask_cap_done = true;
+ } else if (!svc_rdy_ext->oem_dma_ring_cap_done) {
+ svc_rdy_ext->oem_dma_ring_cap_done = true;
+ } else if (!svc_rdy_ext->dma_ring_cap_done) {
+ ret = ath11k_wmi_tlv_dma_ring_caps(ab, len, ptr,
+ &svc_rdy_ext->dma_caps_parse);
+ if (ret)
+ return ret;
+
+ svc_rdy_ext->dma_ring_cap_done = true;
+ }
+ break;
+
+ default:
+ break;
+ }
+ return 0;
+}
+
+static int ath11k_service_ready_ext_event(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ struct wmi_tlv_svc_rdy_ext_parse svc_rdy_ext = { };
+ int ret;
+
+ ret = ath11k_wmi_tlv_iter(ab, skb->data, skb->len,
+ ath11k_wmi_tlv_svc_rdy_ext_parse,
+ &svc_rdy_ext);
+ if (ret) {
+ ath11k_warn(ab, "failed to parse tlv %d\n", ret);
+ goto err;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "event service ready ext");
+
+ if (!test_bit(WMI_TLV_SERVICE_EXT2_MSG, ab->wmi_ab.svc_map))
+ complete(&ab->wmi_ab.service_ready);
+
+ kfree(svc_rdy_ext.mac_phy_caps);
+ return 0;
+
+err:
+ ath11k_wmi_free_dbring_caps(ab);
+ return ret;
+}
+
+static int ath11k_wmi_tlv_svc_rdy_ext2_parse(struct ath11k_base *ab,
+ u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct wmi_tlv_svc_rdy_ext2_parse *parse = data;
+ int ret;
+
+ switch (tag) {
+ case WMI_TAG_ARRAY_STRUCT:
+ if (!parse->dma_ring_cap_done) {
+ ret = ath11k_wmi_tlv_dma_ring_caps(ab, len, ptr,
+ &parse->dma_caps_parse);
+ if (ret)
+ return ret;
+
+ parse->dma_ring_cap_done = true;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int ath11k_service_ready_ext2_event(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ struct wmi_tlv_svc_rdy_ext2_parse svc_rdy_ext2 = { };
+ int ret;
+
+ ret = ath11k_wmi_tlv_iter(ab, skb->data, skb->len,
+ ath11k_wmi_tlv_svc_rdy_ext2_parse,
+ &svc_rdy_ext2);
+ if (ret) {
+ ath11k_warn(ab, "failed to parse ext2 event tlv %d\n", ret);
+ goto err;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "event service ready ext2");
+
+ complete(&ab->wmi_ab.service_ready);
+
+ return 0;
+
+err:
+ ath11k_wmi_free_dbring_caps(ab);
+ return ret;
+}
+
+static int ath11k_pull_vdev_start_resp_tlv(struct ath11k_base *ab, struct sk_buff *skb,
+ struct wmi_vdev_start_resp_event *vdev_rsp)
+{
+ const void **tb;
+ const struct wmi_vdev_start_resp_event *ev;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_VDEV_START_RESPONSE_EVENT];
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch vdev start resp ev");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ memset(vdev_rsp, 0, sizeof(*vdev_rsp));
+
+ vdev_rsp->vdev_id = ev->vdev_id;
+ vdev_rsp->requestor_id = ev->requestor_id;
+ vdev_rsp->resp_type = ev->resp_type;
+ vdev_rsp->status = ev->status;
+ vdev_rsp->chain_mask = ev->chain_mask;
+ vdev_rsp->smps_mode = ev->smps_mode;
+ vdev_rsp->mac_id = ev->mac_id;
+ vdev_rsp->cfgd_tx_streams = ev->cfgd_tx_streams;
+ vdev_rsp->cfgd_rx_streams = ev->cfgd_rx_streams;
+ vdev_rsp->max_allowed_tx_power = ev->max_allowed_tx_power;
+
+ kfree(tb);
+ return 0;
+}
+
+static void ath11k_print_reg_rule(struct ath11k_base *ab, const char *band,
+ u32 num_reg_rules,
+ struct cur_reg_rule *reg_rule_ptr)
+{
+ struct cur_reg_rule *reg_rule = reg_rule_ptr;
+ u32 count;
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "number of reg rules in %s band: %d\n",
+ band, num_reg_rules);
+
+ for (count = 0; count < num_reg_rules; count++) {
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "reg rule %d: (%d - %d @ %d) (%d, %d) (FLAGS %d)\n",
+ count + 1, reg_rule->start_freq, reg_rule->end_freq,
+ reg_rule->max_bw, reg_rule->ant_gain,
+ reg_rule->reg_power, reg_rule->flags);
+ reg_rule++;
+ }
+}
+
+static struct cur_reg_rule
+*create_reg_rules_from_wmi(u32 num_reg_rules,
+ struct wmi_regulatory_rule_struct *wmi_reg_rule)
+{
+ struct cur_reg_rule *reg_rule_ptr;
+ u32 count;
+
+ reg_rule_ptr = kcalloc(num_reg_rules, sizeof(*reg_rule_ptr),
+ GFP_ATOMIC);
+
+ if (!reg_rule_ptr)
+ return NULL;
+
+ for (count = 0; count < num_reg_rules; count++) {
+ reg_rule_ptr[count].start_freq =
+ FIELD_GET(REG_RULE_START_FREQ,
+ wmi_reg_rule[count].freq_info);
+ reg_rule_ptr[count].end_freq =
+ FIELD_GET(REG_RULE_END_FREQ,
+ wmi_reg_rule[count].freq_info);
+ reg_rule_ptr[count].max_bw =
+ FIELD_GET(REG_RULE_MAX_BW,
+ wmi_reg_rule[count].bw_pwr_info);
+ reg_rule_ptr[count].reg_power =
+ FIELD_GET(REG_RULE_REG_PWR,
+ wmi_reg_rule[count].bw_pwr_info);
+ reg_rule_ptr[count].ant_gain =
+ FIELD_GET(REG_RULE_ANT_GAIN,
+ wmi_reg_rule[count].bw_pwr_info);
+ reg_rule_ptr[count].flags =
+ FIELD_GET(REG_RULE_FLAGS,
+ wmi_reg_rule[count].flag_info);
+ }
+
+ return reg_rule_ptr;
+}
+
+static int ath11k_pull_reg_chan_list_update_ev(struct ath11k_base *ab,
+ struct sk_buff *skb,
+ struct cur_regulatory_info *reg_info)
+{
+ const void **tb;
+ const struct wmi_reg_chan_list_cc_event *chan_list_event_hdr;
+ struct wmi_regulatory_rule_struct *wmi_reg_rule;
+ u32 num_2ghz_reg_rules, num_5ghz_reg_rules;
+ int ret;
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "processing regulatory channel list\n");
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ chan_list_event_hdr = tb[WMI_TAG_REG_CHAN_LIST_CC_EVENT];
+ if (!chan_list_event_hdr) {
+ ath11k_warn(ab, "failed to fetch reg chan list update ev\n");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ reg_info->num_2ghz_reg_rules = chan_list_event_hdr->num_2ghz_reg_rules;
+ reg_info->num_5ghz_reg_rules = chan_list_event_hdr->num_5ghz_reg_rules;
+
+ if (!(reg_info->num_2ghz_reg_rules + reg_info->num_5ghz_reg_rules)) {
+ ath11k_warn(ab, "No regulatory rules available in the event info\n");
+ kfree(tb);
+ return -EINVAL;
+ }
+
+ memcpy(reg_info->alpha2, &chan_list_event_hdr->alpha2,
+ REG_ALPHA2_LEN);
+ reg_info->dfs_region = chan_list_event_hdr->dfs_region;
+ reg_info->phybitmap = chan_list_event_hdr->phybitmap;
+ reg_info->num_phy = chan_list_event_hdr->num_phy;
+ reg_info->phy_id = chan_list_event_hdr->phy_id;
+ reg_info->ctry_code = chan_list_event_hdr->country_id;
+ reg_info->reg_dmn_pair = chan_list_event_hdr->domain_code;
+
+ reg_info->status_code =
+ ath11k_wmi_cc_setting_code_to_reg(chan_list_event_hdr->status_code);
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "status_code %s",
+ ath11k_cc_status_to_str(reg_info->status_code));
+
+ reg_info->is_ext_reg_event = false;
+
+ reg_info->min_bw_2ghz = chan_list_event_hdr->min_bw_2ghz;
+ reg_info->max_bw_2ghz = chan_list_event_hdr->max_bw_2ghz;
+ reg_info->min_bw_5ghz = chan_list_event_hdr->min_bw_5ghz;
+ reg_info->max_bw_5ghz = chan_list_event_hdr->max_bw_5ghz;
+
+ num_2ghz_reg_rules = reg_info->num_2ghz_reg_rules;
+ num_5ghz_reg_rules = reg_info->num_5ghz_reg_rules;
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "cc %s dsf %d BW: min_2ghz %d max_2ghz %d min_5ghz %d max_5ghz %d",
+ reg_info->alpha2, reg_info->dfs_region,
+ reg_info->min_bw_2ghz, reg_info->max_bw_2ghz,
+ reg_info->min_bw_5ghz, reg_info->max_bw_5ghz);
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "num_2ghz_reg_rules %d num_5ghz_reg_rules %d",
+ num_2ghz_reg_rules, num_5ghz_reg_rules);
+
+ wmi_reg_rule =
+ (struct wmi_regulatory_rule_struct *)((u8 *)chan_list_event_hdr
+ + sizeof(*chan_list_event_hdr)
+ + sizeof(struct wmi_tlv));
+
+ if (num_2ghz_reg_rules) {
+ reg_info->reg_rules_2ghz_ptr =
+ create_reg_rules_from_wmi(num_2ghz_reg_rules,
+ wmi_reg_rule);
+ if (!reg_info->reg_rules_2ghz_ptr) {
+ kfree(tb);
+ ath11k_warn(ab, "Unable to Allocate memory for 2 GHz rules\n");
+ return -ENOMEM;
+ }
+
+ ath11k_print_reg_rule(ab, "2 GHz",
+ num_2ghz_reg_rules,
+ reg_info->reg_rules_2ghz_ptr);
+ }
+
+ if (num_5ghz_reg_rules) {
+ wmi_reg_rule += num_2ghz_reg_rules;
+ reg_info->reg_rules_5ghz_ptr =
+ create_reg_rules_from_wmi(num_5ghz_reg_rules,
+ wmi_reg_rule);
+ if (!reg_info->reg_rules_5ghz_ptr) {
+ kfree(tb);
+ ath11k_warn(ab, "Unable to Allocate memory for 5 GHz rules\n");
+ return -ENOMEM;
+ }
+
+ ath11k_print_reg_rule(ab, "5 GHz",
+ num_5ghz_reg_rules,
+ reg_info->reg_rules_5ghz_ptr);
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "processed regulatory channel list\n");
+
+ kfree(tb);
+ return 0;
+}
+
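+/* Convert firmware-packed extended regulatory rules into host
+ * cur_reg_rule entries, including the 6 GHz PSD power information.
+ */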
+static struct cur_reg_rule
+*create_ext_reg_rules_from_wmi(u32 num_reg_rules,
+ struct wmi_regulatory_ext_rule *wmi_reg_rule)
+{
+ struct cur_reg_rule *reg_rule_ptr;
+ u32 count;
+
+ reg_rule_ptr = kcalloc(num_reg_rules, sizeof(*reg_rule_ptr), GFP_ATOMIC);
+
+ if (!reg_rule_ptr)
+ return NULL;
+
+ for (count = 0; count < num_reg_rules; count++) {
+ reg_rule_ptr[count].start_freq =
+ u32_get_bits(wmi_reg_rule[count].freq_info,
+ REG_RULE_START_FREQ);
+ reg_rule_ptr[count].end_freq =
+ u32_get_bits(wmi_reg_rule[count].freq_info,
+ REG_RULE_END_FREQ);
+ reg_rule_ptr[count].max_bw =
+ u32_get_bits(wmi_reg_rule[count].bw_pwr_info,
+ REG_RULE_MAX_BW);
+ reg_rule_ptr[count].reg_power =
+ u32_get_bits(wmi_reg_rule[count].bw_pwr_info,
+ REG_RULE_REG_PWR);
+ reg_rule_ptr[count].ant_gain =
+ u32_get_bits(wmi_reg_rule[count].bw_pwr_info,
+ REG_RULE_ANT_GAIN);
+ reg_rule_ptr[count].flags =
+ u32_get_bits(wmi_reg_rule[count].flag_info,
+ REG_RULE_FLAGS);
+ reg_rule_ptr[count].psd_flag =
+ u32_get_bits(wmi_reg_rule[count].psd_power_info,
+ REG_RULE_PSD_INFO);
+ reg_rule_ptr[count].psd_eirp =
+ u32_get_bits(wmi_reg_rule[count].psd_power_info,
+ REG_RULE_PSD_EIRP);
+ }
+
+ return reg_rule_ptr;
+}
+
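+/* Count "5 GHz" rules whose start frequency actually lies in the 6 GHz range */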
+static u8
+ath11k_invalid_5ghz_reg_ext_rules_from_wmi(u32 num_reg_rules,
+ const struct wmi_regulatory_ext_rule *rule)
+{
+ u8 num_invalid_5ghz_rules = 0;
+ u32 count, start_freq;
+
+ for (count = 0; count < num_reg_rules; count++) {
+ start_freq = u32_get_bits(rule[count].freq_info,
+ REG_RULE_START_FREQ);
+
+ if (start_freq >= ATH11K_MIN_6G_FREQ)
+ num_invalid_5ghz_rules++;
+ }
+
+ return num_invalid_5ghz_rules;
+}
+
+static int ath11k_pull_reg_chan_list_ext_update_ev(struct ath11k_base *ab,
+ struct sk_buff *skb,
+ struct cur_regulatory_info *reg_info)
+{
+ const void **tb;
+ const struct wmi_reg_chan_list_cc_ext_event *ev;
+ struct wmi_regulatory_ext_rule *ext_wmi_reg_rule;
+ u32 num_2ghz_reg_rules, num_5ghz_reg_rules;
+ u32 num_6ghz_reg_rules_ap[WMI_REG_CURRENT_MAX_AP_TYPE];
+ u32 num_6ghz_client[WMI_REG_CURRENT_MAX_AP_TYPE][WMI_REG_MAX_CLIENT_TYPE];
+ u32 total_reg_rules = 0;
+ int ret, i, j, num_invalid_5ghz_ext_rules = 0;
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "processing regulatory ext channel list\n");
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_REG_CHAN_LIST_CC_EXT_EVENT];
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch reg chan list ext update ev\n");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ reg_info->num_2ghz_reg_rules = ev->num_2ghz_reg_rules;
+ reg_info->num_5ghz_reg_rules = ev->num_5ghz_reg_rules;
+ reg_info->num_6ghz_rules_ap[WMI_REG_INDOOR_AP] =
+ ev->num_6ghz_reg_rules_ap_lpi;
+ reg_info->num_6ghz_rules_ap[WMI_REG_STANDARD_POWER_AP] =
+ ev->num_6ghz_reg_rules_ap_sp;
+ reg_info->num_6ghz_rules_ap[WMI_REG_VERY_LOW_POWER_AP] =
+ ev->num_6ghz_reg_rules_ap_vlp;
+
+ for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
+ reg_info->num_6ghz_rules_client[WMI_REG_INDOOR_AP][i] =
+ ev->num_6ghz_reg_rules_client_lpi[i];
+ reg_info->num_6ghz_rules_client[WMI_REG_STANDARD_POWER_AP][i] =
+ ev->num_6ghz_reg_rules_client_sp[i];
+ reg_info->num_6ghz_rules_client[WMI_REG_VERY_LOW_POWER_AP][i] =
+ ev->num_6ghz_reg_rules_client_vlp[i];
+ }
+
+ num_2ghz_reg_rules = reg_info->num_2ghz_reg_rules;
+ num_5ghz_reg_rules = reg_info->num_5ghz_reg_rules;
+
+ total_reg_rules += num_2ghz_reg_rules;
+ total_reg_rules += num_5ghz_reg_rules;
+
+ if ((num_2ghz_reg_rules > MAX_REG_RULES) ||
+ (num_5ghz_reg_rules > MAX_REG_RULES)) {
+ ath11k_warn(ab, "Num reg rules for 2.4 GHz/5 GHz exceeds max limit (num_2ghz_reg_rules: %d num_5ghz_reg_rules: %d max_rules: %d)\n",
+ num_2ghz_reg_rules, num_5ghz_reg_rules, MAX_REG_RULES);
+ kfree(tb);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++) {
+ num_6ghz_reg_rules_ap[i] = reg_info->num_6ghz_rules_ap[i];
+
+ if (num_6ghz_reg_rules_ap[i] > MAX_6GHZ_REG_RULES) {
+ ath11k_warn(ab, "Num 6 GHz reg rules for AP mode(%d) exceeds max limit (num_6ghz_reg_rules_ap: %d, max_rules: %d)\n",
+ i, num_6ghz_reg_rules_ap[i], MAX_6GHZ_REG_RULES);
+ kfree(tb);
+ return -EINVAL;
+ }
+
+ total_reg_rules += num_6ghz_reg_rules_ap[i];
+ }
+
+ for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
+ num_6ghz_client[WMI_REG_INDOOR_AP][i] =
+ reg_info->num_6ghz_rules_client[WMI_REG_INDOOR_AP][i];
+ total_reg_rules += num_6ghz_client[WMI_REG_INDOOR_AP][i];
+
+ num_6ghz_client[WMI_REG_STANDARD_POWER_AP][i] =
+ reg_info->num_6ghz_rules_client[WMI_REG_STANDARD_POWER_AP][i];
+ total_reg_rules += num_6ghz_client[WMI_REG_STANDARD_POWER_AP][i];
+
+ num_6ghz_client[WMI_REG_VERY_LOW_POWER_AP][i] =
+ reg_info->num_6ghz_rules_client[WMI_REG_VERY_LOW_POWER_AP][i];
+ total_reg_rules += num_6ghz_client[WMI_REG_VERY_LOW_POWER_AP][i];
+
+ if ((num_6ghz_client[WMI_REG_INDOOR_AP][i] > MAX_6GHZ_REG_RULES) ||
+ (num_6ghz_client[WMI_REG_STANDARD_POWER_AP][i] >
+ MAX_6GHZ_REG_RULES) ||
+ (num_6ghz_client[WMI_REG_VERY_LOW_POWER_AP][i] >
+ MAX_6GHZ_REG_RULES)) {
+ ath11k_warn(ab,
+ "Num 6 GHz client reg rules exceeds max limit, for client(type: %d)\n",
+ i);
+ kfree(tb);
+ return -EINVAL;
+ }
+ }
+
+ if (!total_reg_rules) {
+ ath11k_warn(ab, "No reg rules available\n");
+ kfree(tb);
+ return -EINVAL;
+ }
+
+ memcpy(reg_info->alpha2, &ev->alpha2, REG_ALPHA2_LEN);
+
+ reg_info->dfs_region = ev->dfs_region;
+ reg_info->phybitmap = ev->phybitmap;
+ reg_info->num_phy = ev->num_phy;
+ reg_info->phy_id = ev->phy_id;
+ reg_info->ctry_code = ev->country_id;
+ reg_info->reg_dmn_pair = ev->domain_code;
+
+ reg_info->status_code =
+ ath11k_wmi_cc_setting_code_to_reg(ev->status_code);
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "status_code %s",
+ ath11k_cc_status_to_str(reg_info->status_code));
+
+ reg_info->is_ext_reg_event = true;
+
+ reg_info->min_bw_2ghz = ev->min_bw_2ghz;
+ reg_info->max_bw_2ghz = ev->max_bw_2ghz;
+ reg_info->min_bw_5ghz = ev->min_bw_5ghz;
+ reg_info->max_bw_5ghz = ev->max_bw_5ghz;
+
+ reg_info->min_bw_6ghz_ap[WMI_REG_INDOOR_AP] =
+ ev->min_bw_6ghz_ap_lpi;
+ reg_info->max_bw_6ghz_ap[WMI_REG_INDOOR_AP] =
+ ev->max_bw_6ghz_ap_lpi;
+ reg_info->min_bw_6ghz_ap[WMI_REG_STANDARD_POWER_AP] =
+ ev->min_bw_6ghz_ap_sp;
+ reg_info->max_bw_6ghz_ap[WMI_REG_STANDARD_POWER_AP] =
+ ev->max_bw_6ghz_ap_sp;
+ reg_info->min_bw_6ghz_ap[WMI_REG_VERY_LOW_POWER_AP] =
+ ev->min_bw_6ghz_ap_vlp;
+ reg_info->max_bw_6ghz_ap[WMI_REG_VERY_LOW_POWER_AP] =
+ ev->max_bw_6ghz_ap_vlp;
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "6 GHz AP BW: LPI (%d - %d), SP (%d - %d), VLP (%d - %d)\n",
+ reg_info->min_bw_6ghz_ap[WMI_REG_INDOOR_AP],
+ reg_info->max_bw_6ghz_ap[WMI_REG_INDOOR_AP],
+ reg_info->min_bw_6ghz_ap[WMI_REG_STANDARD_POWER_AP],
+ reg_info->max_bw_6ghz_ap[WMI_REG_STANDARD_POWER_AP],
+ reg_info->min_bw_6ghz_ap[WMI_REG_VERY_LOW_POWER_AP],
+ reg_info->max_bw_6ghz_ap[WMI_REG_VERY_LOW_POWER_AP]);
+
+ for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
+ reg_info->min_bw_6ghz_client[WMI_REG_INDOOR_AP][i] =
+ ev->min_bw_6ghz_client_lpi[i];
+ reg_info->max_bw_6ghz_client[WMI_REG_INDOOR_AP][i] =
+ ev->max_bw_6ghz_client_lpi[i];
+ reg_info->min_bw_6ghz_client[WMI_REG_STANDARD_POWER_AP][i] =
+ ev->min_bw_6ghz_client_sp[i];
+ reg_info->max_bw_6ghz_client[WMI_REG_STANDARD_POWER_AP][i] =
+ ev->max_bw_6ghz_client_sp[i];
+ reg_info->min_bw_6ghz_client[WMI_REG_VERY_LOW_POWER_AP][i] =
+ ev->min_bw_6ghz_client_vlp[i];
+ reg_info->max_bw_6ghz_client[WMI_REG_VERY_LOW_POWER_AP][i] =
+ ev->max_bw_6ghz_client_vlp[i];
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "6 GHz %s BW: LPI (%d - %d), SP (%d - %d), VLP (%d - %d)\n",
+ ath11k_6ghz_client_type_to_str(i),
+ reg_info->min_bw_6ghz_client[WMI_REG_INDOOR_AP][i],
+ reg_info->max_bw_6ghz_client[WMI_REG_INDOOR_AP][i],
+ reg_info->min_bw_6ghz_client[WMI_REG_STANDARD_POWER_AP][i],
+ reg_info->max_bw_6ghz_client[WMI_REG_STANDARD_POWER_AP][i],
+ reg_info->min_bw_6ghz_client[WMI_REG_VERY_LOW_POWER_AP][i],
+ reg_info->max_bw_6ghz_client[WMI_REG_VERY_LOW_POWER_AP][i]);
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "cc_ext %s dfs %d BW: min_2ghz %d max_2ghz %d min_5ghz %d max_5ghz %d phy_bitmap 0x%x",
+ reg_info->alpha2, reg_info->dfs_region,
+ reg_info->min_bw_2ghz, reg_info->max_bw_2ghz,
+ reg_info->min_bw_5ghz, reg_info->max_bw_5ghz,
+ reg_info->phybitmap);
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "num_2ghz_reg_rules %d num_5ghz_reg_rules %d",
+ num_2ghz_reg_rules, num_5ghz_reg_rules);
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "num_6ghz_reg_rules_ap_lpi: %d num_6ghz_reg_rules_ap_sp: %d num_6ghz_reg_rules_ap_vlp: %d",
+ num_6ghz_reg_rules_ap[WMI_REG_INDOOR_AP],
+ num_6ghz_reg_rules_ap[WMI_REG_STANDARD_POWER_AP],
+ num_6ghz_reg_rules_ap[WMI_REG_VERY_LOW_POWER_AP]);
+
+ j = WMI_REG_DEFAULT_CLIENT;
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "6 GHz Regular client: num_6ghz_reg_rules_lpi: %d num_6ghz_reg_rules_sp: %d num_6ghz_reg_rules_vlp: %d",
+ num_6ghz_client[WMI_REG_INDOOR_AP][j],
+ num_6ghz_client[WMI_REG_STANDARD_POWER_AP][j],
+ num_6ghz_client[WMI_REG_VERY_LOW_POWER_AP][j]);
+
+ j = WMI_REG_SUBORDINATE_CLIENT;
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "6 GHz Subordinate client: num_6ghz_reg_rules_lpi: %d num_6ghz_reg_rules_sp: %d num_6ghz_reg_rules_vlp: %d",
+ num_6ghz_client[WMI_REG_INDOOR_AP][j],
+ num_6ghz_client[WMI_REG_STANDARD_POWER_AP][j],
+ num_6ghz_client[WMI_REG_VERY_LOW_POWER_AP][j]);
+
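+ /* the ext reg rule array follows the fixed event header and its TLV header */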
+ ext_wmi_reg_rule =
+ (struct wmi_regulatory_ext_rule *)((u8 *)ev + sizeof(*ev) +
+ sizeof(struct wmi_tlv));
+ if (num_2ghz_reg_rules) {
+ reg_info->reg_rules_2ghz_ptr =
+ create_ext_reg_rules_from_wmi(num_2ghz_reg_rules,
+ ext_wmi_reg_rule);
+
+ if (!reg_info->reg_rules_2ghz_ptr) {
+ kfree(tb);
+ ath11k_warn(ab, "Unable to Allocate memory for 2 GHz rules\n");
+ return -ENOMEM;
+ }
+
+ ath11k_print_reg_rule(ab, "2 GHz",
+ num_2ghz_reg_rules,
+ reg_info->reg_rules_2ghz_ptr);
+ }
+
+ ext_wmi_reg_rule += num_2ghz_reg_rules;
+
+ /* Firmware might include 6 GHz reg rules in the 5 GHz rule list
+ * for a few countries along with separate 6 GHz rules.
+ * Having the same 6 GHz reg rule in both the 5 GHz and 6 GHz rule
+ * lists causes the intersect check to be true, and the same rules
+ * will be shown multiple times in the iw command output.
+ * Hence, avoid parsing 6 GHz rules from the 5 GHz reg rule list.
+ */
+ num_invalid_5ghz_ext_rules =
+ ath11k_invalid_5ghz_reg_ext_rules_from_wmi(num_5ghz_reg_rules,
+ ext_wmi_reg_rule);
+
+ if (num_invalid_5ghz_ext_rules) {
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "CC: %s 5 GHz reg rules number %d from fw, %d number of invalid 5 GHz rules",
+ reg_info->alpha2, reg_info->num_5ghz_reg_rules,
+ num_invalid_5ghz_ext_rules);
+
+ num_5ghz_reg_rules -= num_invalid_5ghz_ext_rules;
+ reg_info->num_5ghz_reg_rules = num_5ghz_reg_rules;
+ }
+
+ if (num_5ghz_reg_rules) {
+ reg_info->reg_rules_5ghz_ptr =
+ create_ext_reg_rules_from_wmi(num_5ghz_reg_rules,
+ ext_wmi_reg_rule);
+
+ if (!reg_info->reg_rules_5ghz_ptr) {
+ kfree(tb);
+ ath11k_warn(ab, "Unable to Allocate memory for 5 GHz rules\n");
+ return -ENOMEM;
+ }
+
+ ath11k_print_reg_rule(ab, "5 GHz",
+ num_5ghz_reg_rules,
+ reg_info->reg_rules_5ghz_ptr);
+ }
+
+ /* We have adjusted the number of 5 GHz reg rules above, but the
+ * pointer still has to be advanced past all of the original rules,
+ * including the invalid ones, in ext_wmi_reg_rule.
+ *
+ * NOTE: num_invalid_5ghz_ext_rules is 0 in all other cases.
+ */
+ ext_wmi_reg_rule += (num_5ghz_reg_rules + num_invalid_5ghz_ext_rules);
+
+ for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++) {
+ reg_info->reg_rules_6ghz_ap_ptr[i] =
+ create_ext_reg_rules_from_wmi(num_6ghz_reg_rules_ap[i],
+ ext_wmi_reg_rule);
+
+ if (!reg_info->reg_rules_6ghz_ap_ptr[i]) {
+ kfree(tb);
+ ath11k_warn(ab, "Unable to Allocate memory for 6 GHz AP rules\n");
+ return -ENOMEM;
+ }
+
+ ath11k_print_reg_rule(ab, ath11k_6ghz_ap_type_to_str(i),
+ num_6ghz_reg_rules_ap[i],
+ reg_info->reg_rules_6ghz_ap_ptr[i]);
+
+ ext_wmi_reg_rule += num_6ghz_reg_rules_ap[i];
+ }
+
+ for (j = 0; j < WMI_REG_CURRENT_MAX_AP_TYPE; j++) {
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "6 GHz AP type %s", ath11k_6ghz_ap_type_to_str(j));
+
+ for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
+ reg_info->reg_rules_6ghz_client_ptr[j][i] =
+ create_ext_reg_rules_from_wmi(num_6ghz_client[j][i],
+ ext_wmi_reg_rule);
+
+ if (!reg_info->reg_rules_6ghz_client_ptr[j][i]) {
+ kfree(tb);
+ ath11k_warn(ab, "Unable to Allocate memory for 6 GHz client rules\n");
+ return -ENOMEM;
+ }
+
+ ath11k_print_reg_rule(ab,
+ ath11k_6ghz_client_type_to_str(i),
+ num_6ghz_client[j][i],
+ reg_info->reg_rules_6ghz_client_ptr[j][i]);
+
+ ext_wmi_reg_rule += num_6ghz_client[j][i];
+ }
+ }
+
+ reg_info->client_type = ev->client_type;
+ reg_info->rnr_tpe_usable = ev->rnr_tpe_usable;
+ reg_info->unspecified_ap_usable =
+ ev->unspecified_ap_usable;
+ reg_info->domain_code_6ghz_ap[WMI_REG_INDOOR_AP] =
+ ev->domain_code_6ghz_ap_lpi;
+ reg_info->domain_code_6ghz_ap[WMI_REG_STANDARD_POWER_AP] =
+ ev->domain_code_6ghz_ap_sp;
+ reg_info->domain_code_6ghz_ap[WMI_REG_VERY_LOW_POWER_AP] =
+ ev->domain_code_6ghz_ap_vlp;
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "6 GHz reg info client type %s rnr_tpe_usable %d unspecified_ap_usable %d AP sub domain: lpi %s, sp %s, vlp %s\n",
+ ath11k_6ghz_client_type_to_str(reg_info->client_type),
+ reg_info->rnr_tpe_usable,
+ reg_info->unspecified_ap_usable,
+ ath11k_sub_reg_6ghz_to_str(ev->domain_code_6ghz_ap_lpi),
+ ath11k_sub_reg_6ghz_to_str(ev->domain_code_6ghz_ap_sp),
+ ath11k_sub_reg_6ghz_to_str(ev->domain_code_6ghz_ap_vlp));
+
+ for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
+ reg_info->domain_code_6ghz_client[WMI_REG_INDOOR_AP][i] =
+ ev->domain_code_6ghz_client_lpi[i];
+ reg_info->domain_code_6ghz_client[WMI_REG_STANDARD_POWER_AP][i] =
+ ev->domain_code_6ghz_client_sp[i];
+ reg_info->domain_code_6ghz_client[WMI_REG_VERY_LOW_POWER_AP][i] =
+ ev->domain_code_6ghz_client_vlp[i];
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "6 GHz client type %s client sub domain: lpi %s, sp %s, vlp %s\n",
+ ath11k_6ghz_client_type_to_str(i),
+ ath11k_sub_reg_6ghz_to_str(ev->domain_code_6ghz_client_lpi[i]),
+ ath11k_sub_reg_6ghz_to_str(ev->domain_code_6ghz_client_sp[i]),
+ ath11k_sub_reg_6ghz_to_str(ev->domain_code_6ghz_client_vlp[i])
+ );
+ }
+
+ reg_info->domain_code_6ghz_super_id = ev->domain_code_6ghz_super_id;
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "6 GHz client_type %s 6 GHz super domain %s",
+ ath11k_6ghz_client_type_to_str(reg_info->client_type),
+ ath11k_super_reg_6ghz_to_str(reg_info->domain_code_6ghz_super_id));
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "processed regulatory ext channel list\n");
+
+ kfree(tb);
+ return 0;
+}
+
+static int ath11k_pull_peer_del_resp_ev(struct ath11k_base *ab, struct sk_buff *skb,
+ struct wmi_peer_delete_resp_event *peer_del_resp)
+{
+ const void **tb;
+ const struct wmi_peer_delete_resp_event *ev;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_PEER_DELETE_RESP_EVENT];
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch peer delete resp ev");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ memset(peer_del_resp, 0, sizeof(*peer_del_resp));
+
+ peer_del_resp->vdev_id = ev->vdev_id;
+ ether_addr_copy(peer_del_resp->peer_macaddr.addr,
+ ev->peer_macaddr.addr);
+
+ kfree(tb);
+ return 0;
+}
+
+static int ath11k_pull_vdev_del_resp_ev(struct ath11k_base *ab,
+ struct sk_buff *skb,
+ u32 *vdev_id)
+{
+ const void **tb;
+ const struct wmi_vdev_delete_resp_event *ev;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_VDEV_DELETE_RESP_EVENT];
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch vdev delete resp ev");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ *vdev_id = ev->vdev_id;
+
+ kfree(tb);
+ return 0;
+}
+
+static int ath11k_pull_bcn_tx_status_ev(struct ath11k_base *ab,
+ struct sk_buff *skb,
+ u32 *vdev_id, u32 *tx_status)
+{
+ const void **tb;
+ const struct wmi_bcn_tx_status_event *ev;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_OFFLOAD_BCN_TX_STATUS_EVENT];
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch bcn tx status ev");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ *vdev_id = ev->vdev_id;
+ *tx_status = ev->tx_status;
+
+ kfree(tb);
+ return 0;
+}
+
+static int ath11k_pull_vdev_stopped_param_tlv(struct ath11k_base *ab, struct sk_buff *skb,
+ u32 *vdev_id)
+{
+ const void **tb;
+ const struct wmi_vdev_stopped_event *ev;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_VDEV_STOPPED_EVENT];
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch vdev stop ev");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ *vdev_id = ev->vdev_id;
+
+ kfree(tb);
+ return 0;
+}
+
+static int ath11k_wmi_tlv_mgmt_rx_parse(struct ath11k_base *ab,
+ u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct wmi_tlv_mgmt_rx_parse *parse = data;
+
+ switch (tag) {
+ case WMI_TAG_MGMT_RX_HDR:
+ parse->fixed = ptr;
+ break;
+ case WMI_TAG_ARRAY_BYTE:
+ if (!parse->frame_buf_done) {
+ parse->frame_buf = ptr;
+ parse->frame_buf_done = true;
+ }
+ break;
+ }
+ return 0;
+}
+
+static int ath11k_pull_mgmt_rx_params_tlv(struct ath11k_base *ab,
+ struct sk_buff *skb,
+ struct mgmt_rx_event_params *hdr)
+{
+ struct wmi_tlv_mgmt_rx_parse parse = { };
+ const struct wmi_mgmt_rx_hdr *ev;
+ const u8 *frame;
+ int ret;
+
+ ret = ath11k_wmi_tlv_iter(ab, skb->data, skb->len,
+ ath11k_wmi_tlv_mgmt_rx_parse,
+ &parse);
+ if (ret) {
+ ath11k_warn(ab, "failed to parse mgmt rx tlv %d\n",
+ ret);
+ return ret;
+ }
+
+ ev = parse.fixed;
+ frame = parse.frame_buf;
+
+ if (!ev || !frame) {
+ ath11k_warn(ab, "failed to fetch mgmt rx hdr");
+ return -EPROTO;
+ }
+
+ hdr->pdev_id = ev->pdev_id;
+ hdr->chan_freq = ev->chan_freq;
+ hdr->channel = ev->channel;
+ hdr->snr = ev->snr;
+ hdr->rate = ev->rate;
+ hdr->phy_mode = ev->phy_mode;
+ hdr->buf_len = ev->buf_len;
+ hdr->status = ev->status;
+ hdr->flags = ev->flags;
+ hdr->rssi = ev->rssi;
+ hdr->tsf_delta = ev->tsf_delta;
+ memcpy(hdr->rssi_ctl, ev->rssi_ctl, sizeof(hdr->rssi_ctl));
+
+ if (skb->len < (frame - skb->data) + hdr->buf_len) {
+ ath11k_warn(ab, "invalid length in mgmt rx hdr ev");
+ return -EPROTO;
+ }
+
+ /* shift the sk_buff to point to `frame` */
+ skb_trim(skb, 0);
+ skb_put(skb, frame - skb->data);
+ skb_pull(skb, frame - skb->data);
+ skb_put(skb, hdr->buf_len);
+
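+ /* undo any CE byte swapping applied to the frame payload */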
+ ath11k_ce_byte_swap(skb->data, hdr->buf_len);
+
+ return 0;
+}
+
+static int wmi_process_mgmt_tx_comp(struct ath11k *ar,
+ struct wmi_mgmt_tx_compl_event *tx_compl_param)
+{
+ struct sk_buff *msdu;
+ struct ieee80211_tx_info *info;
+ struct ath11k_skb_cb *skb_cb;
+ int num_mgmt;
+
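+ /* look up the completed MSDU by descriptor id and remove it from the IDR */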
+ spin_lock_bh(&ar->txmgmt_idr_lock);
+ msdu = idr_find(&ar->txmgmt_idr, tx_compl_param->desc_id);
+
+ if (!msdu) {
+ ath11k_warn(ar->ab, "received mgmt tx compl for invalid msdu_id: %d\n",
+ tx_compl_param->desc_id);
+ spin_unlock_bh(&ar->txmgmt_idr_lock);
+ return -ENOENT;
+ }
+
+ idr_remove(&ar->txmgmt_idr, tx_compl_param->desc_id);
+ spin_unlock_bh(&ar->txmgmt_idr_lock);
+
+ skb_cb = ATH11K_SKB_CB(msdu);
+ dma_unmap_single(ar->ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
+
+ info = IEEE80211_SKB_CB(msdu);
+ memset(&info->status, 0, sizeof(info->status));
+ info->status.rates[0].idx = -1;
+
+ if ((!(info->flags & IEEE80211_TX_CTL_NO_ACK)) &&
+ !tx_compl_param->status) {
+ info->flags |= IEEE80211_TX_STAT_ACK;
+ if (test_bit(WMI_TLV_SERVICE_TX_DATA_MGMT_ACK_RSSI,
+ ar->ab->wmi_ab.svc_map))
+ info->status.ack_signal = tx_compl_param->ack_rssi;
+ }
+
+ ieee80211_tx_status_irqsafe(ar->hw, msdu);
+
+ num_mgmt = atomic_dec_if_positive(&ar->num_pending_mgmt_tx);
+
+ /* WARN if we received this event without any pending mgmt tx */
+ WARN_ON_ONCE(num_mgmt < 0);
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "mgmt tx comp pending %d desc id %d\n",
+ num_mgmt, tx_compl_param->desc_id);
+
+ if (!num_mgmt)
+ wake_up(&ar->txmgmt_empty_waitq);
+
+ return 0;
+}
+
+static int ath11k_pull_mgmt_tx_compl_param_tlv(struct ath11k_base *ab,
+ struct sk_buff *skb,
+ struct wmi_mgmt_tx_compl_event *param)
+{
+ const void **tb;
+ const struct wmi_mgmt_tx_compl_event *ev;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_MGMT_TX_COMPL_EVENT];
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch mgmt tx compl ev");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ param->pdev_id = ev->pdev_id;
+ param->desc_id = ev->desc_id;
+ param->status = ev->status;
+ param->ack_rssi = ev->ack_rssi;
+
+ kfree(tb);
+ return 0;
+}
+
+static void ath11k_wmi_event_scan_started(struct ath11k *ar)
+{
+ lockdep_assert_held(&ar->data_lock);
+
+ switch (ar->scan.state) {
+ case ATH11K_SCAN_IDLE:
+ case ATH11K_SCAN_RUNNING:
+ case ATH11K_SCAN_ABORTING:
+ ath11k_warn(ar->ab, "received scan started event in an invalid scan state: %s (%d)\n",
+ ath11k_scan_state_str(ar->scan.state),
+ ar->scan.state);
+ break;
+ case ATH11K_SCAN_STARTING:
+ ar->scan.state = ATH11K_SCAN_RUNNING;
+ if (ar->scan.is_roc)
+ ieee80211_ready_on_channel(ar->hw);
+ complete(&ar->scan.started);
+ break;
+ }
+}
+
+static void ath11k_wmi_event_scan_start_failed(struct ath11k *ar)
+{
+ lockdep_assert_held(&ar->data_lock);
+
+ switch (ar->scan.state) {
+ case ATH11K_SCAN_IDLE:
+ case ATH11K_SCAN_RUNNING:
+ case ATH11K_SCAN_ABORTING:
+ ath11k_warn(ar->ab, "received scan start failed event in an invalid scan state: %s (%d)\n",
+ ath11k_scan_state_str(ar->scan.state),
+ ar->scan.state);
+ break;
+ case ATH11K_SCAN_STARTING:
+ complete(&ar->scan.started);
+ __ath11k_mac_scan_finish(ar);
+ break;
+ }
+}
+
+static void ath11k_wmi_event_scan_completed(struct ath11k *ar)
+{
+ lockdep_assert_held(&ar->data_lock);
+
+ switch (ar->scan.state) {
+ case ATH11K_SCAN_IDLE:
+ case ATH11K_SCAN_STARTING:
+ /* One suspected reason a scan can complete while still starting
+ * is the firmware failing to deliver all scan events to the
+ * host, e.g. when the transport pipe is full. This has been
+ * observed with spectral scan phyerr events starving the wmi
+ * transport pipe. In such a case the "scan completed" event
+ * should be (and is) ignored by the host, as it may just be the
+ * firmware's scan state machine recovering.
+ */
+ ath11k_warn(ar->ab, "received scan completed event in an invalid scan state: %s (%d)\n",
+ ath11k_scan_state_str(ar->scan.state),
+ ar->scan.state);
+ break;
+ case ATH11K_SCAN_RUNNING:
+ case ATH11K_SCAN_ABORTING:
+ __ath11k_mac_scan_finish(ar);
+ break;
+ }
+}
+
+static void ath11k_wmi_event_scan_bss_chan(struct ath11k *ar)
+{
+ lockdep_assert_held(&ar->data_lock);
+
+ switch (ar->scan.state) {
+ case ATH11K_SCAN_IDLE:
+ case ATH11K_SCAN_STARTING:
+ ath11k_warn(ar->ab, "received scan bss chan event in an invalid scan state: %s (%d)\n",
+ ath11k_scan_state_str(ar->scan.state),
+ ar->scan.state);
+ break;
+ case ATH11K_SCAN_RUNNING:
+ case ATH11K_SCAN_ABORTING:
+ ar->scan_channel = NULL;
+ break;
+ }
+}
+
+static void ath11k_wmi_event_scan_foreign_chan(struct ath11k *ar, u32 freq)
+{
+ lockdep_assert_held(&ar->data_lock);
+
+ switch (ar->scan.state) {
+ case ATH11K_SCAN_IDLE:
+ case ATH11K_SCAN_STARTING:
+ ath11k_warn(ar->ab, "received scan foreign chan event in an invalid scan state: %s (%d)\n",
+ ath11k_scan_state_str(ar->scan.state),
+ ar->scan.state);
+ break;
+ case ATH11K_SCAN_RUNNING:
+ case ATH11K_SCAN_ABORTING:
+ ar->scan_channel = ieee80211_get_channel(ar->hw->wiphy, freq);
+ if (ar->scan.is_roc && ar->scan.roc_freq == freq)
+ complete(&ar->scan.on_channel);
+ break;
+ }
+}
+
+static const char *
+ath11k_wmi_event_scan_type_str(enum wmi_scan_event_type type,
+ enum wmi_scan_completion_reason reason)
+{
+ switch (type) {
+ case WMI_SCAN_EVENT_STARTED:
+ return "started";
+ case WMI_SCAN_EVENT_COMPLETED:
+ switch (reason) {
+ case WMI_SCAN_REASON_COMPLETED:
+ return "completed";
+ case WMI_SCAN_REASON_CANCELLED:
+ return "completed [cancelled]";
+ case WMI_SCAN_REASON_PREEMPTED:
+ return "completed [preempted]";
+ case WMI_SCAN_REASON_TIMEDOUT:
+ return "completed [timedout]";
+ case WMI_SCAN_REASON_INTERNAL_FAILURE:
+ return "completed [internal err]";
+ case WMI_SCAN_REASON_MAX:
+ break;
+ }
+ return "completed [unknown]";
+ case WMI_SCAN_EVENT_BSS_CHANNEL:
+ return "bss channel";
+ case WMI_SCAN_EVENT_FOREIGN_CHAN:
+ return "foreign channel";
+ case WMI_SCAN_EVENT_DEQUEUED:
+ return "dequeued";
+ case WMI_SCAN_EVENT_PREEMPTED:
+ return "preempted";
+ case WMI_SCAN_EVENT_START_FAILED:
+ return "start failed";
+ case WMI_SCAN_EVENT_RESTARTED:
+ return "restarted";
+ case WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT:
+ return "foreign channel exit";
+ default:
+ return "unknown";
+ }
+}
+
+static int ath11k_pull_scan_ev(struct ath11k_base *ab, struct sk_buff *skb,
+ struct wmi_scan_event *scan_evt_param)
+{
+ const void **tb;
+ const struct wmi_scan_event *ev;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_SCAN_EVENT];
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch scan ev");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ scan_evt_param->event_type = ev->event_type;
+ scan_evt_param->reason = ev->reason;
+ scan_evt_param->channel_freq = ev->channel_freq;
+ scan_evt_param->scan_req_id = ev->scan_req_id;
+ scan_evt_param->scan_id = ev->scan_id;
+ scan_evt_param->vdev_id = ev->vdev_id;
+ scan_evt_param->tsf_timestamp = ev->tsf_timestamp;
+
+ kfree(tb);
+ return 0;
+}
+
+static int ath11k_pull_peer_sta_kickout_ev(struct ath11k_base *ab, struct sk_buff *skb,
+ struct wmi_peer_sta_kickout_arg *arg)
+{
+ const void **tb;
+ const struct wmi_peer_sta_kickout_event *ev;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_PEER_STA_KICKOUT_EVENT];
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch peer sta kickout ev");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ arg->mac_addr = ev->peer_macaddr.addr;
+
+ kfree(tb);
+ return 0;
+}
+
+static int ath11k_pull_roam_ev(struct ath11k_base *ab, struct sk_buff *skb,
+ struct wmi_roam_event *roam_ev)
+{
+ const void **tb;
+ const struct wmi_roam_event *ev;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_ROAM_EVENT];
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch roam ev");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ roam_ev->vdev_id = ev->vdev_id;
+ roam_ev->reason = ev->reason;
+ roam_ev->rssi = ev->rssi;
+
+ kfree(tb);
+ return 0;
+}
+
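+/* Map a channel center frequency to its flat index across all wiphy bands */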
+static int freq_to_idx(struct ath11k *ar, int freq)
+{
+ struct ieee80211_supported_band *sband;
+ int band, ch, idx = 0;
+
+ for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) {
+ sband = ar->hw->wiphy->bands[band];
+ if (!sband)
+ continue;
+
+ for (ch = 0; ch < sband->n_channels; ch++, idx++)
+ if (sband->channels[ch].center_freq == freq)
+ goto exit;
+ }
+
+exit:
+ return idx;
+}
+
+static int ath11k_pull_chan_info_ev(struct ath11k_base *ab, struct sk_buff *skb,
+ struct wmi_chan_info_event *ch_info_ev)
+{
+ const void **tb;
+ const struct wmi_chan_info_event *ev;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_CHAN_INFO_EVENT];
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch chan info ev");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ ch_info_ev->err_code = ev->err_code;
+ ch_info_ev->freq = ev->freq;
+ ch_info_ev->cmd_flags = ev->cmd_flags;
+ ch_info_ev->noise_floor = ev->noise_floor;
+ ch_info_ev->rx_clear_count = ev->rx_clear_count;
+ ch_info_ev->cycle_count = ev->cycle_count;
+ ch_info_ev->chan_tx_pwr_range = ev->chan_tx_pwr_range;
+ ch_info_ev->chan_tx_pwr_tp = ev->chan_tx_pwr_tp;
+ ch_info_ev->rx_frame_count = ev->rx_frame_count;
+ ch_info_ev->tx_frame_cnt = ev->tx_frame_cnt;
+ ch_info_ev->mac_clk_mhz = ev->mac_clk_mhz;
+ ch_info_ev->vdev_id = ev->vdev_id;
+
+ kfree(tb);
+ return 0;
+}
+
+static int
+ath11k_pull_pdev_bss_chan_info_ev(struct ath11k_base *ab, struct sk_buff *skb,
+ struct wmi_pdev_bss_chan_info_event *bss_ch_info_ev)
+{
+ const void **tb;
+ const struct wmi_pdev_bss_chan_info_event *ev;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_PDEV_BSS_CHAN_INFO_EVENT];
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch pdev bss chan info ev");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ bss_ch_info_ev->pdev_id = ev->pdev_id;
+ bss_ch_info_ev->freq = ev->freq;
+ bss_ch_info_ev->noise_floor = ev->noise_floor;
+ bss_ch_info_ev->rx_clear_count_low = ev->rx_clear_count_low;
+ bss_ch_info_ev->rx_clear_count_high = ev->rx_clear_count_high;
+ bss_ch_info_ev->cycle_count_low = ev->cycle_count_low;
+ bss_ch_info_ev->cycle_count_high = ev->cycle_count_high;
+ bss_ch_info_ev->tx_cycle_count_low = ev->tx_cycle_count_low;
+ bss_ch_info_ev->tx_cycle_count_high = ev->tx_cycle_count_high;
+ bss_ch_info_ev->rx_cycle_count_low = ev->rx_cycle_count_low;
+ bss_ch_info_ev->rx_cycle_count_high = ev->rx_cycle_count_high;
+ bss_ch_info_ev->rx_bss_cycle_count_low = ev->rx_bss_cycle_count_low;
+ bss_ch_info_ev->rx_bss_cycle_count_high = ev->rx_bss_cycle_count_high;
+
+ kfree(tb);
+ return 0;
+}
+
+static int
+ath11k_pull_vdev_install_key_compl_ev(struct ath11k_base *ab, struct sk_buff *skb,
+ struct wmi_vdev_install_key_complete_arg *arg)
+{
+ const void **tb;
+ const struct wmi_vdev_install_key_compl_event *ev;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_VDEV_INSTALL_KEY_COMPLETE_EVENT];
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch vdev install key compl ev");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ arg->vdev_id = ev->vdev_id;
+ arg->macaddr = ev->peer_macaddr.addr;
+ arg->key_idx = ev->key_idx;
+ arg->key_flags = ev->key_flags;
+ arg->status = ev->status;
+
+ kfree(tb);
+ return 0;
+}
+
+static int ath11k_pull_peer_assoc_conf_ev(struct ath11k_base *ab, struct sk_buff *skb,
+ struct wmi_peer_assoc_conf_arg *peer_assoc_conf)
+{
+ const void **tb;
+ const struct wmi_peer_assoc_conf_event *ev;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_PEER_ASSOC_CONF_EVENT];
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch peer assoc conf ev");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ peer_assoc_conf->vdev_id = ev->vdev_id;
+ peer_assoc_conf->macaddr = ev->peer_macaddr.addr;
+
+ kfree(tb);
+ return 0;
+}
+
+static void ath11k_wmi_pull_pdev_stats_base(const struct wmi_pdev_stats_base *src,
+ struct ath11k_fw_stats_pdev *dst)
+{
+ dst->ch_noise_floor = src->chan_nf;
+ dst->tx_frame_count = src->tx_frame_count;
+ dst->rx_frame_count = src->rx_frame_count;
+ dst->rx_clear_count = src->rx_clear_count;
+ dst->cycle_count = src->cycle_count;
+ dst->phy_err_count = src->phy_err_count;
+ dst->chan_tx_power = src->chan_tx_pwr;
+}
+
+static void
+ath11k_wmi_pull_pdev_stats_tx(const struct wmi_pdev_stats_tx *src,
+ struct ath11k_fw_stats_pdev *dst)
+{
+ dst->comp_queued = src->comp_queued;
+ dst->comp_delivered = src->comp_delivered;
+ dst->msdu_enqued = src->msdu_enqued;
+ dst->mpdu_enqued = src->mpdu_enqued;
+ dst->wmm_drop = src->wmm_drop;
+ dst->local_enqued = src->local_enqued;
+ dst->local_freed = src->local_freed;
+ dst->hw_queued = src->hw_queued;
+ dst->hw_reaped = src->hw_reaped;
+ dst->underrun = src->underrun;
+ dst->hw_paused = src->hw_paused;
+ dst->tx_abort = src->tx_abort;
+ dst->mpdus_requeued = src->mpdus_requeued;
+ dst->tx_ko = src->tx_ko;
+ dst->tx_xretry = src->tx_xretry;
+ dst->data_rc = src->data_rc;
+ dst->self_triggers = src->self_triggers;
+ dst->sw_retry_failure = src->sw_retry_failure;
+ dst->illgl_rate_phy_err = src->illgl_rate_phy_err;
+ dst->pdev_cont_xretry = src->pdev_cont_xretry;
+ dst->pdev_tx_timeout = src->pdev_tx_timeout;
+ dst->pdev_resets = src->pdev_resets;
+ dst->stateless_tid_alloc_failure = src->stateless_tid_alloc_failure;
+ dst->phy_underrun = src->phy_underrun;
+ dst->txop_ovf = src->txop_ovf;
+ dst->seq_posted = src->seq_posted;
+ dst->seq_failed_queueing = src->seq_failed_queueing;
+ dst->seq_completed = src->seq_completed;
+ dst->seq_restarted = src->seq_restarted;
+ dst->mu_seq_posted = src->mu_seq_posted;
+ dst->mpdus_sw_flush = src->mpdus_sw_flush;
+ dst->mpdus_hw_filter = src->mpdus_hw_filter;
+ dst->mpdus_truncated = src->mpdus_truncated;
+ dst->mpdus_ack_failed = src->mpdus_ack_failed;
+ dst->mpdus_expired = src->mpdus_expired;
+}
+
+static void ath11k_wmi_pull_pdev_stats_rx(const struct wmi_pdev_stats_rx *src,
+ struct ath11k_fw_stats_pdev *dst)
+{
+ dst->mid_ppdu_route_change = src->mid_ppdu_route_change;
+ dst->status_rcvd = src->status_rcvd;
+ dst->r0_frags = src->r0_frags;
+ dst->r1_frags = src->r1_frags;
+ dst->r2_frags = src->r2_frags;
+ dst->r3_frags = src->r3_frags;
+ dst->htt_msdus = src->htt_msdus;
+ dst->htt_mpdus = src->htt_mpdus;
+ dst->loc_msdus = src->loc_msdus;
+ dst->loc_mpdus = src->loc_mpdus;
+ dst->oversize_amsdu = src->oversize_amsdu;
+ dst->phy_errs = src->phy_errs;
+ dst->phy_err_drop = src->phy_err_drop;
+ dst->mpdu_errs = src->mpdu_errs;
+ dst->rx_ovfl_errs = src->rx_ovfl_errs;
+}
+
+static void
+ath11k_wmi_pull_vdev_stats(const struct wmi_vdev_stats *src,
+ struct ath11k_fw_stats_vdev *dst)
+{
+ int i;
+
+ dst->vdev_id = src->vdev_id;
+ dst->beacon_snr = src->beacon_snr;
+ dst->data_snr = src->data_snr;
+ dst->num_rx_frames = src->num_rx_frames;
+ dst->num_rts_fail = src->num_rts_fail;
+ dst->num_rts_success = src->num_rts_success;
+ dst->num_rx_err = src->num_rx_err;
+ dst->num_rx_discard = src->num_rx_discard;
+ dst->num_tx_not_acked = src->num_tx_not_acked;
+
+ for (i = 0; i < ARRAY_SIZE(src->num_tx_frames); i++)
+ dst->num_tx_frames[i] = src->num_tx_frames[i];
+
+ for (i = 0; i < ARRAY_SIZE(src->num_tx_frames_retries); i++)
+ dst->num_tx_frames_retries[i] = src->num_tx_frames_retries[i];
+
+ for (i = 0; i < ARRAY_SIZE(src->num_tx_frames_failures); i++)
+ dst->num_tx_frames_failures[i] = src->num_tx_frames_failures[i];
+
+ for (i = 0; i < ARRAY_SIZE(src->tx_rate_history); i++)
+ dst->tx_rate_history[i] = src->tx_rate_history[i];
+
+ for (i = 0; i < ARRAY_SIZE(src->beacon_rssi_history); i++)
+ dst->beacon_rssi_history[i] = src->beacon_rssi_history[i];
+}
+
+static void
+ath11k_wmi_pull_bcn_stats(const struct wmi_bcn_stats *src,
+ struct ath11k_fw_stats_bcn *dst)
+{
+ dst->vdev_id = src->vdev_id;
+ dst->tx_bcn_succ_cnt = src->tx_bcn_succ_cnt;
+ dst->tx_bcn_outage_cnt = src->tx_bcn_outage_cnt;
+}
+
+static int ath11k_wmi_tlv_rssi_chain_parse(struct ath11k_base *ab,
+ u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct wmi_tlv_fw_stats_parse *parse = data;
+ const struct wmi_stats_event *ev = parse->ev;
+ struct ath11k_fw_stats *stats = parse->stats;
+ struct ath11k *ar;
+ struct ath11k_vif *arvif;
+ struct ieee80211_sta *sta;
+ struct ath11k_sta *arsta;
+ const struct wmi_rssi_stats *stats_rssi = (const struct wmi_rssi_stats *)ptr;
+ int j, ret = 0;
+
+ if (tag != WMI_TAG_RSSI_STATS)
+ return -EPROTO;
+
+ rcu_read_lock();
+
+ ar = ath11k_mac_get_ar_by_pdev_id(ab, ev->pdev_id);
+ stats->stats_id = WMI_REQUEST_RSSI_PER_CHAIN_STAT;
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "stats vdev id %d mac %pM\n",
+ stats_rssi->vdev_id, stats_rssi->peer_macaddr.addr);
+
+ arvif = ath11k_mac_get_arvif(ar, stats_rssi->vdev_id);
+ if (!arvif) {
+ ath11k_warn(ab, "not found vif for vdev id %d\n",
+ stats_rssi->vdev_id);
+ ret = -EPROTO;
+ goto exit;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "stats bssid %pM vif %p\n",
+ arvif->bssid, arvif->vif);
+
+ sta = ieee80211_find_sta_by_ifaddr(ar->hw,
+ arvif->bssid,
+ NULL);
+ if (!sta) {
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "not found station of bssid %pM for rssi chain\n",
+ arvif->bssid);
+ goto exit;
+ }
+
+ arsta = ath11k_sta_to_arsta(sta);
+
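+ /* compile-time check: chain_signal must fit within the fw-reported array */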
+ BUILD_BUG_ON(ARRAY_SIZE(arsta->chain_signal) >
+ ARRAY_SIZE(stats_rssi->rssi_avg_beacon));
+
+ for (j = 0; j < ARRAY_SIZE(arsta->chain_signal); j++) {
+ arsta->chain_signal[j] = stats_rssi->rssi_avg_beacon[j];
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "stats beacon rssi[%d] %d data rssi[%d] %d\n",
+ j,
+ stats_rssi->rssi_avg_beacon[j],
+ j,
+ stats_rssi->rssi_avg_data[j]);
+ }
+
+exit:
+ rcu_read_unlock();
+ return ret;
+}
+
+static int ath11k_wmi_tlv_fw_stats_data_parse(struct ath11k_base *ab,
+ struct wmi_tlv_fw_stats_parse *parse,
+ const void *ptr,
+ u16 len)
+{
+ struct ath11k_fw_stats *stats = parse->stats;
+ const struct wmi_stats_event *ev = parse->ev;
+ struct ath11k *ar;
+ struct ath11k_vif *arvif;
+ struct ieee80211_sta *sta;
+ struct ath11k_sta *arsta;
+ int i, ret = 0;
+ const void *data = ptr;
+
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch update stats ev");
+ return -EPROTO;
+ }
+
+ stats->stats_id = 0;
+
+ rcu_read_lock();
+
+ ar = ath11k_mac_get_ar_by_pdev_id(ab, ev->pdev_id);
+
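+ /* stat records are packed back-to-back in the TLV byte array */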
+ for (i = 0; i < ev->num_pdev_stats; i++) {
+ const struct wmi_pdev_stats *src;
+ struct ath11k_fw_stats_pdev *dst;
+
+ src = data;
+ if (len < sizeof(*src)) {
+ ret = -EPROTO;
+ goto exit;
+ }
+
+ stats->stats_id = WMI_REQUEST_PDEV_STAT;
+
+ data += sizeof(*src);
+ len -= sizeof(*src);
+
+ dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+ if (!dst)
+ continue;
+
+ ath11k_wmi_pull_pdev_stats_base(&src->base, dst);
+ ath11k_wmi_pull_pdev_stats_tx(&src->tx, dst);
+ ath11k_wmi_pull_pdev_stats_rx(&src->rx, dst);
+ list_add_tail(&dst->list, &stats->pdevs);
+ }
+
+ for (i = 0; i < ev->num_vdev_stats; i++) {
+ const struct wmi_vdev_stats *src;
+ struct ath11k_fw_stats_vdev *dst;
+
+ src = data;
+ if (len < sizeof(*src)) {
+ ret = -EPROTO;
+ goto exit;
+ }
+
+ stats->stats_id = WMI_REQUEST_VDEV_STAT;
+
+ arvif = ath11k_mac_get_arvif(ar, src->vdev_id);
+ if (arvif) {
+ sta = ieee80211_find_sta_by_ifaddr(ar->hw,
+ arvif->bssid,
+ NULL);
+ if (sta) {
+ arsta = ath11k_sta_to_arsta(sta);
+ arsta->rssi_beacon = src->beacon_snr;
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "stats vdev id %d snr %d\n",
+ src->vdev_id, src->beacon_snr);
+ } else {
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "not found station of bssid %pM for vdev stat\n",
+ arvif->bssid);
+ }
+ }
+
+ data += sizeof(*src);
+ len -= sizeof(*src);
+
+ dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+ if (!dst)
+ continue;
+
+ ath11k_wmi_pull_vdev_stats(src, dst);
+ list_add_tail(&dst->list, &stats->vdevs);
+ }
+
+ for (i = 0; i < ev->num_bcn_stats; i++) {
+ const struct wmi_bcn_stats *src;
+ struct ath11k_fw_stats_bcn *dst;
+
+ src = data;
+ if (len < sizeof(*src)) {
+ ret = -EPROTO;
+ goto exit;
+ }
+
+ stats->stats_id = WMI_REQUEST_BCN_STAT;
+
+ data += sizeof(*src);
+ len -= sizeof(*src);
+
+ dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+ if (!dst)
+ continue;
+
+ ath11k_wmi_pull_bcn_stats(src, dst);
+ list_add_tail(&dst->list, &stats->bcn);
+ }
+
+exit:
+ rcu_read_unlock();
+ return ret;
+}
+
+static int ath11k_wmi_tlv_fw_stats_parse(struct ath11k_base *ab,
+ u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct wmi_tlv_fw_stats_parse *parse = data;
+ int ret = 0;
+
+ switch (tag) {
+ case WMI_TAG_STATS_EVENT:
+ parse->ev = (struct wmi_stats_event *)ptr;
+ parse->stats->pdev_id = parse->ev->pdev_id;
+ break;
+ case WMI_TAG_ARRAY_BYTE:
+ ret = ath11k_wmi_tlv_fw_stats_data_parse(ab, parse, ptr, len);
+ break;
+ case WMI_TAG_PER_CHAIN_RSSI_STATS:
+ parse->rssi = (struct wmi_per_chain_rssi_stats *)ptr;
+
+ if (parse->ev->stats_id & WMI_REQUEST_RSSI_PER_CHAIN_STAT)
+ parse->rssi_num = parse->rssi->num_per_chain_rssi_stats;
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "stats id 0x%x num chain %d\n",
+ parse->ev->stats_id,
+ parse->rssi_num);
+ break;
+ case WMI_TAG_ARRAY_STRUCT:
+ if (parse->rssi_num && !parse->chain_rssi_done) {
+ ret = ath11k_wmi_tlv_iter(ab, ptr, len,
+ ath11k_wmi_tlv_rssi_chain_parse,
+ parse);
+ if (ret) {
+ ath11k_warn(ab, "failed to parse rssi chain %d\n",
+ ret);
+ return ret;
+ }
+ parse->chain_rssi_done = true;
+ }
+ break;
+ default:
+ break;
+ }
+ return ret;
+}
+
+int ath11k_wmi_pull_fw_stats(struct ath11k_base *ab, struct sk_buff *skb,
+ struct ath11k_fw_stats *stats)
+{
+ struct wmi_tlv_fw_stats_parse parse = { };
+
+ stats->stats_id = 0;
+ parse.stats = stats;
+
+ return ath11k_wmi_tlv_iter(ab, skb->data, skb->len,
+ ath11k_wmi_tlv_fw_stats_parse,
+ &parse);
+}
+
+static void
+ath11k_wmi_fw_pdev_base_stats_fill(const struct ath11k_fw_stats_pdev *pdev,
+ char *buf, u32 *length)
+{
+ u32 len = *length;
+ u32 buf_len = ATH11K_FW_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ len += scnprintf(buf + len, buf_len - len, "%30s\n",
+ "ath11k PDEV stats");
+ len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+ "=================");
+
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Channel noise floor", pdev->ch_noise_floor);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Channel TX power", pdev->chan_tx_power);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "TX frame count", pdev->tx_frame_count);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "RX frame count", pdev->rx_frame_count);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "RX clear count", pdev->rx_clear_count);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Cycle count", pdev->cycle_count);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "PHY error count", pdev->phy_err_count);
+
+ *length = len;
+}
+
+static void
+ath11k_wmi_fw_pdev_tx_stats_fill(const struct ath11k_fw_stats_pdev *pdev,
+ char *buf, u32 *length)
+{
+ u32 len = *length;
+ u32 buf_len = ATH11K_FW_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "\n%30s\n",
+ "ath11k PDEV TX stats");
+ len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+ "====================");
+
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "HTT cookies queued", pdev->comp_queued);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "HTT cookies disp.", pdev->comp_delivered);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MSDU queued", pdev->msdu_enqued);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MPDU queued", pdev->mpdu_enqued);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MSDUs dropped", pdev->wmm_drop);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Local enqued", pdev->local_enqued);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Local freed", pdev->local_freed);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "HW queued", pdev->hw_queued);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "PPDUs reaped", pdev->hw_reaped);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Num underruns", pdev->underrun);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Num HW Paused", pdev->hw_paused);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "PPDUs cleaned", pdev->tx_abort);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MPDUs requeued", pdev->mpdus_requeued);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "PPDU OK", pdev->tx_ko);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Excessive retries", pdev->tx_xretry);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "HW rate", pdev->data_rc);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Sched self triggers", pdev->self_triggers);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Dropped due to SW retries",
+ pdev->sw_retry_failure);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Illegal rate phy errors",
+ pdev->illgl_rate_phy_err);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "PDEV continuous xretry", pdev->pdev_cont_xretry);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "TX timeout", pdev->pdev_tx_timeout);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "PDEV resets", pdev->pdev_resets);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Stateless TIDs alloc failures",
+ pdev->stateless_tid_alloc_failure);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "PHY underrun", pdev->phy_underrun);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "MPDU is more than txop limit", pdev->txop_ovf);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Num sequences posted", pdev->seq_posted);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Num seq failed queueing ", pdev->seq_failed_queueing);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Num sequences completed ", pdev->seq_completed);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Num sequences restarted ", pdev->seq_restarted);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Num of MU sequences posted ", pdev->mu_seq_posted);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Num of MPDUS SW flushed ", pdev->mpdus_sw_flush);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Num of MPDUS HW filtered ", pdev->mpdus_hw_filter);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Num of MPDUS truncated ", pdev->mpdus_truncated);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Num of MPDUS ACK failed ", pdev->mpdus_ack_failed);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Num of MPDUS expired ", pdev->mpdus_expired);
+ *length = len;
+}
+
+static void
+ath11k_wmi_fw_pdev_rx_stats_fill(const struct ath11k_fw_stats_pdev *pdev,
+ char *buf, u32 *length)
+{
+ u32 len = *length;
+ u32 buf_len = ATH11K_FW_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "\n%30s\n",
+ "ath11k PDEV RX stats");
+ len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+ "====================");
+
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Mid PPDU route change",
+ pdev->mid_ppdu_route_change);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Tot. number of statuses", pdev->status_rcvd);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Extra frags on rings 0", pdev->r0_frags);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Extra frags on rings 1", pdev->r1_frags);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Extra frags on rings 2", pdev->r2_frags);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Extra frags on rings 3", pdev->r3_frags);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MSDUs delivered to HTT", pdev->htt_msdus);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MPDUs delivered to HTT", pdev->htt_mpdus);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MSDUs delivered to stack", pdev->loc_msdus);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MPDUs delivered to stack", pdev->loc_mpdus);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Oversized AMSUs", pdev->oversize_amsdu);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "PHY errors", pdev->phy_errs);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "PHY errors drops", pdev->phy_err_drop);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MPDU errors (FCS, MIC, ENC)", pdev->mpdu_errs);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Overflow errors", pdev->rx_ovfl_errs);
+ *length = len;
+}
+
+static void
+ath11k_wmi_fw_vdev_stats_fill(struct ath11k *ar,
+ const struct ath11k_fw_stats_vdev *vdev,
+ char *buf, u32 *length)
+{
+ u32 len = *length;
+ u32 buf_len = ATH11K_FW_STATS_BUF_SIZE;
+ struct ath11k_vif *arvif = ath11k_mac_get_arvif(ar, vdev->vdev_id);
+ u8 *vif_macaddr;
+ int i;
+
+ /* The VDEV stats list also carries active VDEVs of other PDEVs;
+ * skip those that are not part of the requested PDEV.
+ */
+ if (!arvif)
+ return;
+
+ vif_macaddr = arvif->vif->addr;
+
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "VDEV ID", vdev->vdev_id);
+ len += scnprintf(buf + len, buf_len - len, "%30s %pM\n",
+ "VDEV MAC address", vif_macaddr);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "beacon snr", vdev->beacon_snr);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "data snr", vdev->data_snr);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "num rx frames", vdev->num_rx_frames);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "num rts fail", vdev->num_rts_fail);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "num rts success", vdev->num_rts_success);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "num rx err", vdev->num_rx_err);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "num rx discard", vdev->num_rx_discard);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "num tx not acked", vdev->num_tx_not_acked);
+
+ for (i = 0 ; i < ARRAY_SIZE(vdev->num_tx_frames); i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%25s [%02d] %u\n",
+ "num tx frames", i,
+ vdev->num_tx_frames[i]);
+
+ for (i = 0 ; i < ARRAY_SIZE(vdev->num_tx_frames_retries); i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%25s [%02d] %u\n",
+ "num tx frames retries", i,
+ vdev->num_tx_frames_retries[i]);
+
+ for (i = 0 ; i < ARRAY_SIZE(vdev->num_tx_frames_failures); i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%25s [%02d] %u\n",
+ "num tx frames failures", i,
+ vdev->num_tx_frames_failures[i]);
+
+ for (i = 0 ; i < ARRAY_SIZE(vdev->tx_rate_history); i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%25s [%02d] 0x%08x\n",
+ "tx rate history", i,
+ vdev->tx_rate_history[i]);
+
+ for (i = 0 ; i < ARRAY_SIZE(vdev->beacon_rssi_history); i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%25s [%02d] %u\n",
+ "beacon rssi history", i,
+ vdev->beacon_rssi_history[i]);
+
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ *length = len;
+}
+
+static void
+ath11k_wmi_fw_bcn_stats_fill(struct ath11k *ar,
+ const struct ath11k_fw_stats_bcn *bcn,
+ char *buf, u32 *length)
+{
+ u32 len = *length;
+ u32 buf_len = ATH11K_FW_STATS_BUF_SIZE;
+ struct ath11k_vif *arvif = ath11k_mac_get_arvif(ar, bcn->vdev_id);
+ u8 *vdev_macaddr;
+
+ if (!arvif) {
+ ath11k_warn(ar->ab, "invalid vdev id %d in bcn stats",
+ bcn->vdev_id);
+ return;
+ }
+
+ vdev_macaddr = arvif->vif->addr;
+
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "VDEV ID", bcn->vdev_id);
+ len += scnprintf(buf + len, buf_len - len, "%30s %pM\n",
+ "VDEV MAC address", vdev_macaddr);
+ len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+ "================");
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "Num of beacon tx success", bcn->tx_bcn_succ_cnt);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "Num of beacon tx failures", bcn->tx_bcn_outage_cnt);
+
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ *length = len;
+}
+
+void ath11k_wmi_fw_stats_fill(struct ath11k *ar,
+ struct ath11k_fw_stats *fw_stats,
+ u32 stats_id, char *buf)
+{
+ u32 len = 0;
+ u32 buf_len = ATH11K_FW_STATS_BUF_SIZE;
+ const struct ath11k_fw_stats_pdev *pdev;
+ const struct ath11k_fw_stats_vdev *vdev;
+ const struct ath11k_fw_stats_bcn *bcn;
+ size_t num_bcn;
+
+ spin_lock_bh(&ar->data_lock);
+
+ if (stats_id == WMI_REQUEST_PDEV_STAT) {
+ pdev = list_first_entry_or_null(&fw_stats->pdevs,
+ struct ath11k_fw_stats_pdev, list);
+ if (!pdev) {
+ ath11k_warn(ar->ab, "failed to get pdev stats\n");
+ goto unlock;
+ }
+
+ ath11k_wmi_fw_pdev_base_stats_fill(pdev, buf, &len);
+ ath11k_wmi_fw_pdev_tx_stats_fill(pdev, buf, &len);
+ ath11k_wmi_fw_pdev_rx_stats_fill(pdev, buf, &len);
+ }
+
+ if (stats_id == WMI_REQUEST_VDEV_STAT) {
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ len += scnprintf(buf + len, buf_len - len, "%30s\n",
+ "ath11k VDEV stats");
+ len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+ "=================");
+
+ list_for_each_entry(vdev, &fw_stats->vdevs, list)
+ ath11k_wmi_fw_vdev_stats_fill(ar, vdev, buf, &len);
+ }
+
+ if (stats_id == WMI_REQUEST_BCN_STAT) {
+ num_bcn = list_count_nodes(&fw_stats->bcn);
+
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
+ "ath11k Beacon stats", num_bcn);
+ len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+ "===================");
+
+ list_for_each_entry(bcn, &fw_stats->bcn, list)
+ ath11k_wmi_fw_bcn_stats_fill(ar, bcn, buf, &len);
+ }
+
+unlock:
+ spin_unlock_bh(&ar->data_lock);
+
+ if (len >= buf_len)
+ buf[len - 1] = 0;
+ else
+ buf[len] = 0;
+}
+
+static void ath11k_wmi_op_ep_tx_credits(struct ath11k_base *ab)
+{
+ /* try to send pending beacons first. they take priority */
+ wake_up(&ab->wmi_ab.tx_credits_wq);
+}
+
+static int ath11k_reg_11d_new_cc_event(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ const struct wmi_11d_new_cc_ev *ev;
+ struct ath11k *ar;
+ struct ath11k_pdev *pdev;
+ const void **tb;
+ int ret, i;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_11D_NEW_COUNTRY_EVENT];
+ if (!ev) {
+ kfree(tb);
+ ath11k_warn(ab, "failed to fetch 11d new cc ev");
+ return -EPROTO;
+ }
+
+ spin_lock_bh(&ab->base_lock);
+ memcpy(&ab->new_alpha2, &ev->new_alpha2, 2);
+ spin_unlock_bh(&ab->base_lock);
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "event 11d new cc %c%c\n",
+ ab->new_alpha2[0],
+ ab->new_alpha2[1]);
+
+ kfree(tb);
+
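+ /* mark 11d state idle on all radios and release any 11d scan waiters */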
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = &ab->pdevs[i];
+ ar = pdev->ar;
+ ar->state_11d = ATH11K_11D_IDLE;
+ complete(&ar->completed_11d_scan);
+ }
+
+ queue_work(ab->workqueue, &ab->update_11d_work);
+
+ return 0;
+}
+
+static void ath11k_wmi_htc_tx_complete(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ struct ath11k_pdev_wmi *wmi = NULL;
+ u32 i;
+ u8 wmi_ep_count;
+ u8 eid;
+
+ eid = ATH11K_SKB_CB(skb)->eid;
+ dev_kfree_skb(skb);
+
+ if (eid >= ATH11K_HTC_EP_COUNT)
+ return;
+
+ wmi_ep_count = ab->htc.wmi_ep_count;
+ if (wmi_ep_count > ab->hw_params.max_radios)
+ return;
+
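+ /* find the per-pdev wmi instance that owns this endpoint id */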
+ for (i = 0; i < ab->htc.wmi_ep_count; i++) {
+ if (ab->wmi_ab.wmi[i].eid == eid) {
+ wmi = &ab->wmi_ab.wmi[i];
+ break;
+ }
+ }
+
+ if (wmi)
+ wake_up(&wmi->tx_ce_desc_wq);
+}
+
+static int ath11k_reg_chan_list_event(struct ath11k_base *ab, struct sk_buff *skb,
+ enum wmi_reg_chan_list_cmd_type id)
+{
+ struct cur_regulatory_info *reg_info;
+ int ret;
+
+ reg_info = kzalloc(sizeof(*reg_info), GFP_ATOMIC);
+ if (!reg_info)
+ return -ENOMEM;
+
+ if (id == WMI_REG_CHAN_LIST_CC_ID)
+ ret = ath11k_pull_reg_chan_list_update_ev(ab, skb, reg_info);
+ else
+ ret = ath11k_pull_reg_chan_list_ext_update_ev(ab, skb, reg_info);
+
+ if (ret) {
+ ath11k_warn(ab, "failed to extract regulatory info\n");
+ goto mem_free;
+ }
+
+ ret = ath11k_reg_handle_chan_list(ab, reg_info, IEEE80211_REG_UNSET_AP);
+ if (ret) {
+ ath11k_warn(ab, "failed to process regulatory info %d\n", ret);
+ goto mem_free;
+ }
+
+ kfree(reg_info);
+ return 0;
+
+mem_free:
+ ath11k_reg_reset_info(reg_info);
+ kfree(reg_info);
+ return ret;
+}
+
+static int ath11k_wmi_tlv_rdy_parse(struct ath11k_base *ab, u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct wmi_tlv_rdy_parse *rdy_parse = data;
+ struct wmi_ready_event fixed_param;
+ struct wmi_mac_addr *addr_list;
+ struct ath11k_pdev *pdev;
+ u32 num_mac_addr;
+ int i;
+
+ switch (tag) {
+ case WMI_TAG_READY_EVENT:
+ memset(&fixed_param, 0, sizeof(fixed_param));
+ memcpy(&fixed_param, (struct wmi_ready_event *)ptr,
+ min_t(u16, sizeof(fixed_param), len));
+ rdy_parse->num_extra_mac_addr =
+ fixed_param.ready_event_min.num_extra_mac_addr;
+
+ ether_addr_copy(ab->mac_addr,
+ fixed_param.ready_event_min.mac_addr.addr);
+ ab->pktlog_defs_checksum = fixed_param.pktlog_defs_checksum;
+ break;
+ case WMI_TAG_ARRAY_FIXED_STRUCT:
+ addr_list = (struct wmi_mac_addr *)ptr;
+ num_mac_addr = rdy_parse->num_extra_mac_addr;
+
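+ /* use per-pdev MAC addresses only if fw supplied one for every radio */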
+ if (!(ab->num_radios > 1 && num_mac_addr >= ab->num_radios))
+ break;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = &ab->pdevs[i];
+ ether_addr_copy(pdev->mac_addr, addr_list[i].addr);
+ }
+ ab->pdevs_macaddr_valid = true;
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int ath11k_ready_event(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ struct wmi_tlv_rdy_parse rdy_parse = { };
+ int ret;
+
+ ret = ath11k_wmi_tlv_iter(ab, skb->data, skb->len,
+ ath11k_wmi_tlv_rdy_parse, &rdy_parse);
+ if (ret) {
+ ath11k_warn(ab, "failed to parse tlv %d\n", ret);
+ return ret;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "event ready");
+
+ complete(&ab->wmi_ab.unified_ready);
+ return 0;
+}
+
+static void ath11k_peer_delete_resp_event(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ struct wmi_peer_delete_resp_event peer_del_resp;
+ struct ath11k *ar;
+
+ if (ath11k_pull_peer_del_resp_ev(ab, skb, &peer_del_resp) != 0) {
+ ath11k_warn(ab, "failed to extract peer delete resp");
+ return;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "event peer delete resp");
+
+ rcu_read_lock();
+ ar = ath11k_mac_get_ar_by_vdev_id(ab, peer_del_resp.vdev_id);
+ if (!ar) {
+ ath11k_warn(ab, "invalid vdev id in peer delete resp ev %d",
+ peer_del_resp.vdev_id);
+ rcu_read_unlock();
+ return;
+ }
+
+ complete(&ar->peer_delete_done);
+ rcu_read_unlock();
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "peer delete resp for vdev id %d addr %pM\n",
+ peer_del_resp.vdev_id, peer_del_resp.peer_macaddr.addr);
+}
+
+static void ath11k_vdev_delete_resp_event(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ struct ath11k *ar;
+ u32 vdev_id = 0;
+
+ if (ath11k_pull_vdev_del_resp_ev(ab, skb, &vdev_id) != 0) {
+ ath11k_warn(ab, "failed to extract vdev delete resp");
+ return;
+ }
+
+ rcu_read_lock();
+ ar = ath11k_mac_get_ar_by_vdev_id(ab, vdev_id);
+ if (!ar) {
+ ath11k_warn(ab, "invalid vdev id in vdev delete resp ev %d",
+ vdev_id);
+ rcu_read_unlock();
+ return;
+ }
+
+ complete(&ar->vdev_delete_done);
+
+ rcu_read_unlock();
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "event vdev delete resp for vdev id %d\n",
+ vdev_id);
+}
+
+static inline const char *ath11k_wmi_vdev_resp_print(u32 vdev_resp_status)
+{
+ switch (vdev_resp_status) {
+ case WMI_VDEV_START_RESPONSE_INVALID_VDEVID:
+ return "invalid vdev id";
+ case WMI_VDEV_START_RESPONSE_NOT_SUPPORTED:
+ return "not supported";
+ case WMI_VDEV_START_RESPONSE_DFS_VIOLATION:
+ return "dfs violation";
+ case WMI_VDEV_START_RESPONSE_INVALID_REGDOMAIN:
+ return "invalid regdomain";
+ default:
+ return "unknown";
+ }
+}
+
+static void ath11k_vdev_start_resp_event(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ struct wmi_vdev_start_resp_event vdev_start_resp;
+ struct ath11k *ar;
+ u32 status;
+
+ if (ath11k_pull_vdev_start_resp_tlv(ab, skb, &vdev_start_resp) != 0) {
+ ath11k_warn(ab, "failed to extract vdev start resp");
+ return;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "event start resp event");
+
+ rcu_read_lock();
+ ar = ath11k_mac_get_ar_by_vdev_id(ab, vdev_start_resp.vdev_id);
+ if (!ar) {
+ ath11k_warn(ab, "invalid vdev id in vdev start resp ev %d",
+ vdev_start_resp.vdev_id);
+ rcu_read_unlock();
+ return;
+ }
+
+ ar->last_wmi_vdev_start_status = 0;
+ ar->max_allowed_tx_power = vdev_start_resp.max_allowed_tx_power;
+ status = vdev_start_resp.status;
+
+ if (WARN_ON_ONCE(status)) {
+ ath11k_warn(ab, "vdev start resp error status %d (%s)\n",
+ status, ath11k_wmi_vdev_resp_print(status));
+ ar->last_wmi_vdev_start_status = status;
+ }
+
+ complete(&ar->vdev_setup_done);
+
+ rcu_read_unlock();
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "vdev start resp for vdev id %d",
+ vdev_start_resp.vdev_id);
+}
+
+static void ath11k_bcn_tx_status_event(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ struct ath11k_vif *arvif;
+ u32 vdev_id, tx_status;
+
+ if (ath11k_pull_bcn_tx_status_ev(ab, skb, &vdev_id, &tx_status) != 0) {
+ ath11k_warn(ab, "failed to extract bcn tx status");
+ return;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "event offload bcn tx status");
+
+ rcu_read_lock();
+ arvif = ath11k_mac_get_arvif_by_vdev_id(ab, vdev_id);
+ if (!arvif) {
+ ath11k_warn(ab, "invalid vdev id %d in bcn_tx_status",
+ vdev_id);
+ rcu_read_unlock();
+ return;
+ }
+
+ queue_work(ab->workqueue, &arvif->bcn_tx_work);
+
+ rcu_read_unlock();
+}
+
+static void ath11k_wmi_event_peer_sta_ps_state_chg(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ const struct wmi_peer_sta_ps_state_chg_event *ev;
+ struct ieee80211_sta *sta;
+ struct ath11k_peer *peer;
+ struct ath11k *ar;
+ struct ath11k_sta *arsta;
+ const void **tb;
+ enum ath11k_wmi_peer_ps_state peer_previous_ps_state;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return;
+ }
+
+ ev = tb[WMI_TAG_PEER_STA_PS_STATECHANGE_EVENT];
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch sta ps change ev");
+ kfree(tb);
+ return;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "event peer sta ps change ev addr %pM state %u sup_bitmap %x ps_valid %u ts %u\n",
+ ev->peer_macaddr.addr, ev->peer_ps_state,
+ ev->ps_supported_bitmap, ev->peer_ps_valid,
+ ev->peer_ps_timestamp);
+
+ rcu_read_lock();
+
+ spin_lock_bh(&ab->base_lock);
+
+ peer = ath11k_peer_find_by_addr(ab, ev->peer_macaddr.addr);
+
+ if (!peer) {
+ spin_unlock_bh(&ab->base_lock);
+ ath11k_warn(ab, "peer not found %pM\n", ev->peer_macaddr.addr);
+ goto exit;
+ }
+
+ ar = ath11k_mac_get_ar_by_vdev_id(ab, peer->vdev_id);
+
+ if (!ar) {
+ spin_unlock_bh(&ab->base_lock);
+ ath11k_warn(ab, "invalid vdev id in peer sta ps state change ev %d",
+ peer->vdev_id);
+
+ goto exit;
+ }
+
+ sta = peer->sta;
+
+ spin_unlock_bh(&ab->base_lock);
+
+ if (!sta) {
+ ath11k_warn(ab, "failed to find station entry %pM\n",
+ ev->peer_macaddr.addr);
+ goto exit;
+ }
+
+ arsta = ath11k_sta_to_arsta(sta);
+
+ spin_lock_bh(&ar->data_lock);
+
+ peer_previous_ps_state = arsta->peer_ps_state;
+ arsta->peer_ps_state = ev->peer_ps_state;
+ arsta->peer_current_ps_valid = !!ev->peer_ps_valid;
+
+ if (test_bit(WMI_TLV_SERVICE_PEER_POWER_SAVE_DURATION_SUPPORT,
+ ar->ab->wmi_ab.svc_map)) {
+ if (!(ev->ps_supported_bitmap & WMI_PEER_PS_VALID) ||
+ !(ev->ps_supported_bitmap & WMI_PEER_PS_STATE_TIMESTAMP) ||
+ !ev->peer_ps_valid)
+ goto out;
+
+ if (arsta->peer_ps_state == WMI_PEER_PS_STATE_ON) {
+ arsta->ps_start_time = ev->peer_ps_timestamp;
+ arsta->ps_start_jiffies = jiffies;
+ } else if (arsta->peer_ps_state == WMI_PEER_PS_STATE_OFF &&
+ peer_previous_ps_state == WMI_PEER_PS_STATE_ON) {
+ arsta->ps_total_duration = arsta->ps_total_duration +
+ (ev->peer_ps_timestamp - arsta->ps_start_time);
+ }
+
+ if (ar->ps_timekeeper_enable)
+ trace_ath11k_ps_timekeeper(ar, ev->peer_macaddr.addr,
+ ev->peer_ps_timestamp,
+ arsta->peer_ps_state);
+ }
+
+out:
+ spin_unlock_bh(&ar->data_lock);
+exit:
+ rcu_read_unlock();
+ kfree(tb);
+}
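+
+/* Illustrative sketch, not wired into the driver: the power-save
+ * residency bookkeeping above reduces to interval accounting on
+ * firmware timestamps. A hypothetical helper showing the same rule:
+ * remember the tick when entering PS, accumulate the delta when
+ * leaving it.
+ */
+static inline void wmi_ps_account_sketch(u32 *total, u32 *start_ts,
+ u8 prev_state, u8 new_state, u32 ts)
+{
+ if (new_state == WMI_PEER_PS_STATE_ON)
+ *start_ts = ts;
+ else if (new_state == WMI_PEER_PS_STATE_OFF &&
+ prev_state == WMI_PEER_PS_STATE_ON)
+ *total += ts - *start_ts;
+}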
+
+static void ath11k_vdev_stopped_event(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ struct ath11k *ar;
+ u32 vdev_id = 0;
+
+ if (ath11k_pull_vdev_stopped_param_tlv(ab, skb, &vdev_id) != 0) {
+ ath11k_warn(ab, "failed to extract vdev stopped event");
+ return;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "event vdev stopped");
+
+ rcu_read_lock();
+ ar = ath11k_mac_get_ar_by_vdev_id(ab, vdev_id);
+ if (!ar) {
+ ath11k_warn(ab, "invalid vdev id in vdev stopped ev %d",
+ vdev_id);
+ rcu_read_unlock();
+ return;
+ }
+
+ complete(&ar->vdev_setup_done);
+
+ rcu_read_unlock();
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "vdev stopped for vdev id %d", vdev_id);
+}
+
+static void ath11k_mgmt_rx_event(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ struct mgmt_rx_event_params rx_ev = {};
+ struct ath11k *ar;
+ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
+ struct ieee80211_hdr *hdr;
+ u16 fc;
+ struct ieee80211_supported_band *sband;
+
+ if (ath11k_pull_mgmt_rx_params_tlv(ab, skb, &rx_ev) != 0) {
+ ath11k_warn(ab, "failed to extract mgmt rx event");
+ dev_kfree_skb(skb);
+ return;
+ }
+
+ memset(status, 0, sizeof(*status));
+
+ ath11k_dbg(ab, ATH11K_DBG_MGMT, "event mgmt rx status %08x\n",
+ rx_ev.status);
+
+ rcu_read_lock();
+ ar = ath11k_mac_get_ar_by_pdev_id(ab, rx_ev.pdev_id);
+
+ if (!ar) {
+ ath11k_warn(ab, "invalid pdev_id %d in mgmt_rx_event\n",
+ rx_ev.pdev_id);
+ dev_kfree_skb(skb);
+ goto exit;
+ }
+
+ if ((test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) ||
+ (rx_ev.status & (WMI_RX_STATUS_ERR_DECRYPT |
+ WMI_RX_STATUS_ERR_KEY_CACHE_MISS | WMI_RX_STATUS_ERR_CRC))) {
+ dev_kfree_skb(skb);
+ goto exit;
+ }
+
+ if (rx_ev.status & WMI_RX_STATUS_ERR_MIC)
+ status->flag |= RX_FLAG_MMIC_ERROR;
+
+ if (rx_ev.chan_freq >= ATH11K_MIN_6G_FREQ &&
+ rx_ev.chan_freq <= ATH11K_MAX_6G_FREQ) {
+ status->band = NL80211_BAND_6GHZ;
+ status->freq = rx_ev.chan_freq;
+ } else if (rx_ev.channel >= 1 && rx_ev.channel <= 14) {
+ status->band = NL80211_BAND_2GHZ;
+ } else if (rx_ev.channel >= 36 && rx_ev.channel <= ATH11K_MAX_5G_CHAN) {
+ status->band = NL80211_BAND_5GHZ;
+ } else {
+ /* Shouldn't happen unless list of advertised channels to
+ * mac80211 has been changed.
+ */
+ WARN_ON_ONCE(1);
+ dev_kfree_skb(skb);
+ goto exit;
+ }
+
+ if (rx_ev.phy_mode == MODE_11B &&
+ (status->band == NL80211_BAND_5GHZ || status->band == NL80211_BAND_6GHZ))
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "mgmt rx 11b (CCK) on 5/6GHz, band = %d\n", status->band);
+
+ sband = &ar->mac.sbands[status->band];
+
+ if (status->band != NL80211_BAND_6GHZ)
+ status->freq = ieee80211_channel_to_frequency(rx_ev.channel,
+ status->band);
+
+ status->signal = rx_ev.snr + ATH11K_DEFAULT_NOISE_FLOOR;
+ status->rate_idx = ath11k_mac_bitrate_to_idx(sband, rx_ev.rate / 100);
+
+ hdr = (struct ieee80211_hdr *)skb->data;
+ fc = le16_to_cpu(hdr->frame_control);
+
+ /* Firmware is guaranteed to report all essential management frames via
+ * WMI while it can deliver some extra via HTT. Since there can be
+ * duplicates split the reporting wrt monitor/sniffing.
+ */
+ status->flag |= RX_FLAG_SKIP_MONITOR;
+
+ /* In case of PMF, FW delivers decrypted frames with Protected Bit set.
+ * Don't clear that. Also, FW delivers broadcast management frames
+ * (ex: group privacy action frames in mesh) as encrypted payload.
+ */
+ if (ieee80211_has_protected(hdr->frame_control) &&
+ !is_multicast_ether_addr(ieee80211_get_DA(hdr))) {
+ status->flag |= RX_FLAG_DECRYPTED;
+
+ if (!ieee80211_is_robust_mgmt_frame(skb)) {
+ status->flag |= RX_FLAG_IV_STRIPPED |
+ RX_FLAG_MMIC_STRIPPED;
+ hdr->frame_control = __cpu_to_le16(fc &
+ ~IEEE80211_FCTL_PROTECTED);
+ }
+ }
+
+ if (ieee80211_is_beacon(hdr->frame_control))
+ ath11k_mac_handle_beacon(ar, skb);
+
+ ath11k_dbg(ab, ATH11K_DBG_MGMT,
+ "event mgmt rx skb %p len %d ftype %02x stype %02x\n",
+ skb, skb->len,
+ fc & IEEE80211_FCTL_FTYPE, fc & IEEE80211_FCTL_STYPE);
+
+ ath11k_dbg(ab, ATH11K_DBG_MGMT,
+ "event mgmt rx freq %d band %d snr %d, rate_idx %d\n",
+ status->freq, status->band, status->signal,
+ status->rate_idx);
+
+ ieee80211_rx_ni(ar->hw, skb);
+
+exit:
+ rcu_read_unlock();
+}
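+
+/* Band classification note for the mgmt rx path above: 6 GHz frames
+ * must be classified by frequency since channel numbers collide across
+ * bands; channels 1-14 map to 2.4 GHz and 36..ATH11K_MAX_5G_CHAN to
+ * 5 GHz, with the frequency recomputed from the channel number via
+ * ieee80211_channel_to_frequency() for those two bands.
+ */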
+
+static void ath11k_mgmt_tx_compl_event(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ struct wmi_mgmt_tx_compl_event tx_compl_param = {};
+ struct ath11k *ar;
+
+ if (ath11k_pull_mgmt_tx_compl_param_tlv(ab, skb, &tx_compl_param) != 0) {
+ ath11k_warn(ab, "failed to extract mgmt tx compl event");
+ return;
+ }
+
+ rcu_read_lock();
+ ar = ath11k_mac_get_ar_by_pdev_id(ab, tx_compl_param.pdev_id);
+ if (!ar) {
+ ath11k_warn(ab, "invalid pdev id %d in mgmt_tx_compl_event\n",
+ tx_compl_param.pdev_id);
+ goto exit;
+ }
+
+ wmi_process_mgmt_tx_comp(ar, &tx_compl_param);
+
+ ath11k_dbg(ab, ATH11K_DBG_MGMT,
+ "event mgmt tx compl ev pdev_id %d, desc_id %d, status %d ack_rssi %d",
+ tx_compl_param.pdev_id, tx_compl_param.desc_id,
+ tx_compl_param.status, tx_compl_param.ack_rssi);
+
+exit:
+ rcu_read_unlock();
+}
+
+static struct ath11k *ath11k_get_ar_on_scan_state(struct ath11k_base *ab,
+ u32 vdev_id,
+ enum ath11k_scan_state state)
+{
+ int i;
+ struct ath11k_pdev *pdev;
+ struct ath11k *ar;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = rcu_dereference(ab->pdevs_active[i]);
+ if (pdev && pdev->ar) {
+ ar = pdev->ar;
+
+ spin_lock_bh(&ar->data_lock);
+ if (ar->scan.state == state &&
+ ar->scan.vdev_id == vdev_id) {
+ spin_unlock_bh(&ar->data_lock);
+ return ar;
+ }
+ spin_unlock_bh(&ar->data_lock);
+ }
+ }
+ return NULL;
+}
+
+static void ath11k_scan_event(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ struct ath11k *ar;
+ struct wmi_scan_event scan_ev = {};
+
+ if (ath11k_pull_scan_ev(ab, skb, &scan_ev) != 0) {
+ ath11k_warn(ab, "failed to extract scan event");
+ return;
+ }
+
+ rcu_read_lock();
+
+ /* In case the scan was cancelled, ex. during interface teardown,
+ * the interface will not be found in active interfaces.
+ * Rather, in such scenarios, iterate over the active pdev's to
+ * search 'ar' if the corresponding 'ar' scan is ABORTING and the
+ * aborting scan's vdev id matches this event info.
+ */
+ if (scan_ev.event_type == WMI_SCAN_EVENT_COMPLETED &&
+ scan_ev.reason == WMI_SCAN_REASON_CANCELLED) {
+ ar = ath11k_get_ar_on_scan_state(ab, scan_ev.vdev_id,
+ ATH11K_SCAN_ABORTING);
+ if (!ar)
+ ar = ath11k_get_ar_on_scan_state(ab, scan_ev.vdev_id,
+ ATH11K_SCAN_RUNNING);
+ } else {
+ ar = ath11k_mac_get_ar_by_vdev_id(ab, scan_ev.vdev_id);
+ }
+
+ if (!ar) {
+ ath11k_warn(ab, "Received scan event for unknown vdev");
+ rcu_read_unlock();
+ return;
+ }
+
+ spin_lock_bh(&ar->data_lock);
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "event scan %s type %d reason %d freq %d req_id %d scan_id %d vdev_id %d state %s (%d)\n",
+ ath11k_wmi_event_scan_type_str(scan_ev.event_type, scan_ev.reason),
+ scan_ev.event_type, scan_ev.reason, scan_ev.channel_freq,
+ scan_ev.scan_req_id, scan_ev.scan_id, scan_ev.vdev_id,
+ ath11k_scan_state_str(ar->scan.state), ar->scan.state);
+
+ switch (scan_ev.event_type) {
+ case WMI_SCAN_EVENT_STARTED:
+ ath11k_wmi_event_scan_started(ar);
+ break;
+ case WMI_SCAN_EVENT_COMPLETED:
+ ath11k_wmi_event_scan_completed(ar);
+ break;
+ case WMI_SCAN_EVENT_BSS_CHANNEL:
+ ath11k_wmi_event_scan_bss_chan(ar);
+ break;
+ case WMI_SCAN_EVENT_FOREIGN_CHAN:
+ ath11k_wmi_event_scan_foreign_chan(ar, scan_ev.channel_freq);
+ break;
+ case WMI_SCAN_EVENT_START_FAILED:
+ ath11k_warn(ab, "received scan start failure event\n");
+ ath11k_wmi_event_scan_start_failed(ar);
+ break;
+ case WMI_SCAN_EVENT_DEQUEUED:
+ __ath11k_mac_scan_finish(ar);
+ break;
+ case WMI_SCAN_EVENT_PREEMPTED:
+ case WMI_SCAN_EVENT_RESTARTED:
+ case WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT:
+ default:
+ break;
+ }
+
+ spin_unlock_bh(&ar->data_lock);
+
+ rcu_read_unlock();
+}
+
+static void ath11k_peer_sta_kickout_event(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ struct wmi_peer_sta_kickout_arg arg = {};
+ struct ieee80211_sta *sta;
+ struct ath11k_peer *peer;
+ struct ath11k *ar;
+ u32 vdev_id;
+
+ if (ath11k_pull_peer_sta_kickout_ev(ab, skb, &arg) != 0) {
+ ath11k_warn(ab, "failed to extract peer sta kickout event");
+ return;
+ }
+
+ rcu_read_lock();
+
+ spin_lock_bh(&ab->base_lock);
+
+ peer = ath11k_peer_find_by_addr(ab, arg.mac_addr);
+
+ if (!peer) {
+ ath11k_warn(ab, "peer not found %pM\n",
+ arg.mac_addr);
+ spin_unlock_bh(&ab->base_lock);
+ goto exit;
+ }
+
+ vdev_id = peer->vdev_id;
+
+ spin_unlock_bh(&ab->base_lock);
+
+ ar = ath11k_mac_get_ar_by_vdev_id(ab, vdev_id);
+ if (!ar) {
+ ath11k_warn(ab, "invalid vdev id in peer sta kickout ev %d",
+ peer->vdev_id);
+ goto exit;
+ }
+
+ sta = ieee80211_find_sta_by_ifaddr(ar->hw,
+ arg.mac_addr, NULL);
+ if (!sta) {
+ ath11k_warn(ab, "Spurious quick kickout for STA %pM\n",
+ arg.mac_addr);
+ goto exit;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "event peer sta kickout %pM",
+ arg.mac_addr);
+
+ ieee80211_report_low_ack(sta, 10);
+
+exit:
+ rcu_read_unlock();
+}
+
+static void ath11k_roam_event(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ struct wmi_roam_event roam_ev = {};
+ struct ath11k *ar;
+
+ if (ath11k_pull_roam_ev(ab, skb, &roam_ev) != 0) {
+ ath11k_warn(ab, "failed to extract roam event");
+ return;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "event roam vdev %u reason 0x%08x rssi %d\n",
+ roam_ev.vdev_id, roam_ev.reason, roam_ev.rssi);
+
+ rcu_read_lock();
+ ar = ath11k_mac_get_ar_by_vdev_id(ab, roam_ev.vdev_id);
+ if (!ar) {
+ ath11k_warn(ab, "invalid vdev id in roam ev %d",
+ roam_ev.vdev_id);
+ rcu_read_unlock();
+ return;
+ }
+
+ if (roam_ev.reason >= WMI_ROAM_REASON_MAX)
+ ath11k_warn(ab, "ignoring unknown roam event reason %d on vdev %i\n",
+ roam_ev.reason, roam_ev.vdev_id);
+
+ switch (roam_ev.reason) {
+ case WMI_ROAM_REASON_BEACON_MISS:
+ ath11k_mac_handle_beacon_miss(ar, roam_ev.vdev_id);
+ break;
+ case WMI_ROAM_REASON_BETTER_AP:
+ case WMI_ROAM_REASON_LOW_RSSI:
+ case WMI_ROAM_REASON_SUITABLE_AP_FOUND:
+ case WMI_ROAM_REASON_HO_FAILED:
+ ath11k_warn(ab, "ignoring not implemented roam event reason %d on vdev %i\n",
+ roam_ev.reason, roam_ev.vdev_id);
+ break;
+ }
+
+ rcu_read_unlock();
+}
+
+static void ath11k_chan_info_event(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ struct wmi_chan_info_event ch_info_ev = {};
+ struct ath11k *ar;
+ struct survey_info *survey;
+ int idx;
+ /* HW channel counters frequency value in hertz */
+ u32 cc_freq_hz = ab->cc_freq_hz;
+
+ if (ath11k_pull_chan_info_ev(ab, skb, &ch_info_ev) != 0) {
+ ath11k_warn(ab, "failed to extract chan info event");
+ return;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "event chan info vdev_id %d err_code %d freq %d cmd_flags %d noise_floor %d rx_clear_count %d cycle_count %d mac_clk_mhz %d\n",
+ ch_info_ev.vdev_id, ch_info_ev.err_code, ch_info_ev.freq,
+ ch_info_ev.cmd_flags, ch_info_ev.noise_floor,
+ ch_info_ev.rx_clear_count, ch_info_ev.cycle_count,
+ ch_info_ev.mac_clk_mhz);
+
+ if (ch_info_ev.cmd_flags == WMI_CHAN_INFO_END_RESP) {
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "chan info report completed\n");
+ return;
+ }
+
+ rcu_read_lock();
+ ar = ath11k_mac_get_ar_by_vdev_id(ab, ch_info_ev.vdev_id);
+ if (!ar) {
+ ath11k_warn(ab, "invalid vdev id in chan info ev %d",
+ ch_info_ev.vdev_id);
+ rcu_read_unlock();
+ return;
+ }
+ spin_lock_bh(&ar->data_lock);
+
+ switch (ar->scan.state) {
+ case ATH11K_SCAN_IDLE:
+ case ATH11K_SCAN_STARTING:
+ ath11k_warn(ab, "received chan info event without a scan request, ignoring\n");
+ goto exit;
+ case ATH11K_SCAN_RUNNING:
+ case ATH11K_SCAN_ABORTING:
+ break;
+ }
+
+ idx = freq_to_idx(ar, ch_info_ev.freq);
+ if (idx >= ARRAY_SIZE(ar->survey)) {
+ ath11k_warn(ab, "chan info: invalid frequency %d (idx %d out of bounds)\n",
+ ch_info_ev.freq, idx);
+ goto exit;
+ }
+
+ /* If FW provides the MAC clock frequency in MHz, override the
+ * initialized HW channel counters frequency value
+ */
+ if (ch_info_ev.mac_clk_mhz)
+ cc_freq_hz = (ch_info_ev.mac_clk_mhz * 1000);
+
+ if (ch_info_ev.cmd_flags == WMI_CHAN_INFO_START_RESP) {
+ survey = &ar->survey[idx];
+ memset(survey, 0, sizeof(*survey));
+ survey->noise = ch_info_ev.noise_floor;
+ survey->filled = SURVEY_INFO_NOISE_DBM | SURVEY_INFO_TIME |
+ SURVEY_INFO_TIME_BUSY;
+ survey->time = div_u64(ch_info_ev.cycle_count, cc_freq_hz);
+ survey->time_busy = div_u64(ch_info_ev.rx_clear_count, cc_freq_hz);
+ }
+exit:
+ spin_unlock_bh(&ar->data_lock);
+ rcu_read_unlock();
+}
+
+static void
+ath11k_pdev_bss_chan_info_event(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ struct wmi_pdev_bss_chan_info_event bss_ch_info_ev = {};
+ struct survey_info *survey;
+ struct ath11k *ar;
+ u32 cc_freq_hz = ab->cc_freq_hz;
+ u64 busy, total, tx, rx, rx_bss;
+ int idx;
+
+ if (ath11k_pull_pdev_bss_chan_info_ev(ab, skb, &bss_ch_info_ev) != 0) {
+ ath11k_warn(ab, "failed to extract pdev bss chan info event");
+ return;
+ }
+
+ busy = (u64)(bss_ch_info_ev.rx_clear_count_high) << 32 |
+ bss_ch_info_ev.rx_clear_count_low;
+
+ total = (u64)(bss_ch_info_ev.cycle_count_high) << 32 |
+ bss_ch_info_ev.cycle_count_low;
+
+ tx = (u64)(bss_ch_info_ev.tx_cycle_count_high) << 32 |
+ bss_ch_info_ev.tx_cycle_count_low;
+
+ rx = (u64)(bss_ch_info_ev.rx_cycle_count_high) << 32 |
+ bss_ch_info_ev.rx_cycle_count_low;
+
+ rx_bss = (u64)(bss_ch_info_ev.rx_bss_cycle_count_high) << 32 |
+ bss_ch_info_ev.rx_bss_cycle_count_low;
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "event pdev bss chan info:\n pdev_id: %d freq: %d noise: %d cycle: busy %llu total %llu tx %llu rx %llu rx_bss %llu\n",
+ bss_ch_info_ev.pdev_id, bss_ch_info_ev.freq,
+ bss_ch_info_ev.noise_floor, busy, total,
+ tx, rx, rx_bss);
+
+ rcu_read_lock();
+ ar = ath11k_mac_get_ar_by_pdev_id(ab, bss_ch_info_ev.pdev_id);
+
+ if (!ar) {
+ ath11k_warn(ab, "invalid pdev id %d in bss_chan_info event\n",
+ bss_ch_info_ev.pdev_id);
+ rcu_read_unlock();
+ return;
+ }
+
+ spin_lock_bh(&ar->data_lock);
+ idx = freq_to_idx(ar, bss_ch_info_ev.freq);
+ if (idx >= ARRAY_SIZE(ar->survey)) {
+ ath11k_warn(ab, "bss chan info: invalid frequency %d (idx %d out of bounds)\n",
+ bss_ch_info_ev.freq, idx);
+ goto exit;
+ }
+
+ survey = &ar->survey[idx];
+
+ survey->noise = bss_ch_info_ev.noise_floor;
+ survey->time = div_u64(total, cc_freq_hz);
+ survey->time_busy = div_u64(busy, cc_freq_hz);
+ survey->time_rx = div_u64(rx_bss, cc_freq_hz);
+ survey->time_tx = div_u64(tx, cc_freq_hz);
+ survey->filled |= (SURVEY_INFO_NOISE_DBM |
+ SURVEY_INFO_TIME |
+ SURVEY_INFO_TIME_BUSY |
+ SURVEY_INFO_TIME_RX |
+ SURVEY_INFO_TIME_TX);
+exit:
+ spin_unlock_bh(&ar->data_lock);
+ complete(&ar->bss_survey_done);
+
+ rcu_read_unlock();
+}
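+
+/* Illustrative sketch, not part of the driver: each firmware cycle
+ * counter above arrives as two 32-bit halves and is scaled by
+ * cc_freq_hz to fill the survey fields. Despite the name, cc_freq_hz
+ * appears to hold cycles per millisecond (cf. the mac_clk_mhz * 1000
+ * override in ath11k_chan_info_event), so the quotient is in ms.
+ */
+static inline u64 wmi_cc_to_msecs_sketch(u32 high, u32 low, u32 cc_freq_hz)
+{
+ u64 cycles = ((u64)high << 32) | low;
+
+ return div_u64(cycles, cc_freq_hz);
+}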
+
+static void ath11k_vdev_install_key_compl_event(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ struct wmi_vdev_install_key_complete_arg install_key_compl = {};
+ struct ath11k *ar;
+
+ if (ath11k_pull_vdev_install_key_compl_ev(ab, skb, &install_key_compl) != 0) {
+ ath11k_warn(ab, "failed to extract install key compl event");
+ return;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "event vdev install key ev idx %d flags %08x macaddr %pM status %d\n",
+ install_key_compl.key_idx, install_key_compl.key_flags,
+ install_key_compl.macaddr, install_key_compl.status);
+
+ rcu_read_lock();
+ ar = ath11k_mac_get_ar_by_vdev_id(ab, install_key_compl.vdev_id);
+ if (!ar) {
+ ath11k_warn(ab, "invalid vdev id in install key compl ev %d",
+ install_key_compl.vdev_id);
+ rcu_read_unlock();
+ return;
+ }
+
+ ar->install_key_status = 0;
+
+ if (install_key_compl.status != WMI_VDEV_INSTALL_KEY_COMPL_STATUS_SUCCESS) {
+ ath11k_warn(ab, "install key failed for %pM status %d\n",
+ install_key_compl.macaddr, install_key_compl.status);
+ ar->install_key_status = install_key_compl.status;
+ }
+
+ complete(&ar->install_key_done);
+ rcu_read_unlock();
+}
+
+static int ath11k_wmi_tlv_services_parser(struct ath11k_base *ab,
+ u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ const struct wmi_service_available_event *ev;
+ u32 *wmi_ext2_service_bitmap;
+ int i, j;
+
+ switch (tag) {
+ case WMI_TAG_SERVICE_AVAILABLE_EVENT:
+ ev = (struct wmi_service_available_event *)ptr;
+ for (i = 0, j = WMI_MAX_SERVICE;
+ i < WMI_SERVICE_SEGMENT_BM_SIZE32 && j < WMI_MAX_EXT_SERVICE;
+ i++) {
+ do {
+ if (ev->wmi_service_segment_bitmap[i] &
+ BIT(j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32))
+ set_bit(j, ab->wmi_ab.svc_map);
+ } while (++j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32);
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "wmi_ext_service_bitmap 0:0x%04x, 1:0x%04x, 2:0x%04x, 3:0x%04x",
+ ev->wmi_service_segment_bitmap[0],
+ ev->wmi_service_segment_bitmap[1],
+ ev->wmi_service_segment_bitmap[2],
+ ev->wmi_service_segment_bitmap[3]);
+ break;
+ case WMI_TAG_ARRAY_UINT32:
+ wmi_ext2_service_bitmap = (u32 *)ptr;
+ for (i = 0, j = WMI_MAX_EXT_SERVICE;
+ i < WMI_SERVICE_SEGMENT_BM_SIZE32 && j < WMI_MAX_EXT2_SERVICE;
+ i++) {
+ do {
+ if (wmi_ext2_service_bitmap[i] &
+ BIT(j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32))
+ set_bit(j, ab->wmi_ab.svc_map);
+ } while (++j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32);
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "wmi_ext2_service__bitmap 0:0x%04x, 1:0x%04x, 2:0x%04x, 3:0x%04x",
+ wmi_ext2_service_bitmap[0], wmi_ext2_service_bitmap[1],
+ wmi_ext2_service_bitmap[2], wmi_ext2_service_bitmap[3]);
+ break;
+ }
+ return 0;
+}
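+
+/* Illustrative sketch, not part of the driver: how the segmented
+ * bitmaps walked above map a global service id onto a (word, bit)
+ * pair. This only lines up with the parser when the first id of the
+ * segment (e.g. WMI_MAX_SERVICE) is a multiple of the 32-bit segment
+ * width, which holds for the values used here.
+ */
+static inline bool wmi_ext_service_is_set_sketch(const u32 *segment_bitmap,
+ int service_id, int first_id)
+{
+ int rel = service_id - first_id;
+ int word = rel / WMI_AVAIL_SERVICE_BITS_IN_SIZE32;
+ int bit = rel % WMI_AVAIL_SERVICE_BITS_IN_SIZE32;
+
+ return !!(segment_bitmap[word] & BIT(bit));
+}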
+
+static void ath11k_service_available_event(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ int ret;
+
+ ret = ath11k_wmi_tlv_iter(ab, skb->data, skb->len,
+ ath11k_wmi_tlv_services_parser,
+ NULL);
+ if (ret)
+ ath11k_warn(ab, "failed to parse services available tlv %d\n", ret);
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "event service available");
+}
+
+static void ath11k_peer_assoc_conf_event(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ struct wmi_peer_assoc_conf_arg peer_assoc_conf = {};
+ struct ath11k *ar;
+
+ if (ath11k_pull_peer_assoc_conf_ev(ab, skb, &peer_assoc_conf) != 0) {
+ ath11k_warn(ab, "failed to extract peer assoc conf event");
+ return;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "event peer assoc conf ev vdev id %d macaddr %pM\n",
+ peer_assoc_conf.vdev_id, peer_assoc_conf.macaddr);
+
+ rcu_read_lock();
+ ar = ath11k_mac_get_ar_by_vdev_id(ab, peer_assoc_conf.vdev_id);
+
+ if (!ar) {
+ ath11k_warn(ab, "invalid vdev id in peer assoc conf ev %d",
+ peer_assoc_conf.vdev_id);
+ rcu_read_unlock();
+ return;
+ }
+
+ complete(&ar->peer_assoc_done);
+ rcu_read_unlock();
+}
+
+static void ath11k_update_stats_event(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ struct ath11k_fw_stats stats = {};
+ size_t total_vdevs_started = 0;
+ struct ath11k_pdev *pdev;
+ bool is_end = true;
+ struct ath11k *ar;
+ int ret;
+ int i;
+
+ INIT_LIST_HEAD(&stats.pdevs);
+ INIT_LIST_HEAD(&stats.vdevs);
+ INIT_LIST_HEAD(&stats.bcn);
+
+ ret = ath11k_wmi_pull_fw_stats(ab, skb, &stats);
+ if (ret) {
+ ath11k_warn(ab, "failed to pull fw stats: %d\n", ret);
+ goto free;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "event update stats");
+
+ rcu_read_lock();
+ ar = ath11k_mac_get_ar_by_pdev_id(ab, stats.pdev_id);
+ if (!ar) {
+ rcu_read_unlock();
+ ath11k_warn(ab, "failed to get ar for pdev_id %d: %d\n",
+ stats.pdev_id, ret);
+ goto free;
+ }
+
+ spin_lock_bh(&ar->data_lock);
+
+ /* WMI_REQUEST_PDEV_STAT, WMI_REQUEST_VDEV_STAT and
+ * WMI_REQUEST_RSSI_PER_CHAIN_STAT can be requested via mac ops or via
+ * debugfs fw stats. Therefore, processing it separately.
+ */
+ if (stats.stats_id == WMI_REQUEST_PDEV_STAT) {
+ list_splice_tail_init(&stats.pdevs, &ar->fw_stats.pdevs);
+ complete(&ar->fw_stats_done);
+ goto complete;
+ }
+
+ if (stats.stats_id == WMI_REQUEST_RSSI_PER_CHAIN_STAT) {
+ complete(&ar->fw_stats_done);
+ goto complete;
+ }
+
+ if (stats.stats_id == WMI_REQUEST_VDEV_STAT) {
+ if (list_empty(&stats.vdevs)) {
+ ath11k_warn(ab, "empty vdev stats");
+ goto complete;
+ }
+ /* FW sends all the active VDEV stats irrespective of PDEV,
+ * hence limit until the count of all VDEVs started
+ */
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = rcu_dereference(ab->pdevs_active[i]);
+ if (pdev && pdev->ar)
+ total_vdevs_started += pdev->ar->num_started_vdevs;
+ }
+
+ if (total_vdevs_started)
+ is_end = ((++ar->fw_stats.num_vdev_recvd) ==
+ total_vdevs_started);
+
+ list_splice_tail_init(&stats.vdevs,
+ &ar->fw_stats.vdevs);
+
+ if (is_end)
+ complete(&ar->fw_stats_done);
+
+ goto complete;
+ }
+
+ /* WMI_REQUEST_BCN_STAT is currently requested only via debugfs fw stats.
+ * Hence, processing it in debugfs context
+ */
+ ath11k_debugfs_fw_stats_process(ar, &stats);
+
+complete:
+ complete(&ar->fw_stats_complete);
+ spin_unlock_bh(&ar->data_lock);
+ rcu_read_unlock();
+
+ /* Since the stats's pdev, vdev and beacon list are spliced and reinitialised
+ * at this point, no need to free the individual list.
+ */
+ return;
+
+free:
+ ath11k_fw_stats_free(&stats);
+}
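+
+/* Note on the VDEV stats end detection above: the counter treats each
+ * received event as covering one started vdev, so with e.g. three
+ * vdevs started across all pdevs, fw_stats_done completes only after
+ * the third event arrives.
+ */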
+
+/* PDEV_CTL_FAILSAFE_CHECK_EVENT is received from FW when the frequency scanned
+ * is not part of BDF CTL(Conformance test limits) table entries.
+ */
+static void ath11k_pdev_ctl_failsafe_check_event(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ const void **tb;
+ const struct wmi_pdev_ctl_failsafe_chk_event *ev;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return;
+ }
+
+ ev = tb[WMI_TAG_PDEV_CTL_FAILSAFE_CHECK_EVENT];
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch pdev ctl failsafe check ev");
+ kfree(tb);
+ return;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "event pdev ctl failsafe check status %d\n",
+ ev->ctl_failsafe_status);
+
+ /* If ctl_failsafe_status is set to 1 FW will max out the Transmit power
+ * to 10 dBm else the CTL power entry in the BDF would be picked up.
+ */
+ if (ev->ctl_failsafe_status != 0)
+ ath11k_warn(ab, "pdev ctl failsafe failure status %d",
+ ev->ctl_failsafe_status);
+
+ kfree(tb);
+}
+
+static void
+ath11k_wmi_process_csa_switch_count_event(struct ath11k_base *ab,
+ const struct wmi_pdev_csa_switch_ev *ev,
+ const u32 *vdev_ids)
+{
+ int i;
+ struct ath11k_vif *arvif;
+
+ /* Finish CSA once the switch count reaches zero */
+ if (ev->current_switch_count)
+ return;
+
+ rcu_read_lock();
+ for (i = 0; i < ev->num_vdevs; i++) {
+ arvif = ath11k_mac_get_arvif_by_vdev_id(ab, vdev_ids[i]);
+
+ if (!arvif) {
+ ath11k_warn(ab, "Recvd csa status for unknown vdev %d",
+ vdev_ids[i]);
+ continue;
+ }
+
+ if (arvif->is_up && arvif->vif->bss_conf.csa_active)
+ ieee80211_csa_finish(arvif->vif, 0);
+ }
+ rcu_read_unlock();
+}
+
+static void
+ath11k_wmi_pdev_csa_switch_count_status_event(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ const void **tb;
+ const struct wmi_pdev_csa_switch_ev *ev;
+ const u32 *vdev_ids;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return;
+ }
+
+ ev = tb[WMI_TAG_PDEV_CSA_SWITCH_COUNT_STATUS_EVENT];
+ vdev_ids = tb[WMI_TAG_ARRAY_UINT32];
+
+ if (!ev || !vdev_ids) {
+ ath11k_warn(ab, "failed to fetch pdev csa switch count ev");
+ kfree(tb);
+ return;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "event pdev csa switch count %d for pdev %d, num_vdevs %d",
+ ev->current_switch_count, ev->pdev_id,
+ ev->num_vdevs);
+
+ ath11k_wmi_process_csa_switch_count_event(ab, ev, vdev_ids);
+
+ kfree(tb);
+}
+
+static void
+ath11k_wmi_pdev_dfs_radar_detected_event(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ const void **tb;
+ const struct wmi_pdev_radar_ev *ev;
+ struct ath11k *ar;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return;
+ }
+
+ ev = tb[WMI_TAG_PDEV_DFS_RADAR_DETECTION_EVENT];
+
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch pdev dfs radar detected ev");
+ kfree(tb);
+ return;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "event pdev dfs radar detected on pdev %d, detection mode %d, chan freq %d, chan_width %d, detector id %d, seg id %d, timestamp %d, chirp %d, freq offset %d, sidx %d",
+ ev->pdev_id, ev->detection_mode, ev->chan_freq, ev->chan_width,
+ ev->detector_id, ev->segment_id, ev->timestamp, ev->is_chirp,
+ ev->freq_offset, ev->sidx);
+
+ rcu_read_lock();
+
+ ar = ath11k_mac_get_ar_by_pdev_id(ab, ev->pdev_id);
+
+ if (!ar) {
+ ath11k_warn(ab, "radar detected in invalid pdev %d\n",
+ ev->pdev_id);
+ goto exit;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_REG, "DFS Radar Detected in pdev %d\n",
+ ev->pdev_id);
+
+ if (ar->dfs_block_radar_events)
+ ath11k_info(ab, "DFS Radar detected, but ignored as requested\n");
+ else
+ ieee80211_radar_detected(ar->hw, NULL);
+
+exit:
+ rcu_read_unlock();
+
+ kfree(tb);
+}
+
+static void
+ath11k_wmi_pdev_temperature_event(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ struct ath11k *ar;
+ const void **tb;
+ const struct wmi_pdev_temperature_event *ev;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return;
+ }
+
+ ev = tb[WMI_TAG_PDEV_TEMPERATURE_EVENT];
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch pdev temp ev");
+ kfree(tb);
+ return;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "event pdev temperature ev temp %d pdev_id %d\n",
+ ev->temp, ev->pdev_id);
+
+ rcu_read_lock();
+
+ ar = ath11k_mac_get_ar_by_pdev_id(ab, ev->pdev_id);
+ if (!ar) {
+ ath11k_warn(ab, "invalid pdev id in pdev temperature ev %d", ev->pdev_id);
+ goto exit;
+ }
+
+ ath11k_thermal_event_temperature(ar, ev->temp);
+
+exit:
+ rcu_read_unlock();
+
+ kfree(tb);
+}
+
+static void ath11k_fils_discovery_event(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ const void **tb;
+ const struct wmi_fils_discovery_event *ev;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab,
+ "failed to parse FILS discovery event tlv %d\n",
+ ret);
+ return;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "event fils discovery");
+
+ ev = tb[WMI_TAG_HOST_SWFDA_EVENT];
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch FILS discovery event\n");
+ kfree(tb);
+ return;
+ }
+
+ ath11k_warn(ab,
+ "FILS discovery frame expected from host for vdev_id: %u, transmission scheduled at %u, next TBTT: %u\n",
+ ev->vdev_id, ev->fils_tt, ev->tbtt);
+
+ kfree(tb);
+}
+
+static void ath11k_probe_resp_tx_status_event(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ const void **tb;
+ const struct wmi_probe_resp_tx_status_event *ev;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab,
+ "failed to parse probe response transmission status event tlv: %d\n",
+ ret);
+ return;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "event probe resp tx status");
+
+ ev = tb[WMI_TAG_OFFLOAD_PRB_RSP_TX_STATUS_EVENT];
+ if (!ev) {
+ ath11k_warn(ab,
+ "failed to fetch probe response transmission status event");
+ kfree(tb);
+ return;
+ }
+
+ if (ev->tx_status)
+ ath11k_warn(ab,
+ "Probe response transmission failed for vdev_id %u, status %u\n",
+ ev->vdev_id, ev->tx_status);
+
+ kfree(tb);
+}
+
+static int ath11k_wmi_tlv_wow_wakeup_host_parse(struct ath11k_base *ab,
+ u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct wmi_wow_ev_arg *ev = data;
+ const char *wow_pg_fault;
+ int wow_pg_len;
+
+ switch (tag) {
+ case WMI_TAG_WOW_EVENT_INFO:
+ memcpy(ev, ptr, sizeof(*ev));
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "wow wakeup host reason %d %s\n",
+ ev->wake_reason, wow_reason(ev->wake_reason));
+ break;
+
+ case WMI_TAG_ARRAY_BYTE:
+ if (ev && ev->wake_reason == WOW_REASON_PAGE_FAULT) {
+ wow_pg_fault = ptr;
+ /* the first 4 bytes are length */
+ wow_pg_len = *(int *)wow_pg_fault;
+ wow_pg_fault += sizeof(int);
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "wow data_len = %d\n",
+ wow_pg_len);
+ ath11k_dbg_dump(ab, ATH11K_DBG_WMI,
+ "wow_event_info_type packet present",
+ "wow_pg_fault ",
+ wow_pg_fault,
+ wow_pg_len);
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
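+
+/* Layout note for the WMI_TAG_ARRAY_BYTE payload handled above when
+ * the wake reason is a page fault: a host-endian 32-bit byte count
+ * followed by that many bytes of fault data,
+ *
+ * | u32 len | u8 data[len] |
+ */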
+
+static void ath11k_wmi_event_wow_wakeup_host(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ struct wmi_wow_ev_arg ev = { };
+ int ret;
+
+ ret = ath11k_wmi_tlv_iter(ab, skb->data, skb->len,
+ ath11k_wmi_tlv_wow_wakeup_host_parse,
+ &ev);
+ if (ret) {
+ ath11k_warn(ab, "failed to parse wmi wow tlv: %d\n", ret);
+ return;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "event wow wakeup host");
+
+ complete(&ab->wow.wakeup_completed);
+}
+
+static void
+ath11k_wmi_diag_event(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "event diag");
+
+ trace_ath11k_wmi_diag(ab, skb->data, skb->len);
+}
+
+static const char *ath11k_wmi_twt_add_dialog_event_status(u32 status)
+{
+ switch (status) {
+ case WMI_ADD_TWT_STATUS_OK:
+ return "ok";
+ case WMI_ADD_TWT_STATUS_TWT_NOT_ENABLED:
+ return "twt disabled";
+ case WMI_ADD_TWT_STATUS_USED_DIALOG_ID:
+ return "dialog id in use";
+ case WMI_ADD_TWT_STATUS_INVALID_PARAM:
+ return "invalid parameters";
+ case WMI_ADD_TWT_STATUS_NOT_READY:
+ return "not ready";
+ case WMI_ADD_TWT_STATUS_NO_RESOURCE:
+ return "resource unavailable";
+ case WMI_ADD_TWT_STATUS_NO_ACK:
+ return "no ack";
+ case WMI_ADD_TWT_STATUS_NO_RESPONSE:
+ return "no response";
+ case WMI_ADD_TWT_STATUS_DENIED:
+ return "denied";
+ case WMI_ADD_TWT_STATUS_UNKNOWN_ERROR:
+ fallthrough;
+ default:
+ return "unknown error";
+ }
+}
+
+static void ath11k_wmi_twt_add_dialog_event(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ const void **tb;
+ const struct wmi_twt_add_dialog_event *ev;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab,
+ "failed to parse wmi twt add dialog status event tlv: %d\n",
+ ret);
+ return;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "event twt add dialog");
+
+ ev = tb[WMI_TAG_TWT_ADD_DIALOG_COMPLETE_EVENT];
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch twt add dialog wmi event\n");
+ goto exit;
+ }
+
+ if (ev->status)
+ ath11k_warn(ab,
+ "wmi add twt dialog event vdev %d dialog id %d status %s\n",
+ ev->vdev_id, ev->dialog_id,
+ ath11k_wmi_twt_add_dialog_event_status(ev->status));
+
+exit:
+ kfree(tb);
+}
+
+static void ath11k_wmi_gtk_offload_status_event(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ const void **tb;
+ const struct wmi_gtk_offload_status_event *ev;
+ struct ath11k_vif *arvif;
+ __be64 replay_ctr_be;
+ u64 replay_ctr;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return;
+ }
+
+ ev = tb[WMI_TAG_GTK_OFFLOAD_STATUS_EVENT];
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch gtk offload status ev");
+ kfree(tb);
+ return;
+ }
+
+ rcu_read_lock();
+
+ arvif = ath11k_mac_get_arvif_by_vdev_id(ab, ev->vdev_id);
+ if (!arvif) {
+ ath11k_warn(ab, "failed to get arvif for vdev_id:%d\n",
+ ev->vdev_id);
+ goto exit;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "event gtk offload refresh_cnt %d\n",
+ ev->refresh_cnt);
+ ath11k_dbg_dump(ab, ATH11K_DBG_WMI, "replay_cnt",
+ NULL, ev->replay_ctr.counter, GTK_REPLAY_COUNTER_BYTES);
+
+ replay_ctr = ev->replay_ctr.word1;
+ replay_ctr = (replay_ctr << 32) | ev->replay_ctr.word0;
+ arvif->rekey_data.replay_ctr = replay_ctr;
+
+ /* supplicant expects big-endian replay counter */
+ replay_ctr_be = cpu_to_be64(replay_ctr);
+
+ ieee80211_gtk_rekey_notify(arvif->vif, arvif->bssid,
+ (void *)&replay_ctr_be, GFP_ATOMIC);
+exit:
+ rcu_read_unlock();
+
+ kfree(tb);
+}
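+
+/* Illustrative sketch, not part of the driver: firmware splits the GTK
+ * replay counter into two 32-bit words, while the supplicant expects
+ * the 8-byte counter big-endian, exactly as assembled above.
+ */
+static inline __be64 wmi_replay_ctr_to_be_sketch(u32 word0, u32 word1)
+{
+ u64 ctr = ((u64)word1 << 32) | word0;
+
+ return cpu_to_be64(ctr);
+}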
+
+static void ath11k_wmi_p2p_noa_event(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ const void **tb;
+ const struct wmi_p2p_noa_event *ev;
+ const struct ath11k_wmi_p2p_noa_info *noa;
+ struct ath11k *ar;
+ int vdev_id;
+ u8 noa_descriptors;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ath11k_warn(ab, "failed to parse tlv: %ld\n", PTR_ERR(tb));
+ return;
+ }
+
+ ev = tb[WMI_TAG_P2P_NOA_EVENT];
+ noa = tb[WMI_TAG_P2P_NOA_INFO];
+
+ if (!ev || !noa)
+ goto out;
+
+ vdev_id = ev->vdev_id;
+ noa_descriptors = u32_get_bits(noa->noa_attr,
+ WMI_P2P_NOA_INFO_DESC_NUM);
+
+ if (noa_descriptors > WMI_P2P_MAX_NOA_DESCRIPTORS) {
+ ath11k_warn(ab, "invalid descriptor num %d in P2P NoA event\n",
+ noa_descriptors);
+ goto out;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "wmi tlv p2p noa vdev_id %i descriptors %u\n",
+ vdev_id, noa_descriptors);
+
+ rcu_read_lock();
+ ar = ath11k_mac_get_ar_by_vdev_id(ab, vdev_id);
+ if (!ar) {
+ ath11k_warn(ab, "invalid vdev id %d in P2P NoA event\n",
+ vdev_id);
+ goto unlock;
+ }
+
+ ath11k_p2p_noa_update_by_vdev_id(ar, vdev_id, noa);
+
+unlock:
+ rcu_read_unlock();
+out:
+ kfree(tb);
+}
+
+static void ath11k_wmi_tlv_op_rx(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ struct wmi_cmd_hdr *cmd_hdr;
+ enum wmi_tlv_event_id id;
+
+ cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
+ id = FIELD_GET(WMI_CMD_HDR_CMD_ID, cmd_hdr->cmd_id);
+
+ trace_ath11k_wmi_event(ab, id, skb->data, skb->len);
+
+ if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
+ goto out;
+
+ switch (id) {
+ /* Process all the WMI events here */
+ case WMI_SERVICE_READY_EVENTID:
+ ath11k_service_ready_event(ab, skb);
+ break;
+ case WMI_SERVICE_READY_EXT_EVENTID:
+ ath11k_service_ready_ext_event(ab, skb);
+ break;
+ case WMI_SERVICE_READY_EXT2_EVENTID:
+ ath11k_service_ready_ext2_event(ab, skb);
+ break;
+ case WMI_REG_CHAN_LIST_CC_EVENTID:
+ ath11k_reg_chan_list_event(ab, skb, WMI_REG_CHAN_LIST_CC_ID);
+ break;
+ case WMI_REG_CHAN_LIST_CC_EXT_EVENTID:
+ ath11k_reg_chan_list_event(ab, skb, WMI_REG_CHAN_LIST_CC_EXT_ID);
+ break;
+ case WMI_READY_EVENTID:
+ ath11k_ready_event(ab, skb);
+ break;
+ case WMI_PEER_DELETE_RESP_EVENTID:
+ ath11k_peer_delete_resp_event(ab, skb);
+ break;
+ case WMI_VDEV_START_RESP_EVENTID:
+ ath11k_vdev_start_resp_event(ab, skb);
+ break;
+ case WMI_OFFLOAD_BCN_TX_STATUS_EVENTID:
+ ath11k_bcn_tx_status_event(ab, skb);
+ break;
+ case WMI_VDEV_STOPPED_EVENTID:
+ ath11k_vdev_stopped_event(ab, skb);
+ break;
+ case WMI_MGMT_RX_EVENTID:
+ ath11k_mgmt_rx_event(ab, skb);
+ /* mgmt_rx_event() owns the skb now! */
+ return;
+ case WMI_MGMT_TX_COMPLETION_EVENTID:
+ ath11k_mgmt_tx_compl_event(ab, skb);
+ break;
+ case WMI_SCAN_EVENTID:
+ ath11k_scan_event(ab, skb);
+ break;
+ case WMI_PEER_STA_KICKOUT_EVENTID:
+ ath11k_peer_sta_kickout_event(ab, skb);
+ break;
+ case WMI_ROAM_EVENTID:
+ ath11k_roam_event(ab, skb);
+ break;
+ case WMI_CHAN_INFO_EVENTID:
+ ath11k_chan_info_event(ab, skb);
+ break;
+ case WMI_PDEV_BSS_CHAN_INFO_EVENTID:
+ ath11k_pdev_bss_chan_info_event(ab, skb);
+ break;
+ case WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID:
+ ath11k_vdev_install_key_compl_event(ab, skb);
+ break;
+ case WMI_SERVICE_AVAILABLE_EVENTID:
+ ath11k_service_available_event(ab, skb);
+ break;
+ case WMI_PEER_ASSOC_CONF_EVENTID:
+ ath11k_peer_assoc_conf_event(ab, skb);
+ break;
+ case WMI_UPDATE_STATS_EVENTID:
+ ath11k_update_stats_event(ab, skb);
+ break;
+ case WMI_PDEV_CTL_FAILSAFE_CHECK_EVENTID:
+ ath11k_pdev_ctl_failsafe_check_event(ab, skb);
+ break;
+ case WMI_PDEV_CSA_SWITCH_COUNT_STATUS_EVENTID:
+ ath11k_wmi_pdev_csa_switch_count_status_event(ab, skb);
+ break;
+ case WMI_PDEV_UTF_EVENTID:
+ ath11k_tm_wmi_event(ab, id, skb);
+ break;
+ case WMI_PDEV_TEMPERATURE_EVENTID:
+ ath11k_wmi_pdev_temperature_event(ab, skb);
+ break;
+ case WMI_PDEV_DMA_RING_BUF_RELEASE_EVENTID:
+ ath11k_wmi_pdev_dma_ring_buf_release_event(ab, skb);
+ break;
+ case WMI_HOST_FILS_DISCOVERY_EVENTID:
+ ath11k_fils_discovery_event(ab, skb);
+ break;
+ case WMI_OFFLOAD_PROB_RESP_TX_STATUS_EVENTID:
+ ath11k_probe_resp_tx_status_event(ab, skb);
+ break;
+ case WMI_OBSS_COLOR_COLLISION_DETECTION_EVENTID:
+ ath11k_wmi_obss_color_collision_event(ab, skb);
+ break;
+ case WMI_TWT_ADD_DIALOG_EVENTID:
+ ath11k_wmi_twt_add_dialog_event(ab, skb);
+ break;
+ case WMI_PDEV_DFS_RADAR_DETECTION_EVENTID:
+ ath11k_wmi_pdev_dfs_radar_detected_event(ab, skb);
+ break;
+ case WMI_VDEV_DELETE_RESP_EVENTID:
+ ath11k_vdev_delete_resp_event(ab, skb);
+ break;
+ case WMI_WOW_WAKEUP_HOST_EVENTID:
+ ath11k_wmi_event_wow_wakeup_host(ab, skb);
+ break;
+ case WMI_11D_NEW_COUNTRY_EVENTID:
+ ath11k_reg_11d_new_cc_event(ab, skb);
+ break;
+ case WMI_DIAG_EVENTID:
+ ath11k_wmi_diag_event(ab, skb);
+ break;
+ case WMI_PEER_STA_PS_STATECHG_EVENTID:
+ ath11k_wmi_event_peer_sta_ps_state_chg(ab, skb);
+ break;
+ case WMI_GTK_OFFLOAD_STATUS_EVENTID:
+ ath11k_wmi_gtk_offload_status_event(ab, skb);
+ break;
+ case WMI_P2P_NOA_EVENTID:
+ ath11k_wmi_p2p_noa_event(ab, skb);
+ break;
+ default:
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "unsupported event id 0x%x\n", id);
+ break;
+ }
+
+out:
+ dev_kfree_skb(skb);
+}
+
+static int ath11k_connect_pdev_htc_service(struct ath11k_base *ab,
+ u32 pdev_idx)
+{
+ int status;
+ u32 svc_id[] = { ATH11K_HTC_SVC_ID_WMI_CONTROL,
+ ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC1,
+ ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC2 };
+
+ struct ath11k_htc_svc_conn_req conn_req;
+ struct ath11k_htc_svc_conn_resp conn_resp;
+
+ memset(&conn_req, 0, sizeof(conn_req));
+ memset(&conn_resp, 0, sizeof(conn_resp));
+
+ /* these fields are the same for all service endpoints */
+ conn_req.ep_ops.ep_tx_complete = ath11k_wmi_htc_tx_complete;
+ conn_req.ep_ops.ep_rx_complete = ath11k_wmi_tlv_op_rx;
+ conn_req.ep_ops.ep_tx_credits = ath11k_wmi_op_ep_tx_credits;
+
+ /* connect to control service */
+ conn_req.service_id = svc_id[pdev_idx];
+
+ status = ath11k_htc_connect_service(&ab->htc, &conn_req, &conn_resp);
+ if (status) {
+ ath11k_warn(ab, "failed to connect to WMI CONTROL service status: %d\n",
+ status);
+ return status;
+ }
+
+ ab->wmi_ab.wmi_endpoint_id[pdev_idx] = conn_resp.eid;
+ ab->wmi_ab.wmi[pdev_idx].eid = conn_resp.eid;
+ ab->wmi_ab.max_msg_len[pdev_idx] = conn_resp.max_msg_len;
+ init_waitqueue_head(&ab->wmi_ab.wmi[pdev_idx].tx_ce_desc_wq);
+
+ return 0;
+}
+
+static int
+ath11k_wmi_send_unit_test_cmd(struct ath11k *ar,
+ struct wmi_unit_test_cmd ut_cmd,
+ u32 *test_args)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_unit_test_cmd *cmd;
+ struct sk_buff *skb;
+ struct wmi_tlv *tlv;
+ void *ptr;
+ u32 *ut_cmd_args;
+ int buf_len, arg_len;
+ int ret;
+ int i;
+
+ arg_len = sizeof(u32) * ut_cmd.num_args;
+ buf_len = sizeof(ut_cmd) + arg_len + TLV_HDR_SIZE;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, buf_len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_unit_test_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_UNIT_TEST_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(ut_cmd) - TLV_HDR_SIZE);
+
+ cmd->vdev_id = ut_cmd.vdev_id;
+ cmd->module_id = ut_cmd.module_id;
+ cmd->num_args = ut_cmd.num_args;
+ cmd->diag_token = ut_cmd.diag_token;
+
+ ptr = skb->data + sizeof(ut_cmd);
+
+ tlv = ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_UINT32) |
+ FIELD_PREP(WMI_TLV_LEN, arg_len);
+
+ ptr += TLV_HDR_SIZE;
+
+ ut_cmd_args = ptr;
+ for (i = 0; i < ut_cmd.num_args; i++)
+ ut_cmd_args[i] = test_args[i];
+
+ /* log before sending: on failure the skb (and thus cmd) is freed */
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "cmd unit test module %d vdev %d n_args %d token %d\n",
+ cmd->module_id, cmd->vdev_id, cmd->num_args,
+ cmd->diag_token);
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_UNIT_TEST_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send WMI_UNIT_TEST CMD :%d\n",
+ ret);
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
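+
+/* Layout of the unit test command buffer built above; TLV_HDR_SIZE is
+ * the size of the struct wmi_tlv header preceding the argument array:
+ *
+ * | wmi_unit_test_cmd (WMI_TAG_UNIT_TEST_CMD) |
+ * | wmi_tlv (WMI_TAG_ARRAY_UINT32, len = num_args * sizeof(u32)) |
+ * | u32 args[num_args] |
+ */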
+
+int ath11k_wmi_simulate_radar(struct ath11k *ar)
+{
+ struct ath11k_vif *arvif;
+ u32 dfs_args[DFS_MAX_TEST_ARGS];
+ struct wmi_unit_test_cmd wmi_ut;
+ bool arvif_found = false;
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ if (arvif->is_started && arvif->vdev_type == WMI_VDEV_TYPE_AP) {
+ arvif_found = true;
+ break;
+ }
+ }
+
+ if (!arvif_found)
+ return -EINVAL;
+
+ dfs_args[DFS_TEST_CMDID] = 0;
+ dfs_args[DFS_TEST_PDEV_ID] = ar->pdev->pdev_id;
+ /* Currently we could pass segment_id(b0 - b1), chirp(b2)
+ * freq offset (b3 - b10) to unit test. For simulation
+ * purpose this can be set to 0 which is valid.
+ */
+ dfs_args[DFS_TEST_RADAR_PARAM] = 0;
+
+ wmi_ut.vdev_id = arvif->vdev_id;
+ wmi_ut.module_id = DFS_UNIT_TEST_MODULE;
+ wmi_ut.num_args = DFS_MAX_TEST_ARGS;
+ wmi_ut.diag_token = DFS_UNIT_TEST_TOKEN;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_REG, "Triggering Radar Simulation\n");
+
+ return ath11k_wmi_send_unit_test_cmd(ar, wmi_ut, dfs_args);
+}
+
+int ath11k_wmi_fw_dbglog_cfg(struct ath11k *ar, u32 *module_id_bitmap,
+ struct ath11k_fw_dbglog *dbglog)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_debug_log_config_cmd_fixed_param *cmd;
+ struct sk_buff *skb;
+ struct wmi_tlv *tlv;
+ int ret, len;
+
+ len = sizeof(*cmd) + TLV_HDR_SIZE + (MAX_MODULE_ID_BITMAP_WORDS * sizeof(u32));
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_debug_log_config_cmd_fixed_param *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_DEBUG_LOG_CONFIG_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->dbg_log_param = dbglog->param;
+
+ tlv = (struct wmi_tlv *)((u8 *)cmd + sizeof(*cmd));
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_UINT32) |
+ FIELD_PREP(WMI_TLV_LEN, MAX_MODULE_ID_BITMAP_WORDS * sizeof(u32));
+
+ switch (dbglog->param) {
+ case WMI_DEBUG_LOG_PARAM_LOG_LEVEL:
+ case WMI_DEBUG_LOG_PARAM_VDEV_ENABLE:
+ case WMI_DEBUG_LOG_PARAM_VDEV_DISABLE:
+ case WMI_DEBUG_LOG_PARAM_VDEV_ENABLE_BITMAP:
+ cmd->value = dbglog->value;
+ break;
+ case WMI_DEBUG_LOG_PARAM_MOD_ENABLE_BITMAP:
+ case WMI_DEBUG_LOG_PARAM_WOW_MOD_ENABLE_BITMAP:
+ cmd->value = dbglog->value;
+ memcpy(tlv->value, module_id_bitmap,
+ MAX_MODULE_ID_BITMAP_WORDS * sizeof(u32));
+ /* clear current config to be used for next user config */
+ memset(module_id_bitmap, 0,
+ MAX_MODULE_ID_BITMAP_WORDS * sizeof(u32));
+ break;
+ default:
+ dev_kfree_skb(skb);
+ return -EINVAL;
+ }
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_DBGLOG_CFG_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send WMI_DBGLOG_CFG_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd dbglog cfg");
+
+ return ret;
+}
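+
+/* Hypothetical usage sketch (the actual caller lives in debugfs, not
+ * shown here): enabling module M before issuing a MOD_ENABLE_BITMAP
+ * config would set the corresponding bit in the passed-in bitmap, e.g.
+ *
+ * module_id_bitmap[M / 32] |= BIT(M % 32);
+ */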
+
+int ath11k_wmi_connect(struct ath11k_base *ab)
+{
+ u32 i;
+ u8 wmi_ep_count;
+ int status;
+
+ wmi_ep_count = ab->htc.wmi_ep_count;
+ if (wmi_ep_count > ab->hw_params.max_radios)
+ return -EINVAL;
+
+ /* propagate the first connect failure instead of ignoring it */
+ for (i = 0; i < wmi_ep_count; i++) {
+ status = ath11k_connect_pdev_htc_service(ab, i);
+ if (status)
+ return status;
+ }
+
+ return 0;
+}
+
+static void ath11k_wmi_pdev_detach(struct ath11k_base *ab, u8 pdev_id)
+{
+ if (WARN_ON(pdev_id >= MAX_RADIOS))
+ return;
+
+ /* TODO: Deinit any pdev specific wmi resource */
+}
+
+int ath11k_wmi_pdev_attach(struct ath11k_base *ab,
+ u8 pdev_id)
+{
+ struct ath11k_pdev_wmi *wmi_handle;
+
+ if (pdev_id >= ab->hw_params.max_radios)
+ return -EINVAL;
+
+ wmi_handle = &ab->wmi_ab.wmi[pdev_id];
+
+ wmi_handle->wmi_ab = &ab->wmi_ab;
+
+ ab->wmi_ab.ab = ab;
+ /* TODO: Init remaining resource specific to pdev */
+
+ return 0;
+}
+
+int ath11k_wmi_attach(struct ath11k_base *ab)
+{
+ int ret;
+
+ ret = ath11k_wmi_pdev_attach(ab, 0);
+ if (ret)
+ return ret;
+
+ ab->wmi_ab.ab = ab;
+ ab->wmi_ab.preferred_hw_mode = WMI_HOST_HW_MODE_MAX;
+
+ /* It's overwritten when service_ext_ready is handled */
+ if (ab->hw_params.single_pdev_only && ab->hw_params.num_rxdma_per_pdev > 1)
+ ab->wmi_ab.preferred_hw_mode = WMI_HOST_HW_MODE_SINGLE;
+
+ /* TODO: Init remaining wmi soc resources required */
+ init_completion(&ab->wmi_ab.service_ready);
+ init_completion(&ab->wmi_ab.unified_ready);
+
+ return 0;
+}
+
+void ath11k_wmi_detach(struct ath11k_base *ab)
+{
+ int i;
+
+ /* TODO: Deinit wmi resource specific to SOC as required */
+
+ for (i = 0; i < ab->htc.wmi_ep_count; i++)
+ ath11k_wmi_pdev_detach(ab, i);
+
+ ath11k_wmi_free_dbring_caps(ab);
+}
+
+int ath11k_wmi_hw_data_filter_cmd(struct ath11k *ar, u32 vdev_id,
+ u32 filter_bitmap, bool enable)
+{
+ struct wmi_hw_data_filter_cmd *cmd;
+ struct sk_buff *skb;
+ int len;
+
+ len = sizeof(*cmd);
+ skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_hw_data_filter_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_HW_DATA_FILTER_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->vdev_id = vdev_id;
+ cmd->enable = enable;
+
+ /* Set all modes in case of disable */
+ if (cmd->enable)
+ cmd->hw_filter_bitmap = filter_bitmap;
+ else
+ cmd->hw_filter_bitmap = ((u32)~0U);
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "hw data filter enable %d filter_bitmap 0x%x\n",
+ enable, filter_bitmap);
+
+ return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_HW_DATA_FILTER_CMDID);
+}
+
+int ath11k_wmi_wow_host_wakeup_ind(struct ath11k *ar)
+{
+ struct wmi_wow_host_wakeup_ind *cmd;
+ struct sk_buff *skb;
+ size_t len;
+
+ len = sizeof(*cmd);
+ skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_wow_host_wakeup_ind *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_WOW_HOSTWAKEUP_FROM_SLEEP_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "tlv wow host wakeup ind\n");
+
+ return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID);
+}
+
+int ath11k_wmi_wow_enable(struct ath11k *ar)
+{
+ struct wmi_wow_enable_cmd *cmd;
+ struct sk_buff *skb;
+ int len;
+
+ len = sizeof(*cmd);
+ skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_wow_enable_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_WOW_ENABLE_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->enable = 1;
+ cmd->pause_iface_config = WOW_IFACE_PAUSE_ENABLED;
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "tlv wow enable\n");
+
+ return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ENABLE_CMDID);
+}
+
+int ath11k_wmi_scan_prob_req_oui(struct ath11k *ar,
+ const u8 mac_addr[ETH_ALEN])
+{
+ struct sk_buff *skb;
+ struct wmi_scan_prob_req_oui_cmd *cmd;
+ u32 prob_req_oui;
+ int len;
+
+ prob_req_oui = (((u32)mac_addr[0]) << 16) |
+ (((u32)mac_addr[1]) << 8) | mac_addr[2];
+
+ len = sizeof(*cmd);
+ skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_scan_prob_req_oui_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_SCAN_PROB_REQ_OUI_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->prob_req_oui = prob_req_oui;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "scan prob req oui %d\n",
+ prob_req_oui);
+
+ return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_SCAN_PROB_REQ_OUI_CMDID);
+}
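+
+/* Illustrative sketch, not part of the driver: the OUI sent above is
+ * the first three octets of the (possibly randomised) MAC address
+ * packed into the low 24 bits of a u32, e.g. 00:03:7f -> 0x00037f.
+ */
+static inline u32 wmi_mac_to_oui_sketch(const u8 mac[ETH_ALEN])
+{
+ return ((u32)mac[0] << 16) | ((u32)mac[1] << 8) | mac[2];
+}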
+
+int ath11k_wmi_wow_add_wakeup_event(struct ath11k *ar, u32 vdev_id,
+ enum wmi_wow_wakeup_event event,
+ u32 enable)
+{
+ struct wmi_wow_add_del_event_cmd *cmd;
+ struct sk_buff *skb;
+ size_t len;
+
+ len = sizeof(*cmd);
+ skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_wow_add_del_event_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_WOW_ADD_DEL_EVT_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->vdev_id = vdev_id;
+ cmd->is_add = enable;
+ cmd->event_bitmap = (1 << event);
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "tlv wow add wakeup event %s enable %d vdev_id %d\n",
+ wow_wakeup_event(event), enable, vdev_id);
+
+ return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID);
+}
+
+int ath11k_wmi_wow_add_pattern(struct ath11k *ar, u32 vdev_id, u32 pattern_id,
+ const u8 *pattern, const u8 *mask,
+ int pattern_len, int pattern_offset)
+{
+ struct wmi_wow_add_pattern_cmd *cmd;
+ struct wmi_wow_bitmap_pattern *bitmap;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ u8 *ptr;
+ size_t len;
+
+ len = sizeof(*cmd) +
+ sizeof(*tlv) + /* array struct */
+ sizeof(*bitmap) + /* bitmap */
+ sizeof(*tlv) + /* empty ipv4 sync */
+ sizeof(*tlv) + /* empty ipv6 sync */
+ sizeof(*tlv) + /* empty magic */
+ sizeof(*tlv) + /* empty info timeout */
+ sizeof(*tlv) + sizeof(u32); /* ratelimit interval */
+
+ skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ /* cmd */
+ ptr = (u8 *)skb->data;
+ cmd = (struct wmi_wow_add_pattern_cmd *)ptr;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_WOW_ADD_PATTERN_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->vdev_id = vdev_id;
+ cmd->pattern_id = pattern_id;
+ cmd->pattern_type = WOW_BITMAP_PATTERN;
+
+ ptr += sizeof(*cmd);
+
+ /* bitmap */
+ tlv = (struct wmi_tlv *)ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_ARRAY_STRUCT) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*bitmap));
+
+ ptr += sizeof(*tlv);
+
+ bitmap = (struct wmi_wow_bitmap_pattern *)ptr;
+ bitmap->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_WOW_BITMAP_PATTERN_T) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*bitmap) - TLV_HDR_SIZE);
+
+ memcpy(bitmap->patternbuf, pattern, pattern_len);
+ ath11k_ce_byte_swap(bitmap->patternbuf, roundup(pattern_len, 4));
+ memcpy(bitmap->bitmaskbuf, mask, pattern_len);
+ ath11k_ce_byte_swap(bitmap->bitmaskbuf, roundup(pattern_len, 4));
+ bitmap->pattern_offset = pattern_offset;
+ bitmap->pattern_len = pattern_len;
+ bitmap->bitmask_len = pattern_len;
+ bitmap->pattern_id = pattern_id;
+
+ ptr += sizeof(*bitmap);
+
+ /* ipv4 sync */
+ tlv = (struct wmi_tlv *)ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_ARRAY_STRUCT) |
+ FIELD_PREP(WMI_TLV_LEN, 0);
+
+ ptr += sizeof(*tlv);
+
+ /* ipv6 sync */
+ tlv = (struct wmi_tlv *)ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_ARRAY_STRUCT) |
+ FIELD_PREP(WMI_TLV_LEN, 0);
+
+ ptr += sizeof(*tlv);
+
+ /* magic */
+ tlv = (struct wmi_tlv *)ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_ARRAY_STRUCT) |
+ FIELD_PREP(WMI_TLV_LEN, 0);
+
+ ptr += sizeof(*tlv);
+
+ /* pattern info timeout */
+ tlv = (struct wmi_tlv *)ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_ARRAY_UINT32) |
+ FIELD_PREP(WMI_TLV_LEN, 0);
+
+ ptr += sizeof(*tlv);
+
+ /* ratelimit interval */
+ tlv = (struct wmi_tlv *)ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_ARRAY_UINT32) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(u32));
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "tlv wow add pattern vdev_id %d pattern_id %d pattern_offset %d\n",
+ vdev_id, pattern_id, pattern_offset);
+
+ return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ADD_WAKE_PATTERN_CMDID);
+}
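+
+/* Layout of the add-pattern command built above: the bitmap pattern is
+ * followed by empty ipv4/ipv6/magic placeholder arrays, an empty
+ * timeout array and a one-entry ratelimit array, apparently because
+ * firmware expects every section present in this fixed order:
+ *
+ * cmd | array(bitmap) | array(0) | array(0) | array(0)
+ * | uint32 array(0) | uint32 array(1)
+ */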
+
+int ath11k_wmi_wow_del_pattern(struct ath11k *ar, u32 vdev_id, u32 pattern_id)
+{
+ struct wmi_wow_del_pattern_cmd *cmd;
+ struct sk_buff *skb;
+ size_t len;
+
+ len = sizeof(*cmd);
+ skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_wow_del_pattern_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_WOW_DEL_PATTERN_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->vdev_id = vdev_id;
+ cmd->pattern_id = pattern_id;
+ cmd->pattern_type = WOW_BITMAP_PATTERN;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "tlv wow del pattern vdev_id %d pattern_id %d\n",
+ vdev_id, pattern_id);
+
+ return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_DEL_WAKE_PATTERN_CMDID);
+}
+
+static struct sk_buff *
+ath11k_wmi_op_gen_config_pno_start(struct ath11k *ar,
+ u32 vdev_id,
+ struct wmi_pno_scan_req *pno)
+{
+ struct nlo_configured_parameters *nlo_list;
+ struct wmi_wow_nlo_config_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ u32 *channel_list;
+ size_t len, nlo_list_len, channel_list_len;
+ u8 *ptr;
+ u32 i;
+
+ len = sizeof(*cmd) +
+ sizeof(*tlv) +
+ /* TLV place holder for array of structures
+ * nlo_configured_parameters(nlo_list)
+ */
+ sizeof(*tlv);
+ /* TLV place holder for array of uint32 channel_list */
+
+ channel_list_len = sizeof(u32) * pno->a_networks[0].channel_count;
+ len += channel_list_len;
+
+ nlo_list_len = sizeof(*nlo_list) * pno->uc_networks_count;
+ len += nlo_list_len;
+
+ skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = (u8 *)skb->data;
+ cmd = (struct wmi_wow_nlo_config_cmd *)ptr;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_NLO_CONFIG_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->vdev_id = pno->vdev_id;
+ cmd->flags = WMI_NLO_CONFIG_START | WMI_NLO_CONFIG_SSID_HIDE_EN;
+
+ /* current FW does not support min-max range for dwell time */
+ cmd->active_dwell_time = pno->active_max_time;
+ cmd->passive_dwell_time = pno->passive_max_time;
+
+ if (pno->do_passive_scan)
+ cmd->flags |= WMI_NLO_CONFIG_SCAN_PASSIVE;
+
+ cmd->fast_scan_period = pno->fast_scan_period;
+ cmd->slow_scan_period = pno->slow_scan_period;
+ cmd->fast_scan_max_cycles = pno->fast_scan_max_cycles;
+ cmd->delay_start_time = pno->delay_start_time;
+
+ if (pno->enable_pno_scan_randomization) {
+ cmd->flags |= WMI_NLO_CONFIG_SPOOFED_MAC_IN_PROBE_REQ |
+ WMI_NLO_CONFIG_RANDOM_SEQ_NO_IN_PROBE_REQ;
+ ether_addr_copy(cmd->mac_addr.addr, pno->mac_addr);
+ ether_addr_copy(cmd->mac_mask.addr, pno->mac_addr_mask);
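+ /* ath11k_ce_byte_swap() works on whole 32-bit words, so the
+ * 6-byte address and mask are swapped across the full 8-byte
+ * wmi_mac_addr storage.
+ */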
+ ath11k_ce_byte_swap(cmd->mac_addr.addr, 8);
+ ath11k_ce_byte_swap(cmd->mac_mask.addr, 8);
+ }
+
+ ptr += sizeof(*cmd);
+
+ /* nlo_configured_parameters(nlo_list) */
+ cmd->no_of_ssids = pno->uc_networks_count;
+ tlv = (struct wmi_tlv *)ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_ARRAY_STRUCT) |
+ FIELD_PREP(WMI_TLV_LEN, nlo_list_len);
+
+ ptr += sizeof(*tlv);
+ nlo_list = (struct nlo_configured_parameters *)ptr;
+ for (i = 0; i < cmd->no_of_ssids; i++) {
+ tlv = (struct wmi_tlv *)(&nlo_list[i].tlv_header);
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*nlo_list) - sizeof(*tlv));
+
+ nlo_list[i].ssid.valid = true;
+ nlo_list[i].ssid.ssid.ssid_len = pno->a_networks[i].ssid.ssid_len;
+ memcpy(nlo_list[i].ssid.ssid.ssid,
+ pno->a_networks[i].ssid.ssid,
+ nlo_list[i].ssid.ssid.ssid_len);
+ ath11k_ce_byte_swap(nlo_list[i].ssid.ssid.ssid,
+ roundup(nlo_list[i].ssid.ssid.ssid_len, 4));
+
+ if (pno->a_networks[i].rssi_threshold &&
+ pno->a_networks[i].rssi_threshold > -300) {
+ nlo_list[i].rssi_cond.valid = true;
+ nlo_list[i].rssi_cond.rssi =
+ pno->a_networks[i].rssi_threshold;
+ }
+
+ nlo_list[i].bcast_nw_type.valid = true;
+ nlo_list[i].bcast_nw_type.bcast_nw_type =
+ pno->a_networks[i].bcast_nw_type;
+ }
+
+ ptr += nlo_list_len;
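+ /* A single channel list, taken from the first match set, is
+ * programmed for the entire PNO scan.
+ */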
+ cmd->num_of_channels = pno->a_networks[0].channel_count;
+ tlv = (struct wmi_tlv *)ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_UINT32) |
+ FIELD_PREP(WMI_TLV_LEN, channel_list_len);
+ ptr += sizeof(*tlv);
+ channel_list = (u32 *)ptr;
+ for (i = 0; i < cmd->num_of_channels; i++)
+ channel_list[i] = pno->a_networks[0].channels[i];
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "tlv start pno config vdev_id %d\n",
+ vdev_id);
+
+ return skb;
+}
+
+static struct sk_buff *ath11k_wmi_op_gen_config_pno_stop(struct ath11k *ar,
+ u32 vdev_id)
+{
+ struct wmi_wow_nlo_config_cmd *cmd;
+ struct sk_buff *skb;
+ size_t len;
+
+ len = sizeof(*cmd);
+ skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_wow_nlo_config_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_NLO_CONFIG_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+
+ cmd->vdev_id = vdev_id;
+ cmd->flags = WMI_NLO_CONFIG_STOP;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "tlv stop pno config vdev_id %d\n", vdev_id);
+ return skb;
+}
+
+int ath11k_wmi_wow_config_pno(struct ath11k *ar, u32 vdev_id,
+ struct wmi_pno_scan_req *pno_scan)
+{
+ struct sk_buff *skb;
+
+ if (pno_scan->enable)
+ skb = ath11k_wmi_op_gen_config_pno_start(ar, vdev_id, pno_scan);
+ else
+ skb = ath11k_wmi_op_gen_config_pno_stop(ar, vdev_id);
+
+ if (IS_ERR_OR_NULL(skb))
+ return -ENOMEM;
+
+ return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_NETWORK_LIST_OFFLOAD_CONFIG_CMDID);
+}
+
+static void ath11k_wmi_fill_ns_offload(struct ath11k *ar,
+ struct ath11k_arp_ns_offload *offload,
+ u8 **ptr,
+ bool enable,
+ bool ext)
+{
+ struct wmi_ns_offload_tuple *ns;
+ struct wmi_tlv *tlv;
+ u8 *buf_ptr = *ptr;
+ u32 ns_cnt, ns_ext_tuples;
+ int i, max_offloads;
+
+ ns_cnt = offload->ipv6_count;
+
+ tlv = (struct wmi_tlv *)buf_ptr;
+
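+ /* The base command holds at most WMI_MAX_NS_OFFLOADS tuples; any
+ * further addresses go into a second "extended" tuple array that
+ * a later call with ext set appends.
+ */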
+ if (ext) {
+ ns_ext_tuples = offload->ipv6_count - WMI_MAX_NS_OFFLOADS;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
+ FIELD_PREP(WMI_TLV_LEN, ns_ext_tuples * sizeof(*ns));
+ i = WMI_MAX_NS_OFFLOADS;
+ max_offloads = offload->ipv6_count;
+ } else {
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
+ FIELD_PREP(WMI_TLV_LEN, WMI_MAX_NS_OFFLOADS * sizeof(*ns));
+ i = 0;
+ max_offloads = WMI_MAX_NS_OFFLOADS;
+ }
+
+ buf_ptr += sizeof(*tlv);
+
+ for (; i < max_offloads; i++) {
+ ns = (struct wmi_ns_offload_tuple *)buf_ptr;
+ ns->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_NS_OFFLOAD_TUPLE) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*ns) - TLV_HDR_SIZE);
+
+ if (enable) {
+ if (i < ns_cnt)
+ ns->flags |= WMI_NSOL_FLAGS_VALID;
+
+ memcpy(ns->target_ipaddr[0], offload->ipv6_addr[i], 16);
+ memcpy(ns->solicitation_ipaddr, offload->self_ipv6_addr[i], 16);
+ ath11k_ce_byte_swap(ns->target_ipaddr[0], 16);
+ ath11k_ce_byte_swap(ns->solicitation_ipaddr, 16);
+
+ if (offload->ipv6_type[i])
+ ns->flags |= WMI_NSOL_FLAGS_IS_IPV6_ANYCAST;
+
+ memcpy(ns->target_mac.addr, offload->mac_addr, ETH_ALEN);
+ ath11k_ce_byte_swap(ns->target_mac.addr, 8);
+
+ if (ns->target_mac.word0 != 0 ||
+ ns->target_mac.word1 != 0) {
+ ns->flags |= WMI_NSOL_FLAGS_MAC_VALID;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "index %d ns_solicited %pI6 target %pI6",
+ i, ns->solicitation_ipaddr,
+ ns->target_ipaddr[0]);
+ }
+
+ buf_ptr += sizeof(*ns);
+ }
+
+ *ptr = buf_ptr;
+}
+
+static void ath11k_wmi_fill_arp_offload(struct ath11k *ar,
+ struct ath11k_arp_ns_offload *offload,
+ u8 **ptr,
+ bool enable)
+{
+ struct wmi_arp_offload_tuple *arp;
+ struct wmi_tlv *tlv;
+ u8 *buf_ptr = *ptr;
+ int i;
+
+ /* fill arp tuple */
+ tlv = (struct wmi_tlv *)buf_ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
+ FIELD_PREP(WMI_TLV_LEN, WMI_MAX_ARP_OFFLOADS * sizeof(*arp));
+ buf_ptr += sizeof(*tlv);
+
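+ /* All WMI_MAX_ARP_OFFLOADS slots are always emitted; slots beyond
+ * ipv4_count stay zeroed with WMI_ARPOL_FLAGS_VALID clear, so the
+ * firmware can ignore them.
+ */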
+ for (i = 0; i < WMI_MAX_ARP_OFFLOADS; i++) {
+ arp = (struct wmi_arp_offload_tuple *)buf_ptr;
+ arp->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARP_OFFLOAD_TUPLE) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*arp) - TLV_HDR_SIZE);
+
+ if (enable && i < offload->ipv4_count) {
+ /* Copy the target ip addr and flags */
+ arp->flags = WMI_ARPOL_FLAGS_VALID;
+ memcpy(arp->target_ipaddr, offload->ipv4_addr[i], 4);
+ ath11k_ce_byte_swap(arp->target_ipaddr, 4);
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "arp offload address %pI4",
+ arp->target_ipaddr);
+ }
+
+ buf_ptr += sizeof(*arp);
+ }
+
+ *ptr = buf_ptr;
+}
+
+int ath11k_wmi_arp_ns_offload(struct ath11k *ar,
+ struct ath11k_vif *arvif, bool enable)
+{
+ struct ath11k_arp_ns_offload *offload;
+ struct wmi_set_arp_ns_offload_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ u8 *buf_ptr;
+ size_t len;
+ u8 ns_cnt, ns_ext_tuples = 0;
+
+ offload = &arvif->arp_ns_offload;
+ ns_cnt = offload->ipv6_count;
+
+ len = sizeof(*cmd) +
+ sizeof(*tlv) +
+ WMI_MAX_NS_OFFLOADS * sizeof(struct wmi_ns_offload_tuple) +
+ sizeof(*tlv) +
+ WMI_MAX_ARP_OFFLOADS * sizeof(struct wmi_arp_offload_tuple);
+
+ if (ns_cnt > WMI_MAX_NS_OFFLOADS) {
+ ns_ext_tuples = ns_cnt - WMI_MAX_NS_OFFLOADS;
+ len += sizeof(*tlv) +
+ ns_ext_tuples * sizeof(struct wmi_ns_offload_tuple);
+ }
+
+ skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ buf_ptr = skb->data;
+ cmd = (struct wmi_set_arp_ns_offload_cmd *)buf_ptr;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_SET_ARP_NS_OFFLOAD_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->flags = 0;
+ cmd->vdev_id = arvif->vdev_id;
+ cmd->num_ns_ext_tuples = ns_ext_tuples;
+
+ buf_ptr += sizeof(*cmd);
+
+ ath11k_wmi_fill_ns_offload(ar, offload, &buf_ptr, enable, false);
+ ath11k_wmi_fill_arp_offload(ar, offload, &buf_ptr, enable);
+
+ if (ns_ext_tuples)
+ ath11k_wmi_fill_ns_offload(ar, offload, &buf_ptr, enable, true);
+
+ return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_SET_ARP_NS_OFFLOAD_CMDID);
+}
+
+int ath11k_wmi_gtk_rekey_offload(struct ath11k *ar,
+ struct ath11k_vif *arvif, bool enable)
+{
+ struct wmi_gtk_rekey_offload_cmd *cmd;
+ struct ath11k_rekey_data *rekey_data = &arvif->rekey_data;
+ int len;
+ struct sk_buff *skb;
+ __le64 replay_ctr;
+
+ len = sizeof(*cmd);
+ skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_gtk_rekey_offload_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_GTK_OFFLOAD_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->vdev_id = arvif->vdev_id;
+
+ if (enable) {
+ cmd->flags = GTK_OFFLOAD_ENABLE_OPCODE;
+
+ /* the lengths in rekey_data and cmd are equal */
+ memcpy(cmd->kck, rekey_data->kck, sizeof(cmd->kck));
+ ath11k_ce_byte_swap(cmd->kck, GTK_OFFLOAD_KEK_BYTES);
+ memcpy(cmd->kek, rekey_data->kek, sizeof(cmd->kek));
+ ath11k_ce_byte_swap(cmd->kek, GTK_OFFLOAD_KEK_BYTES);
+
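+ /* Serialize the cpu-endian replay counter as a little-endian
+ * byte stream before applying the CE byte swap.
+ */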
+ replay_ctr = cpu_to_le64(rekey_data->replay_ctr);
+ memcpy(cmd->replay_ctr, &replay_ctr,
+ sizeof(replay_ctr));
+ ath11k_ce_byte_swap(cmd->replay_ctr, GTK_REPLAY_COUNTER_BYTES);
+ } else {
+ cmd->flags = GTK_OFFLOAD_DISABLE_OPCODE;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "offload gtk rekey vdev: %d %d\n",
+ arvif->vdev_id, enable);
+ return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_GTK_OFFLOAD_CMDID);
+}
+
+int ath11k_wmi_gtk_rekey_getinfo(struct ath11k *ar,
+ struct ath11k_vif *arvif)
+{
+ struct wmi_gtk_rekey_offload_cmd *cmd;
+ int len;
+ struct sk_buff *skb;
+
+ len = sizeof(*cmd);
+ skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_gtk_rekey_offload_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_GTK_OFFLOAD_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->vdev_id = arvif->vdev_id;
+ cmd->flags = GTK_OFFLOAD_REQUEST_STATUS_OPCODE;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "get gtk rekey vdev_id: %d\n",
+ arvif->vdev_id);
+ return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_GTK_OFFLOAD_CMDID);
+}
+
+int ath11k_wmi_pdev_set_bios_sar_table_param(struct ath11k *ar, const u8 *sar_val)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_pdev_set_sar_table_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ u8 *buf_ptr;
+ u32 len, sar_len_aligned, rsvd_len_aligned;
+
+ sar_len_aligned = roundup(BIOS_SAR_TABLE_LEN, sizeof(u32));
+ rsvd_len_aligned = roundup(BIOS_SAR_RSVD1_LEN, sizeof(u32));
+ len = sizeof(*cmd) +
+ TLV_HDR_SIZE + sar_len_aligned +
+ TLV_HDR_SIZE + rsvd_len_aligned;
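+ /* The byte-array payloads are padded to a multiple of 4 bytes so
+ * that each following TLV header stays 32-bit aligned.
+ */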
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pdev_set_sar_table_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_SET_BIOS_SAR_TABLE_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->pdev_id = ar->pdev->pdev_id;
+ cmd->sar_len = BIOS_SAR_TABLE_LEN;
+ cmd->rsvd_len = BIOS_SAR_RSVD1_LEN;
+
+ buf_ptr = skb->data + sizeof(*cmd);
+ tlv = (struct wmi_tlv *)buf_ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
+ FIELD_PREP(WMI_TLV_LEN, sar_len_aligned);
+ buf_ptr += TLV_HDR_SIZE;
+ memcpy(buf_ptr, sar_val, BIOS_SAR_TABLE_LEN);
+
+ buf_ptr += sar_len_aligned;
+ tlv = (struct wmi_tlv *)buf_ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
+ FIELD_PREP(WMI_TLV_LEN, rsvd_len_aligned);
+
+ return ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_BIOS_SAR_TABLE_CMDID);
+}
+
+int ath11k_wmi_pdev_set_bios_geo_table_param(struct ath11k *ar)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_pdev_set_geo_table_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ u8 *buf_ptr;
+ u32 len, rsvd_len_aligned;
+
+ rsvd_len_aligned = roundup(BIOS_SAR_RSVD2_LEN, sizeof(u32));
+ len = sizeof(*cmd) + TLV_HDR_SIZE + rsvd_len_aligned;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pdev_set_geo_table_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_SET_BIOS_GEO_TABLE_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->pdev_id = ar->pdev->pdev_id;
+ cmd->rsvd_len = BIOS_SAR_RSVD2_LEN;
+
+ buf_ptr = skb->data + sizeof(*cmd);
+ tlv = (struct wmi_tlv *)buf_ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
+ FIELD_PREP(WMI_TLV_LEN, rsvd_len_aligned);
+
+ return ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_BIOS_GEO_TABLE_CMDID);
+}
+
+int ath11k_wmi_sta_keepalive(struct ath11k *ar,
+ const struct wmi_sta_keepalive_arg *arg)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_sta_keepalive_cmd *cmd;
+ struct wmi_sta_keepalive_arp_resp *arp;
+ struct sk_buff *skb;
+ size_t len;
+
+ len = sizeof(*cmd) + sizeof(*arp);
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_sta_keepalive_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_STA_KEEPALIVE_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->vdev_id = arg->vdev_id;
+ cmd->enabled = arg->enabled;
+ cmd->interval = arg->interval;
+ cmd->method = arg->method;
+
+ arp = (struct wmi_sta_keepalive_arp_resp *)(cmd + 1);
+ arp->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_STA_KEEPALIVE_ARP_RESPONSE) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*arp) - TLV_HDR_SIZE);
+
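+ /* The ARP response TLV is a fixed part of the command layout and
+ * is simply left zeroed for methods that do not use it, such as
+ * the null-data keepalive.
+ */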
+ if (arg->method == WMI_STA_KEEPALIVE_METHOD_UNSOLICITED_ARP_RESPONSE ||
+ arg->method == WMI_STA_KEEPALIVE_METHOD_GRATUITOUS_ARP_REQUEST) {
+ arp->src_ip4_addr = arg->src_ip4_addr;
+ arp->dest_ip4_addr = arg->dest_ip4_addr;
+ ether_addr_copy(arp->dest_mac_addr.addr, arg->dest_mac_addr);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "sta keepalive vdev %d enabled %d method %d interval %d\n",
+ arg->vdev_id, arg->enabled, arg->method, arg->interval);
+
+ return ath11k_wmi_cmd_send(wmi, skb, WMI_STA_KEEPALIVE_CMDID);
+}
+
+bool ath11k_wmi_supports_6ghz_cc_ext(struct ath11k *ar)
+{
+ return test_bit(WMI_TLV_SERVICE_REG_CC_EXT_EVENT_SUPPORT,
+ ar->ab->wmi_ab.svc_map) && ar->supports_6ghz;
+}
diff --git a/drivers/net/wireless/ath/ath11k/wmi.h b/drivers/net/wireless/ath/ath11k/wmi.h
new file mode 100644
index 000000000000..0f0de24a3840
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/wmi.h
@@ -0,0 +1,6535 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#ifndef ATH11K_WMI_H
+#define ATH11K_WMI_H
+
+#include <net/mac80211.h>
+#include "htc.h"
+
+struct ath11k_base;
+struct ath11k;
+struct ath11k_fw_stats;
+struct ath11k_fw_dbglog;
+struct ath11k_vif;
+struct ath11k_reg_tpc_power_info;
+
+#define PSOC_HOST_MAX_NUM_SS (8)
+
+/* defines to set Packet extension values, which can be 0, 8 or 16 usec */
+#define MAX_HE_NSS 8
+#define MAX_HE_MODULATION 8
+#define MAX_HE_RU 4
+#define HE_MODULATION_NONE 7
+#define HE_PET_0_USEC 0
+#define HE_PET_8_USEC 1
+#define HE_PET_16_USEC 2
+
+#define WMI_MAX_CHAINS 8
+
+#define WMI_MAX_NUM_SS MAX_HE_NSS
+#define WMI_MAX_NUM_RU MAX_HE_RU
+
+#define WMI_TLV_CMD(grp_id) (((grp_id) << 12) | 0x1)
+#define WMI_TLV_EV(grp_id) (((grp_id) << 12) | 0x1)
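+/* Commands and events share the same ID encoding: the group id in the
+ * upper bits, with individual IDs counting up from the group base
+ * ((grp_id) << 12) | 0x1.
+ */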
+#define WMI_TLV_CMD_UNSUPPORTED 0
+#define WMI_TLV_PDEV_PARAM_UNSUPPORTED 0
+#define WMI_TLV_VDEV_PARAM_UNSUPPORTED 0
+
+struct wmi_cmd_hdr {
+ u32 cmd_id;
+} __packed;
+
+struct wmi_tlv {
+ u32 header;
+ u8 value[];
+} __packed;
+
+#define WMI_TLV_LEN GENMASK(15, 0)
+#define WMI_TLV_TAG GENMASK(31, 16)
+#define TLV_HDR_SIZE sizeof_field(struct wmi_tlv, header)
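+/* Every TLV is a 32-bit header word followed by its payload: the low 16
+ * bits hold the payload length in bytes (the header itself excluded) and
+ * the high 16 bits hold the tag. A header for a 16-byte
+ * WMI_TAG_ARRAY_BYTE payload is built roughly as:
+ *
+ * tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
+ * FIELD_PREP(WMI_TLV_LEN, 16);
+ */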
+
+#define WMI_CMD_HDR_CMD_ID GENMASK(23, 0)
+#define WMI_MAX_MEM_REQS 32
+#define ATH11K_MAX_HW_LISTEN_INTERVAL 5
+
+#define WLAN_SCAN_MAX_HINT_S_SSID 10
+#define WLAN_SCAN_MAX_HINT_BSSID 10
+#define MAX_RNR_BSS 5
+
+#define WLAN_SCAN_PARAMS_MAX_SSID 16
+#define WLAN_SCAN_PARAMS_MAX_BSSID 4
+#define WLAN_SCAN_PARAMS_MAX_IE_LEN 512
+
+#define WMI_APPEND_TO_EXISTING_CHAN_LIST_FLAG 1
+
+#define MAX_WMI_UTF_LEN 252
+#define WMI_BA_MODE_BUFFER_SIZE_256 3
+/*
+ * HW mode config type replicated from FW header
+ * @WMI_HOST_HW_MODE_SINGLE: Only one PHY is active.
+ * @WMI_HOST_HW_MODE_DBS: Both PHYs are active in different bands,
+ * one in 2G and another in 5G.
+ * @WMI_HOST_HW_MODE_SBS_PASSIVE: Both PHYs are in passive mode (only rx) in
+ * same band; no tx allowed.
+ * @WMI_HOST_HW_MODE_SBS: Both PHYs are active in the same band.
+ * Support for both PHYs within one band is planned
+ * for 5G only (as indicated in WMI_MAC_PHY_CAPABILITIES),
+ * but could be extended to other bands in the future.
+ * The separation of the band between the two PHYs needs
+ * to be communicated separately.
+ * @WMI_HOST_HW_MODE_DBS_SBS: 3 PHYs, with 2 on the same band doing SBS
+ * as in WMI_HW_MODE_SBS, and the 3rd on the other band
+ * @WMI_HOST_HW_MODE_DBS_OR_SBS: Two PHYs, with one PHY capable of both 2G
+ * and 5G. It can support SBS (5G + 5G) OR DBS (5G + 2G).
+ * @WMI_HOST_HW_MODE_MAX: Max hw_mode_id. Used to indicate invalid mode.
+ */
+enum wmi_host_hw_mode_config_type {
+ WMI_HOST_HW_MODE_SINGLE = 0,
+ WMI_HOST_HW_MODE_DBS = 1,
+ WMI_HOST_HW_MODE_SBS_PASSIVE = 2,
+ WMI_HOST_HW_MODE_SBS = 3,
+ WMI_HOST_HW_MODE_DBS_SBS = 4,
+ WMI_HOST_HW_MODE_DBS_OR_SBS = 5,
+
+ /* keep last */
+ WMI_HOST_HW_MODE_MAX
+};
+
+/* HW mode priority values used to detect the preferred HW mode
+ * among the available modes.
+ */
+enum wmi_host_hw_mode_priority {
+ WMI_HOST_HW_MODE_DBS_SBS_PRI,
+ WMI_HOST_HW_MODE_DBS_PRI,
+ WMI_HOST_HW_MODE_DBS_OR_SBS_PRI,
+ WMI_HOST_HW_MODE_SBS_PRI,
+ WMI_HOST_HW_MODE_SBS_PASSIVE_PRI,
+ WMI_HOST_HW_MODE_SINGLE_PRI,
+
+ /* keep last the lowest priority */
+ WMI_HOST_HW_MODE_MAX_PRI
+};
+
+enum WMI_HOST_WLAN_BAND {
+ WMI_HOST_WLAN_2G_CAP = 0x1,
+ WMI_HOST_WLAN_5G_CAP = 0x2,
+ WMI_HOST_WLAN_2G_5G_CAP = WMI_HOST_WLAN_2G_CAP | WMI_HOST_WLAN_5G_CAP,
+};
+
+/* Parameters used for WMI_VDEV_PARAM_AUTORATE_MISC_CFG command.
+ * Used only for HE auto rate mode.
+ */
+enum {
+ /* HE LTF related configuration */
+ WMI_HE_AUTORATE_LTF_1X = BIT(0),
+ WMI_HE_AUTORATE_LTF_2X = BIT(1),
+ WMI_HE_AUTORATE_LTF_4X = BIT(2),
+
+ /* HE GI related configuration */
+ WMI_AUTORATE_400NS_GI = BIT(8),
+ WMI_AUTORATE_800NS_GI = BIT(9),
+ WMI_AUTORATE_1600NS_GI = BIT(10),
+ WMI_AUTORATE_3200NS_GI = BIT(11),
+};
+
+enum {
+ WMI_HOST_VDEV_FLAGS_NON_MBSSID_AP = 0x00000001,
+ WMI_HOST_VDEV_FLAGS_TRANSMIT_AP = 0x00000002,
+ WMI_HOST_VDEV_FLAGS_NON_TRANSMIT_AP = 0x00000004,
+ WMI_HOST_VDEV_FLAGS_EMA_MODE = 0x00000008,
+ WMI_HOST_VDEV_FLAGS_SCAN_MODE_VAP = 0x00000010,
+};
+
+/*
+ * wmi command groups.
+ */
+enum wmi_cmd_group {
+ /* 0 to 2 are reserved */
+ WMI_GRP_START = 0x3,
+ WMI_GRP_SCAN = WMI_GRP_START,
+ WMI_GRP_PDEV = 0x4,
+ WMI_GRP_VDEV = 0x5,
+ WMI_GRP_PEER = 0x6,
+ WMI_GRP_MGMT = 0x7,
+ WMI_GRP_BA_NEG = 0x8,
+ WMI_GRP_STA_PS = 0x9,
+ WMI_GRP_DFS = 0xa,
+ WMI_GRP_ROAM = 0xb,
+ WMI_GRP_OFL_SCAN = 0xc,
+ WMI_GRP_P2P = 0xd,
+ WMI_GRP_AP_PS = 0xe,
+ WMI_GRP_RATE_CTRL = 0xf,
+ WMI_GRP_PROFILE = 0x10,
+ WMI_GRP_SUSPEND = 0x11,
+ WMI_GRP_BCN_FILTER = 0x12,
+ WMI_GRP_WOW = 0x13,
+ WMI_GRP_RTT = 0x14,
+ WMI_GRP_SPECTRAL = 0x15,
+ WMI_GRP_STATS = 0x16,
+ WMI_GRP_ARP_NS_OFL = 0x17,
+ WMI_GRP_NLO_OFL = 0x18,
+ WMI_GRP_GTK_OFL = 0x19,
+ WMI_GRP_CSA_OFL = 0x1a,
+ WMI_GRP_CHATTER = 0x1b,
+ WMI_GRP_TID_ADDBA = 0x1c,
+ WMI_GRP_MISC = 0x1d,
+ WMI_GRP_GPIO = 0x1e,
+ WMI_GRP_FWTEST = 0x1f,
+ WMI_GRP_TDLS = 0x20,
+ WMI_GRP_RESMGR = 0x21,
+ WMI_GRP_STA_SMPS = 0x22,
+ WMI_GRP_WLAN_HB = 0x23,
+ WMI_GRP_RMC = 0x24,
+ WMI_GRP_MHF_OFL = 0x25,
+ WMI_GRP_LOCATION_SCAN = 0x26,
+ WMI_GRP_OEM = 0x27,
+ WMI_GRP_NAN = 0x28,
+ WMI_GRP_COEX = 0x29,
+ WMI_GRP_OBSS_OFL = 0x2a,
+ WMI_GRP_LPI = 0x2b,
+ WMI_GRP_EXTSCAN = 0x2c,
+ WMI_GRP_DHCP_OFL = 0x2d,
+ WMI_GRP_IPA = 0x2e,
+ WMI_GRP_MDNS_OFL = 0x2f,
+ WMI_GRP_SAP_OFL = 0x30,
+ WMI_GRP_OCB = 0x31,
+ WMI_GRP_SOC = 0x32,
+ WMI_GRP_PKT_FILTER = 0x33,
+ WMI_GRP_MAWC = 0x34,
+ WMI_GRP_PMF_OFFLOAD = 0x35,
+ WMI_GRP_BPF_OFFLOAD = 0x36,
+ WMI_GRP_NAN_DATA = 0x37,
+ WMI_GRP_PROTOTYPE = 0x38,
+ WMI_GRP_MONITOR = 0x39,
+ WMI_GRP_REGULATORY = 0x3a,
+ WMI_GRP_HW_DATA_FILTER = 0x3b,
+ WMI_GRP_WLM = 0x3c,
+ WMI_GRP_11K_OFFLOAD = 0x3d,
+ WMI_GRP_TWT = 0x3e,
+ WMI_GRP_MOTION_DET = 0x3f,
+ WMI_GRP_SPATIAL_REUSE = 0x40,
+};
+
+#define WMI_CMD_GRP(grp_id) (((grp_id) << 12) | 0x1)
+#define WMI_EVT_GRP_START_ID(grp_id) (((grp_id) << 12) | 0x1)
+
+#define WMI_CMD_UNSUPPORTED 0
+
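+/* Worked example of the ID derivation: WMI_START_SCAN_CMDID ==
+ * WMI_TLV_CMD(WMI_GRP_SCAN) == (0x3 << 12) | 0x1 == 0x3001, and the
+ * enumerators that follow (WMI_STOP_SCAN_CMDID, ...) continue at 0x3002
+ * and upwards until the next group base.
+ */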
+enum wmi_tlv_cmd_id {
+ WMI_INIT_CMDID = 0x1,
+ WMI_START_SCAN_CMDID = WMI_TLV_CMD(WMI_GRP_SCAN),
+ WMI_STOP_SCAN_CMDID,
+ WMI_SCAN_CHAN_LIST_CMDID,
+ WMI_SCAN_SCH_PRIO_TBL_CMDID,
+ WMI_SCAN_UPDATE_REQUEST_CMDID,
+ WMI_SCAN_PROB_REQ_OUI_CMDID,
+ WMI_SCAN_ADAPTIVE_DWELL_CONFIG_CMDID,
+ WMI_PDEV_SET_REGDOMAIN_CMDID = WMI_TLV_CMD(WMI_GRP_PDEV),
+ WMI_PDEV_SET_CHANNEL_CMDID,
+ WMI_PDEV_SET_PARAM_CMDID,
+ WMI_PDEV_PKTLOG_ENABLE_CMDID,
+ WMI_PDEV_PKTLOG_DISABLE_CMDID,
+ WMI_PDEV_SET_WMM_PARAMS_CMDID,
+ WMI_PDEV_SET_HT_CAP_IE_CMDID,
+ WMI_PDEV_SET_VHT_CAP_IE_CMDID,
+ WMI_PDEV_SET_DSCP_TID_MAP_CMDID,
+ WMI_PDEV_SET_QUIET_MODE_CMDID,
+ WMI_PDEV_GREEN_AP_PS_ENABLE_CMDID,
+ WMI_PDEV_GET_TPC_CONFIG_CMDID,
+ WMI_PDEV_SET_BASE_MACADDR_CMDID,
+ WMI_PDEV_DUMP_CMDID,
+ WMI_PDEV_SET_LED_CONFIG_CMDID,
+ WMI_PDEV_GET_TEMPERATURE_CMDID,
+ WMI_PDEV_SET_LED_FLASHING_CMDID,
+ WMI_PDEV_SMART_ANT_ENABLE_CMDID,
+ WMI_PDEV_SMART_ANT_SET_RX_ANTENNA_CMDID,
+ WMI_PDEV_SET_ANTENNA_SWITCH_TABLE_CMDID,
+ WMI_PDEV_SET_CTL_TABLE_CMDID,
+ WMI_PDEV_SET_MIMOGAIN_TABLE_CMDID,
+ WMI_PDEV_FIPS_CMDID,
+ WMI_PDEV_GET_ANI_CCK_CONFIG_CMDID,
+ WMI_PDEV_GET_ANI_OFDM_CONFIG_CMDID,
+ WMI_PDEV_GET_NFCAL_POWER_CMDID,
+ WMI_PDEV_GET_TPC_CMDID,
+ WMI_MIB_STATS_ENABLE_CMDID,
+ WMI_PDEV_SET_PCL_CMDID,
+ WMI_PDEV_SET_HW_MODE_CMDID,
+ WMI_PDEV_SET_MAC_CONFIG_CMDID,
+ WMI_PDEV_SET_ANTENNA_MODE_CMDID,
+ WMI_SET_PERIODIC_CHANNEL_STATS_CONFIG_CMDID,
+ WMI_PDEV_WAL_POWER_DEBUG_CMDID,
+ WMI_PDEV_SET_REORDER_TIMEOUT_VAL_CMDID,
+ WMI_PDEV_SET_WAKEUP_CONFIG_CMDID,
+ WMI_PDEV_GET_ANTDIV_STATUS_CMDID,
+ WMI_PDEV_GET_CHIP_POWER_STATS_CMDID,
+ WMI_PDEV_SET_STATS_THRESHOLD_CMDID,
+ WMI_PDEV_MULTIPLE_VDEV_RESTART_REQUEST_CMDID,
+ WMI_PDEV_UPDATE_PKT_ROUTING_CMDID,
+ WMI_PDEV_CHECK_CAL_VERSION_CMDID,
+ WMI_PDEV_SET_DIVERSITY_GAIN_CMDID,
+ WMI_PDEV_DIV_GET_RSSI_ANTID_CMDID,
+ WMI_PDEV_BSS_CHAN_INFO_REQUEST_CMDID,
+ WMI_PDEV_UPDATE_PMK_CACHE_CMDID,
+ WMI_PDEV_UPDATE_FILS_HLP_PKT_CMDID,
+ WMI_PDEV_UPDATE_CTLTABLE_REQUEST_CMDID,
+ WMI_PDEV_CONFIG_VENDOR_OUI_ACTION_CMDID,
+ WMI_PDEV_SET_AC_TX_QUEUE_OPTIMIZED_CMDID,
+ WMI_PDEV_SET_RX_FILTER_PROMISCUOUS_CMDID,
+ WMI_PDEV_DMA_RING_CFG_REQ_CMDID,
+ WMI_PDEV_HE_TB_ACTION_FRM_CMDID,
+ WMI_PDEV_PKTLOG_FILTER_CMDID,
+ WMI_PDEV_SET_RAP_CONFIG_CMDID,
+ WMI_PDEV_DSM_FILTER_CMDID,
+ WMI_PDEV_FRAME_INJECT_CMDID,
+ WMI_PDEV_TBTT_OFFSET_SYNC_CMDID,
+ WMI_PDEV_SET_SRG_BSS_COLOR_BITMAP_CMDID,
+ WMI_PDEV_SET_SRG_PARTIAL_BSSID_BITMAP_CMDID,
+ WMI_PDEV_SET_SRG_OBSS_COLOR_ENABLE_BITMAP_CMDID,
+ WMI_PDEV_SET_SRG_OBSS_BSSID_ENABLE_BITMAP_CMDID,
+ WMI_PDEV_SET_NON_SRG_OBSS_COLOR_ENABLE_BITMAP_CMDID,
+ WMI_PDEV_SET_NON_SRG_OBSS_BSSID_ENABLE_BITMAP_CMDID,
+ WMI_PDEV_GET_TPC_STATS_CMDID,
+ WMI_PDEV_ENABLE_DURATION_BASED_TX_MODE_SELECTION_CMDID,
+ WMI_PDEV_GET_DPD_STATUS_CMDID,
+ WMI_PDEV_SET_BIOS_SAR_TABLE_CMDID,
+ WMI_PDEV_SET_BIOS_GEO_TABLE_CMDID,
+ WMI_VDEV_CREATE_CMDID = WMI_TLV_CMD(WMI_GRP_VDEV),
+ WMI_VDEV_DELETE_CMDID,
+ WMI_VDEV_START_REQUEST_CMDID,
+ WMI_VDEV_RESTART_REQUEST_CMDID,
+ WMI_VDEV_UP_CMDID,
+ WMI_VDEV_STOP_CMDID,
+ WMI_VDEV_DOWN_CMDID,
+ WMI_VDEV_SET_PARAM_CMDID,
+ WMI_VDEV_INSTALL_KEY_CMDID,
+ WMI_VDEV_WNM_SLEEPMODE_CMDID,
+ WMI_VDEV_WMM_ADDTS_CMDID,
+ WMI_VDEV_WMM_DELTS_CMDID,
+ WMI_VDEV_SET_WMM_PARAMS_CMDID,
+ WMI_VDEV_SET_GTX_PARAMS_CMDID,
+ WMI_VDEV_IPSEC_NATKEEPALIVE_FILTER_CMDID,
+ WMI_VDEV_PLMREQ_START_CMDID,
+ WMI_VDEV_PLMREQ_STOP_CMDID,
+ WMI_VDEV_TSF_TSTAMP_ACTION_CMDID,
+ WMI_VDEV_SET_IE_CMDID,
+ WMI_VDEV_RATEMASK_CMDID,
+ WMI_VDEV_ATF_REQUEST_CMDID,
+ WMI_VDEV_SET_DSCP_TID_MAP_CMDID,
+ WMI_VDEV_FILTER_NEIGHBOR_RX_PACKETS_CMDID,
+ WMI_VDEV_SET_QUIET_MODE_CMDID,
+ WMI_VDEV_SET_CUSTOM_AGGR_SIZE_CMDID,
+ WMI_VDEV_ENCRYPT_DECRYPT_DATA_REQ_CMDID,
+ WMI_VDEV_ADD_MAC_ADDR_TO_RX_FILTER_CMDID,
+ WMI_VDEV_SET_ARP_STAT_CMDID,
+ WMI_VDEV_GET_ARP_STAT_CMDID,
+ WMI_VDEV_GET_TX_POWER_CMDID,
+ WMI_VDEV_LIMIT_OFFCHAN_CMDID,
+ WMI_VDEV_SET_CUSTOM_SW_RETRY_TH_CMDID,
+ WMI_VDEV_CHAINMASK_CONFIG_CMDID,
+ WMI_VDEV_GET_BCN_RECEPTION_STATS_CMDID,
+ WMI_VDEV_GET_MWS_COEX_INFO_CMDID,
+ WMI_VDEV_DELETE_ALL_PEER_CMDID,
+ WMI_VDEV_BSS_MAX_IDLE_TIME_CMDID,
+ WMI_VDEV_AUDIO_SYNC_TRIGGER_CMDID,
+ WMI_VDEV_AUDIO_SYNC_QTIMER_CMDID,
+ WMI_VDEV_SET_PCL_CMDID,
+ WMI_VDEV_GET_BIG_DATA_CMDID,
+ WMI_VDEV_GET_BIG_DATA_P2_CMDID,
+ WMI_VDEV_SET_TPC_POWER_CMDID,
+ WMI_PEER_CREATE_CMDID = WMI_TLV_CMD(WMI_GRP_PEER),
+ WMI_PEER_DELETE_CMDID,
+ WMI_PEER_FLUSH_TIDS_CMDID,
+ WMI_PEER_SET_PARAM_CMDID,
+ WMI_PEER_ASSOC_CMDID,
+ WMI_PEER_ADD_WDS_ENTRY_CMDID,
+ WMI_PEER_REMOVE_WDS_ENTRY_CMDID,
+ WMI_PEER_MCAST_GROUP_CMDID,
+ WMI_PEER_INFO_REQ_CMDID,
+ WMI_PEER_GET_ESTIMATED_LINKSPEED_CMDID,
+ WMI_PEER_SET_RATE_REPORT_CONDITION_CMDID,
+ WMI_PEER_UPDATE_WDS_ENTRY_CMDID,
+ WMI_PEER_ADD_PROXY_STA_ENTRY_CMDID,
+ WMI_PEER_SMART_ANT_SET_TX_ANTENNA_CMDID,
+ WMI_PEER_SMART_ANT_SET_TRAIN_INFO_CMDID,
+ WMI_PEER_SMART_ANT_SET_NODE_CONFIG_OPS_CMDID,
+ WMI_PEER_ATF_REQUEST_CMDID,
+ WMI_PEER_BWF_REQUEST_CMDID,
+ WMI_PEER_REORDER_QUEUE_SETUP_CMDID,
+ WMI_PEER_REORDER_QUEUE_REMOVE_CMDID,
+ WMI_PEER_SET_RX_BLOCKSIZE_CMDID,
+ WMI_PEER_ANTDIV_INFO_REQ_CMDID,
+ WMI_BCN_TX_CMDID = WMI_TLV_CMD(WMI_GRP_MGMT),
+ WMI_PDEV_SEND_BCN_CMDID,
+ WMI_BCN_TMPL_CMDID,
+ WMI_BCN_FILTER_RX_CMDID,
+ WMI_PRB_REQ_FILTER_RX_CMDID,
+ WMI_MGMT_TX_CMDID,
+ WMI_PRB_TMPL_CMDID,
+ WMI_MGMT_TX_SEND_CMDID,
+ WMI_OFFCHAN_DATA_TX_SEND_CMDID,
+ WMI_PDEV_SEND_FD_CMDID,
+ WMI_BCN_OFFLOAD_CTRL_CMDID,
+ WMI_BSS_COLOR_CHANGE_ENABLE_CMDID,
+ WMI_VDEV_BCN_OFFLOAD_QUIET_CONFIG_CMDID,
+ WMI_FILS_DISCOVERY_TMPL_CMDID,
+ WMI_ADDBA_CLEAR_RESP_CMDID = WMI_TLV_CMD(WMI_GRP_BA_NEG),
+ WMI_ADDBA_SEND_CMDID,
+ WMI_ADDBA_STATUS_CMDID,
+ WMI_DELBA_SEND_CMDID,
+ WMI_ADDBA_SET_RESP_CMDID,
+ WMI_SEND_SINGLEAMSDU_CMDID,
+ WMI_STA_POWERSAVE_MODE_CMDID = WMI_TLV_CMD(WMI_GRP_STA_PS),
+ WMI_STA_POWERSAVE_PARAM_CMDID,
+ WMI_STA_MIMO_PS_MODE_CMDID,
+ WMI_PDEV_DFS_ENABLE_CMDID = WMI_TLV_CMD(WMI_GRP_DFS),
+ WMI_PDEV_DFS_DISABLE_CMDID,
+ WMI_DFS_PHYERR_FILTER_ENA_CMDID,
+ WMI_DFS_PHYERR_FILTER_DIS_CMDID,
+ WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMDID,
+ WMI_PDEV_DFS_PHYERR_OFFLOAD_DISABLE_CMDID,
+ WMI_VDEV_ADFS_CH_CFG_CMDID,
+ WMI_VDEV_ADFS_OCAC_ABORT_CMDID,
+ WMI_ROAM_SCAN_MODE = WMI_TLV_CMD(WMI_GRP_ROAM),
+ WMI_ROAM_SCAN_RSSI_THRESHOLD,
+ WMI_ROAM_SCAN_PERIOD,
+ WMI_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
+ WMI_ROAM_AP_PROFILE,
+ WMI_ROAM_CHAN_LIST,
+ WMI_ROAM_SCAN_CMD,
+ WMI_ROAM_SYNCH_COMPLETE,
+ WMI_ROAM_SET_RIC_REQUEST_CMDID,
+ WMI_ROAM_INVOKE_CMDID,
+ WMI_ROAM_FILTER_CMDID,
+ WMI_ROAM_SUBNET_CHANGE_CONFIG_CMDID,
+ WMI_ROAM_CONFIGURE_MAWC_CMDID,
+ WMI_ROAM_SET_MBO_PARAM_CMDID,
+ WMI_ROAM_PER_CONFIG_CMDID,
+ WMI_ROAM_BTM_CONFIG_CMDID,
+ WMI_ENABLE_FILS_CMDID,
+ WMI_OFL_SCAN_ADD_AP_PROFILE = WMI_TLV_CMD(WMI_GRP_OFL_SCAN),
+ WMI_OFL_SCAN_REMOVE_AP_PROFILE,
+ WMI_OFL_SCAN_PERIOD,
+ WMI_P2P_DEV_SET_DEVICE_INFO = WMI_TLV_CMD(WMI_GRP_P2P),
+ WMI_P2P_DEV_SET_DISCOVERABILITY,
+ WMI_P2P_GO_SET_BEACON_IE,
+ WMI_P2P_GO_SET_PROBE_RESP_IE,
+ WMI_P2P_SET_VENDOR_IE_DATA_CMDID,
+ WMI_P2P_DISC_OFFLOAD_CONFIG_CMDID,
+ WMI_P2P_DISC_OFFLOAD_APPIE_CMDID,
+ WMI_P2P_DISC_OFFLOAD_PATTERN_CMDID,
+ WMI_P2P_SET_OPPPS_PARAM_CMDID,
+ WMI_P2P_LISTEN_OFFLOAD_START_CMDID,
+ WMI_P2P_LISTEN_OFFLOAD_STOP_CMDID,
+ WMI_AP_PS_PEER_PARAM_CMDID = WMI_TLV_CMD(WMI_GRP_AP_PS),
+ WMI_AP_PS_PEER_UAPSD_COEX_CMDID,
+ WMI_AP_PS_EGAP_PARAM_CMDID,
+ WMI_PEER_RATE_RETRY_SCHED_CMDID = WMI_TLV_CMD(WMI_GRP_RATE_CTRL),
+ WMI_WLAN_PROFILE_TRIGGER_CMDID = WMI_TLV_CMD(WMI_GRP_PROFILE),
+ WMI_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
+ WMI_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
+ WMI_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
+ WMI_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
+ WMI_PDEV_SUSPEND_CMDID = WMI_TLV_CMD(WMI_GRP_SUSPEND),
+ WMI_PDEV_RESUME_CMDID,
+ WMI_ADD_BCN_FILTER_CMDID = WMI_TLV_CMD(WMI_GRP_BCN_FILTER),
+ WMI_RMV_BCN_FILTER_CMDID,
+ WMI_WOW_ADD_WAKE_PATTERN_CMDID = WMI_TLV_CMD(WMI_GRP_WOW),
+ WMI_WOW_DEL_WAKE_PATTERN_CMDID,
+ WMI_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
+ WMI_WOW_ENABLE_CMDID,
+ WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
+ WMI_WOW_IOAC_ADD_KEEPALIVE_CMDID,
+ WMI_WOW_IOAC_DEL_KEEPALIVE_CMDID,
+ WMI_WOW_IOAC_ADD_WAKE_PATTERN_CMDID,
+ WMI_WOW_IOAC_DEL_WAKE_PATTERN_CMDID,
+ WMI_D0_WOW_ENABLE_DISABLE_CMDID,
+ WMI_EXTWOW_ENABLE_CMDID,
+ WMI_EXTWOW_SET_APP_TYPE1_PARAMS_CMDID,
+ WMI_EXTWOW_SET_APP_TYPE2_PARAMS_CMDID,
+ WMI_WOW_ENABLE_ICMPV6_NA_FLT_CMDID,
+ WMI_WOW_UDP_SVC_OFLD_CMDID,
+ WMI_WOW_HOSTWAKEUP_GPIO_PIN_PATTERN_CONFIG_CMDID,
+ WMI_WOW_SET_ACTION_WAKE_UP_CMDID,
+ WMI_RTT_MEASREQ_CMDID = WMI_TLV_CMD(WMI_GRP_RTT),
+ WMI_RTT_TSF_CMDID,
+ WMI_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID = WMI_TLV_CMD(WMI_GRP_SPECTRAL),
+ WMI_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
+ WMI_REQUEST_STATS_CMDID = WMI_TLV_CMD(WMI_GRP_STATS),
+ WMI_MCC_SCHED_TRAFFIC_STATS_CMDID,
+ WMI_REQUEST_STATS_EXT_CMDID,
+ WMI_REQUEST_LINK_STATS_CMDID,
+ WMI_START_LINK_STATS_CMDID,
+ WMI_CLEAR_LINK_STATS_CMDID,
+ WMI_GET_FW_MEM_DUMP_CMDID,
+ WMI_DEBUG_MESG_FLUSH_CMDID,
+ WMI_DIAG_EVENT_LOG_CONFIG_CMDID,
+ WMI_REQUEST_WLAN_STATS_CMDID,
+ WMI_REQUEST_RCPI_CMDID,
+ WMI_REQUEST_PEER_STATS_INFO_CMDID,
+ WMI_REQUEST_RADIO_CHAN_STATS_CMDID,
+ WMI_SET_ARP_NS_OFFLOAD_CMDID = WMI_TLV_CMD(WMI_GRP_ARP_NS_OFL),
+ WMI_ADD_PROACTIVE_ARP_RSP_PATTERN_CMDID,
+ WMI_DEL_PROACTIVE_ARP_RSP_PATTERN_CMDID,
+ WMI_NETWORK_LIST_OFFLOAD_CONFIG_CMDID = WMI_TLV_CMD(WMI_GRP_NLO_OFL),
+ WMI_APFIND_CMDID,
+ WMI_PASSPOINT_LIST_CONFIG_CMDID,
+ WMI_NLO_CONFIGURE_MAWC_CMDID,
+ WMI_GTK_OFFLOAD_CMDID = WMI_TLV_CMD(WMI_GRP_GTK_OFL),
+ WMI_CSA_OFFLOAD_ENABLE_CMDID = WMI_TLV_CMD(WMI_GRP_CSA_OFL),
+ WMI_CSA_OFFLOAD_CHANSWITCH_CMDID,
+ WMI_CHATTER_SET_MODE_CMDID = WMI_TLV_CMD(WMI_GRP_CHATTER),
+ WMI_CHATTER_ADD_COALESCING_FILTER_CMDID,
+ WMI_CHATTER_DELETE_COALESCING_FILTER_CMDID,
+ WMI_CHATTER_COALESCING_QUERY_CMDID,
+ WMI_PEER_TID_ADDBA_CMDID = WMI_TLV_CMD(WMI_GRP_TID_ADDBA),
+ WMI_PEER_TID_DELBA_CMDID,
+ WMI_STA_DTIM_PS_METHOD_CMDID,
+ WMI_STA_UAPSD_AUTO_TRIG_CMDID,
+ WMI_STA_KEEPALIVE_CMDID,
+ WMI_BA_REQ_SSN_CMDID,
+ WMI_ECHO_CMDID = WMI_TLV_CMD(WMI_GRP_MISC),
+ WMI_PDEV_UTF_CMDID,
+ WMI_DBGLOG_CFG_CMDID,
+ WMI_PDEV_QVIT_CMDID,
+ WMI_PDEV_FTM_INTG_CMDID,
+ WMI_VDEV_SET_KEEPALIVE_CMDID,
+ WMI_VDEV_GET_KEEPALIVE_CMDID,
+ WMI_FORCE_FW_HANG_CMDID,
+ WMI_SET_MCASTBCAST_FILTER_CMDID,
+ WMI_THERMAL_MGMT_CMDID,
+ WMI_HOST_AUTO_SHUTDOWN_CFG_CMDID,
+ WMI_TPC_CHAINMASK_CONFIG_CMDID,
+ WMI_SET_ANTENNA_DIVERSITY_CMDID,
+ WMI_OCB_SET_SCHED_CMDID,
+ WMI_RSSI_BREACH_MONITOR_CONFIG_CMDID,
+ WMI_LRO_CONFIG_CMDID,
+ WMI_TRANSFER_DATA_TO_FLASH_CMDID,
+ WMI_CONFIG_ENHANCED_MCAST_FILTER_CMDID,
+ WMI_VDEV_WISA_CMDID,
+ WMI_DBGLOG_TIME_STAMP_SYNC_CMDID,
+ WMI_SET_MULTIPLE_MCAST_FILTER_CMDID,
+ WMI_READ_DATA_FROM_FLASH_CMDID,
+ WMI_THERM_THROT_SET_CONF_CMDID,
+ WMI_RUNTIME_DPD_RECAL_CMDID,
+ WMI_GET_TPC_POWER_CMDID,
+ WMI_IDLE_TRIGGER_MONITOR_CMDID,
+ WMI_GPIO_CONFIG_CMDID = WMI_TLV_CMD(WMI_GRP_GPIO),
+ WMI_GPIO_OUTPUT_CMDID,
+ WMI_TXBF_CMDID,
+ WMI_FWTEST_VDEV_MCC_SET_TBTT_MODE_CMDID = WMI_TLV_CMD(WMI_GRP_FWTEST),
+ WMI_FWTEST_P2P_SET_NOA_PARAM_CMDID,
+ WMI_UNIT_TEST_CMDID,
+ WMI_FWTEST_CMDID,
+ WMI_QBOOST_CFG_CMDID,
+ WMI_TDLS_SET_STATE_CMDID = WMI_TLV_CMD(WMI_GRP_TDLS),
+ WMI_TDLS_PEER_UPDATE_CMDID,
+ WMI_TDLS_SET_OFFCHAN_MODE_CMDID,
+ WMI_RESMGR_ADAPTIVE_OCS_EN_DIS_CMDID = WMI_TLV_CMD(WMI_GRP_RESMGR),
+ WMI_RESMGR_SET_CHAN_TIME_QUOTA_CMDID,
+ WMI_RESMGR_SET_CHAN_LATENCY_CMDID,
+ WMI_STA_SMPS_FORCE_MODE_CMDID = WMI_TLV_CMD(WMI_GRP_STA_SMPS),
+ WMI_STA_SMPS_PARAM_CMDID,
+ WMI_HB_SET_ENABLE_CMDID = WMI_TLV_CMD(WMI_GRP_WLAN_HB),
+ WMI_HB_SET_TCP_PARAMS_CMDID,
+ WMI_HB_SET_TCP_PKT_FILTER_CMDID,
+ WMI_HB_SET_UDP_PARAMS_CMDID,
+ WMI_HB_SET_UDP_PKT_FILTER_CMDID,
+ WMI_RMC_SET_MODE_CMDID = WMI_TLV_CMD(WMI_GRP_RMC),
+ WMI_RMC_SET_ACTION_PERIOD_CMDID,
+ WMI_RMC_CONFIG_CMDID,
+ WMI_RMC_SET_MANUAL_LEADER_CMDID,
+ WMI_MHF_OFFLOAD_SET_MODE_CMDID = WMI_TLV_CMD(WMI_GRP_MHF_OFL),
+ WMI_MHF_OFFLOAD_PLUMB_ROUTING_TBL_CMDID,
+ WMI_BATCH_SCAN_ENABLE_CMDID = WMI_TLV_CMD(WMI_GRP_LOCATION_SCAN),
+ WMI_BATCH_SCAN_DISABLE_CMDID,
+ WMI_BATCH_SCAN_TRIGGER_RESULT_CMDID,
+ WMI_OEM_REQ_CMDID = WMI_TLV_CMD(WMI_GRP_OEM),
+ WMI_OEM_REQUEST_CMDID,
+ WMI_LPI_OEM_REQ_CMDID,
+ WMI_NAN_CMDID = WMI_TLV_CMD(WMI_GRP_NAN),
+ WMI_MODEM_POWER_STATE_CMDID = WMI_TLV_CMD(WMI_GRP_COEX),
+ WMI_CHAN_AVOID_UPDATE_CMDID,
+ WMI_COEX_CONFIG_CMDID,
+ WMI_CHAN_AVOID_RPT_ALLOW_CMDID,
+ WMI_COEX_GET_ANTENNA_ISOLATION_CMDID,
+ WMI_SAR_LIMITS_CMDID,
+ WMI_OBSS_SCAN_ENABLE_CMDID = WMI_TLV_CMD(WMI_GRP_OBSS_OFL),
+ WMI_OBSS_SCAN_DISABLE_CMDID,
+ WMI_OBSS_COLOR_COLLISION_DET_CONFIG_CMDID,
+ WMI_LPI_MGMT_SNOOPING_CONFIG_CMDID = WMI_TLV_CMD(WMI_GRP_LPI),
+ WMI_LPI_START_SCAN_CMDID,
+ WMI_LPI_STOP_SCAN_CMDID,
+ WMI_EXTSCAN_START_CMDID = WMI_TLV_CMD(WMI_GRP_EXTSCAN),
+ WMI_EXTSCAN_STOP_CMDID,
+ WMI_EXTSCAN_CONFIGURE_WLAN_CHANGE_MONITOR_CMDID,
+ WMI_EXTSCAN_CONFIGURE_HOTLIST_MONITOR_CMDID,
+ WMI_EXTSCAN_GET_CACHED_RESULTS_CMDID,
+ WMI_EXTSCAN_GET_WLAN_CHANGE_RESULTS_CMDID,
+ WMI_EXTSCAN_SET_CAPABILITIES_CMDID,
+ WMI_EXTSCAN_GET_CAPABILITIES_CMDID,
+ WMI_EXTSCAN_CONFIGURE_HOTLIST_SSID_MONITOR_CMDID,
+ WMI_EXTSCAN_CONFIGURE_MAWC_CMDID,
+ WMI_SET_DHCP_SERVER_OFFLOAD_CMDID = WMI_TLV_CMD(WMI_GRP_DHCP_OFL),
+ WMI_IPA_OFFLOAD_ENABLE_DISABLE_CMDID = WMI_TLV_CMD(WMI_GRP_IPA),
+ WMI_MDNS_OFFLOAD_ENABLE_CMDID = WMI_TLV_CMD(WMI_GRP_MDNS_OFL),
+ WMI_MDNS_SET_FQDN_CMDID,
+ WMI_MDNS_SET_RESPONSE_CMDID,
+ WMI_MDNS_GET_STATS_CMDID,
+ WMI_SAP_OFL_ENABLE_CMDID = WMI_TLV_CMD(WMI_GRP_SAP_OFL),
+ WMI_SAP_SET_BLACKLIST_PARAM_CMDID,
+ WMI_OCB_SET_CONFIG_CMDID = WMI_TLV_CMD(WMI_GRP_OCB),
+ WMI_OCB_SET_UTC_TIME_CMDID,
+ WMI_OCB_START_TIMING_ADVERT_CMDID,
+ WMI_OCB_STOP_TIMING_ADVERT_CMDID,
+ WMI_OCB_GET_TSF_TIMER_CMDID,
+ WMI_DCC_GET_STATS_CMDID,
+ WMI_DCC_CLEAR_STATS_CMDID,
+ WMI_DCC_UPDATE_NDL_CMDID,
+ WMI_SOC_SET_PCL_CMDID = WMI_TLV_CMD(WMI_GRP_SOC),
+ WMI_SOC_SET_HW_MODE_CMDID,
+ WMI_SOC_SET_DUAL_MAC_CONFIG_CMDID,
+ WMI_SOC_SET_ANTENNA_MODE_CMDID,
+ WMI_PACKET_FILTER_CONFIG_CMDID = WMI_TLV_CMD(WMI_GRP_PKT_FILTER),
+ WMI_PACKET_FILTER_ENABLE_CMDID,
+ WMI_MAWC_SENSOR_REPORT_IND_CMDID = WMI_TLV_CMD(WMI_GRP_MAWC),
+ WMI_PMF_OFFLOAD_SET_SA_QUERY_CMDID = WMI_TLV_CMD(WMI_GRP_PMF_OFFLOAD),
+ WMI_BPF_GET_CAPABILITY_CMDID = WMI_TLV_CMD(WMI_GRP_BPF_OFFLOAD),
+ WMI_BPF_GET_VDEV_STATS_CMDID,
+ WMI_BPF_SET_VDEV_INSTRUCTIONS_CMDID,
+ WMI_BPF_DEL_VDEV_INSTRUCTIONS_CMDID,
+ WMI_BPF_SET_VDEV_ACTIVE_MODE_CMDID,
+ WMI_MNT_FILTER_CMDID = WMI_TLV_CMD(WMI_GRP_MONITOR),
+ WMI_SET_CURRENT_COUNTRY_CMDID = WMI_TLV_CMD(WMI_GRP_REGULATORY),
+ WMI_11D_SCAN_START_CMDID,
+ WMI_11D_SCAN_STOP_CMDID,
+ WMI_SET_INIT_COUNTRY_CMDID,
+ WMI_NDI_GET_CAP_REQ_CMDID = WMI_TLV_CMD(WMI_GRP_PROTOTYPE),
+ WMI_NDP_INITIATOR_REQ_CMDID,
+ WMI_NDP_RESPONDER_REQ_CMDID,
+ WMI_NDP_END_REQ_CMDID,
+ WMI_HW_DATA_FILTER_CMDID = WMI_TLV_CMD(WMI_GRP_HW_DATA_FILTER),
+ WMI_TWT_ENABLE_CMDID = WMI_TLV_CMD(WMI_GRP_TWT),
+ WMI_TWT_DISABLE_CMDID,
+ WMI_TWT_ADD_DIALOG_CMDID,
+ WMI_TWT_DEL_DIALOG_CMDID,
+ WMI_TWT_PAUSE_DIALOG_CMDID,
+ WMI_TWT_RESUME_DIALOG_CMDID,
+ WMI_PDEV_OBSS_PD_SPATIAL_REUSE_CMDID =
+ WMI_TLV_CMD(WMI_GRP_SPATIAL_REUSE),
+ WMI_PDEV_OBSS_PD_SPATIAL_REUSE_SET_DEF_OBSS_THRESH_CMDID,
+};
+
+enum wmi_tlv_event_id {
+ WMI_SERVICE_READY_EVENTID = 0x1,
+ WMI_READY_EVENTID,
+ WMI_SERVICE_AVAILABLE_EVENTID,
+ WMI_SCAN_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_SCAN),
+ WMI_PDEV_TPC_CONFIG_EVENTID = WMI_TLV_CMD(WMI_GRP_PDEV),
+ WMI_CHAN_INFO_EVENTID,
+ WMI_PHYERR_EVENTID,
+ WMI_PDEV_DUMP_EVENTID,
+ WMI_TX_PAUSE_EVENTID,
+ WMI_DFS_RADAR_EVENTID,
+ WMI_PDEV_L1SS_TRACK_EVENTID,
+ WMI_PDEV_TEMPERATURE_EVENTID,
+ WMI_SERVICE_READY_EXT_EVENTID,
+ WMI_PDEV_FIPS_EVENTID,
+ WMI_PDEV_CHANNEL_HOPPING_EVENTID,
+ WMI_PDEV_ANI_CCK_LEVEL_EVENTID,
+ WMI_PDEV_ANI_OFDM_LEVEL_EVENTID,
+ WMI_PDEV_TPC_EVENTID,
+ WMI_PDEV_NFCAL_POWER_ALL_CHANNELS_EVENTID,
+ WMI_PDEV_SET_HW_MODE_RESP_EVENTID,
+ WMI_PDEV_HW_MODE_TRANSITION_EVENTID,
+ WMI_PDEV_SET_MAC_CONFIG_RESP_EVENTID,
+ WMI_PDEV_ANTDIV_STATUS_EVENTID,
+ WMI_PDEV_CHIP_POWER_STATS_EVENTID,
+ WMI_PDEV_CHIP_POWER_SAVE_FAILURE_DETECTED_EVENTID,
+ WMI_PDEV_CSA_SWITCH_COUNT_STATUS_EVENTID,
+ WMI_PDEV_CHECK_CAL_VERSION_EVENTID,
+ WMI_PDEV_DIV_RSSI_ANTID_EVENTID,
+ WMI_PDEV_BSS_CHAN_INFO_EVENTID,
+ WMI_PDEV_UPDATE_CTLTABLE_EVENTID,
+ WMI_PDEV_DMA_RING_CFG_RSP_EVENTID,
+ WMI_PDEV_DMA_RING_BUF_RELEASE_EVENTID,
+ WMI_PDEV_CTL_FAILSAFE_CHECK_EVENTID,
+ WMI_PDEV_CSC_SWITCH_COUNT_STATUS_EVENTID,
+ WMI_PDEV_COLD_BOOT_CAL_DATA_EVENTID,
+ WMI_PDEV_RAP_INFO_EVENTID,
+ WMI_CHAN_RF_CHARACTERIZATION_INFO_EVENTID,
+ WMI_SERVICE_READY_EXT2_EVENTID,
+ WMI_VDEV_START_RESP_EVENTID = WMI_TLV_CMD(WMI_GRP_VDEV),
+ WMI_VDEV_STOPPED_EVENTID,
+ WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID,
+ WMI_VDEV_MCC_BCN_INTERVAL_CHANGE_REQ_EVENTID,
+ WMI_VDEV_TSF_REPORT_EVENTID,
+ WMI_VDEV_DELETE_RESP_EVENTID,
+ WMI_VDEV_ENCRYPT_DECRYPT_DATA_RESP_EVENTID,
+ WMI_VDEV_ADD_MAC_ADDR_TO_RX_FILTER_STATUS_EVENTID,
+ WMI_PEER_STA_KICKOUT_EVENTID = WMI_TLV_CMD(WMI_GRP_PEER),
+ WMI_PEER_INFO_EVENTID,
+ WMI_PEER_TX_FAIL_CNT_THR_EVENTID,
+ WMI_PEER_ESTIMATED_LINKSPEED_EVENTID,
+ WMI_PEER_STATE_EVENTID,
+ WMI_PEER_ASSOC_CONF_EVENTID,
+ WMI_PEER_DELETE_RESP_EVENTID,
+ WMI_PEER_RATECODE_LIST_EVENTID,
+ WMI_WDS_PEER_EVENTID,
+ WMI_PEER_STA_PS_STATECHG_EVENTID,
+ WMI_PEER_ANTDIV_INFO_EVENTID,
+ WMI_PEER_RESERVED0_EVENTID,
+ WMI_PEER_RESERVED1_EVENTID,
+ WMI_PEER_RESERVED2_EVENTID,
+ WMI_PEER_RESERVED3_EVENTID,
+ WMI_PEER_RESERVED4_EVENTID,
+ WMI_PEER_RESERVED5_EVENTID,
+ WMI_PEER_RESERVED6_EVENTID,
+ WMI_PEER_RESERVED7_EVENTID,
+ WMI_PEER_RESERVED8_EVENTID,
+ WMI_PEER_RESERVED9_EVENTID,
+ WMI_PEER_RESERVED10_EVENTID,
+ WMI_PEER_OPER_MODE_CHANGE_EVENTID,
+ WMI_PEER_TX_PN_RESPONSE_EVENTID,
+ WMI_PEER_CFR_CAPTURE_EVENTID,
+ WMI_PEER_CREATE_CONF_EVENTID,
+ WMI_MGMT_RX_EVENTID = WMI_TLV_CMD(WMI_GRP_MGMT),
+ WMI_HOST_SWBA_EVENTID,
+ WMI_TBTTOFFSET_UPDATE_EVENTID,
+ WMI_OFFLOAD_BCN_TX_STATUS_EVENTID,
+ WMI_OFFLOAD_PROB_RESP_TX_STATUS_EVENTID,
+ WMI_MGMT_TX_COMPLETION_EVENTID,
+ WMI_MGMT_TX_BUNDLE_COMPLETION_EVENTID,
+ WMI_TBTTOFFSET_EXT_UPDATE_EVENTID,
+ WMI_OFFCHAN_DATA_TX_COMPLETION_EVENTID,
+ WMI_HOST_FILS_DISCOVERY_EVENTID,
+ WMI_TX_DELBA_COMPLETE_EVENTID = WMI_TLV_CMD(WMI_GRP_BA_NEG),
+ WMI_TX_ADDBA_COMPLETE_EVENTID,
+ WMI_BA_RSP_SSN_EVENTID,
+ WMI_AGGR_STATE_TRIG_EVENTID,
+ WMI_ROAM_EVENTID = WMI_TLV_CMD(WMI_GRP_ROAM),
+ WMI_PROFILE_MATCH,
+ WMI_ROAM_SYNCH_EVENTID,
+ WMI_P2P_DISC_EVENTID = WMI_TLV_CMD(WMI_GRP_P2P),
+ WMI_P2P_NOA_EVENTID,
+ WMI_P2P_LISTEN_OFFLOAD_STOPPED_EVENTID,
+ WMI_AP_PS_EGAP_INFO_EVENTID = WMI_TLV_CMD(WMI_GRP_AP_PS),
+ WMI_PDEV_RESUME_EVENTID = WMI_TLV_CMD(WMI_GRP_SUSPEND),
+ WMI_WOW_WAKEUP_HOST_EVENTID = WMI_TLV_CMD(WMI_GRP_WOW),
+ WMI_D0_WOW_DISABLE_ACK_EVENTID,
+ WMI_WOW_INITIAL_WAKEUP_EVENTID,
+ WMI_RTT_MEASUREMENT_REPORT_EVENTID = WMI_TLV_CMD(WMI_GRP_RTT),
+ WMI_TSF_MEASUREMENT_REPORT_EVENTID,
+ WMI_RTT_ERROR_REPORT_EVENTID,
+ WMI_STATS_EXT_EVENTID = WMI_TLV_CMD(WMI_GRP_STATS),
+ WMI_IFACE_LINK_STATS_EVENTID,
+ WMI_PEER_LINK_STATS_EVENTID,
+ WMI_RADIO_LINK_STATS_EVENTID,
+ WMI_UPDATE_FW_MEM_DUMP_EVENTID,
+ WMI_DIAG_EVENT_LOG_SUPPORTED_EVENTID,
+ WMI_INST_RSSI_STATS_EVENTID,
+ WMI_RADIO_TX_POWER_LEVEL_STATS_EVENTID,
+ WMI_REPORT_STATS_EVENTID,
+ WMI_UPDATE_RCPI_EVENTID,
+ WMI_PEER_STATS_INFO_EVENTID,
+ WMI_RADIO_CHAN_STATS_EVENTID,
+ WMI_NLO_MATCH_EVENTID = WMI_TLV_CMD(WMI_GRP_NLO_OFL),
+ WMI_NLO_SCAN_COMPLETE_EVENTID,
+ WMI_APFIND_EVENTID,
+ WMI_PASSPOINT_MATCH_EVENTID,
+ WMI_GTK_OFFLOAD_STATUS_EVENTID = WMI_TLV_CMD(WMI_GRP_GTK_OFL),
+ WMI_GTK_REKEY_FAIL_EVENTID,
+ WMI_CSA_HANDLING_EVENTID = WMI_TLV_CMD(WMI_GRP_CSA_OFL),
+ WMI_CHATTER_PC_QUERY_EVENTID = WMI_TLV_CMD(WMI_GRP_CHATTER),
+ WMI_PDEV_DFS_RADAR_DETECTION_EVENTID = WMI_TLV_CMD(WMI_GRP_DFS),
+ WMI_VDEV_DFS_CAC_COMPLETE_EVENTID,
+ WMI_VDEV_ADFS_OCAC_COMPLETE_EVENTID,
+ WMI_ECHO_EVENTID = WMI_TLV_CMD(WMI_GRP_MISC),
+ WMI_PDEV_UTF_EVENTID,
+ WMI_DEBUG_MESG_EVENTID,
+ WMI_UPDATE_STATS_EVENTID,
+ WMI_DEBUG_PRINT_EVENTID,
+ WMI_DCS_INTERFERENCE_EVENTID,
+ WMI_PDEV_QVIT_EVENTID,
+ WMI_WLAN_PROFILE_DATA_EVENTID,
+ WMI_PDEV_FTM_INTG_EVENTID,
+ WMI_WLAN_FREQ_AVOID_EVENTID,
+ WMI_VDEV_GET_KEEPALIVE_EVENTID,
+ WMI_THERMAL_MGMT_EVENTID,
+ WMI_DIAG_DATA_CONTAINER_EVENTID,
+ WMI_HOST_AUTO_SHUTDOWN_EVENTID,
+ WMI_UPDATE_WHAL_MIB_STATS_EVENTID,
+ WMI_UPDATE_VDEV_RATE_STATS_EVENTID,
+ WMI_DIAG_EVENTID,
+ WMI_OCB_SET_SCHED_EVENTID,
+ WMI_DEBUG_MESG_FLUSH_COMPLETE_EVENTID,
+ WMI_RSSI_BREACH_EVENTID,
+ WMI_TRANSFER_DATA_TO_FLASH_COMPLETE_EVENTID,
+ WMI_PDEV_UTF_SCPC_EVENTID,
+ WMI_READ_DATA_FROM_FLASH_EVENTID,
+ WMI_REPORT_RX_AGGR_FAILURE_EVENTID,
+ WMI_PKGID_EVENTID,
+ WMI_GPIO_INPUT_EVENTID = WMI_TLV_CMD(WMI_GRP_GPIO),
+ WMI_UPLOADH_EVENTID,
+ WMI_CAPTUREH_EVENTID,
+ WMI_RFKILL_STATE_CHANGE_EVENTID,
+ WMI_TDLS_PEER_EVENTID = WMI_TLV_CMD(WMI_GRP_TDLS),
+ WMI_STA_SMPS_FORCE_MODE_COMPL_EVENTID = WMI_TLV_CMD(WMI_GRP_STA_SMPS),
+ WMI_BATCH_SCAN_ENABLED_EVENTID = WMI_TLV_CMD(WMI_GRP_LOCATION_SCAN),
+ WMI_BATCH_SCAN_RESULT_EVENTID,
+ WMI_OEM_CAPABILITY_EVENTID = WMI_TLV_CMD(WMI_GRP_OEM),
+ WMI_OEM_MEASUREMENT_REPORT_EVENTID,
+ WMI_OEM_ERROR_REPORT_EVENTID,
+ WMI_OEM_RESPONSE_EVENTID,
+ WMI_NAN_EVENTID = WMI_TLV_CMD(WMI_GRP_NAN),
+ WMI_NAN_DISC_IFACE_CREATED_EVENTID,
+ WMI_NAN_DISC_IFACE_DELETED_EVENTID,
+ WMI_NAN_STARTED_CLUSTER_EVENTID,
+ WMI_NAN_JOINED_CLUSTER_EVENTID,
+ WMI_COEX_REPORT_ANTENNA_ISOLATION_EVENTID = WMI_TLV_CMD(WMI_GRP_COEX),
+ WMI_LPI_RESULT_EVENTID = WMI_TLV_CMD(WMI_GRP_LPI),
+ WMI_LPI_STATUS_EVENTID,
+ WMI_LPI_HANDOFF_EVENTID,
+ WMI_EXTSCAN_START_STOP_EVENTID = WMI_TLV_CMD(WMI_GRP_EXTSCAN),
+ WMI_EXTSCAN_OPERATION_EVENTID,
+ WMI_EXTSCAN_TABLE_USAGE_EVENTID,
+ WMI_EXTSCAN_CACHED_RESULTS_EVENTID,
+ WMI_EXTSCAN_WLAN_CHANGE_RESULTS_EVENTID,
+ WMI_EXTSCAN_HOTLIST_MATCH_EVENTID,
+ WMI_EXTSCAN_CAPABILITIES_EVENTID,
+ WMI_EXTSCAN_HOTLIST_SSID_MATCH_EVENTID,
+ WMI_MDNS_STATS_EVENTID = WMI_TLV_CMD(WMI_GRP_MDNS_OFL),
+ WMI_SAP_OFL_ADD_STA_EVENTID = WMI_TLV_CMD(WMI_GRP_SAP_OFL),
+ WMI_SAP_OFL_DEL_STA_EVENTID,
+ WMI_OBSS_COLOR_COLLISION_DETECTION_EVENTID =
+ WMI_EVT_GRP_START_ID(WMI_GRP_OBSS_OFL),
+ WMI_OCB_SET_CONFIG_RESP_EVENTID = WMI_TLV_CMD(WMI_GRP_OCB),
+ WMI_OCB_GET_TSF_TIMER_RESP_EVENTID,
+ WMI_DCC_GET_STATS_RESP_EVENTID,
+ WMI_DCC_UPDATE_NDL_RESP_EVENTID,
+ WMI_DCC_STATS_EVENTID,
+ WMI_SOC_SET_HW_MODE_RESP_EVENTID = WMI_TLV_CMD(WMI_GRP_SOC),
+ WMI_SOC_HW_MODE_TRANSITION_EVENTID,
+ WMI_SOC_SET_DUAL_MAC_CONFIG_RESP_EVENTID,
+ WMI_MAWC_ENABLE_SENSOR_EVENTID = WMI_TLV_CMD(WMI_GRP_MAWC),
+ WMI_BPF_CAPABILIY_INFO_EVENTID = WMI_TLV_CMD(WMI_GRP_BPF_OFFLOAD),
+ WMI_BPF_VDEV_STATS_INFO_EVENTID,
+ WMI_RMC_NEW_LEADER_EVENTID = WMI_TLV_CMD(WMI_GRP_RMC),
+ WMI_REG_CHAN_LIST_CC_EVENTID = WMI_TLV_CMD(WMI_GRP_REGULATORY),
+ WMI_11D_NEW_COUNTRY_EVENTID,
+ WMI_REG_CHAN_LIST_CC_EXT_EVENTID,
+ WMI_NDI_CAP_RSP_EVENTID = WMI_TLV_CMD(WMI_GRP_PROTOTYPE),
+ WMI_NDP_INITIATOR_RSP_EVENTID,
+ WMI_NDP_RESPONDER_RSP_EVENTID,
+ WMI_NDP_END_RSP_EVENTID,
+ WMI_NDP_INDICATION_EVENTID,
+ WMI_NDP_CONFIRM_EVENTID,
+ WMI_NDP_END_INDICATION_EVENTID,
+
+ WMI_TWT_ENABLE_EVENTID = WMI_TLV_CMD(WMI_GRP_TWT),
+ WMI_TWT_DISABLE_EVENTID,
+ WMI_TWT_ADD_DIALOG_EVENTID,
+ WMI_TWT_DEL_DIALOG_EVENTID,
+ WMI_TWT_PAUSE_DIALOG_EVENTID,
+ WMI_TWT_RESUME_DIALOG_EVENTID,
+};
+
+enum wmi_tlv_pdev_param {
+ WMI_PDEV_PARAM_TX_CHAIN_MASK = 0x1,
+ WMI_PDEV_PARAM_RX_CHAIN_MASK,
+ WMI_PDEV_PARAM_TXPOWER_LIMIT2G,
+ WMI_PDEV_PARAM_TXPOWER_LIMIT5G,
+ WMI_PDEV_PARAM_TXPOWER_SCALE,
+ WMI_PDEV_PARAM_BEACON_GEN_MODE,
+ WMI_PDEV_PARAM_BEACON_TX_MODE,
+ WMI_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
+ WMI_PDEV_PARAM_PROTECTION_MODE,
+ WMI_PDEV_PARAM_DYNAMIC_BW,
+ WMI_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
+ WMI_PDEV_PARAM_AGG_SW_RETRY_TH,
+ WMI_PDEV_PARAM_STA_KICKOUT_TH,
+ WMI_PDEV_PARAM_AC_AGGRSIZE_SCALING,
+ WMI_PDEV_PARAM_LTR_ENABLE,
+ WMI_PDEV_PARAM_LTR_AC_LATENCY_BE,
+ WMI_PDEV_PARAM_LTR_AC_LATENCY_BK,
+ WMI_PDEV_PARAM_LTR_AC_LATENCY_VI,
+ WMI_PDEV_PARAM_LTR_AC_LATENCY_VO,
+ WMI_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
+ WMI_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
+ WMI_PDEV_PARAM_LTR_RX_OVERRIDE,
+ WMI_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
+ WMI_PDEV_PARAM_L1SS_ENABLE,
+ WMI_PDEV_PARAM_DSLEEP_ENABLE,
+ WMI_PDEV_PARAM_PCIELP_TXBUF_FLUSH,
+ WMI_PDEV_PARAM_PCIELP_TXBUF_WATERMARK,
+ WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
+ WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE,
+ WMI_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
+ WMI_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
+ WMI_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
+ WMI_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
+ WMI_PDEV_PARAM_PMF_QOS,
+ WMI_PDEV_PARAM_ARP_AC_OVERRIDE,
+ WMI_PDEV_PARAM_DCS,
+ WMI_PDEV_PARAM_ANI_ENABLE,
+ WMI_PDEV_PARAM_ANI_POLL_PERIOD,
+ WMI_PDEV_PARAM_ANI_LISTEN_PERIOD,
+ WMI_PDEV_PARAM_ANI_OFDM_LEVEL,
+ WMI_PDEV_PARAM_ANI_CCK_LEVEL,
+ WMI_PDEV_PARAM_DYNTXCHAIN,
+ WMI_PDEV_PARAM_PROXY_STA,
+ WMI_PDEV_PARAM_IDLE_PS_CONFIG,
+ WMI_PDEV_PARAM_POWER_GATING_SLEEP,
+ WMI_PDEV_PARAM_RFKILL_ENABLE,
+ WMI_PDEV_PARAM_BURST_DUR,
+ WMI_PDEV_PARAM_BURST_ENABLE,
+ WMI_PDEV_PARAM_HW_RFKILL_CONFIG,
+ WMI_PDEV_PARAM_LOW_POWER_RF_ENABLE,
+ WMI_PDEV_PARAM_L1SS_TRACK,
+ WMI_PDEV_PARAM_HYST_EN,
+ WMI_PDEV_PARAM_POWER_COLLAPSE_ENABLE,
+ WMI_PDEV_PARAM_LED_SYS_STATE,
+ WMI_PDEV_PARAM_LED_ENABLE,
+ WMI_PDEV_PARAM_AUDIO_OVER_WLAN_LATENCY,
+ WMI_PDEV_PARAM_AUDIO_OVER_WLAN_ENABLE,
+ WMI_PDEV_PARAM_WHAL_MIB_STATS_UPDATE_ENABLE,
+ WMI_PDEV_PARAM_VDEV_RATE_STATS_UPDATE_PERIOD,
+ WMI_PDEV_PARAM_CTS_CBW,
+ WMI_PDEV_PARAM_WNTS_CONFIG,
+ WMI_PDEV_PARAM_ADAPTIVE_EARLY_RX_ENABLE,
+ WMI_PDEV_PARAM_ADAPTIVE_EARLY_RX_MIN_SLEEP_SLOP,
+ WMI_PDEV_PARAM_ADAPTIVE_EARLY_RX_INC_DEC_STEP,
+ WMI_PDEV_PARAM_EARLY_RX_FIX_SLEEP_SLOP,
+ WMI_PDEV_PARAM_BMISS_BASED_ADAPTIVE_BTO_ENABLE,
+ WMI_PDEV_PARAM_BMISS_BTO_MIN_BCN_TIMEOUT,
+ WMI_PDEV_PARAM_BMISS_BTO_INC_DEC_STEP,
+ WMI_PDEV_PARAM_BTO_FIX_BCN_TIMEOUT,
+ WMI_PDEV_PARAM_CE_BASED_ADAPTIVE_BTO_ENABLE,
+ WMI_PDEV_PARAM_CE_BTO_COMBO_CE_VALUE,
+ WMI_PDEV_PARAM_TX_CHAIN_MASK_2G,
+ WMI_PDEV_PARAM_RX_CHAIN_MASK_2G,
+ WMI_PDEV_PARAM_TX_CHAIN_MASK_5G,
+ WMI_PDEV_PARAM_RX_CHAIN_MASK_5G,
+ WMI_PDEV_PARAM_TX_CHAIN_MASK_CCK,
+ WMI_PDEV_PARAM_TX_CHAIN_MASK_1SS,
+ WMI_PDEV_PARAM_CTS2SELF_FOR_P2P_GO_CONFIG,
+ WMI_PDEV_PARAM_TXPOWER_DECR_DB,
+ WMI_PDEV_PARAM_AGGR_BURST,
+ WMI_PDEV_PARAM_RX_DECAP_MODE,
+ WMI_PDEV_PARAM_FAST_CHANNEL_RESET,
+ WMI_PDEV_PARAM_SMART_ANTENNA_DEFAULT_ANTENNA,
+ WMI_PDEV_PARAM_ANTENNA_GAIN,
+ WMI_PDEV_PARAM_RX_FILTER,
+ WMI_PDEV_SET_MCAST_TO_UCAST_TID,
+ WMI_PDEV_PARAM_PROXY_STA_MODE,
+ WMI_PDEV_PARAM_SET_MCAST2UCAST_MODE,
+ WMI_PDEV_PARAM_SET_MCAST2UCAST_BUFFER,
+ WMI_PDEV_PARAM_REMOVE_MCAST2UCAST_BUFFER,
+ WMI_PDEV_PEER_STA_PS_STATECHG_ENABLE,
+ WMI_PDEV_PARAM_IGMPMLD_AC_OVERRIDE,
+ WMI_PDEV_PARAM_BLOCK_INTERBSS,
+ WMI_PDEV_PARAM_SET_DISABLE_RESET_CMDID,
+ WMI_PDEV_PARAM_SET_MSDU_TTL_CMDID,
+ WMI_PDEV_PARAM_SET_PPDU_DURATION_CMDID,
+ WMI_PDEV_PARAM_TXBF_SOUND_PERIOD_CMDID,
+ WMI_PDEV_PARAM_SET_PROMISC_MODE_CMDID,
+ WMI_PDEV_PARAM_SET_BURST_MODE_CMDID,
+ WMI_PDEV_PARAM_EN_STATS,
+ WMI_PDEV_PARAM_MU_GROUP_POLICY,
+ WMI_PDEV_PARAM_NOISE_DETECTION,
+ WMI_PDEV_PARAM_NOISE_THRESHOLD,
+ WMI_PDEV_PARAM_DPD_ENABLE,
+ WMI_PDEV_PARAM_SET_MCAST_BCAST_ECHO,
+ WMI_PDEV_PARAM_ATF_STRICT_SCH,
+ WMI_PDEV_PARAM_ATF_SCHED_DURATION,
+ WMI_PDEV_PARAM_ANT_PLZN,
+ WMI_PDEV_PARAM_MGMT_RETRY_LIMIT,
+ WMI_PDEV_PARAM_SENSITIVITY_LEVEL,
+ WMI_PDEV_PARAM_SIGNED_TXPOWER_2G,
+ WMI_PDEV_PARAM_SIGNED_TXPOWER_5G,
+ WMI_PDEV_PARAM_ENABLE_PER_TID_AMSDU,
+ WMI_PDEV_PARAM_ENABLE_PER_TID_AMPDU,
+ WMI_PDEV_PARAM_CCA_THRESHOLD,
+ WMI_PDEV_PARAM_RTS_FIXED_RATE,
+ WMI_PDEV_PARAM_PDEV_RESET,
+ WMI_PDEV_PARAM_WAPI_MBSSID_OFFSET,
+ WMI_PDEV_PARAM_ARP_DBG_SRCADDR,
+ WMI_PDEV_PARAM_ARP_DBG_DSTADDR,
+ WMI_PDEV_PARAM_ATF_OBSS_NOISE_SCH,
+ WMI_PDEV_PARAM_ATF_OBSS_NOISE_SCALING_FACTOR,
+ WMI_PDEV_PARAM_CUST_TXPOWER_SCALE,
+ WMI_PDEV_PARAM_ATF_DYNAMIC_ENABLE,
+ WMI_PDEV_PARAM_CTRL_RETRY_LIMIT,
+ WMI_PDEV_PARAM_PROPAGATION_DELAY,
+ WMI_PDEV_PARAM_ENA_ANT_DIV,
+ WMI_PDEV_PARAM_FORCE_CHAIN_ANT,
+ WMI_PDEV_PARAM_ANT_DIV_SELFTEST,
+ WMI_PDEV_PARAM_ANT_DIV_SELFTEST_INTVL,
+ WMI_PDEV_PARAM_STATS_OBSERVATION_PERIOD,
+ WMI_PDEV_PARAM_TX_PPDU_DELAY_BIN_SIZE_MS,
+ WMI_PDEV_PARAM_TX_PPDU_DELAY_ARRAY_LEN,
+ WMI_PDEV_PARAM_TX_MPDU_AGGR_ARRAY_LEN,
+ WMI_PDEV_PARAM_RX_MPDU_AGGR_ARRAY_LEN,
+ WMI_PDEV_PARAM_TX_SCH_DELAY,
+ WMI_PDEV_PARAM_ENABLE_RTS_SIFS_BURSTING,
+ WMI_PDEV_PARAM_MAX_MPDUS_IN_AMPDU,
+ WMI_PDEV_PARAM_PEER_STATS_INFO_ENABLE,
+ WMI_PDEV_PARAM_FAST_PWR_TRANSITION,
+ WMI_PDEV_PARAM_RADIO_CHAN_STATS_ENABLE,
+ WMI_PDEV_PARAM_RADIO_DIAGNOSIS_ENABLE,
+ WMI_PDEV_PARAM_MESH_MCAST_ENABLE,
+ WMI_PDEV_PARAM_SET_CMD_OBSS_PD_THRESHOLD = 0xbc,
+ WMI_PDEV_PARAM_SET_CMD_OBSS_PD_PER_AC = 0xbe,
+ WMI_PDEV_PARAM_ENABLE_SR_PROHIBIT = 0xc6,
+};
+
+enum wmi_tlv_vdev_param {
+ WMI_VDEV_PARAM_RTS_THRESHOLD = 0x1,
+ WMI_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
+ WMI_VDEV_PARAM_BEACON_INTERVAL,
+ WMI_VDEV_PARAM_LISTEN_INTERVAL,
+ WMI_VDEV_PARAM_MULTICAST_RATE,
+ WMI_VDEV_PARAM_MGMT_TX_RATE,
+ WMI_VDEV_PARAM_SLOT_TIME,
+ WMI_VDEV_PARAM_PREAMBLE,
+ WMI_VDEV_PARAM_SWBA_TIME,
+ WMI_VDEV_STATS_UPDATE_PERIOD,
+ WMI_VDEV_PWRSAVE_AGEOUT_TIME,
+ WMI_VDEV_HOST_SWBA_INTERVAL,
+ WMI_VDEV_PARAM_DTIM_PERIOD,
+ WMI_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
+ WMI_VDEV_PARAM_WDS,
+ WMI_VDEV_PARAM_ATIM_WINDOW,
+ WMI_VDEV_PARAM_BMISS_COUNT_MAX,
+ WMI_VDEV_PARAM_BMISS_FIRST_BCNT,
+ WMI_VDEV_PARAM_BMISS_FINAL_BCNT,
+ WMI_VDEV_PARAM_FEATURE_WMM,
+ WMI_VDEV_PARAM_CHWIDTH,
+ WMI_VDEV_PARAM_CHEXTOFFSET,
+ WMI_VDEV_PARAM_DISABLE_HTPROTECTION,
+ WMI_VDEV_PARAM_STA_QUICKKICKOUT,
+ WMI_VDEV_PARAM_MGMT_RATE,
+ WMI_VDEV_PARAM_PROTECTION_MODE,
+ WMI_VDEV_PARAM_FIXED_RATE,
+ WMI_VDEV_PARAM_SGI,
+ WMI_VDEV_PARAM_LDPC,
+ WMI_VDEV_PARAM_TX_STBC,
+ WMI_VDEV_PARAM_RX_STBC,
+ WMI_VDEV_PARAM_INTRA_BSS_FWD,
+ WMI_VDEV_PARAM_DEF_KEYID,
+ WMI_VDEV_PARAM_NSS,
+ WMI_VDEV_PARAM_BCAST_DATA_RATE,
+ WMI_VDEV_PARAM_MCAST_DATA_RATE,
+ WMI_VDEV_PARAM_MCAST_INDICATE,
+ WMI_VDEV_PARAM_DHCP_INDICATE,
+ WMI_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
+ WMI_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
+ WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
+ WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
+ WMI_VDEV_PARAM_AP_ENABLE_NAWDS,
+ WMI_VDEV_PARAM_ENABLE_RTSCTS,
+ WMI_VDEV_PARAM_TXBF,
+ WMI_VDEV_PARAM_PACKET_POWERSAVE,
+ WMI_VDEV_PARAM_DROP_UNENCRY,
+ WMI_VDEV_PARAM_TX_ENCAP_TYPE,
+ WMI_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
+ WMI_VDEV_PARAM_EARLY_RX_ADJUST_ENABLE,
+ WMI_VDEV_PARAM_EARLY_RX_TGT_BMISS_NUM,
+ WMI_VDEV_PARAM_EARLY_RX_BMISS_SAMPLE_CYCLE,
+ WMI_VDEV_PARAM_EARLY_RX_SLOP_STEP,
+ WMI_VDEV_PARAM_EARLY_RX_INIT_SLOP,
+ WMI_VDEV_PARAM_EARLY_RX_ADJUST_PAUSE,
+ WMI_VDEV_PARAM_TX_PWRLIMIT,
+ WMI_VDEV_PARAM_SNR_NUM_FOR_CAL,
+ WMI_VDEV_PARAM_ROAM_FW_OFFLOAD,
+ WMI_VDEV_PARAM_ENABLE_RMC,
+ WMI_VDEV_PARAM_IBSS_MAX_BCN_LOST_MS,
+ WMI_VDEV_PARAM_MAX_RATE,
+ WMI_VDEV_PARAM_EARLY_RX_DRIFT_SAMPLE,
+ WMI_VDEV_PARAM_SET_IBSS_TX_FAIL_CNT_THR,
+ WMI_VDEV_PARAM_EBT_RESYNC_TIMEOUT,
+ WMI_VDEV_PARAM_AGGR_TRIG_EVENT_ENABLE,
+ WMI_VDEV_PARAM_IS_IBSS_POWER_SAVE_ALLOWED,
+ WMI_VDEV_PARAM_IS_POWER_COLLAPSE_ALLOWED,
+ WMI_VDEV_PARAM_IS_AWAKE_ON_TXRX_ENABLED,
+ WMI_VDEV_PARAM_INACTIVITY_CNT,
+ WMI_VDEV_PARAM_TXSP_END_INACTIVITY_TIME_MS,
+ WMI_VDEV_PARAM_DTIM_POLICY,
+ WMI_VDEV_PARAM_IBSS_PS_WARMUP_TIME_SECS,
+ WMI_VDEV_PARAM_IBSS_PS_1RX_CHAIN_IN_ATIM_WINDOW_ENABLE,
+ WMI_VDEV_PARAM_RX_LEAK_WINDOW,
+ WMI_VDEV_PARAM_STATS_AVG_FACTOR,
+ WMI_VDEV_PARAM_DISCONNECT_TH,
+ WMI_VDEV_PARAM_RTSCTS_RATE,
+ WMI_VDEV_PARAM_MCC_RTSCTS_PROTECTION_ENABLE,
+ WMI_VDEV_PARAM_MCC_BROADCAST_PROBE_ENABLE,
+ WMI_VDEV_PARAM_TXPOWER_SCALE,
+ WMI_VDEV_PARAM_TXPOWER_SCALE_DECR_DB,
+ WMI_VDEV_PARAM_MCAST2UCAST_SET,
+ WMI_VDEV_PARAM_RC_NUM_RETRIES,
+ WMI_VDEV_PARAM_CABQ_MAXDUR,
+ WMI_VDEV_PARAM_MFPTEST_SET,
+ WMI_VDEV_PARAM_RTS_FIXED_RATE,
+ WMI_VDEV_PARAM_VHT_SGIMASK,
+ WMI_VDEV_PARAM_VHT80_RATEMASK,
+ WMI_VDEV_PARAM_PROXY_STA,
+ WMI_VDEV_PARAM_VIRTUAL_CELL_MODE,
+ WMI_VDEV_PARAM_RX_DECAP_TYPE,
+ WMI_VDEV_PARAM_BW_NSS_RATEMASK,
+ WMI_VDEV_PARAM_SENSOR_AP,
+ WMI_VDEV_PARAM_BEACON_RATE,
+ WMI_VDEV_PARAM_DTIM_ENABLE_CTS,
+ WMI_VDEV_PARAM_STA_KICKOUT,
+ WMI_VDEV_PARAM_CAPABILITIES,
+ WMI_VDEV_PARAM_TSF_INCREMENT,
+ WMI_VDEV_PARAM_AMPDU_PER_AC,
+ WMI_VDEV_PARAM_RX_FILTER,
+ WMI_VDEV_PARAM_MGMT_TX_POWER,
+ WMI_VDEV_PARAM_NON_AGG_SW_RETRY_TH,
+ WMI_VDEV_PARAM_AGG_SW_RETRY_TH,
+ WMI_VDEV_PARAM_DISABLE_DYN_BW_RTS,
+ WMI_VDEV_PARAM_ATF_SSID_SCHED_POLICY,
+ WMI_VDEV_PARAM_HE_DCM,
+ WMI_VDEV_PARAM_HE_RANGE_EXT,
+ WMI_VDEV_PARAM_ENABLE_BCAST_PROBE_RESPONSE,
+ WMI_VDEV_PARAM_FILS_MAX_CHANNEL_GUARD_TIME,
+ WMI_VDEV_PARAM_HE_LTF = 0x74,
+ WMI_VDEV_PARAM_ENABLE_DISABLE_RTT_RESPONDER_ROLE = 0x7d,
+ WMI_VDEV_PARAM_BA_MODE = 0x7e,
+ WMI_VDEV_PARAM_AUTORATE_MISC_CFG = 0x80,
+ WMI_VDEV_PARAM_SET_HE_SOUNDING_MODE = 0x87,
+ WMI_VDEV_PARAM_6GHZ_PARAMS = 0x99,
+ WMI_VDEV_PARAM_PROTOTYPE = 0x8000,
+ WMI_VDEV_PARAM_BSS_COLOR,
+ WMI_VDEV_PARAM_SET_HEMU_MODE,
+ WMI_VDEV_PARAM_HEOPS_0_31 = 0x8003,
+};
+
+enum wmi_tlv_peer_flags {
+ WMI_PEER_AUTH = 0x00000001,
+ WMI_PEER_QOS = 0x00000002,
+ WMI_PEER_NEED_PTK_4_WAY = 0x00000004,
+ WMI_PEER_NEED_GTK_2_WAY = 0x00000010,
+ WMI_PEER_HE = 0x00000400,
+ WMI_PEER_APSD = 0x00000800,
+ WMI_PEER_HT = 0x00001000,
+ WMI_PEER_40MHZ = 0x00002000,
+ WMI_PEER_STBC = 0x00008000,
+ WMI_PEER_LDPC = 0x00010000,
+ WMI_PEER_DYN_MIMOPS = 0x00020000,
+ WMI_PEER_STATIC_MIMOPS = 0x00040000,
+ WMI_PEER_SPATIAL_MUX = 0x00200000,
+ WMI_PEER_TWT_REQ = 0x00400000,
+ WMI_PEER_TWT_RESP = 0x00800000,
+ WMI_PEER_VHT = 0x02000000,
+ WMI_PEER_80MHZ = 0x04000000,
+ WMI_PEER_PMF = 0x08000000,
+ WMI_PEER_IS_P2P_CAPABLE = 0x20000000,
+ WMI_PEER_160MHZ = 0x40000000,
+ WMI_PEER_SAFEMODE_EN = 0x80000000,
+};
+
+/** Enum list of TLV Tags for each parameter structure type. */
+enum wmi_tlv_tag {
+ WMI_TAG_LAST_RESERVED = 15,
+ WMI_TAG_FIRST_ARRAY_ENUM,
+ WMI_TAG_ARRAY_UINT32 = WMI_TAG_FIRST_ARRAY_ENUM,
+ WMI_TAG_ARRAY_BYTE,
+ WMI_TAG_ARRAY_STRUCT,
+ WMI_TAG_ARRAY_FIXED_STRUCT,
+ WMI_TAG_LAST_ARRAY_ENUM = 31,
+ WMI_TAG_SERVICE_READY_EVENT,
+ WMI_TAG_HAL_REG_CAPABILITIES,
+ WMI_TAG_WLAN_HOST_MEM_REQ,
+ WMI_TAG_READY_EVENT,
+ WMI_TAG_SCAN_EVENT,
+ WMI_TAG_PDEV_TPC_CONFIG_EVENT,
+ WMI_TAG_CHAN_INFO_EVENT,
+ WMI_TAG_COMB_PHYERR_RX_HDR,
+ WMI_TAG_VDEV_START_RESPONSE_EVENT,
+ WMI_TAG_VDEV_STOPPED_EVENT,
+ WMI_TAG_VDEV_INSTALL_KEY_COMPLETE_EVENT,
+ WMI_TAG_PEER_STA_KICKOUT_EVENT,
+ WMI_TAG_MGMT_RX_HDR,
+ WMI_TAG_TBTT_OFFSET_EVENT,
+ WMI_TAG_TX_DELBA_COMPLETE_EVENT,
+ WMI_TAG_TX_ADDBA_COMPLETE_EVENT,
+ WMI_TAG_ROAM_EVENT,
+ WMI_TAG_WOW_EVENT_INFO,
+ WMI_TAG_WOW_EVENT_INFO_SECTION_BITMAP,
+ WMI_TAG_RTT_EVENT_HEADER,
+ WMI_TAG_RTT_ERROR_REPORT_EVENT,
+ WMI_TAG_RTT_MEAS_EVENT,
+ WMI_TAG_ECHO_EVENT,
+ WMI_TAG_FTM_INTG_EVENT,
+ WMI_TAG_VDEV_GET_KEEPALIVE_EVENT,
+ WMI_TAG_GPIO_INPUT_EVENT,
+ WMI_TAG_CSA_EVENT,
+ WMI_TAG_GTK_OFFLOAD_STATUS_EVENT,
+ WMI_TAG_IGTK_INFO,
+ WMI_TAG_DCS_INTERFERENCE_EVENT,
+ WMI_TAG_ATH_DCS_CW_INT,
+ WMI_TAG_WLAN_DCS_CW_INT = /* ALIAS */
+ WMI_TAG_ATH_DCS_CW_INT,
+ WMI_TAG_ATH_DCS_WLAN_INT_STAT,
+ WMI_TAG_WLAN_DCS_IM_TGT_STATS_T = /* ALIAS */
+ WMI_TAG_ATH_DCS_WLAN_INT_STAT,
+ WMI_TAG_WLAN_PROFILE_CTX_T,
+ WMI_TAG_WLAN_PROFILE_T,
+ WMI_TAG_PDEV_QVIT_EVENT,
+ WMI_TAG_HOST_SWBA_EVENT,
+ WMI_TAG_TIM_INFO,
+ WMI_TAG_P2P_NOA_INFO,
+ WMI_TAG_STATS_EVENT,
+ WMI_TAG_AVOID_FREQ_RANGES_EVENT,
+ WMI_TAG_AVOID_FREQ_RANGE_DESC,
+ WMI_TAG_GTK_REKEY_FAIL_EVENT,
+ WMI_TAG_INIT_CMD,
+ WMI_TAG_RESOURCE_CONFIG,
+ WMI_TAG_WLAN_HOST_MEMORY_CHUNK,
+ WMI_TAG_START_SCAN_CMD,
+ WMI_TAG_STOP_SCAN_CMD,
+ WMI_TAG_SCAN_CHAN_LIST_CMD,
+ WMI_TAG_CHANNEL,
+ WMI_TAG_PDEV_SET_REGDOMAIN_CMD,
+ WMI_TAG_PDEV_SET_PARAM_CMD,
+ WMI_TAG_PDEV_SET_WMM_PARAMS_CMD,
+ WMI_TAG_WMM_PARAMS,
+ WMI_TAG_PDEV_SET_QUIET_CMD,
+ WMI_TAG_VDEV_CREATE_CMD,
+ WMI_TAG_VDEV_DELETE_CMD,
+ WMI_TAG_VDEV_START_REQUEST_CMD,
+ WMI_TAG_P2P_NOA_DESCRIPTOR,
+ WMI_TAG_P2P_GO_SET_BEACON_IE,
+ WMI_TAG_GTK_OFFLOAD_CMD,
+ WMI_TAG_VDEV_UP_CMD,
+ WMI_TAG_VDEV_STOP_CMD,
+ WMI_TAG_VDEV_DOWN_CMD,
+ WMI_TAG_VDEV_SET_PARAM_CMD,
+ WMI_TAG_VDEV_INSTALL_KEY_CMD,
+ WMI_TAG_PEER_CREATE_CMD,
+ WMI_TAG_PEER_DELETE_CMD,
+ WMI_TAG_PEER_FLUSH_TIDS_CMD,
+ WMI_TAG_PEER_SET_PARAM_CMD,
+ WMI_TAG_PEER_ASSOC_COMPLETE_CMD,
+ WMI_TAG_VHT_RATE_SET,
+ WMI_TAG_BCN_TMPL_CMD,
+ WMI_TAG_PRB_TMPL_CMD,
+ WMI_TAG_BCN_PRB_INFO,
+ WMI_TAG_PEER_TID_ADDBA_CMD,
+ WMI_TAG_PEER_TID_DELBA_CMD,
+ WMI_TAG_STA_POWERSAVE_MODE_CMD,
+ WMI_TAG_STA_POWERSAVE_PARAM_CMD,
+ WMI_TAG_STA_DTIM_PS_METHOD_CMD,
+ WMI_TAG_ROAM_SCAN_MODE,
+ WMI_TAG_ROAM_SCAN_RSSI_THRESHOLD,
+ WMI_TAG_ROAM_SCAN_PERIOD,
+ WMI_TAG_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
+ WMI_TAG_PDEV_SUSPEND_CMD,
+ WMI_TAG_PDEV_RESUME_CMD,
+ WMI_TAG_ADD_BCN_FILTER_CMD,
+ WMI_TAG_RMV_BCN_FILTER_CMD,
+ WMI_TAG_WOW_ENABLE_CMD,
+ WMI_TAG_WOW_HOSTWAKEUP_FROM_SLEEP_CMD,
+ WMI_TAG_STA_UAPSD_AUTO_TRIG_CMD,
+ WMI_TAG_STA_UAPSD_AUTO_TRIG_PARAM,
+ WMI_TAG_SET_ARP_NS_OFFLOAD_CMD,
+ WMI_TAG_ARP_OFFLOAD_TUPLE,
+ WMI_TAG_NS_OFFLOAD_TUPLE,
+ WMI_TAG_FTM_INTG_CMD,
+ WMI_TAG_STA_KEEPALIVE_CMD,
+ WMI_TAG_STA_KEEPALIVE_ARP_RESPONSE,
+ WMI_TAG_P2P_SET_VENDOR_IE_DATA_CMD,
+ WMI_TAG_AP_PS_PEER_CMD,
+ WMI_TAG_PEER_RATE_RETRY_SCHED_CMD,
+ WMI_TAG_WLAN_PROFILE_TRIGGER_CMD,
+ WMI_TAG_WLAN_PROFILE_SET_HIST_INTVL_CMD,
+ WMI_TAG_WLAN_PROFILE_GET_PROF_DATA_CMD,
+ WMI_TAG_WLAN_PROFILE_ENABLE_PROFILE_ID_CMD,
+ WMI_TAG_WOW_DEL_PATTERN_CMD,
+ WMI_TAG_WOW_ADD_DEL_EVT_CMD,
+ WMI_TAG_RTT_MEASREQ_HEAD,
+ WMI_TAG_RTT_MEASREQ_BODY,
+ WMI_TAG_RTT_TSF_CMD,
+ WMI_TAG_VDEV_SPECTRAL_CONFIGURE_CMD,
+ WMI_TAG_VDEV_SPECTRAL_ENABLE_CMD,
+ WMI_TAG_REQUEST_STATS_CMD,
+ WMI_TAG_NLO_CONFIG_CMD,
+ WMI_TAG_NLO_CONFIGURED_PARAMETERS,
+ WMI_TAG_CSA_OFFLOAD_ENABLE_CMD,
+ WMI_TAG_CSA_OFFLOAD_CHANSWITCH_CMD,
+ WMI_TAG_CHATTER_SET_MODE_CMD,
+ WMI_TAG_ECHO_CMD,
+ WMI_TAG_VDEV_SET_KEEPALIVE_CMD,
+ WMI_TAG_VDEV_GET_KEEPALIVE_CMD,
+ WMI_TAG_FORCE_FW_HANG_CMD,
+ WMI_TAG_GPIO_CONFIG_CMD,
+ WMI_TAG_GPIO_OUTPUT_CMD,
+ WMI_TAG_PEER_ADD_WDS_ENTRY_CMD,
+ WMI_TAG_PEER_REMOVE_WDS_ENTRY_CMD,
+ WMI_TAG_BCN_TX_HDR,
+ WMI_TAG_BCN_SEND_FROM_HOST_CMD,
+ WMI_TAG_MGMT_TX_HDR,
+ WMI_TAG_ADDBA_CLEAR_RESP_CMD,
+ WMI_TAG_ADDBA_SEND_CMD,
+ WMI_TAG_DELBA_SEND_CMD,
+ WMI_TAG_ADDBA_SETRESPONSE_CMD,
+ WMI_TAG_SEND_SINGLEAMSDU_CMD,
+ WMI_TAG_PDEV_PKTLOG_ENABLE_CMD,
+ WMI_TAG_PDEV_PKTLOG_DISABLE_CMD,
+ WMI_TAG_PDEV_SET_HT_IE_CMD,
+ WMI_TAG_PDEV_SET_VHT_IE_CMD,
+ WMI_TAG_PDEV_SET_DSCP_TID_MAP_CMD,
+ WMI_TAG_PDEV_GREEN_AP_PS_ENABLE_CMD,
+ WMI_TAG_PDEV_GET_TPC_CONFIG_CMD,
+ WMI_TAG_PDEV_SET_BASE_MACADDR_CMD,
+ WMI_TAG_PEER_MCAST_GROUP_CMD,
+ WMI_TAG_ROAM_AP_PROFILE,
+ WMI_TAG_AP_PROFILE,
+ WMI_TAG_SCAN_SCH_PRIORITY_TABLE_CMD,
+ WMI_TAG_PDEV_DFS_ENABLE_CMD,
+ WMI_TAG_PDEV_DFS_DISABLE_CMD,
+ WMI_TAG_WOW_ADD_PATTERN_CMD,
+ WMI_TAG_WOW_BITMAP_PATTERN_T,
+ WMI_TAG_WOW_IPV4_SYNC_PATTERN_T,
+ WMI_TAG_WOW_IPV6_SYNC_PATTERN_T,
+ WMI_TAG_WOW_MAGIC_PATTERN_CMD,
+ WMI_TAG_SCAN_UPDATE_REQUEST_CMD,
+ WMI_TAG_CHATTER_PKT_COALESCING_FILTER,
+ WMI_TAG_CHATTER_COALESCING_ADD_FILTER_CMD,
+ WMI_TAG_CHATTER_COALESCING_DELETE_FILTER_CMD,
+ WMI_TAG_CHATTER_COALESCING_QUERY_CMD,
+ WMI_TAG_TXBF_CMD,
+ WMI_TAG_DEBUG_LOG_CONFIG_CMD,
+ WMI_TAG_NLO_EVENT,
+ WMI_TAG_CHATTER_QUERY_REPLY_EVENT,
+ WMI_TAG_UPLOAD_H_HDR,
+ WMI_TAG_CAPTURE_H_EVENT_HDR,
+ WMI_TAG_VDEV_WNM_SLEEPMODE_CMD,
+ WMI_TAG_VDEV_IPSEC_NATKEEPALIVE_FILTER_CMD,
+ WMI_TAG_VDEV_WMM_ADDTS_CMD,
+ WMI_TAG_VDEV_WMM_DELTS_CMD,
+ WMI_TAG_VDEV_SET_WMM_PARAMS_CMD,
+ WMI_TAG_TDLS_SET_STATE_CMD,
+ WMI_TAG_TDLS_PEER_UPDATE_CMD,
+ WMI_TAG_TDLS_PEER_EVENT,
+ WMI_TAG_TDLS_PEER_CAPABILITIES,
+ WMI_TAG_VDEV_MCC_SET_TBTT_MODE_CMD,
+ WMI_TAG_ROAM_CHAN_LIST,
+ WMI_TAG_VDEV_MCC_BCN_INTVL_CHANGE_EVENT,
+ WMI_TAG_RESMGR_ADAPTIVE_OCS_ENABLE_DISABLE_CMD,
+ WMI_TAG_RESMGR_SET_CHAN_TIME_QUOTA_CMD,
+ WMI_TAG_RESMGR_SET_CHAN_LATENCY_CMD,
+ WMI_TAG_BA_REQ_SSN_CMD,
+ WMI_TAG_BA_RSP_SSN_EVENT,
+ WMI_TAG_STA_SMPS_FORCE_MODE_CMD,
+ WMI_TAG_SET_MCASTBCAST_FILTER_CMD,
+ WMI_TAG_P2P_SET_OPPPS_CMD,
+ WMI_TAG_P2P_SET_NOA_CMD,
+ WMI_TAG_BA_REQ_SSN_CMD_SUB_STRUCT_PARAM,
+ WMI_TAG_BA_REQ_SSN_EVENT_SUB_STRUCT_PARAM,
+ WMI_TAG_STA_SMPS_PARAM_CMD,
+ WMI_TAG_VDEV_SET_GTX_PARAMS_CMD,
+ WMI_TAG_MCC_SCHED_TRAFFIC_STATS_CMD,
+ WMI_TAG_MCC_SCHED_STA_TRAFFIC_STATS,
+ WMI_TAG_OFFLOAD_BCN_TX_STATUS_EVENT,
+ WMI_TAG_P2P_NOA_EVENT,
+ WMI_TAG_HB_SET_ENABLE_CMD,
+ WMI_TAG_HB_SET_TCP_PARAMS_CMD,
+ WMI_TAG_HB_SET_TCP_PKT_FILTER_CMD,
+ WMI_TAG_HB_SET_UDP_PARAMS_CMD,
+ WMI_TAG_HB_SET_UDP_PKT_FILTER_CMD,
+ WMI_TAG_HB_IND_EVENT,
+ WMI_TAG_TX_PAUSE_EVENT,
+ WMI_TAG_RFKILL_EVENT,
+ WMI_TAG_DFS_RADAR_EVENT,
+ WMI_TAG_DFS_PHYERR_FILTER_ENA_CMD,
+ WMI_TAG_DFS_PHYERR_FILTER_DIS_CMD,
+ WMI_TAG_BATCH_SCAN_RESULT_SCAN_LIST,
+ WMI_TAG_BATCH_SCAN_RESULT_NETWORK_INFO,
+ WMI_TAG_BATCH_SCAN_ENABLE_CMD,
+ WMI_TAG_BATCH_SCAN_DISABLE_CMD,
+ WMI_TAG_BATCH_SCAN_TRIGGER_RESULT_CMD,
+ WMI_TAG_BATCH_SCAN_ENABLED_EVENT,
+ WMI_TAG_BATCH_SCAN_RESULT_EVENT,
+ WMI_TAG_VDEV_PLMREQ_START_CMD,
+ WMI_TAG_VDEV_PLMREQ_STOP_CMD,
+ WMI_TAG_THERMAL_MGMT_CMD,
+ WMI_TAG_THERMAL_MGMT_EVENT,
+ WMI_TAG_PEER_INFO_REQ_CMD,
+ WMI_TAG_PEER_INFO_EVENT,
+ WMI_TAG_PEER_INFO,
+ WMI_TAG_PEER_TX_FAIL_CNT_THR_EVENT,
+ WMI_TAG_RMC_SET_MODE_CMD,
+ WMI_TAG_RMC_SET_ACTION_PERIOD_CMD,
+ WMI_TAG_RMC_CONFIG_CMD,
+ WMI_TAG_MHF_OFFLOAD_SET_MODE_CMD,
+ WMI_TAG_MHF_OFFLOAD_PLUMB_ROUTING_TABLE_CMD,
+ WMI_TAG_ADD_PROACTIVE_ARP_RSP_PATTERN_CMD,
+ WMI_TAG_DEL_PROACTIVE_ARP_RSP_PATTERN_CMD,
+ WMI_TAG_NAN_CMD_PARAM,
+ WMI_TAG_NAN_EVENT_HDR,
+ WMI_TAG_PDEV_L1SS_TRACK_EVENT,
+ WMI_TAG_DIAG_DATA_CONTAINER_EVENT,
+ WMI_TAG_MODEM_POWER_STATE_CMD_PARAM,
+ WMI_TAG_PEER_GET_ESTIMATED_LINKSPEED_CMD,
+ WMI_TAG_PEER_ESTIMATED_LINKSPEED_EVENT,
+ WMI_TAG_AGGR_STATE_TRIG_EVENT,
+ WMI_TAG_MHF_OFFLOAD_ROUTING_TABLE_ENTRY,
+ WMI_TAG_ROAM_SCAN_CMD,
+ WMI_TAG_REQ_STATS_EXT_CMD,
+ WMI_TAG_STATS_EXT_EVENT,
+ WMI_TAG_OBSS_SCAN_ENABLE_CMD,
+ WMI_TAG_OBSS_SCAN_DISABLE_CMD,
+ WMI_TAG_OFFLOAD_PRB_RSP_TX_STATUS_EVENT,
+ WMI_TAG_PDEV_SET_LED_CONFIG_CMD,
+ WMI_TAG_HOST_AUTO_SHUTDOWN_CFG_CMD,
+ WMI_TAG_HOST_AUTO_SHUTDOWN_EVENT,
+ WMI_TAG_UPDATE_WHAL_MIB_STATS_EVENT,
+ WMI_TAG_CHAN_AVOID_UPDATE_CMD_PARAM,
+ WMI_TAG_WOW_IOAC_PKT_PATTERN_T,
+ WMI_TAG_WOW_IOAC_TMR_PATTERN_T,
+ WMI_TAG_WOW_IOAC_ADD_KEEPALIVE_CMD,
+ WMI_TAG_WOW_IOAC_DEL_KEEPALIVE_CMD,
+ WMI_TAG_WOW_IOAC_KEEPALIVE_T,
+ WMI_TAG_WOW_IOAC_ADD_PATTERN_CMD,
+ WMI_TAG_WOW_IOAC_DEL_PATTERN_CMD,
+ WMI_TAG_START_LINK_STATS_CMD,
+ WMI_TAG_CLEAR_LINK_STATS_CMD,
+ WMI_TAG_REQUEST_LINK_STATS_CMD,
+ WMI_TAG_IFACE_LINK_STATS_EVENT,
+ WMI_TAG_RADIO_LINK_STATS_EVENT,
+ WMI_TAG_PEER_STATS_EVENT,
+ WMI_TAG_CHANNEL_STATS,
+ WMI_TAG_RADIO_LINK_STATS,
+ WMI_TAG_RATE_STATS,
+ WMI_TAG_PEER_LINK_STATS,
+ WMI_TAG_WMM_AC_STATS,
+ WMI_TAG_IFACE_LINK_STATS,
+ WMI_TAG_LPI_MGMT_SNOOPING_CONFIG_CMD,
+ WMI_TAG_LPI_START_SCAN_CMD,
+ WMI_TAG_LPI_STOP_SCAN_CMD,
+ WMI_TAG_LPI_RESULT_EVENT,
+ WMI_TAG_PEER_STATE_EVENT,
+ WMI_TAG_EXTSCAN_BUCKET_CMD,
+ WMI_TAG_EXTSCAN_BUCKET_CHANNEL_EVENT,
+ WMI_TAG_EXTSCAN_START_CMD,
+ WMI_TAG_EXTSCAN_STOP_CMD,
+ WMI_TAG_EXTSCAN_CONFIGURE_WLAN_CHANGE_MONITOR_CMD,
+ WMI_TAG_EXTSCAN_WLAN_CHANGE_BSSID_PARAM_CMD,
+ WMI_TAG_EXTSCAN_CONFIGURE_HOTLIST_MONITOR_CMD,
+ WMI_TAG_EXTSCAN_GET_CACHED_RESULTS_CMD,
+ WMI_TAG_EXTSCAN_GET_WLAN_CHANGE_RESULTS_CMD,
+ WMI_TAG_EXTSCAN_SET_CAPABILITIES_CMD,
+ WMI_TAG_EXTSCAN_GET_CAPABILITIES_CMD,
+ WMI_TAG_EXTSCAN_OPERATION_EVENT,
+ WMI_TAG_EXTSCAN_START_STOP_EVENT,
+ WMI_TAG_EXTSCAN_TABLE_USAGE_EVENT,
+ WMI_TAG_EXTSCAN_WLAN_DESCRIPTOR_EVENT,
+ WMI_TAG_EXTSCAN_RSSI_INFO_EVENT,
+ WMI_TAG_EXTSCAN_CACHED_RESULTS_EVENT,
+ WMI_TAG_EXTSCAN_WLAN_CHANGE_RESULTS_EVENT,
+ WMI_TAG_EXTSCAN_WLAN_CHANGE_RESULT_BSSID_EVENT,
+ WMI_TAG_EXTSCAN_HOTLIST_MATCH_EVENT,
+ WMI_TAG_EXTSCAN_CAPABILITIES_EVENT,
+ WMI_TAG_EXTSCAN_CACHE_CAPABILITIES_EVENT,
+ WMI_TAG_EXTSCAN_WLAN_CHANGE_MONITOR_CAPABILITIES_EVENT,
+ WMI_TAG_EXTSCAN_HOTLIST_MONITOR_CAPABILITIES_EVENT,
+ WMI_TAG_D0_WOW_ENABLE_DISABLE_CMD,
+ WMI_TAG_D0_WOW_DISABLE_ACK_EVENT,
+ WMI_TAG_UNIT_TEST_CMD,
+ WMI_TAG_ROAM_OFFLOAD_TLV_PARAM,
+ WMI_TAG_ROAM_11I_OFFLOAD_TLV_PARAM,
+ WMI_TAG_ROAM_11R_OFFLOAD_TLV_PARAM,
+ WMI_TAG_ROAM_ESE_OFFLOAD_TLV_PARAM,
+ WMI_TAG_ROAM_SYNCH_EVENT,
+ WMI_TAG_ROAM_SYNCH_COMPLETE,
+ WMI_TAG_EXTWOW_ENABLE_CMD,
+ WMI_TAG_EXTWOW_SET_APP_TYPE1_PARAMS_CMD,
+ WMI_TAG_EXTWOW_SET_APP_TYPE2_PARAMS_CMD,
+ WMI_TAG_LPI_STATUS_EVENT,
+ WMI_TAG_LPI_HANDOFF_EVENT,
+ WMI_TAG_VDEV_RATE_STATS_EVENT,
+ WMI_TAG_VDEV_RATE_HT_INFO,
+ WMI_TAG_RIC_REQUEST,
+ WMI_TAG_PDEV_GET_TEMPERATURE_CMD,
+ WMI_TAG_PDEV_TEMPERATURE_EVENT,
+ WMI_TAG_SET_DHCP_SERVER_OFFLOAD_CMD,
+ WMI_TAG_TPC_CHAINMASK_CONFIG_CMD,
+ WMI_TAG_RIC_TSPEC,
+ WMI_TAG_TPC_CHAINMASK_CONFIG,
+ WMI_TAG_IPA_OFFLOAD_ENABLE_DISABLE_CMD,
+ WMI_TAG_SCAN_PROB_REQ_OUI_CMD,
+ WMI_TAG_KEY_MATERIAL,
+ WMI_TAG_TDLS_SET_OFFCHAN_MODE_CMD,
+ WMI_TAG_SET_LED_FLASHING_CMD,
+ WMI_TAG_MDNS_OFFLOAD_CMD,
+ WMI_TAG_MDNS_SET_FQDN_CMD,
+ WMI_TAG_MDNS_SET_RESP_CMD,
+ WMI_TAG_MDNS_GET_STATS_CMD,
+ WMI_TAG_MDNS_STATS_EVENT,
+ WMI_TAG_ROAM_INVOKE_CMD,
+ WMI_TAG_PDEV_RESUME_EVENT,
+ WMI_TAG_PDEV_SET_ANTENNA_DIVERSITY_CMD,
+ WMI_TAG_SAP_OFL_ENABLE_CMD,
+ WMI_TAG_SAP_OFL_ADD_STA_EVENT,
+ WMI_TAG_SAP_OFL_DEL_STA_EVENT,
+ WMI_TAG_APFIND_CMD_PARAM,
+ WMI_TAG_APFIND_EVENT_HDR,
+ WMI_TAG_OCB_SET_SCHED_CMD,
+ WMI_TAG_OCB_SET_SCHED_EVENT,
+ WMI_TAG_OCB_SET_CONFIG_CMD,
+ WMI_TAG_OCB_SET_CONFIG_RESP_EVENT,
+ WMI_TAG_OCB_SET_UTC_TIME_CMD,
+ WMI_TAG_OCB_START_TIMING_ADVERT_CMD,
+ WMI_TAG_OCB_STOP_TIMING_ADVERT_CMD,
+ WMI_TAG_OCB_GET_TSF_TIMER_CMD,
+ WMI_TAG_OCB_GET_TSF_TIMER_RESP_EVENT,
+ WMI_TAG_DCC_GET_STATS_CMD,
+ WMI_TAG_DCC_CHANNEL_STATS_REQUEST,
+ WMI_TAG_DCC_GET_STATS_RESP_EVENT,
+ WMI_TAG_DCC_CLEAR_STATS_CMD,
+ WMI_TAG_DCC_UPDATE_NDL_CMD,
+ WMI_TAG_DCC_UPDATE_NDL_RESP_EVENT,
+ WMI_TAG_DCC_STATS_EVENT,
+ WMI_TAG_OCB_CHANNEL,
+ WMI_TAG_OCB_SCHEDULE_ELEMENT,
+ WMI_TAG_DCC_NDL_STATS_PER_CHANNEL,
+ WMI_TAG_DCC_NDL_CHAN,
+ WMI_TAG_QOS_PARAMETER,
+ WMI_TAG_DCC_NDL_ACTIVE_STATE_CONFIG,
+ WMI_TAG_ROAM_SCAN_EXTENDED_THRESHOLD_PARAM,
+ WMI_TAG_ROAM_FILTER,
+ WMI_TAG_PASSPOINT_CONFIG_CMD,
+ WMI_TAG_PASSPOINT_EVENT_HDR,
+ WMI_TAG_EXTSCAN_CONFIGURE_HOTLIST_SSID_MONITOR_CMD,
+ WMI_TAG_EXTSCAN_HOTLIST_SSID_MATCH_EVENT,
+ WMI_TAG_VDEV_TSF_TSTAMP_ACTION_CMD,
+ WMI_TAG_VDEV_TSF_REPORT_EVENT,
+ WMI_TAG_GET_FW_MEM_DUMP,
+ WMI_TAG_UPDATE_FW_MEM_DUMP,
+ WMI_TAG_FW_MEM_DUMP_PARAMS,
+ WMI_TAG_DEBUG_MESG_FLUSH,
+ WMI_TAG_DEBUG_MESG_FLUSH_COMPLETE,
+ WMI_TAG_PEER_SET_RATE_REPORT_CONDITION,
+ WMI_TAG_ROAM_SUBNET_CHANGE_CONFIG,
+ WMI_TAG_VDEV_SET_IE_CMD,
+ WMI_TAG_RSSI_BREACH_MONITOR_CONFIG,
+ WMI_TAG_RSSI_BREACH_EVENT,
+ WMI_TAG_WOW_EVENT_INITIAL_WAKEUP,
+ WMI_TAG_SOC_SET_PCL_CMD,
+ WMI_TAG_SOC_SET_HW_MODE_CMD,
+ WMI_TAG_SOC_SET_HW_MODE_RESPONSE_EVENT,
+ WMI_TAG_SOC_HW_MODE_TRANSITION_EVENT,
+ WMI_TAG_VDEV_TXRX_STREAMS,
+ WMI_TAG_SOC_SET_HW_MODE_RESPONSE_VDEV_MAC_ENTRY,
+ WMI_TAG_SOC_SET_DUAL_MAC_CONFIG_CMD,
+ WMI_TAG_SOC_SET_DUAL_MAC_CONFIG_RESPONSE_EVENT,
+ WMI_TAG_WOW_IOAC_SOCK_PATTERN_T,
+ WMI_TAG_WOW_ENABLE_ICMPV6_NA_FLT_CMD,
+ WMI_TAG_DIAG_EVENT_LOG_CONFIG,
+ WMI_TAG_DIAG_EVENT_LOG_SUPPORTED_EVENT_FIXED_PARAMS,
+ WMI_TAG_PACKET_FILTER_CONFIG,
+ WMI_TAG_PACKET_FILTER_ENABLE,
+ WMI_TAG_SAP_SET_BLACKLIST_PARAM_CMD,
+ WMI_TAG_MGMT_TX_SEND_CMD,
+ WMI_TAG_MGMT_TX_COMPL_EVENT,
+ WMI_TAG_SOC_SET_ANTENNA_MODE_CMD,
+ WMI_TAG_WOW_UDP_SVC_OFLD_CMD,
+ WMI_TAG_LRO_INFO_CMD,
+ WMI_TAG_ROAM_EARLYSTOP_RSSI_THRES_PARAM,
+ WMI_TAG_SERVICE_READY_EXT_EVENT,
+ WMI_TAG_MAWC_SENSOR_REPORT_IND_CMD,
+ WMI_TAG_MAWC_ENABLE_SENSOR_EVENT,
+ WMI_TAG_ROAM_CONFIGURE_MAWC_CMD,
+ WMI_TAG_NLO_CONFIGURE_MAWC_CMD,
+ WMI_TAG_EXTSCAN_CONFIGURE_MAWC_CMD,
+ WMI_TAG_PEER_ASSOC_CONF_EVENT,
+ WMI_TAG_WOW_HOSTWAKEUP_GPIO_PIN_PATTERN_CONFIG_CMD,
+ WMI_TAG_AP_PS_EGAP_PARAM_CMD,
+ WMI_TAG_AP_PS_EGAP_INFO_EVENT,
+ WMI_TAG_PMF_OFFLOAD_SET_SA_QUERY_CMD,
+ WMI_TAG_TRANSFER_DATA_TO_FLASH_CMD,
+ WMI_TAG_TRANSFER_DATA_TO_FLASH_COMPLETE_EVENT,
+ WMI_TAG_SCPC_EVENT,
+ WMI_TAG_AP_PS_EGAP_INFO_CHAINMASK_LIST,
+ WMI_TAG_STA_SMPS_FORCE_MODE_COMPLETE_EVENT,
+ WMI_TAG_BPF_GET_CAPABILITY_CMD,
+ WMI_TAG_BPF_CAPABILITY_INFO_EVT,
+ WMI_TAG_BPF_GET_VDEV_STATS_CMD,
+ WMI_TAG_BPF_VDEV_STATS_INFO_EVT,
+ WMI_TAG_BPF_SET_VDEV_INSTRUCTIONS_CMD,
+ WMI_TAG_BPF_DEL_VDEV_INSTRUCTIONS_CMD,
+ WMI_TAG_VDEV_DELETE_RESP_EVENT,
+ WMI_TAG_PEER_DELETE_RESP_EVENT,
+ WMI_TAG_ROAM_DENSE_THRES_PARAM,
+ WMI_TAG_ENLO_CANDIDATE_SCORE_PARAM,
+ WMI_TAG_PEER_UPDATE_WDS_ENTRY_CMD,
+ WMI_TAG_VDEV_CONFIG_RATEMASK,
+ WMI_TAG_PDEV_FIPS_CMD,
+ WMI_TAG_PDEV_SMART_ANT_ENABLE_CMD,
+ WMI_TAG_PDEV_SMART_ANT_SET_RX_ANTENNA_CMD,
+ WMI_TAG_PEER_SMART_ANT_SET_TX_ANTENNA_CMD,
+ WMI_TAG_PEER_SMART_ANT_SET_TRAIN_ANTENNA_CMD,
+ WMI_TAG_PEER_SMART_ANT_SET_NODE_CONFIG_OPS_CMD,
+ WMI_TAG_PDEV_SET_ANT_SWITCH_TBL_CMD,
+ WMI_TAG_PDEV_SET_CTL_TABLE_CMD,
+ WMI_TAG_PDEV_SET_MIMOGAIN_TABLE_CMD,
+ WMI_TAG_FWTEST_SET_PARAM_CMD,
+ WMI_TAG_PEER_ATF_REQUEST,
+ WMI_TAG_VDEV_ATF_REQUEST,
+ WMI_TAG_PDEV_GET_ANI_CCK_CONFIG_CMD,
+ WMI_TAG_PDEV_GET_ANI_OFDM_CONFIG_CMD,
+ WMI_TAG_INST_RSSI_STATS_RESP,
+ WMI_TAG_MED_UTIL_REPORT_EVENT,
+ WMI_TAG_PEER_STA_PS_STATECHANGE_EVENT,
+ WMI_TAG_WDS_ADDR_EVENT,
+ WMI_TAG_PEER_RATECODE_LIST_EVENT,
+ WMI_TAG_PDEV_NFCAL_POWER_ALL_CHANNELS_EVENT,
+ WMI_TAG_PDEV_TPC_EVENT,
+ WMI_TAG_ANI_OFDM_EVENT,
+ WMI_TAG_ANI_CCK_EVENT,
+ WMI_TAG_PDEV_CHANNEL_HOPPING_EVENT,
+ WMI_TAG_PDEV_FIPS_EVENT,
+ WMI_TAG_ATF_PEER_INFO,
+ WMI_TAG_PDEV_GET_TPC_CMD,
+ WMI_TAG_VDEV_FILTER_NRP_CONFIG_CMD,
+ WMI_TAG_QBOOST_CFG_CMD,
+ WMI_TAG_PDEV_SMART_ANT_GPIO_HANDLE,
+ WMI_TAG_PEER_SMART_ANT_SET_TX_ANTENNA_SERIES,
+ WMI_TAG_PEER_SMART_ANT_SET_TRAIN_ANTENNA_PARAM,
+ WMI_TAG_PDEV_SET_ANT_CTRL_CHAIN,
+ WMI_TAG_PEER_CCK_OFDM_RATE_INFO,
+ WMI_TAG_PEER_MCS_RATE_INFO,
+ WMI_TAG_PDEV_NFCAL_POWER_ALL_CHANNELS_NFDBR,
+ WMI_TAG_PDEV_NFCAL_POWER_ALL_CHANNELS_NFDBM,
+ WMI_TAG_PDEV_NFCAL_POWER_ALL_CHANNELS_FREQNUM,
+ WMI_TAG_MU_REPORT_TOTAL_MU,
+ WMI_TAG_VDEV_SET_DSCP_TID_MAP_CMD,
+ WMI_TAG_ROAM_SET_MBO,
+ WMI_TAG_MIB_STATS_ENABLE_CMD,
+ WMI_TAG_NAN_DISC_IFACE_CREATED_EVENT,
+ WMI_TAG_NAN_DISC_IFACE_DELETED_EVENT,
+ WMI_TAG_NAN_STARTED_CLUSTER_EVENT,
+ WMI_TAG_NAN_JOINED_CLUSTER_EVENT,
+ WMI_TAG_NDI_GET_CAP_REQ,
+ WMI_TAG_NDP_INITIATOR_REQ,
+ WMI_TAG_NDP_RESPONDER_REQ,
+ WMI_TAG_NDP_END_REQ,
+ WMI_TAG_NDI_CAP_RSP_EVENT,
+ WMI_TAG_NDP_INITIATOR_RSP_EVENT,
+ WMI_TAG_NDP_RESPONDER_RSP_EVENT,
+ WMI_TAG_NDP_END_RSP_EVENT,
+ WMI_TAG_NDP_INDICATION_EVENT,
+ WMI_TAG_NDP_CONFIRM_EVENT,
+ WMI_TAG_NDP_END_INDICATION_EVENT,
+ WMI_TAG_VDEV_SET_QUIET_CMD,
+ WMI_TAG_PDEV_SET_PCL_CMD,
+ WMI_TAG_PDEV_SET_HW_MODE_CMD,
+ WMI_TAG_PDEV_SET_MAC_CONFIG_CMD,
+ WMI_TAG_PDEV_SET_ANTENNA_MODE_CMD,
+ WMI_TAG_PDEV_SET_HW_MODE_RESPONSE_EVENT,
+ WMI_TAG_PDEV_HW_MODE_TRANSITION_EVENT,
+ WMI_TAG_PDEV_SET_HW_MODE_RESPONSE_VDEV_MAC_ENTRY,
+ WMI_TAG_PDEV_SET_MAC_CONFIG_RESPONSE_EVENT,
+ WMI_TAG_COEX_CONFIG_CMD,
+ WMI_TAG_CONFIG_ENHANCED_MCAST_FILTER,
+ WMI_TAG_CHAN_AVOID_RPT_ALLOW_CMD,
+ WMI_TAG_SET_PERIODIC_CHANNEL_STATS_CONFIG,
+ WMI_TAG_VDEV_SET_CUSTOM_AGGR_SIZE_CMD,
+ WMI_TAG_PDEV_WAL_POWER_DEBUG_CMD,
+ WMI_TAG_MAC_PHY_CAPABILITIES,
+ WMI_TAG_HW_MODE_CAPABILITIES,
+ WMI_TAG_SOC_MAC_PHY_HW_MODE_CAPS,
+ WMI_TAG_HAL_REG_CAPABILITIES_EXT,
+ WMI_TAG_SOC_HAL_REG_CAPABILITIES,
+ WMI_TAG_VDEV_WISA_CMD,
+ WMI_TAG_TX_POWER_LEVEL_STATS_EVT,
+ WMI_TAG_SCAN_ADAPTIVE_DWELL_PARAMETERS_TLV,
+ WMI_TAG_SCAN_ADAPTIVE_DWELL_CONFIG,
+ WMI_TAG_WOW_SET_ACTION_WAKE_UP_CMD,
+ WMI_TAG_NDP_END_RSP_PER_NDI,
+ WMI_TAG_PEER_BWF_REQUEST,
+ WMI_TAG_BWF_PEER_INFO,
+ WMI_TAG_DBGLOG_TIME_STAMP_SYNC_CMD,
+ WMI_TAG_RMC_SET_LEADER_CMD,
+ WMI_TAG_RMC_MANUAL_LEADER_EVENT,
+ WMI_TAG_PER_CHAIN_RSSI_STATS,
+ WMI_TAG_RSSI_STATS,
+ WMI_TAG_P2P_LO_START_CMD,
+ WMI_TAG_P2P_LO_STOP_CMD,
+ WMI_TAG_P2P_LO_STOPPED_EVENT,
+ WMI_TAG_REORDER_QUEUE_SETUP_CMD,
+ WMI_TAG_REORDER_QUEUE_REMOVE_CMD,
+ WMI_TAG_SET_MULTIPLE_MCAST_FILTER_CMD,
+ WMI_TAG_MGMT_TX_COMPL_BUNDLE_EVENT,
+ WMI_TAG_READ_DATA_FROM_FLASH_CMD,
+ WMI_TAG_READ_DATA_FROM_FLASH_EVENT,
+ WMI_TAG_PDEV_SET_REORDER_TIMEOUT_VAL_CMD,
+ WMI_TAG_PEER_SET_RX_BLOCKSIZE_CMD,
+ WMI_TAG_PDEV_SET_WAKEUP_CONFIG_CMDID,
+ WMI_TAG_TLV_BUF_LEN_PARAM,
+ WMI_TAG_SERVICE_AVAILABLE_EVENT,
+ WMI_TAG_PEER_ANTDIV_INFO_REQ_CMD,
+ WMI_TAG_PEER_ANTDIV_INFO_EVENT,
+ WMI_TAG_PEER_ANTDIV_INFO,
+ WMI_TAG_PDEV_GET_ANTDIV_STATUS_CMD,
+ WMI_TAG_PDEV_ANTDIV_STATUS_EVENT,
+ WMI_TAG_MNT_FILTER_CMD,
+ WMI_TAG_GET_CHIP_POWER_STATS_CMD,
+ WMI_TAG_PDEV_CHIP_POWER_STATS_EVENT,
+ WMI_TAG_COEX_GET_ANTENNA_ISOLATION_CMD,
+ WMI_TAG_COEX_REPORT_ISOLATION_EVENT,
+ WMI_TAG_CHAN_CCA_STATS,
+ WMI_TAG_PEER_SIGNAL_STATS,
+ WMI_TAG_TX_STATS,
+ WMI_TAG_PEER_AC_TX_STATS,
+ WMI_TAG_RX_STATS,
+ WMI_TAG_PEER_AC_RX_STATS,
+ WMI_TAG_REPORT_STATS_EVENT,
+ WMI_TAG_CHAN_CCA_STATS_THRESH,
+ WMI_TAG_PEER_SIGNAL_STATS_THRESH,
+ WMI_TAG_TX_STATS_THRESH,
+ WMI_TAG_RX_STATS_THRESH,
+ WMI_TAG_PDEV_SET_STATS_THRESHOLD_CMD,
+ WMI_TAG_REQUEST_WLAN_STATS_CMD,
+ WMI_TAG_RX_AGGR_FAILURE_EVENT,
+ WMI_TAG_RX_AGGR_FAILURE_INFO,
+ WMI_TAG_VDEV_ENCRYPT_DECRYPT_DATA_REQ_CMD,
+ WMI_TAG_VDEV_ENCRYPT_DECRYPT_DATA_RESP_EVENT,
+ WMI_TAG_PDEV_BAND_TO_MAC,
+ WMI_TAG_TBTT_OFFSET_INFO,
+ WMI_TAG_TBTT_OFFSET_EXT_EVENT,
+ WMI_TAG_SAR_LIMITS_CMD,
+ WMI_TAG_SAR_LIMIT_CMD_ROW,
+ WMI_TAG_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMD,
+ WMI_TAG_PDEV_DFS_PHYERR_OFFLOAD_DISABLE_CMD,
+ WMI_TAG_VDEV_ADFS_CH_CFG_CMD,
+ WMI_TAG_VDEV_ADFS_OCAC_ABORT_CMD,
+ WMI_TAG_PDEV_DFS_RADAR_DETECTION_EVENT,
+ WMI_TAG_VDEV_ADFS_OCAC_COMPLETE_EVENT,
+ WMI_TAG_VDEV_DFS_CAC_COMPLETE_EVENT,
+ WMI_TAG_VENDOR_OUI,
+ WMI_TAG_REQUEST_RCPI_CMD,
+ WMI_TAG_UPDATE_RCPI_EVENT,
+ WMI_TAG_REQUEST_PEER_STATS_INFO_CMD,
+ WMI_TAG_PEER_STATS_INFO,
+ WMI_TAG_PEER_STATS_INFO_EVENT,
+ WMI_TAG_PKGID_EVENT,
+ WMI_TAG_CONNECTED_NLO_RSSI_PARAMS,
+ WMI_TAG_SET_CURRENT_COUNTRY_CMD,
+ WMI_TAG_REGULATORY_RULE_STRUCT,
+ WMI_TAG_REG_CHAN_LIST_CC_EVENT,
+ WMI_TAG_11D_SCAN_START_CMD,
+ WMI_TAG_11D_SCAN_STOP_CMD,
+ WMI_TAG_11D_NEW_COUNTRY_EVENT,
+ WMI_TAG_REQUEST_RADIO_CHAN_STATS_CMD,
+ WMI_TAG_RADIO_CHAN_STATS,
+ WMI_TAG_RADIO_CHAN_STATS_EVENT,
+ WMI_TAG_ROAM_PER_CONFIG,
+ WMI_TAG_VDEV_ADD_MAC_ADDR_TO_RX_FILTER_CMD,
+ WMI_TAG_VDEV_ADD_MAC_ADDR_TO_RX_FILTER_STATUS_EVENT,
+ WMI_TAG_BPF_SET_VDEV_ACTIVE_MODE_CMD,
+ WMI_TAG_HW_DATA_FILTER_CMD,
+ WMI_TAG_CONNECTED_NLO_BSS_BAND_RSSI_PREF,
+ WMI_TAG_PEER_OPER_MODE_CHANGE_EVENT,
+ WMI_TAG_CHIP_POWER_SAVE_FAILURE_DETECTED,
+ WMI_TAG_PDEV_MULTIPLE_VDEV_RESTART_REQUEST_CMD,
+ WMI_TAG_PDEV_CSA_SWITCH_COUNT_STATUS_EVENT,
+ WMI_TAG_PDEV_UPDATE_PKT_ROUTING_CMD,
+ WMI_TAG_PDEV_CHECK_CAL_VERSION_CMD,
+ WMI_TAG_PDEV_CHECK_CAL_VERSION_EVENT,
+ WMI_TAG_PDEV_SET_DIVERSITY_GAIN_CMD,
+ WMI_TAG_MAC_PHY_CHAINMASK_COMBO,
+ WMI_TAG_MAC_PHY_CHAINMASK_CAPABILITY,
+ WMI_TAG_VDEV_SET_ARP_STATS_CMD,
+ WMI_TAG_VDEV_GET_ARP_STATS_CMD,
+ WMI_TAG_VDEV_GET_ARP_STATS_EVENT,
+ WMI_TAG_IFACE_OFFLOAD_STATS,
+ WMI_TAG_REQUEST_STATS_CMD_SUB_STRUCT_PARAM,
+ WMI_TAG_RSSI_CTL_EXT,
+ WMI_TAG_SINGLE_PHYERR_EXT_RX_HDR,
+ WMI_TAG_COEX_BT_ACTIVITY_EVENT,
+ WMI_TAG_VDEV_GET_TX_POWER_CMD,
+ WMI_TAG_VDEV_TX_POWER_EVENT,
+ WMI_TAG_OFFCHAN_DATA_TX_COMPL_EVENT,
+ WMI_TAG_OFFCHAN_DATA_TX_SEND_CMD,
+ WMI_TAG_TX_SEND_PARAMS,
+ WMI_TAG_HE_RATE_SET,
+ WMI_TAG_CONGESTION_STATS,
+ WMI_TAG_SET_INIT_COUNTRY_CMD,
+ WMI_TAG_SCAN_DBS_DUTY_CYCLE,
+ WMI_TAG_SCAN_DBS_DUTY_CYCLE_PARAM_TLV,
+ WMI_TAG_PDEV_DIV_GET_RSSI_ANTID,
+ WMI_TAG_THERM_THROT_CONFIG_REQUEST,
+ WMI_TAG_THERM_THROT_LEVEL_CONFIG_INFO,
+ WMI_TAG_THERM_THROT_STATS_EVENT,
+ WMI_TAG_THERM_THROT_LEVEL_STATS_INFO,
+ WMI_TAG_PDEV_DIV_RSSI_ANTID_EVENT,
+ WMI_TAG_OEM_DMA_RING_CAPABILITIES,
+ WMI_TAG_OEM_DMA_RING_CFG_REQ,
+ WMI_TAG_OEM_DMA_RING_CFG_RSP,
+ WMI_TAG_OEM_INDIRECT_DATA,
+ WMI_TAG_OEM_DMA_BUF_RELEASE,
+ WMI_TAG_OEM_DMA_BUF_RELEASE_ENTRY,
+ WMI_TAG_PDEV_BSS_CHAN_INFO_REQUEST,
+ WMI_TAG_PDEV_BSS_CHAN_INFO_EVENT,
+ WMI_TAG_ROAM_LCA_DISALLOW_CONFIG,
+ WMI_TAG_VDEV_LIMIT_OFFCHAN_CMD,
+ WMI_TAG_ROAM_RSSI_REJECTION_OCE_CONFIG,
+ WMI_TAG_UNIT_TEST_EVENT,
+ WMI_TAG_ROAM_FILS_OFFLOAD,
+ WMI_TAG_PDEV_UPDATE_PMK_CACHE_CMD,
+ WMI_TAG_PMK_CACHE,
+ WMI_TAG_PDEV_UPDATE_FILS_HLP_PKT_CMD,
+ WMI_TAG_ROAM_FILS_SYNCH,
+ WMI_TAG_GTK_OFFLOAD_EXTENDED,
+ WMI_TAG_ROAM_BG_SCAN_ROAMING,
+ WMI_TAG_OIC_PING_OFFLOAD_PARAMS_CMD,
+ WMI_TAG_OIC_PING_OFFLOAD_SET_ENABLE_CMD,
+ WMI_TAG_OIC_PING_HANDOFF_EVENT,
+ WMI_TAG_DHCP_LEASE_RENEW_OFFLOAD_CMD,
+ WMI_TAG_DHCP_LEASE_RENEW_EVENT,
+ WMI_TAG_BTM_CONFIG,
+ WMI_TAG_DEBUG_MESG_FW_DATA_STALL,
+ WMI_TAG_WLM_CONFIG_CMD,
+ WMI_TAG_PDEV_UPDATE_CTLTABLE_REQUEST,
+ WMI_TAG_PDEV_UPDATE_CTLTABLE_EVENT,
+ WMI_TAG_ROAM_CND_SCORING_PARAM,
+ WMI_TAG_PDEV_CONFIG_VENDOR_OUI_ACTION,
+ WMI_TAG_VENDOR_OUI_EXT,
+ WMI_TAG_ROAM_SYNCH_FRAME_EVENT,
+ WMI_TAG_FD_SEND_FROM_HOST_CMD,
+ WMI_TAG_ENABLE_FILS_CMD,
+ WMI_TAG_HOST_SWFDA_EVENT,
+ WMI_TAG_BCN_OFFLOAD_CTRL_CMD,
+ WMI_TAG_PDEV_SET_AC_TX_QUEUE_OPTIMIZED_CMD,
+ WMI_TAG_STATS_PERIOD,
+ WMI_TAG_NDL_SCHEDULE_UPDATE,
+ WMI_TAG_PEER_TID_MSDUQ_QDEPTH_THRESH_UPDATE_CMD,
+ WMI_TAG_MSDUQ_QDEPTH_THRESH_UPDATE,
+ WMI_TAG_PDEV_SET_RX_FILTER_PROMISCUOUS_CMD,
+ WMI_TAG_SAR2_RESULT_EVENT,
+ WMI_TAG_SAR_CAPABILITIES,
+ WMI_TAG_SAP_OBSS_DETECTION_CFG_CMD,
+ WMI_TAG_SAP_OBSS_DETECTION_INFO_EVT,
+ WMI_TAG_DMA_RING_CAPABILITIES,
+ WMI_TAG_DMA_RING_CFG_REQ,
+ WMI_TAG_DMA_RING_CFG_RSP,
+ WMI_TAG_DMA_BUF_RELEASE,
+ WMI_TAG_DMA_BUF_RELEASE_ENTRY,
+ WMI_TAG_SAR_GET_LIMITS_CMD,
+ WMI_TAG_SAR_GET_LIMITS_EVENT,
+ WMI_TAG_SAR_GET_LIMITS_EVENT_ROW,
+ WMI_TAG_OFFLOAD_11K_REPORT,
+ WMI_TAG_INVOKE_NEIGHBOR_REPORT,
+ WMI_TAG_NEIGHBOR_REPORT_OFFLOAD,
+ WMI_TAG_VDEV_SET_CONNECTIVITY_CHECK_STATS,
+ WMI_TAG_VDEV_GET_CONNECTIVITY_CHECK_STATS,
+ WMI_TAG_BPF_SET_VDEV_ENABLE_CMD,
+ WMI_TAG_BPF_SET_VDEV_WORK_MEMORY_CMD,
+ WMI_TAG_BPF_GET_VDEV_WORK_MEMORY_CMD,
+ WMI_TAG_BPF_GET_VDEV_WORK_MEMORY_RESP_EVT,
+ WMI_TAG_PDEV_GET_NFCAL_POWER,
+ WMI_TAG_BSS_COLOR_CHANGE_ENABLE,
+ WMI_TAG_OBSS_COLOR_COLLISION_DET_CONFIG,
+ WMI_TAG_OBSS_COLOR_COLLISION_EVT,
+ WMI_TAG_RUNTIME_DPD_RECAL_CMD,
+ WMI_TAG_TWT_ENABLE_CMD,
+ WMI_TAG_TWT_DISABLE_CMD,
+ WMI_TAG_TWT_ADD_DIALOG_CMD,
+ WMI_TAG_TWT_DEL_DIALOG_CMD,
+ WMI_TAG_TWT_PAUSE_DIALOG_CMD,
+ WMI_TAG_TWT_RESUME_DIALOG_CMD,
+ WMI_TAG_TWT_ENABLE_COMPLETE_EVENT,
+ WMI_TAG_TWT_DISABLE_COMPLETE_EVENT,
+ WMI_TAG_TWT_ADD_DIALOG_COMPLETE_EVENT,
+ WMI_TAG_TWT_DEL_DIALOG_COMPLETE_EVENT,
+ WMI_TAG_TWT_PAUSE_DIALOG_COMPLETE_EVENT,
+ WMI_TAG_TWT_RESUME_DIALOG_COMPLETE_EVENT,
+ WMI_TAG_REQUEST_ROAM_SCAN_STATS_CMD,
+ WMI_TAG_ROAM_SCAN_STATS_EVENT,
+ WMI_TAG_PEER_TID_CONFIGURATIONS_CMD,
+ WMI_TAG_VDEV_SET_CUSTOM_SW_RETRY_TH_CMD,
+ WMI_TAG_GET_TPC_POWER_CMD,
+ WMI_TAG_GET_TPC_POWER_EVENT,
+ WMI_TAG_DMA_BUF_RELEASE_SPECTRAL_META_DATA,
+ WMI_TAG_MOTION_DET_CONFIG_PARAMS_CMD,
+ WMI_TAG_MOTION_DET_BASE_LINE_CONFIG_PARAMS_CMD,
+ WMI_TAG_MOTION_DET_START_STOP_CMD,
+ WMI_TAG_MOTION_DET_BASE_LINE_START_STOP_CMD,
+ WMI_TAG_MOTION_DET_EVENT,
+ WMI_TAG_MOTION_DET_BASE_LINE_EVENT,
+ WMI_TAG_NDP_TRANSPORT_IP,
+ WMI_TAG_OBSS_SPATIAL_REUSE_SET_CMD,
+ WMI_TAG_ESP_ESTIMATE_EVENT,
+ WMI_TAG_NAN_HOST_CONFIG,
+ WMI_TAG_SPECTRAL_BIN_SCALING_PARAMS,
+ WMI_TAG_PEER_CFR_CAPTURE_CMD,
+ WMI_TAG_PEER_CHAN_WIDTH_SWITCH_CMD,
+ WMI_TAG_CHAN_WIDTH_PEER_LIST,
+ WMI_TAG_OBSS_SPATIAL_REUSE_SET_DEF_OBSS_THRESH_CMD,
+ WMI_TAG_PDEV_HE_TB_ACTION_FRM_CMD,
+ WMI_TAG_PEER_EXTD2_STATS,
+ WMI_TAG_HPCS_PULSE_START_CMD,
+ WMI_TAG_PDEV_CTL_FAILSAFE_CHECK_EVENT,
+ WMI_TAG_VDEV_CHAINMASK_CONFIG_CMD,
+ WMI_TAG_VDEV_BCN_OFFLOAD_QUIET_CONFIG_CMD,
+ WMI_TAG_NAN_EVENT_INFO,
+ WMI_TAG_NDP_CHANNEL_INFO,
+ WMI_TAG_NDP_CMD,
+ WMI_TAG_NDP_EVENT,
+ WMI_TAG_PDEV_PEER_PKTLOG_FILTER_CMD = 0x301,
+ WMI_TAG_PDEV_PEER_PKTLOG_FILTER_INFO,
+ WMI_TAG_FILS_DISCOVERY_TMPL_CMD = 0x344,
+ WMI_TAG_PDEV_SRG_BSS_COLOR_BITMAP_CMD = 0x37b,
+ WMI_TAG_PDEV_SRG_PARTIAL_BSSID_BITMAP_CMD,
+ WMI_TAG_PDEV_SRG_OBSS_COLOR_ENABLE_BITMAP_CMD = 0x381,
+ WMI_TAG_PDEV_SRG_OBSS_BSSID_ENABLE_BITMAP_CMD,
+ WMI_TAG_PDEV_NON_SRG_OBSS_COLOR_ENABLE_BITMAP_CMD,
+ WMI_TAG_PDEV_NON_SRG_OBSS_BSSID_ENABLE_BITMAP_CMD,
+ WMI_TAG_REGULATORY_RULE_EXT_STRUCT = 0x3A9,
+ WMI_TAG_REG_CHAN_LIST_CC_EXT_EVENT,
+ WMI_TAG_VDEV_SET_TPC_POWER_CMD = 0x3B5,
+ WMI_TAG_VDEV_CH_POWER_INFO,
+ WMI_TAG_PDEV_SET_BIOS_SAR_TABLE_CMD = 0x3D8,
+ WMI_TAG_PDEV_SET_BIOS_GEO_TABLE_CMD,
+ WMI_TAG_MAX
+};
+
+enum wmi_tlv_service {
+ WMI_TLV_SERVICE_BEACON_OFFLOAD = 0,
+ WMI_TLV_SERVICE_SCAN_OFFLOAD = 1,
+ WMI_TLV_SERVICE_ROAM_SCAN_OFFLOAD = 2,
+ WMI_TLV_SERVICE_BCN_MISS_OFFLOAD = 3,
+ WMI_TLV_SERVICE_STA_PWRSAVE = 4,
+ WMI_TLV_SERVICE_STA_ADVANCED_PWRSAVE = 5,
+ WMI_TLV_SERVICE_AP_UAPSD = 6,
+ WMI_TLV_SERVICE_AP_DFS = 7,
+ WMI_TLV_SERVICE_11AC = 8,
+ WMI_TLV_SERVICE_BLOCKACK = 9,
+ WMI_TLV_SERVICE_PHYERR = 10,
+ WMI_TLV_SERVICE_BCN_FILTER = 11,
+ WMI_TLV_SERVICE_RTT = 12,
+ WMI_TLV_SERVICE_WOW = 13,
+ WMI_TLV_SERVICE_RATECTRL_CACHE = 14,
+ WMI_TLV_SERVICE_IRAM_TIDS = 15,
+ WMI_TLV_SERVICE_ARPNS_OFFLOAD = 16,
+ WMI_TLV_SERVICE_NLO = 17,
+ WMI_TLV_SERVICE_GTK_OFFLOAD = 18,
+ WMI_TLV_SERVICE_SCAN_SCH = 19,
+ WMI_TLV_SERVICE_CSA_OFFLOAD = 20,
+ WMI_TLV_SERVICE_CHATTER = 21,
+ WMI_TLV_SERVICE_COEX_FREQAVOID = 22,
+ WMI_TLV_SERVICE_PACKET_POWER_SAVE = 23,
+ WMI_TLV_SERVICE_FORCE_FW_HANG = 24,
+ WMI_TLV_SERVICE_GPIO = 25,
+ WMI_TLV_SERVICE_STA_DTIM_PS_MODULATED_DTIM = 26,
+ WMI_STA_UAPSD_BASIC_AUTO_TRIG = 27,
+ WMI_STA_UAPSD_VAR_AUTO_TRIG = 28,
+ WMI_TLV_SERVICE_STA_KEEP_ALIVE = 29,
+ WMI_TLV_SERVICE_TX_ENCAP = 30,
+ WMI_TLV_SERVICE_AP_PS_DETECT_OUT_OF_SYNC = 31,
+ WMI_TLV_SERVICE_EARLY_RX = 32,
+ WMI_TLV_SERVICE_STA_SMPS = 33,
+ WMI_TLV_SERVICE_FWTEST = 34,
+ WMI_TLV_SERVICE_STA_WMMAC = 35,
+ WMI_TLV_SERVICE_TDLS = 36,
+ WMI_TLV_SERVICE_BURST = 37,
+ WMI_TLV_SERVICE_MCC_BCN_INTERVAL_CHANGE = 38,
+ WMI_TLV_SERVICE_ADAPTIVE_OCS = 39,
+ WMI_TLV_SERVICE_BA_SSN_SUPPORT = 40,
+ WMI_TLV_SERVICE_FILTER_IPSEC_NATKEEPALIVE = 41,
+ WMI_TLV_SERVICE_WLAN_HB = 42,
+ WMI_TLV_SERVICE_LTE_ANT_SHARE_SUPPORT = 43,
+ WMI_TLV_SERVICE_BATCH_SCAN = 44,
+ WMI_TLV_SERVICE_QPOWER = 45,
+ WMI_TLV_SERVICE_PLMREQ = 46,
+ WMI_TLV_SERVICE_THERMAL_MGMT = 47,
+ WMI_TLV_SERVICE_RMC = 48,
+ WMI_TLV_SERVICE_MHF_OFFLOAD = 49,
+ WMI_TLV_SERVICE_COEX_SAR = 50,
+ WMI_TLV_SERVICE_BCN_TXRATE_OVERRIDE = 51,
+ WMI_TLV_SERVICE_NAN = 52,
+ WMI_TLV_SERVICE_L1SS_STAT = 53,
+ WMI_TLV_SERVICE_ESTIMATE_LINKSPEED = 54,
+ WMI_TLV_SERVICE_OBSS_SCAN = 55,
+ WMI_TLV_SERVICE_TDLS_OFFCHAN = 56,
+ WMI_TLV_SERVICE_TDLS_UAPSD_BUFFER_STA = 57,
+ WMI_TLV_SERVICE_TDLS_UAPSD_SLEEP_STA = 58,
+ WMI_TLV_SERVICE_IBSS_PWRSAVE = 59,
+ WMI_TLV_SERVICE_LPASS = 60,
+ WMI_TLV_SERVICE_EXTSCAN = 61,
+ WMI_TLV_SERVICE_D0WOW = 62,
+ WMI_TLV_SERVICE_HSOFFLOAD = 63,
+ WMI_TLV_SERVICE_ROAM_HO_OFFLOAD = 64,
+ WMI_TLV_SERVICE_RX_FULL_REORDER = 65,
+ WMI_TLV_SERVICE_DHCP_OFFLOAD = 66,
+ WMI_TLV_SERVICE_STA_RX_IPA_OFFLOAD_SUPPORT = 67,
+ WMI_TLV_SERVICE_MDNS_OFFLOAD = 68,
+ WMI_TLV_SERVICE_SAP_AUTH_OFFLOAD = 69,
+ WMI_TLV_SERVICE_DUAL_BAND_SIMULTANEOUS_SUPPORT = 70,
+ WMI_TLV_SERVICE_OCB = 71,
+ WMI_TLV_SERVICE_AP_ARPNS_OFFLOAD = 72,
+ WMI_TLV_SERVICE_PER_BAND_CHAINMASK_SUPPORT = 73,
+ WMI_TLV_SERVICE_PACKET_FILTER_OFFLOAD = 74,
+ WMI_TLV_SERVICE_MGMT_TX_HTT = 75,
+ WMI_TLV_SERVICE_MGMT_TX_WMI = 76,
+ WMI_TLV_SERVICE_EXT_MSG = 77,
+ WMI_TLV_SERVICE_MAWC = 78,
+ WMI_TLV_SERVICE_PEER_ASSOC_CONF = 79,
+ WMI_TLV_SERVICE_EGAP = 80,
+ WMI_TLV_SERVICE_STA_PMF_OFFLOAD = 81,
+ WMI_TLV_SERVICE_UNIFIED_WOW_CAPABILITY = 82,
+ WMI_TLV_SERVICE_ENHANCED_PROXY_STA = 83,
+ WMI_TLV_SERVICE_ATF = 84,
+ WMI_TLV_SERVICE_COEX_GPIO = 85,
+ WMI_TLV_SERVICE_AUX_SPECTRAL_INTF = 86,
+ WMI_TLV_SERVICE_AUX_CHAN_LOAD_INTF = 87,
+ WMI_TLV_SERVICE_BSS_CHANNEL_INFO_64 = 88,
+ WMI_TLV_SERVICE_ENTERPRISE_MESH = 89,
+ WMI_TLV_SERVICE_RESTRT_CHNL_SUPPORT = 90,
+ WMI_TLV_SERVICE_BPF_OFFLOAD = 91,
+ WMI_TLV_SERVICE_SYNC_DELETE_CMDS = 92,
+ WMI_TLV_SERVICE_SMART_ANTENNA_SW_SUPPORT = 93,
+ WMI_TLV_SERVICE_SMART_ANTENNA_HW_SUPPORT = 94,
+ WMI_TLV_SERVICE_RATECTRL_LIMIT_MAX_MIN_RATES = 95,
+ WMI_TLV_SERVICE_NAN_DATA = 96,
+ WMI_TLV_SERVICE_NAN_RTT = 97,
+ WMI_TLV_SERVICE_11AX = 98,
+ WMI_TLV_SERVICE_DEPRECATED_REPLACE = 99,
+ WMI_TLV_SERVICE_TDLS_CONN_TRACKER_IN_HOST_MODE = 100,
+ WMI_TLV_SERVICE_ENHANCED_MCAST_FILTER = 101,
+ WMI_TLV_SERVICE_PERIODIC_CHAN_STAT_SUPPORT = 102,
+ WMI_TLV_SERVICE_MESH_11S = 103,
+ WMI_TLV_SERVICE_HALF_RATE_QUARTER_RATE_SUPPORT = 104,
+ WMI_TLV_SERVICE_VDEV_RX_FILTER = 105,
+ WMI_TLV_SERVICE_P2P_LISTEN_OFFLOAD_SUPPORT = 106,
+ WMI_TLV_SERVICE_MARK_FIRST_WAKEUP_PACKET = 107,
+ WMI_TLV_SERVICE_MULTIPLE_MCAST_FILTER_SET = 108,
+ WMI_TLV_SERVICE_HOST_MANAGED_RX_REORDER = 109,
+ WMI_TLV_SERVICE_FLASH_RDWR_SUPPORT = 110,
+ WMI_TLV_SERVICE_WLAN_STATS_REPORT = 111,
+ WMI_TLV_SERVICE_TX_MSDU_ID_NEW_PARTITION_SUPPORT = 112,
+ WMI_TLV_SERVICE_DFS_PHYERR_OFFLOAD = 113,
+ WMI_TLV_SERVICE_RCPI_SUPPORT = 114,
+ WMI_TLV_SERVICE_FW_MEM_DUMP_SUPPORT = 115,
+ WMI_TLV_SERVICE_PEER_STATS_INFO = 116,
+ WMI_TLV_SERVICE_REGULATORY_DB = 117,
+ WMI_TLV_SERVICE_11D_OFFLOAD = 118,
+ WMI_TLV_SERVICE_HW_DATA_FILTERING = 119,
+ WMI_TLV_SERVICE_MULTIPLE_VDEV_RESTART = 120,
+ WMI_TLV_SERVICE_PKT_ROUTING = 121,
+ WMI_TLV_SERVICE_CHECK_CAL_VERSION = 122,
+ WMI_TLV_SERVICE_OFFCHAN_TX_WMI = 123,
+ WMI_TLV_SERVICE_8SS_TX_BFEE = 124,
+ WMI_TLV_SERVICE_EXTENDED_NSS_SUPPORT = 125,
+ WMI_TLV_SERVICE_ACK_TIMEOUT = 126,
+ WMI_TLV_SERVICE_PDEV_BSS_CHANNEL_INFO_64 = 127,
+
+ /* The first 128 bits */
+ WMI_MAX_SERVICE = 128,
+
+ WMI_TLV_SERVICE_CHAN_LOAD_INFO = 128,
+ WMI_TLV_SERVICE_TX_PPDU_INFO_STATS_SUPPORT = 129,
+ WMI_TLV_SERVICE_VDEV_LIMIT_OFFCHAN_SUPPORT = 130,
+ WMI_TLV_SERVICE_FILS_SUPPORT = 131,
+ WMI_TLV_SERVICE_WLAN_OIC_PING_OFFLOAD = 132,
+ WMI_TLV_SERVICE_WLAN_DHCP_RENEW = 133,
+ WMI_TLV_SERVICE_MAWC_SUPPORT = 134,
+ WMI_TLV_SERVICE_VDEV_LATENCY_CONFIG = 135,
+ WMI_TLV_SERVICE_PDEV_UPDATE_CTLTABLE_SUPPORT = 136,
+ WMI_TLV_SERVICE_PKTLOG_SUPPORT_OVER_HTT = 137,
+ WMI_TLV_SERVICE_VDEV_MULTI_GROUP_KEY_SUPPORT = 138,
+ WMI_TLV_SERVICE_SCAN_PHYMODE_SUPPORT = 139,
+ WMI_TLV_SERVICE_THERM_THROT = 140,
+ WMI_TLV_SERVICE_BCN_OFFLOAD_START_STOP_SUPPORT = 141,
+ WMI_TLV_SERVICE_WOW_WAKEUP_BY_TIMER_PATTERN = 142,
+ WMI_TLV_SERVICE_PEER_MAP_UNMAP_V2_SUPPORT = 143,
+ WMI_TLV_SERVICE_OFFCHAN_DATA_TID_SUPPORT = 144,
+ WMI_TLV_SERVICE_RX_PROMISC_ENABLE_SUPPORT = 145,
+ WMI_TLV_SERVICE_SUPPORT_DIRECT_DMA = 146,
+ WMI_TLV_SERVICE_AP_OBSS_DETECTION_OFFLOAD = 147,
+ WMI_TLV_SERVICE_11K_NEIGHBOUR_REPORT_SUPPORT = 148,
+ WMI_TLV_SERVICE_LISTEN_INTERVAL_OFFLOAD_SUPPORT = 149,
+ WMI_TLV_SERVICE_BSS_COLOR_OFFLOAD = 150,
+ WMI_TLV_SERVICE_RUNTIME_DPD_RECAL = 151,
+ WMI_TLV_SERVICE_STA_TWT = 152,
+ WMI_TLV_SERVICE_AP_TWT = 153,
+ WMI_TLV_SERVICE_GMAC_OFFLOAD_SUPPORT = 154,
+ WMI_TLV_SERVICE_SPOOF_MAC_SUPPORT = 155,
+ WMI_TLV_SERVICE_PEER_TID_CONFIGS_SUPPORT = 156,
+ WMI_TLV_SERVICE_VDEV_SWRETRY_PER_AC_CONFIG_SUPPORT = 157,
+ WMI_TLV_SERVICE_DUAL_BEACON_ON_SINGLE_MAC_SCC_SUPPORT = 158,
+ WMI_TLV_SERVICE_DUAL_BEACON_ON_SINGLE_MAC_MCC_SUPPORT = 159,
+ WMI_TLV_SERVICE_MOTION_DET = 160,
+ WMI_TLV_SERVICE_INFRA_MBSSID = 161,
+ WMI_TLV_SERVICE_OBSS_SPATIAL_REUSE = 162,
+ WMI_TLV_SERVICE_VDEV_DIFFERENT_BEACON_INTERVAL_SUPPORT = 163,
+ WMI_TLV_SERVICE_NAN_DBS_SUPPORT = 164,
+ WMI_TLV_SERVICE_NDI_DBS_SUPPORT = 165,
+ WMI_TLV_SERVICE_NAN_SAP_SUPPORT = 166,
+ WMI_TLV_SERVICE_NDI_SAP_SUPPORT = 167,
+ WMI_TLV_SERVICE_CFR_CAPTURE_SUPPORT = 168,
+ WMI_TLV_SERVICE_CFR_CAPTURE_IND_MSG_TYPE_1 = 169,
+ WMI_TLV_SERVICE_ESP_SUPPORT = 170,
+ WMI_TLV_SERVICE_PEER_CHWIDTH_CHANGE = 171,
+ WMI_TLV_SERVICE_WLAN_HPCS_PULSE = 172,
+ WMI_TLV_SERVICE_PER_VDEV_CHAINMASK_CONFIG_SUPPORT = 173,
+ WMI_TLV_SERVICE_TX_DATA_MGMT_ACK_RSSI = 174,
+ WMI_TLV_SERVICE_NAN_DISABLE_SUPPORT = 175,
+ WMI_TLV_SERVICE_HTT_H2T_NO_HTC_HDR_LEN_IN_MSG_LEN = 176,
+ WMI_TLV_SERVICE_COEX_SUPPORT_UNEQUAL_ISOLATION = 177,
+ WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT = 178,
+ WMI_TLV_SERVICE_SUPPORT_EXTEND_ADDRESS = 179,
+ WMI_TLV_SERVICE_BEACON_RECEPTION_STATS = 180,
+ WMI_TLV_SERVICE_FETCH_TX_PN = 181,
+ WMI_TLV_SERVICE_PEER_UNMAP_RESPONSE_SUPPORT = 182,
+ WMI_TLV_SERVICE_TX_PER_PEER_AMPDU_SIZE = 183,
+ WMI_TLV_SERVICE_BSS_COLOR_SWITCH_COUNT = 184,
+ WMI_TLV_SERVICE_HTT_PEER_STATS_SUPPORT = 185,
+ WMI_TLV_SERVICE_UL_RU26_ALLOWED = 186,
+ WMI_TLV_SERVICE_GET_MWS_COEX_STATE = 187,
+ WMI_TLV_SERVICE_GET_MWS_DPWB_STATE = 188,
+ WMI_TLV_SERVICE_GET_MWS_TDM_STATE = 189,
+ WMI_TLV_SERVICE_GET_MWS_IDRX_STATE = 190,
+ WMI_TLV_SERVICE_GET_MWS_ANTENNA_SHARING_STATE = 191,
+ WMI_TLV_SERVICE_ENHANCED_TPC_CONFIG_EVENT = 192,
+ WMI_TLV_SERVICE_WLM_STATS_REQUEST = 193,
+ WMI_TLV_SERVICE_EXT_PEER_TID_CONFIGS_SUPPORT = 194,
+ WMI_TLV_SERVICE_WPA3_FT_SAE_SUPPORT = 195,
+ WMI_TLV_SERVICE_WPA3_FT_SUITE_B_SUPPORT = 196,
+ WMI_TLV_SERVICE_VOW_ENABLE = 197,
+ WMI_TLV_SERVICE_CFR_CAPTURE_IND_EVT_TYPE_1 = 198,
+ WMI_TLV_SERVICE_BROADCAST_TWT = 199,
+ WMI_TLV_SERVICE_RAP_DETECTION_SUPPORT = 200,
+ WMI_TLV_SERVICE_PS_TDCC = 201,
+ WMI_TLV_SERVICE_THREE_WAY_COEX_CONFIG_LEGACY = 202,
+ WMI_TLV_SERVICE_THREE_WAY_COEX_CONFIG_OVERRIDE = 203,
+ WMI_TLV_SERVICE_TX_PWR_PER_PEER = 204,
+ WMI_TLV_SERVICE_STA_PLUS_STA_SUPPORT = 205,
+ WMI_TLV_SERVICE_WPA3_FT_FILS = 206,
+ WMI_TLV_SERVICE_ADAPTIVE_11R_ROAM = 207,
+ WMI_TLV_SERVICE_CHAN_RF_CHARACTERIZATION_INFO = 208,
+ WMI_TLV_SERVICE_FW_IFACE_COMBINATION_SUPPORT = 209,
+ WMI_TLV_SERVICE_TX_COMPL_TSF64 = 210,
+ WMI_TLV_SERVICE_DSM_ROAM_FILTER = 211,
+ WMI_TLV_SERVICE_PACKET_CAPTURE_SUPPORT = 212,
+ WMI_TLV_SERVICE_PER_PEER_HTT_STATS_RESET = 213,
+ WMI_TLV_SERVICE_FREQINFO_IN_METADATA = 219,
+ WMI_TLV_SERVICE_EXT2_MSG = 220,
+ WMI_TLV_SERVICE_PEER_POWER_SAVE_DURATION_SUPPORT = 246,
+ WMI_TLV_SERVICE_SRG_SRP_SPATIAL_REUSE_SUPPORT = 249,
+ WMI_TLV_SERVICE_MBSS_PARAM_IN_VDEV_START_SUPPORT = 253,
+ WMI_TLV_SERVICE_PASSIVE_SCAN_START_TIME_ENHANCE = 263,
+
+ /* The second 128 bits */
+ WMI_MAX_EXT_SERVICE = 256,
+ WMI_TLV_SERVICE_SCAN_CONFIG_PER_CHANNEL = 265,
+ WMI_TLV_SERVICE_EXT_TPC_REG_SUPPORT = 280,
+ WMI_TLV_SERVICE_REG_CC_EXT_EVENT_SUPPORT = 281,
+ WMI_TLV_SERVICE_BIOS_SAR_SUPPORT = 326,
+ WMI_TLV_SERVICE_SUPPORT_11D_FOR_HOST_SCAN = 357,
+
+ /* The third 128 bits */
+ WMI_MAX_EXT2_SERVICE = 384
+};
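+
+/* Illustrative sketch (hypothetical helper, not part of the original
+ * header): each service id above is a bit position in the firmware's
+ * service bitmap, which is reported in 128-bit segments. Assuming the
+ * segments have been flattened into one u32 array in id order, a lookup
+ * reduces to word = id / 32, bit = id % 32:
+ */
+static inline bool wmi_service_id_test(const u32 *bitmap, u32 id)
+{
+	return !!(bitmap[id / 32] & BIT(id % 32));
+}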
+
+enum {
+ WMI_SMPS_FORCED_MODE_NONE = 0,
+ WMI_SMPS_FORCED_MODE_DISABLED,
+ WMI_SMPS_FORCED_MODE_STATIC,
+ WMI_SMPS_FORCED_MODE_DYNAMIC
+};
+
+#define WMI_TPC_CHAINMASK_CONFIG_BAND_2G 0
+#define WMI_TPC_CHAINMASK_CONFIG_BAND_5G 1
+#define WMI_NUM_SUPPORTED_BAND_MAX 2
+
+#define WMI_PEER_MIMO_PS_STATE 0x1
+#define WMI_PEER_AMPDU 0x2
+#define WMI_PEER_AUTHORIZE 0x3
+#define WMI_PEER_CHWIDTH 0x4
+#define WMI_PEER_NSS 0x5
+#define WMI_PEER_USE_4ADDR 0x6
+#define WMI_PEER_MEMBERSHIP 0x7
+#define WMI_PEER_USERPOS 0x8
+#define WMI_PEER_CRIT_PROTO_HINT_ENABLED 0x9
+#define WMI_PEER_TX_FAIL_CNT_THR 0xA
+#define WMI_PEER_SET_HW_RETRY_CTS2S 0xB
+#define WMI_PEER_IBSS_ATIM_WINDOW_LENGTH 0xC
+#define WMI_PEER_PHYMODE 0xD
+#define WMI_PEER_USE_FIXED_PWR 0xE
+#define WMI_PEER_PARAM_FIXED_RATE 0xF
+#define WMI_PEER_SET_MU_WHITELIST 0x10
+#define WMI_PEER_SET_MAX_TX_RATE 0x11
+#define WMI_PEER_SET_MIN_TX_RATE 0x12
+#define WMI_PEER_SET_DEFAULT_ROUTING 0x13
+
+/* slot time long */
+#define WMI_VDEV_SLOT_TIME_LONG 0x1
+/* slot time short */
+#define WMI_VDEV_SLOT_TIME_SHORT 0x2
+/* preamble long */
+#define WMI_VDEV_PREAMBLE_LONG 0x1
+/* preamble short */
+#define WMI_VDEV_PREAMBLE_SHORT 0x2
+
+enum wmi_peer_smps_state {
+ WMI_PEER_SMPS_PS_NONE = 0x0,
+ WMI_PEER_SMPS_STATIC = 0x1,
+ WMI_PEER_SMPS_DYNAMIC = 0x2
+};
+
+enum wmi_peer_chwidth {
+ WMI_PEER_CHWIDTH_20MHZ = 0,
+ WMI_PEER_CHWIDTH_40MHZ = 1,
+ WMI_PEER_CHWIDTH_80MHZ = 2,
+ WMI_PEER_CHWIDTH_160MHZ = 3,
+};
+
+enum wmi_beacon_gen_mode {
+ WMI_BEACON_STAGGERED_MODE = 0,
+ WMI_BEACON_BURST_MODE = 1
+};
+
+enum wmi_direct_buffer_module {
+ WMI_DIRECT_BUF_SPECTRAL = 0,
+ WMI_DIRECT_BUF_CFR = 1,
+
+ /* keep it last */
+ WMI_DIRECT_BUF_MAX
+};
+
+/* enum wmi_nss_ratio - NSS ratio received from FW during service ready ext
+ * event
+ * WMI_NSS_RATIO_1BY2_NSS - Max NSS at 160 MHz is half of the max NSS at
+ * 80 MHz
+ * WMI_NSS_RATIO_3BY4_NSS - Max NSS at 160 MHz is 3/4 of the max NSS at
+ * 80 MHz
+ * WMI_NSS_RATIO_1_NSS - Max NSS at 160 MHz equals the max NSS at 80 MHz
+ * WMI_NSS_RATIO_2_NSS - Max NSS at 160 MHz is twice the max NSS at 80 MHz
+ */
+
+enum wmi_nss_ratio {
+ WMI_NSS_RATIO_1BY2_NSS = 0x0,
+ WMI_NSS_RATIO_3BY4_NSS = 0x1,
+ WMI_NSS_RATIO_1_NSS = 0x2,
+ WMI_NSS_RATIO_2_NSS = 0x3,
+};
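+
+/* A minimal sketch (hypothetical helper, assuming the semantics documented
+ * above) of deriving the max NSS at 160 MHz from the max NSS at 80 MHz:
+ */
+static inline u32 wmi_max_nss_160mhz(u32 nss_80mhz, enum wmi_nss_ratio ratio)
+{
+	switch (ratio) {
+	case WMI_NSS_RATIO_1BY2_NSS:
+		return nss_80mhz / 2;
+	case WMI_NSS_RATIO_3BY4_NSS:
+		return nss_80mhz * 3 / 4;
+	case WMI_NSS_RATIO_1_NSS:
+		return nss_80mhz;
+	case WMI_NSS_RATIO_2_NSS:
+		return nss_80mhz * 2;
+	}
+	return nss_80mhz;
+}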
+
+enum wmi_dtim_policy {
+ WMI_DTIM_POLICY_IGNORE = 1,
+ WMI_DTIM_POLICY_NORMAL = 2,
+ WMI_DTIM_POLICY_STICK = 3,
+ WMI_DTIM_POLICY_AUTO = 4,
+};
+
+struct wmi_host_pdev_band_to_mac {
+ u32 pdev_id;
+ u32 start_freq;
+ u32 end_freq;
+};
+
+struct ath11k_ppe_threshold {
+ u32 numss_m1;
+ u32 ru_bit_mask;
+ u32 ppet16_ppet8_ru3_ru0[PSOC_HOST_MAX_NUM_SS];
+};
+
+struct ath11k_service_ext_param {
+ u32 default_conc_scan_config_bits;
+ u32 default_fw_config_bits;
+ struct ath11k_ppe_threshold ppet;
+ u32 he_cap_info;
+ u32 mpdu_density;
+ u32 max_bssid_rx_filters;
+ u32 num_hw_modes;
+ u32 num_phy;
+};
+
+struct ath11k_hw_mode_caps {
+ u32 hw_mode_id;
+ u32 phy_id_map;
+ u32 hw_mode_config_type;
+};
+
+#define PSOC_HOST_MAX_PHY_SIZE (3)
+#define ATH11K_11B_SUPPORT BIT(0)
+#define ATH11K_11G_SUPPORT BIT(1)
+#define ATH11K_11A_SUPPORT BIT(2)
+#define ATH11K_11N_SUPPORT BIT(3)
+#define ATH11K_11AC_SUPPORT BIT(4)
+#define ATH11K_11AX_SUPPORT BIT(5)
+
+struct ath11k_hal_reg_capabilities_ext {
+ u32 phy_id;
+ u32 eeprom_reg_domain;
+ u32 eeprom_reg_domain_ext;
+ u32 regcap1;
+ u32 regcap2;
+ u32 wireless_modes;
+ u32 low_2ghz_chan;
+ u32 high_2ghz_chan;
+ u32 low_5ghz_chan;
+ u32 high_5ghz_chan;
+};
+
+#define WMI_HOST_MAX_PDEV 3
+
+struct wlan_host_mem_chunk {
+ u32 tlv_header;
+ u32 req_id;
+ u32 ptr;
+ u32 size;
+} __packed;
+
+struct wmi_host_mem_chunk {
+ void *vaddr;
+ dma_addr_t paddr;
+ u32 len;
+ u32 req_id;
+};
+
+struct wmi_init_cmd_param {
+ u32 tlv_header;
+ struct target_resource_config *res_cfg;
+ u8 num_mem_chunks;
+ struct wmi_host_mem_chunk *mem_chunks;
+ u32 hw_mode_id;
+ u32 num_band_to_mac;
+ struct wmi_host_pdev_band_to_mac band_to_mac[WMI_HOST_MAX_PDEV];
+};
+
+struct wmi_pdev_band_to_mac {
+ u32 tlv_header;
+ u32 pdev_id;
+ u32 start_freq;
+ u32 end_freq;
+} __packed;
+
+struct wmi_pdev_set_hw_mode_cmd_param {
+ u32 tlv_header;
+ u32 pdev_id;
+ u32 hw_mode_index;
+ u32 num_band_to_mac;
+} __packed;
+
+struct wmi_ppe_threshold {
+ u32 numss_m1; /* NSS - 1 */
+ union {
+ u32 ru_count;
+ u32 ru_mask;
+ } __packed;
+ u32 ppet16_ppet8_ru3_ru0[WMI_MAX_NUM_SS];
+} __packed;
+
+#define HW_BD_INFO_SIZE 5
+
+struct wmi_abi_version {
+ u32 abi_version_0;
+ u32 abi_version_1;
+ u32 abi_version_ns_0;
+ u32 abi_version_ns_1;
+ u32 abi_version_ns_2;
+ u32 abi_version_ns_3;
+} __packed;
+
+struct wmi_init_cmd {
+ u32 tlv_header;
+ struct wmi_abi_version host_abi_vers;
+ u32 num_host_mem_chunks;
+} __packed;
+
+#define WMI_RSRC_CFG_FLAG1_BSS_CHANNEL_INFO_64 BIT(5)
+#define WMI_RSRC_CFG_FLAG2_CALC_NEXT_DTIM_COUNT_SET BIT(9)
+#define WMI_RSRC_CFG_FLAG1_ACK_RSSI BIT(18)
+
+#define WMI_CFG_HOST_SERVICE_FLAG_REG_CC_EXT 4
+
+struct wmi_resource_config {
+ u32 tlv_header;
+ u32 num_vdevs;
+ u32 num_peers;
+ u32 num_offload_peers;
+ u32 num_offload_reorder_buffs;
+ u32 num_peer_keys;
+ u32 num_tids;
+ u32 ast_skid_limit;
+ u32 tx_chain_mask;
+ u32 rx_chain_mask;
+ u32 rx_timeout_pri[4];
+ u32 rx_decap_mode;
+ u32 scan_max_pending_req;
+ u32 bmiss_offload_max_vdev;
+ u32 roam_offload_max_vdev;
+ u32 roam_offload_max_ap_profiles;
+ u32 num_mcast_groups;
+ u32 num_mcast_table_elems;
+ u32 mcast2ucast_mode;
+ u32 tx_dbg_log_size;
+ u32 num_wds_entries;
+ u32 dma_burst_size;
+ u32 mac_aggr_delim;
+ u32 rx_skip_defrag_timeout_dup_detection_check;
+ u32 vow_config;
+ u32 gtk_offload_max_vdev;
+ u32 num_msdu_desc;
+ u32 max_frag_entries;
+ u32 num_tdls_vdevs;
+ u32 num_tdls_conn_table_entries;
+ u32 beacon_tx_offload_max_vdev;
+ u32 num_multicast_filter_entries;
+ u32 num_wow_filters;
+ u32 num_keep_alive_pattern;
+ u32 keep_alive_pattern_size;
+ u32 max_tdls_concurrent_sleep_sta;
+ u32 max_tdls_concurrent_buffer_sta;
+ u32 wmi_send_separate;
+ u32 num_ocb_vdevs;
+ u32 num_ocb_channels;
+ u32 num_ocb_schedules;
+ u32 flag1;
+ u32 smart_ant_cap;
+ u32 bk_minfree;
+ u32 be_minfree;
+ u32 vi_minfree;
+ u32 vo_minfree;
+ u32 alloc_frag_desc_for_data_pkt;
+ u32 num_ns_ext_tuples_cfg;
+ u32 bpf_instruction_size;
+ u32 max_bssid_rx_filters;
+ u32 use_pdev_id;
+ u32 max_num_dbs_scan_duty_cycle;
+ u32 max_num_group_keys;
+ u32 peer_map_unmap_v2_support;
+ u32 sched_params;
+ u32 twt_ap_pdev_count;
+ u32 twt_ap_sta_count;
+ u32 max_nlo_ssids;
+ u32 num_pkt_filters;
+ u32 num_max_sta_vdevs;
+ u32 max_bssid_indicator;
+ u32 ul_resp_config;
+ u32 msdu_flow_override_config0;
+ u32 msdu_flow_override_config1;
+ u32 flags2;
+ u32 host_service_flags;
+ u32 max_rnr_neighbours;
+ u32 ema_max_vap_cnt;
+ u32 ema_max_profile_period;
+} __packed;
+
+struct wmi_service_ready_event {
+ u32 fw_build_vers;
+ struct wmi_abi_version fw_abi_vers;
+ u32 phy_capability;
+ u32 max_frag_entry;
+ u32 num_rf_chains;
+ u32 ht_cap_info;
+ u32 vht_cap_info;
+ u32 vht_supp_mcs;
+ u32 hw_min_tx_power;
+ u32 hw_max_tx_power;
+ u32 sys_cap_info;
+ u32 min_pkt_size_enable;
+ u32 max_bcn_ie_size;
+ u32 num_mem_reqs;
+ u32 max_num_scan_channels;
+ u32 hw_bd_id;
+ u32 hw_bd_info[HW_BD_INFO_SIZE];
+ u32 max_supported_macs;
+ u32 wmi_fw_sub_feat_caps;
+ u32 num_dbs_hw_modes;
+ /* txrx_chainmask
+ * [7:0] - 2G band tx chain mask
+ * [15:8] - 2G band rx chain mask
+ * [23:16] - 5G band tx chain mask
+ * [31:24] - 5G band rx chain mask
+ */
+ u32 txrx_chainmask;
+ u32 default_dbs_hw_mode_index;
+ u32 num_msdu_desc;
+} __packed;
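+
+/* Decode sketch for the txrx_chainmask layout documented above
+ * (hypothetical masks and helper built on GENMASK()/FIELD_GET() from
+ * <linux/bits.h> and <linux/bitfield.h>):
+ */
+#define WMI_TXRX_CHAINMASK_TX_2G	GENMASK(7, 0)
+#define WMI_TXRX_CHAINMASK_RX_2G	GENMASK(15, 8)
+#define WMI_TXRX_CHAINMASK_TX_5G	GENMASK(23, 16)
+#define WMI_TXRX_CHAINMASK_RX_5G	GENMASK(31, 24)
+
+static inline u32 wmi_txrx_chainmask_rx_5g(u32 txrx_chainmask)
+{
+	return FIELD_GET(WMI_TXRX_CHAINMASK_RX_5G, txrx_chainmask);
+}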
+
+#define WMI_SERVICE_BM_SIZE ((WMI_MAX_SERVICE + sizeof(u32) - 1) / sizeof(u32))
+
+#define WMI_SERVICE_SEGMENT_BM_SIZE32 4 /* 4x u32 = 128 bits */
+#define WMI_SERVICE_EXT_BM_SIZE (WMI_SERVICE_SEGMENT_BM_SIZE32 * sizeof(u32))
+#define WMI_AVAIL_SERVICE_BITS_IN_SIZE32 32
+#define WMI_SERVICE_BITS_IN_SIZE32 4
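+/* Worked out: with WMI_MAX_SERVICE = 128 and sizeof(u32) = 4,
+ * WMI_SERVICE_BM_SIZE evaluates to (128 + 4 - 1) / 4 = 32 array elements,
+ * and WMI_SERVICE_EXT_BM_SIZE to 4 * 4 = 16 bytes (one 128-bit segment).
+ */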
+
+struct wmi_service_ready_ext_event {
+ u32 default_conc_scan_config_bits;
+ u32 default_fw_config_bits;
+ struct wmi_ppe_threshold ppet;
+ u32 he_cap_info;
+ u32 mpdu_density;
+ u32 max_bssid_rx_filters;
+ u32 fw_build_vers_ext;
+ u32 max_nlo_ssids;
+ u32 max_bssid_indicator;
+ u32 he_cap_info_ext;
+} __packed;
+
+struct wmi_soc_mac_phy_hw_mode_caps {
+ u32 num_hw_modes;
+ u32 num_chainmask_tables;
+} __packed;
+
+struct wmi_hw_mode_capabilities {
+ u32 tlv_header;
+ u32 hw_mode_id;
+ u32 phy_id_map;
+ u32 hw_mode_config_type;
+} __packed;
+
+#define WMI_MAX_HECAP_PHY_SIZE (3)
+#define WMI_NSS_RATIO_ENABLE_DISABLE_BITPOS BIT(0)
+#define WMI_NSS_RATIO_ENABLE_DISABLE_GET(_val) \
+ FIELD_GET(WMI_NSS_RATIO_ENABLE_DISABLE_BITPOS, _val)
+#define WMI_NSS_RATIO_INFO_BITPOS GENMASK(4, 1)
+#define WMI_NSS_RATIO_INFO_GET(_val) \
+ FIELD_GET(WMI_NSS_RATIO_INFO_BITPOS, _val)
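+
+/* Decode sketch (assumption: nss_ratio here refers to the nss_ratio field
+ * of struct wmi_mac_phy_capabilities below, which carries an enable bit
+ * and a wmi_nss_ratio value):
+ *
+ *	if (WMI_NSS_RATIO_ENABLE_DISABLE_GET(nss_ratio))
+ *		ratio = WMI_NSS_RATIO_INFO_GET(nss_ratio);
+ */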
+
+struct wmi_mac_phy_capabilities {
+ u32 hw_mode_id;
+ u32 pdev_id;
+ u32 phy_id;
+ u32 supported_flags;
+ u32 supported_bands;
+ u32 ampdu_density;
+ u32 max_bw_supported_2g;
+ u32 ht_cap_info_2g;
+ u32 vht_cap_info_2g;
+ u32 vht_supp_mcs_2g;
+ u32 he_cap_info_2g;
+ u32 he_supp_mcs_2g;
+ u32 tx_chain_mask_2g;
+ u32 rx_chain_mask_2g;
+ u32 max_bw_supported_5g;
+ u32 ht_cap_info_5g;
+ u32 vht_cap_info_5g;
+ u32 vht_supp_mcs_5g;
+ u32 he_cap_info_5g;
+ u32 he_supp_mcs_5g;
+ u32 tx_chain_mask_5g;
+ u32 rx_chain_mask_5g;
+ u32 he_cap_phy_info_2g[WMI_MAX_HECAP_PHY_SIZE];
+ u32 he_cap_phy_info_5g[WMI_MAX_HECAP_PHY_SIZE];
+ struct wmi_ppe_threshold he_ppet2g;
+ struct wmi_ppe_threshold he_ppet5g;
+ u32 chainmask_table_id;
+ u32 lmac_id;
+ u32 he_cap_info_2g_ext;
+ u32 he_cap_info_5g_ext;
+ u32 he_cap_info_internal;
+ u32 wireless_modes;
+ u32 low_2ghz_chan_freq;
+ u32 high_2ghz_chan_freq;
+ u32 low_5ghz_chan_freq;
+ u32 high_5ghz_chan_freq;
+ u32 nss_ratio;
+} __packed;
+
+struct wmi_hal_reg_capabilities_ext {
+ u32 tlv_header;
+ u32 phy_id;
+ u32 eeprom_reg_domain;
+ u32 eeprom_reg_domain_ext;
+ u32 regcap1;
+ u32 regcap2;
+ u32 wireless_modes;
+ u32 low_2ghz_chan;
+ u32 high_2ghz_chan;
+ u32 low_5ghz_chan;
+ u32 high_5ghz_chan;
+} __packed;
+
+struct wmi_soc_hal_reg_capabilities {
+ u32 num_phy;
+} __packed;
+
+/* 2 word representation of MAC addr */
+struct wmi_mac_addr {
+ union {
+ u8 addr[6];
+ struct {
+ u32 word0;
+ u32 word1;
+ } __packed;
+ } __packed;
+} __packed;
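+
+/* A minimal sketch (hypothetical helper; memcpy() from <linux/string.h>)
+ * of filling the union: the six address octets alias word0 plus the low
+ * half of word1, so the two padding octets of word1 are cleared first:
+ */
+static inline void wmi_mac_addr_set(struct wmi_mac_addr *dst, const u8 *mac)
+{
+	dst->word1 = 0;
+	memcpy(dst->addr, mac, sizeof(dst->addr));
+}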
+
+struct wmi_dma_ring_capabilities {
+ u32 tlv_header;
+ u32 pdev_id;
+ u32 module_id;
+ u32 min_elem;
+ u32 min_buf_sz;
+ u32 min_buf_align;
+} __packed;
+
+struct wmi_ready_event_min {
+ struct wmi_abi_version fw_abi_vers;
+ struct wmi_mac_addr mac_addr;
+ u32 status;
+ u32 num_dscp_table;
+ u32 num_extra_mac_addr;
+ u32 num_total_peers;
+ u32 num_extra_peers;
+} __packed;
+
+struct wmi_ready_event {
+ struct wmi_ready_event_min ready_event_min;
+ u32 max_ast_index;
+ u32 pktlog_defs_checksum;
+} __packed;
+
+struct wmi_service_available_event {
+ u32 wmi_service_segment_offset;
+ u32 wmi_service_segment_bitmap[WMI_SERVICE_SEGMENT_BM_SIZE32];
+} __packed;
+
+struct ath11k_pdev_wmi {
+ struct ath11k_wmi_base *wmi_ab;
+ enum ath11k_htc_ep_id eid;
+ u32 rx_decap_mode;
+ wait_queue_head_t tx_ce_desc_wq;
+};
+
+struct vdev_create_params {
+ u8 if_id;
+ u32 type;
+ u32 subtype;
+ struct {
+ u8 tx;
+ u8 rx;
+ } chains[NUM_NL80211_BANDS];
+ u32 pdev_id;
+ u32 mbssid_flags;
+ u32 mbssid_tx_vdev_id;
+};
+
+struct wmi_vdev_create_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 vdev_type;
+ u32 vdev_subtype;
+ struct wmi_mac_addr vdev_macaddr;
+ u32 num_cfg_txrx_streams;
+ u32 pdev_id;
+ u32 mbssid_flags;
+ u32 mbssid_tx_vdev_id;
+} __packed;
+
+struct wmi_vdev_txrx_streams {
+ u32 tlv_header;
+ u32 band;
+ u32 supported_tx_streams;
+ u32 supported_rx_streams;
+} __packed;
+
+struct wmi_vdev_delete_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+} __packed;
+
+struct wmi_vdev_up_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 vdev_assoc_id;
+ struct wmi_mac_addr vdev_bssid;
+ struct wmi_mac_addr tx_vdev_bssid;
+ u32 nontx_profile_idx;
+ u32 nontx_profile_cnt;
+} __packed;
+
+struct wmi_vdev_stop_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+} __packed;
+
+struct wmi_vdev_down_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+} __packed;
+
+#define WMI_VDEV_START_HIDDEN_SSID BIT(0)
+#define WMI_VDEV_START_PMF_ENABLED BIT(1)
+#define WMI_VDEV_START_LDPC_RX_ENABLED BIT(3)
+#define WMI_VDEV_START_HW_ENCRYPTION_DISABLED BIT(4)
+
+struct wmi_ssid {
+ u32 ssid_len;
+ u32 ssid[8];
+} __packed;
+
+#define ATH11K_VDEV_SETUP_TIMEOUT_HZ (1 * HZ)
+
+struct wmi_vdev_start_request_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 requestor_id;
+ u32 beacon_interval;
+ u32 dtim_period;
+ u32 flags;
+ struct wmi_ssid ssid;
+ u32 bcn_tx_rate;
+ u32 bcn_txpower;
+ u32 num_noa_descriptors;
+ u32 disable_hw_ack;
+ u32 preferred_tx_streams;
+ u32 preferred_rx_streams;
+ u32 he_ops;
+ u32 cac_duration_ms;
+ u32 regdomain;
+ u32 min_data_rate;
+ u32 mbssid_flags;
+ u32 mbssid_tx_vdev_id;
+} __packed;
+
+#define MGMT_TX_DL_FRM_LEN 64
+#define WMI_MAC_MAX_SSID_LENGTH 32
+struct mac_ssid {
+ u8 length;
+ u8 mac_ssid[WMI_MAC_MAX_SSID_LENGTH];
+} __packed;
+
+struct wmi_p2p_noa_descriptor {
+ u32 type_count;
+ u32 duration;
+ u32 interval;
+ u32 start_time;
+};
+
+struct channel_param {
+ u8 chan_id;
+ u8 pwr;
+ u32 mhz;
+ u32 half_rate:1,
+ quarter_rate:1,
+ dfs_set:1,
+ dfs_set_cfreq2:1,
+ is_chan_passive:1,
+ allow_ht:1,
+ allow_vht:1,
+ allow_he:1,
+ set_agile:1,
+ psc_channel:1;
+ u32 phy_mode;
+ u32 cfreq1;
+ u32 cfreq2;
+ char maxpower;
+ char minpower;
+ char maxregpower;
+ u8 antennamax;
+ u8 reg_class_id;
+} __packed;
+
+enum wmi_phy_mode {
+ MODE_11A = 0,
+ MODE_11G = 1, /* 11b/g Mode */
+ MODE_11B = 2, /* 11b Mode */
+ MODE_11GONLY = 3, /* 11g only Mode */
+ MODE_11NA_HT20 = 4,
+ MODE_11NG_HT20 = 5,
+ MODE_11NA_HT40 = 6,
+ MODE_11NG_HT40 = 7,
+ MODE_11AC_VHT20 = 8,
+ MODE_11AC_VHT40 = 9,
+ MODE_11AC_VHT80 = 10,
+ MODE_11AC_VHT20_2G = 11,
+ MODE_11AC_VHT40_2G = 12,
+ MODE_11AC_VHT80_2G = 13,
+ MODE_11AC_VHT80_80 = 14,
+ MODE_11AC_VHT160 = 15,
+ MODE_11AX_HE20 = 16,
+ MODE_11AX_HE40 = 17,
+ MODE_11AX_HE80 = 18,
+ MODE_11AX_HE80_80 = 19,
+ MODE_11AX_HE160 = 20,
+ MODE_11AX_HE20_2G = 21,
+ MODE_11AX_HE40_2G = 22,
+ MODE_11AX_HE80_2G = 23,
+ MODE_UNKNOWN = 24,
+ MODE_MAX = 24
+};
+
+static inline const char *ath11k_wmi_phymode_str(enum wmi_phy_mode mode)
+{
+ switch (mode) {
+ case MODE_11A:
+ return "11a";
+ case MODE_11G:
+ return "11g";
+ case MODE_11B:
+ return "11b";
+ case MODE_11GONLY:
+ return "11gonly";
+ case MODE_11NA_HT20:
+ return "11na-ht20";
+ case MODE_11NG_HT20:
+ return "11ng-ht20";
+ case MODE_11NA_HT40:
+ return "11na-ht40";
+ case MODE_11NG_HT40:
+ return "11ng-ht40";
+ case MODE_11AC_VHT20:
+ return "11ac-vht20";
+ case MODE_11AC_VHT40:
+ return "11ac-vht40";
+ case MODE_11AC_VHT80:
+ return "11ac-vht80";
+ case MODE_11AC_VHT160:
+ return "11ac-vht160";
+ case MODE_11AC_VHT80_80:
+ return "11ac-vht80+80";
+ case MODE_11AC_VHT20_2G:
+ return "11ac-vht20-2g";
+ case MODE_11AC_VHT40_2G:
+ return "11ac-vht40-2g";
+ case MODE_11AC_VHT80_2G:
+ return "11ac-vht80-2g";
+ case MODE_11AX_HE20:
+ return "11ax-he20";
+ case MODE_11AX_HE40:
+ return "11ax-he40";
+ case MODE_11AX_HE80:
+ return "11ax-he80";
+ case MODE_11AX_HE80_80:
+ return "11ax-he80+80";
+ case MODE_11AX_HE160:
+ return "11ax-he160";
+ case MODE_11AX_HE20_2G:
+ return "11ax-he20-2g";
+ case MODE_11AX_HE40_2G:
+ return "11ax-he40-2g";
+ case MODE_11AX_HE80_2G:
+ return "11ax-he80-2g";
+ case MODE_UNKNOWN:
+ /* skip */
+ break;
+
+ /* no default handler to allow compiler to check that the
+ * enum is fully handled
+ */
+ }
+
+ return "<unknown>";
+}
+
+struct wmi_channel_arg {
+ u32 freq;
+ u32 band_center_freq1;
+ u32 band_center_freq2;
+ bool passive;
+ bool allow_ibss;
+ bool allow_ht;
+ bool allow_vht;
+ bool ht40plus;
+ bool chan_radar;
+ bool freq2_radar;
+ bool allow_he;
+ u32 min_power;
+ u32 max_power;
+ u32 max_reg_power;
+ u32 max_antenna_gain;
+ enum wmi_phy_mode mode;
+};
+
+struct wmi_vdev_start_req_arg {
+ u32 vdev_id;
+ struct wmi_channel_arg channel;
+ u32 bcn_intval;
+ u32 dtim_period;
+ u8 *ssid;
+ u32 ssid_len;
+ u32 bcn_tx_rate;
+ u32 bcn_tx_power;
+ bool disable_hw_ack;
+ bool hidden_ssid;
+ bool pmf_enabled;
+ u32 he_ops;
+ u32 cac_duration_ms;
+ u32 regdomain;
+ u32 pref_rx_streams;
+ u32 pref_tx_streams;
+ u32 num_noa_descriptors;
+ u32 min_data_rate;
+ u32 mbssid_flags;
+ u32 mbssid_tx_vdev_id;
+};
+
+struct peer_create_params {
+ const u8 *peer_addr;
+ u32 peer_type;
+ u32 vdev_id;
+};
+
+struct peer_delete_params {
+ u8 vdev_id;
+};
+
+struct peer_flush_params {
+ u32 peer_tid_bitmap;
+ u8 vdev_id;
+};
+
+struct pdev_set_regdomain_params {
+ u16 current_rd_in_use;
+ u16 current_rd_2g;
+ u16 current_rd_5g;
+ u32 ctl_2g;
+ u32 ctl_5g;
+ u8 dfs_domain;
+ u32 pdev_id;
+};
+
+struct rx_reorder_queue_remove_params {
+ u8 *peer_macaddr;
+ u16 vdev_id;
+ u32 peer_tid_bitmap;
+};
+
+#define WMI_HOST_PDEV_ID_SOC 0xFF
+#define WMI_HOST_PDEV_ID_0 0
+#define WMI_HOST_PDEV_ID_1 1
+#define WMI_HOST_PDEV_ID_2 2
+
+#define WMI_PDEV_ID_SOC 0
+#define WMI_PDEV_ID_1ST 1
+#define WMI_PDEV_ID_2ND 2
+#define WMI_PDEV_ID_3RD 3
+
+/* Freq units in MHz */
+#define REG_RULE_START_FREQ 0x0000ffff
+#define REG_RULE_END_FREQ 0xffff0000
+#define REG_RULE_FLAGS 0x0000ffff
+#define REG_RULE_MAX_BW 0x0000ffff
+#define REG_RULE_REG_PWR 0x00ff0000
+#define REG_RULE_ANT_GAIN 0xff000000
+#define REG_RULE_PSD_INFO BIT(0)
+#define REG_RULE_PSD_EIRP 0xff0000
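+
+/* Unpacking sketch (hypothetical helper, assuming a reg rule carries its
+ * frequency fields in one u32 and its bandwidth/power fields in another;
+ * FIELD_GET() comes from <linux/bitfield.h>):
+ */
+static inline void wmi_reg_rule_unpack(u32 freq_info, u32 bw_pwr_info,
+				       u16 *start_freq, u16 *end_freq,
+				       u16 *max_bw, u8 *reg_pwr, u8 *ant_gain)
+{
+	*start_freq = FIELD_GET(REG_RULE_START_FREQ, freq_info);
+	*end_freq = FIELD_GET(REG_RULE_END_FREQ, freq_info);
+	*max_bw = FIELD_GET(REG_RULE_MAX_BW, bw_pwr_info);
+	*reg_pwr = FIELD_GET(REG_RULE_REG_PWR, bw_pwr_info);
+	*ant_gain = FIELD_GET(REG_RULE_ANT_GAIN, bw_pwr_info);
+}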
+
+#define WMI_VDEV_PARAM_TXBF_SU_TX_BFEE BIT(0)
+#define WMI_VDEV_PARAM_TXBF_MU_TX_BFEE BIT(1)
+#define WMI_VDEV_PARAM_TXBF_SU_TX_BFER BIT(2)
+#define WMI_VDEV_PARAM_TXBF_MU_TX_BFER BIT(3)
+
+#define HE_PHYCAP_BYTE_0 0
+#define HE_PHYCAP_BYTE_1 1
+#define HE_PHYCAP_BYTE_2 2
+#define HE_PHYCAP_BYTE_3 3
+#define HE_PHYCAP_BYTE_4 4
+
+#define HECAP_PHY_SU_BFER BIT(7)
+#define HECAP_PHY_SU_BFEE BIT(0)
+#define HECAP_PHY_MU_BFER BIT(1)
+#define HECAP_PHY_UL_MUMIMO BIT(6)
+#define HECAP_PHY_UL_MUOFDMA BIT(7)
+
+#define HECAP_PHY_SUBFMR_GET(hecap_phy) \
+ FIELD_GET(HECAP_PHY_SU_BFER, hecap_phy[HE_PHYCAP_BYTE_3])
+
+#define HECAP_PHY_SUBFME_GET(hecap_phy) \
+ FIELD_GET(HECAP_PHY_SU_BFEE, hecap_phy[HE_PHYCAP_BYTE_4])
+
+#define HECAP_PHY_MUBFMR_GET(hecap_phy) \
+ FIELD_GET(HECAP_PHY_MU_BFER, hecap_phy[HE_PHYCAP_BYTE_4])
+
+#define HECAP_PHY_ULMUMIMO_GET(hecap_phy) \
+ FIELD_GET(HECAP_PHY_UL_MUMIMO, hecap_phy[HE_PHYCAP_BYTE_2])
+
+#define HECAP_PHY_ULOFDMA_GET(hecap_phy) \
+ FIELD_GET(HECAP_PHY_UL_MUOFDMA, hecap_phy[HE_PHYCAP_BYTE_2])
+
+#define HE_MODE_SU_TX_BFEE BIT(0)
+#define HE_MODE_SU_TX_BFER BIT(1)
+#define HE_MODE_MU_TX_BFEE BIT(2)
+#define HE_MODE_MU_TX_BFER BIT(3)
+#define HE_MODE_DL_OFDMA BIT(4)
+#define HE_MODE_UL_OFDMA BIT(5)
+#define HE_MODE_UL_MUMIMO BIT(6)
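+
+/* Combination sketch (hypothetical helper; the driver builds its own mode
+ * bitmap): probe the per-band HE PHY capability bytes with the
+ * HECAP_PHY_*_GET() accessors above and fold the results into an
+ * HE_MODE_* bitmap. hecap_phy must point at least at bytes 0..4 of the
+ * PHY capability info.
+ */
+static inline u32 wmi_he_mode_from_phycap(const u8 *hecap_phy)
+{
+	u32 he_mode = 0;
+
+	if (HECAP_PHY_SUBFMR_GET(hecap_phy))
+		he_mode |= HE_MODE_SU_TX_BFER;
+	if (HECAP_PHY_SUBFME_GET(hecap_phy))
+		he_mode |= HE_MODE_SU_TX_BFEE;
+	if (HECAP_PHY_MUBFMR_GET(hecap_phy))
+		he_mode |= HE_MODE_MU_TX_BFER;
+	if (HECAP_PHY_ULMUMIMO_GET(hecap_phy))
+		he_mode |= HE_MODE_UL_MUMIMO;
+	if (HECAP_PHY_ULOFDMA_GET(hecap_phy))
+		he_mode |= HE_MODE_UL_OFDMA;
+
+	return he_mode;
+}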
+
+#define HE_DL_MUOFDMA_ENABLE 1
+#define HE_UL_MUOFDMA_ENABLE 1
+#define HE_DL_MUMIMO_ENABLE 1
+#define HE_UL_MUMIMO_ENABLE 1
+#define HE_MU_BFEE_ENABLE 1
+#define HE_SU_BFEE_ENABLE 1
+#define HE_MU_BFER_ENABLE 1
+#define HE_SU_BFER_ENABLE 1
+
+#define HE_VHT_SOUNDING_MODE_ENABLE 1
+#define HE_SU_MU_SOUNDING_MODE_ENABLE 1
+#define HE_TRIG_NONTRIG_SOUNDING_MODE_ENABLE 1
+
+/* HE or VHT Sounding */
+#define HE_VHT_SOUNDING_MODE BIT(0)
+/* SU or MU Sounding */
+#define HE_SU_MU_SOUNDING_MODE BIT(2)
+/* Trig or Non-Trig Sounding */
+#define HE_TRIG_NONTRIG_SOUNDING_MODE BIT(3)
+
+#define WMI_TXBF_STS_CAP_OFFSET_LSB 4
+#define WMI_TXBF_STS_CAP_OFFSET_MASK 0x70
+#define WMI_BF_SOUND_DIM_OFFSET_LSB 8
+#define WMI_BF_SOUND_DIM_OFFSET_MASK 0x700
+
+struct pdev_params {
+ u32 param_id;
+ u32 param_value;
+};
+
+enum wmi_peer_type {
+ WMI_PEER_TYPE_DEFAULT = 0,
+ WMI_PEER_TYPE_BSS = 1,
+ WMI_PEER_TYPE_TDLS = 2,
+};
+
+struct wmi_peer_create_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ u32 peer_type;
+} __packed;
+
+struct wmi_peer_delete_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+} __packed;
+
+struct wmi_peer_reorder_queue_setup_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ u32 tid;
+ u32 queue_ptr_lo;
+ u32 queue_ptr_hi;
+ u32 queue_no;
+ u32 ba_window_size_valid;
+ u32 ba_window_size;
+} __packed;
+
+struct wmi_peer_reorder_queue_remove_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ u32 tid_mask;
+} __packed;
+
+struct gpio_config_params {
+ u32 gpio_num;
+ u32 input;
+ u32 pull_type;
+ u32 intr_mode;
+};
+
+enum wmi_gpio_type {
+ WMI_GPIO_PULL_NONE,
+ WMI_GPIO_PULL_UP,
+ WMI_GPIO_PULL_DOWN
+};
+
+enum wmi_gpio_intr_type {
+ WMI_GPIO_INTTYPE_DISABLE,
+ WMI_GPIO_INTTYPE_RISING_EDGE,
+ WMI_GPIO_INTTYPE_FALLING_EDGE,
+ WMI_GPIO_INTTYPE_BOTH_EDGE,
+ WMI_GPIO_INTTYPE_LEVEL_LOW,
+ WMI_GPIO_INTTYPE_LEVEL_HIGH
+};
+
+enum wmi_bss_chan_info_req_type {
+ WMI_BSS_SURVEY_REQ_TYPE_READ = 1,
+ WMI_BSS_SURVEY_REQ_TYPE_READ_CLEAR,
+};
+
+struct wmi_gpio_config_cmd_param {
+ u32 tlv_header;
+ u32 gpio_num;
+ u32 input;
+ u32 pull_type;
+ u32 intr_mode;
+};
+
+struct gpio_output_params {
+ u32 gpio_num;
+ u32 set;
+};
+
+struct wmi_gpio_output_cmd_param {
+ u32 tlv_header;
+ u32 gpio_num;
+ u32 set;
+};
+
+struct set_fwtest_params {
+ u32 arg;
+ u32 value;
+};
+
+struct wmi_fwtest_set_param_cmd_param {
+ u32 tlv_header;
+ u32 param_id;
+ u32 param_value;
+};
+
+struct wmi_pdev_set_param_cmd {
+ u32 tlv_header;
+ u32 pdev_id;
+ u32 param_id;
+ u32 param_value;
+} __packed;
+
+struct wmi_pdev_set_ps_mode_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 sta_ps_mode;
+} __packed;
+
+struct wmi_pdev_suspend_cmd {
+ u32 tlv_header;
+ u32 pdev_id;
+ u32 suspend_opt;
+} __packed;
+
+struct wmi_pdev_resume_cmd {
+ u32 tlv_header;
+ u32 pdev_id;
+} __packed;
+
+struct wmi_pdev_bss_chan_info_req_cmd {
+ u32 tlv_header;
+ /* ref wmi_bss_chan_info_req_type */
+ u32 req_type;
+ u32 pdev_id;
+} __packed;
+
+struct wmi_ap_ps_peer_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ u32 param;
+ u32 value;
+} __packed;
+
+struct wmi_sta_powersave_param_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 param;
+ u32 value;
+} __packed;
+
+struct wmi_pdev_set_regdomain_cmd {
+ u32 tlv_header;
+ u32 pdev_id;
+ u32 reg_domain;
+ u32 reg_domain_2g;
+ u32 reg_domain_5g;
+ u32 conformance_test_limit_2g;
+ u32 conformance_test_limit_5g;
+ u32 dfs_domain;
+} __packed;
+
+struct wmi_peer_set_param_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ u32 param_id;
+ u32 param_value;
+} __packed;
+
+struct wmi_peer_flush_tids_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ u32 peer_tid_bitmap;
+} __packed;
+
+struct wmi_dfs_phyerr_offload_cmd {
+ u32 tlv_header;
+ u32 pdev_id;
+} __packed;
+
+struct wmi_bcn_offload_ctrl_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 bcn_ctrl_op;
+} __packed;
+
+enum scan_dwelltime_adaptive_mode {
+ SCAN_DWELL_MODE_DEFAULT = 0,
+ SCAN_DWELL_MODE_CONSERVATIVE = 1,
+ SCAN_DWELL_MODE_MODERATE = 2,
+ SCAN_DWELL_MODE_AGGRESSIVE = 3,
+ SCAN_DWELL_MODE_STATIC = 4
+};
+
+#define WLAN_SSID_MAX_LEN 32
+
+struct element_info {
+ u32 len;
+ u8 *ptr;
+};
+
+struct wlan_ssid {
+ u8 length;
+ u8 ssid[WLAN_SSID_MAX_LEN];
+};
+
+struct wmi_vdev_ch_power_info {
+ u32 tlv_header;
+
+ /* Channel center frequency (MHz) */
+ u32 chan_cfreq;
+
+ /* Unit: dBm, either PSD/EIRP power for this frequency or
+ * incremental for non-PSD BW
+ */
+ u32 tx_power;
+} __packed;
+
+struct wmi_vdev_set_tpc_power_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+
+ /* Value: 0 or 1, whether this power is PSD */
+ u32 psd_power;
+
+ /* Maximum EIRP power (dBm units), valid only if power is PSD */
+ u32 eirp_power;
+
+ /* Type: WMI_6GHZ_REG_TYPE, used for halphy CTL lookup */
+ u32 power_type_6ghz;
+
+ /* This fixed_param TLV is followed by the below TLVs:
+ * num_pwr_levels of wmi_vdev_ch_power_info
+ * For PSD power, it is the PSD/EIRP power of the frequency (20 MHz chunks).
+ * For non-PSD power, the power values are given for 20 MHz, 40 MHz, and
+ * so on up to the BSS BW power level.
+ * Software uses num_pwr_levels to determine how many elements are
+ * present in the variable-length array.
+ */
+} __packed;
+
+#define WMI_IE_BITMAP_SIZE 8
+
+/* prefix used by scan requestor ids on the host */
+#define WMI_HOST_SCAN_REQUESTOR_ID_PREFIX 0xA000
+
+/* prefix used by scan request ids generated on the host */
+/* host cycles through the lower 12 bits to generate ids */
+#define WMI_HOST_SCAN_REQ_ID_PREFIX 0xA000
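+
+/* Illustrative sketch (not part of this patch): a host-side scan request
+ * id could be formed by combining the prefix with a wrapping 12-bit
+ * counter; scan_seq here is a hypothetical per-device counter:
+ *
+ *     scan_id = WMI_HOST_SCAN_REQ_ID_PREFIX | (++scan_seq & 0xFFF);
+ */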
+
+/* Values lower than this may be refused by some firmware revisions,
+ * which then report a scan completion with a timed-out reason.
+ */
+#define WMI_SCAN_CHAN_MIN_TIME_MSEC 40
+
+/* Scan priority numbers must be sequential, starting with 0 */
+enum wmi_scan_priority {
+ WMI_SCAN_PRIORITY_VERY_LOW = 0,
+ WMI_SCAN_PRIORITY_LOW,
+ WMI_SCAN_PRIORITY_MEDIUM,
+ WMI_SCAN_PRIORITY_HIGH,
+ WMI_SCAN_PRIORITY_VERY_HIGH,
+ WMI_SCAN_PRIORITY_COUNT /* number of priorities supported */
+};
+
+enum wmi_scan_event_type {
+ WMI_SCAN_EVENT_STARTED = BIT(0),
+ WMI_SCAN_EVENT_COMPLETED = BIT(1),
+ WMI_SCAN_EVENT_BSS_CHANNEL = BIT(2),
+ WMI_SCAN_EVENT_FOREIGN_CHAN = BIT(3),
+ WMI_SCAN_EVENT_DEQUEUED = BIT(4),
+ /* possibly by high-prio scan */
+ WMI_SCAN_EVENT_PREEMPTED = BIT(5),
+ WMI_SCAN_EVENT_START_FAILED = BIT(6),
+ WMI_SCAN_EVENT_RESTARTED = BIT(7),
+ WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT = BIT(8),
+ WMI_SCAN_EVENT_SUSPENDED = BIT(9),
+ WMI_SCAN_EVENT_RESUMED = BIT(10),
+ WMI_SCAN_EVENT_MAX = BIT(15),
+};
+
+enum wmi_scan_completion_reason {
+ WMI_SCAN_REASON_COMPLETED,
+ WMI_SCAN_REASON_CANCELLED,
+ WMI_SCAN_REASON_PREEMPTED,
+ WMI_SCAN_REASON_TIMEDOUT,
+ WMI_SCAN_REASON_INTERNAL_FAILURE,
+ WMI_SCAN_REASON_MAX,
+};
+
+struct wmi_start_scan_cmd {
+ u32 tlv_header;
+ u32 scan_id;
+ u32 scan_req_id;
+ u32 vdev_id;
+ u32 scan_priority;
+ u32 notify_scan_events;
+ u32 dwell_time_active;
+ u32 dwell_time_passive;
+ u32 min_rest_time;
+ u32 max_rest_time;
+ u32 repeat_probe_time;
+ u32 probe_spacing_time;
+ u32 idle_time;
+ u32 max_scan_time;
+ u32 probe_delay;
+ u32 scan_ctrl_flags;
+ u32 burst_duration;
+ u32 num_chan;
+ u32 num_bssid;
+ u32 num_ssids;
+ u32 ie_len;
+ u32 n_probes;
+ struct wmi_mac_addr mac_addr;
+ struct wmi_mac_addr mac_mask;
+ u32 ie_bitmap[WMI_IE_BITMAP_SIZE];
+ u32 num_vendor_oui;
+ u32 scan_ctrl_flags_ext;
+ u32 dwell_time_active_2g;
+ u32 dwell_time_active_6g;
+ u32 dwell_time_passive_6g;
+ u32 scan_start_offset;
+} __packed;
+
+#define WMI_SCAN_FLAG_PASSIVE 0x1
+#define WMI_SCAN_ADD_BCAST_PROBE_REQ 0x2
+#define WMI_SCAN_ADD_CCK_RATES 0x4
+#define WMI_SCAN_ADD_OFDM_RATES 0x8
+#define WMI_SCAN_CHAN_STAT_EVENT 0x10
+#define WMI_SCAN_FILTER_PROBE_REQ 0x20
+#define WMI_SCAN_BYPASS_DFS_CHN 0x40
+#define WMI_SCAN_CONTINUE_ON_ERROR 0x80
+#define WMI_SCAN_FILTER_PROMISCUOS 0x100
+#define WMI_SCAN_FLAG_FORCE_ACTIVE_ON_DFS 0x200
+#define WMI_SCAN_ADD_TPC_IE_IN_PROBE_REQ 0x400
+#define WMI_SCAN_ADD_DS_IE_IN_PROBE_REQ 0x800
+#define WMI_SCAN_ADD_SPOOF_MAC_IN_PROBE_REQ 0x1000
+#define WMI_SCAN_OFFCHAN_MGMT_TX 0x2000
+#define WMI_SCAN_OFFCHAN_DATA_TX 0x4000
+#define WMI_SCAN_CAPTURE_PHY_ERROR 0x8000
+#define WMI_SCAN_FLAG_STRICT_PASSIVE_ON_PCHN 0x10000
+#define WMI_SCAN_FLAG_HALF_RATE_SUPPORT 0x20000
+#define WMI_SCAN_FLAG_QUARTER_RATE_SUPPORT 0x40000
+#define WMI_SCAN_RANDOM_SEQ_NO_IN_PROBE_REQ 0x80000
+#define WMI_SCAN_ENABLE_IE_WHTELIST_IN_PROBE_REQ 0x100000
+
+#define WMI_SCAN_DWELL_MODE_MASK 0x00E00000
+#define WMI_SCAN_DWELL_MODE_SHIFT 21
+#define WMI_SCAN_FLAG_EXT_PASSIVE_SCAN_START_TIME_ENHANCE 0x00000800
+
+#define WMI_SCAN_CONFIG_PER_CHANNEL_MASK GENMASK(19, 0)
+#define WMI_SCAN_CH_FLAG_SCAN_ONLY_IF_RNR_FOUND BIT(20)
+
+enum {
+ WMI_SCAN_DWELL_MODE_DEFAULT = 0,
+ WMI_SCAN_DWELL_MODE_CONSERVATIVE = 1,
+ WMI_SCAN_DWELL_MODE_MODERATE = 2,
+ WMI_SCAN_DWELL_MODE_AGGRESSIVE = 3,
+ WMI_SCAN_DWELL_MODE_STATIC = 4,
+};
+
+#define WMI_SCAN_SET_DWELL_MODE(flag, mode) \
+ ((flag) |= (((mode) << WMI_SCAN_DWELL_MODE_SHIFT) & \
+ WMI_SCAN_DWELL_MODE_MASK))
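+
+/* Usage sketch (illustrative only): selecting an adaptive dwell mode in
+ * the scan control flags before building the start-scan command:
+ *
+ *     u32 flags = WMI_SCAN_ADD_CCK_RATES | WMI_SCAN_ADD_OFDM_RATES;
+ *
+ *     WMI_SCAN_SET_DWELL_MODE(flags, WMI_SCAN_DWELL_MODE_AGGRESSIVE);
+ */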
+
+struct hint_short_ssid {
+ u32 freq_flags;
+ u32 short_ssid;
+};
+
+struct hint_bssid {
+ u32 freq_flags;
+ struct wmi_mac_addr bssid;
+};
+
+struct scan_req_params {
+ u32 scan_id;
+ u32 scan_req_id;
+ u32 vdev_id;
+ u32 pdev_id;
+ enum wmi_scan_priority scan_priority;
+ u32 scan_ev_started:1,
+ scan_ev_completed:1,
+ scan_ev_bss_chan:1,
+ scan_ev_foreign_chan:1,
+ scan_ev_dequeued:1,
+ scan_ev_preempted:1,
+ scan_ev_start_failed:1,
+ scan_ev_restarted:1,
+ scan_ev_foreign_chn_exit:1,
+ scan_ev_invalid:1,
+ scan_ev_gpio_timeout:1,
+ scan_ev_suspended:1,
+ scan_ev_resumed:1;
+ u32 scan_ctrl_flags_ext;
+ u32 dwell_time_active;
+ u32 dwell_time_active_2g;
+ u32 dwell_time_passive;
+ u32 dwell_time_active_6g;
+ u32 dwell_time_passive_6g;
+ u32 min_rest_time;
+ u32 max_rest_time;
+ u32 repeat_probe_time;
+ u32 probe_spacing_time;
+ u32 idle_time;
+ u32 max_scan_time;
+ u32 probe_delay;
+ u32 scan_f_passive:1,
+ scan_f_bcast_probe:1,
+ scan_f_cck_rates:1,
+ scan_f_ofdm_rates:1,
+ scan_f_chan_stat_evnt:1,
+ scan_f_filter_prb_req:1,
+ scan_f_bypass_dfs_chn:1,
+ scan_f_continue_on_err:1,
+ scan_f_offchan_mgmt_tx:1,
+ scan_f_offchan_data_tx:1,
+ scan_f_promisc_mode:1,
+ scan_f_capture_phy_err:1,
+ scan_f_strict_passive_pch:1,
+ scan_f_half_rate:1,
+ scan_f_quarter_rate:1,
+ scan_f_force_active_dfs_chn:1,
+ scan_f_add_tpc_ie_in_probe:1,
+ scan_f_add_ds_ie_in_probe:1,
+ scan_f_add_spoofed_mac_in_probe:1,
+ scan_f_add_rand_seq_in_probe:1,
+ scan_f_en_ie_whitelist_in_probe:1,
+ scan_f_forced:1,
+ scan_f_2ghz:1,
+ scan_f_5ghz:1,
+ scan_f_80mhz:1;
+ enum scan_dwelltime_adaptive_mode adaptive_dwell_time_mode;
+ u32 burst_duration;
+ u32 num_chan;
+ u32 num_bssid;
+ u32 num_ssids;
+ u32 n_probes;
+ u32 *chan_list;
+ u32 notify_scan_events;
+ struct wlan_ssid ssid[WLAN_SCAN_PARAMS_MAX_SSID];
+ struct wmi_mac_addr bssid_list[WLAN_SCAN_PARAMS_MAX_BSSID];
+ struct element_info extraie;
+ struct element_info htcap;
+ struct element_info vhtcap;
+ u32 num_hint_s_ssid;
+ u32 num_hint_bssid;
+ struct hint_short_ssid hint_s_ssid[WLAN_SCAN_MAX_HINT_S_SSID];
+ struct hint_bssid hint_bssid[WLAN_SCAN_MAX_HINT_BSSID];
+ struct wmi_mac_addr mac_addr;
+ struct wmi_mac_addr mac_mask;
+};
+
+struct wmi_ssid_arg {
+ int len;
+ const u8 *ssid;
+};
+
+struct wmi_bssid_arg {
+ const u8 *bssid;
+};
+
+#define WMI_SCAN_STOP_ONE 0x00000000
+#define WMI_SCN_STOP_VAP_ALL 0x01000000
+#define WMI_SCAN_STOP_ALL 0x04000000
+
+/* Prefix 0xA000 indicates that the scan request
+ * is triggered by the host
+ */
+#define ATH11K_SCAN_ID 0xA000
+
+enum scan_cancel_req_type {
+ WLAN_SCAN_CANCEL_SINGLE = 1,
+ WLAN_SCAN_CANCEL_VDEV_ALL,
+ WLAN_SCAN_CANCEL_PDEV_ALL,
+};
+
+struct scan_cancel_param {
+ u32 requester;
+ u32 scan_id;
+ enum scan_cancel_req_type req_type;
+ u32 vdev_id;
+ u32 pdev_id;
+};
+
+#define WMI_CHAN_INFO_MODE GENMASK(5, 0)
+#define WMI_CHAN_INFO_HT40_PLUS BIT(6)
+#define WMI_CHAN_INFO_PASSIVE BIT(7)
+#define WMI_CHAN_INFO_ADHOC_ALLOWED BIT(8)
+#define WMI_CHAN_INFO_AP_DISABLED BIT(9)
+#define WMI_CHAN_INFO_DFS BIT(10)
+#define WMI_CHAN_INFO_ALLOW_HT BIT(11)
+#define WMI_CHAN_INFO_ALLOW_VHT BIT(12)
+#define WMI_CHAN_INFO_CHAN_CHANGE_CAUSE_CSA BIT(13)
+#define WMI_CHAN_INFO_HALF_RATE BIT(14)
+#define WMI_CHAN_INFO_QUARTER_RATE BIT(15)
+#define WMI_CHAN_INFO_DFS_FREQ2 BIT(16)
+#define WMI_CHAN_INFO_ALLOW_HE BIT(17)
+#define WMI_CHAN_INFO_PSC BIT(18)
+
+#define WMI_CHAN_REG_INFO1_MIN_PWR GENMASK(7, 0)
+#define WMI_CHAN_REG_INFO1_MAX_PWR GENMASK(15, 8)
+#define WMI_CHAN_REG_INFO1_MAX_REG_PWR GENMASK(23, 16)
+#define WMI_CHAN_REG_INFO1_REG_CLS GENMASK(31, 24)
+
+#define WMI_CHAN_REG_INFO2_ANT_MAX GENMASK(7, 0)
+#define WMI_CHAN_REG_INFO2_MAX_TX_PWR GENMASK(15, 8)
+
+struct wmi_channel {
+ u32 tlv_header;
+ u32 mhz;
+ u32 band_center_freq1;
+ u32 band_center_freq2;
+ u32 info;
+ u32 reg_info_1;
+ u32 reg_info_2;
+} __packed;
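+
+/* Packing sketch (illustrative only, assuming FIELD_PREP from
+ * <linux/bitfield.h>; min_pwr, max_pwr, max_reg_pwr and reg_class are
+ * hypothetical host-side values): composing reg_info_1 of a
+ * struct wmi_channel:
+ *
+ *     chan->reg_info_1 = FIELD_PREP(WMI_CHAN_REG_INFO1_MIN_PWR, min_pwr) |
+ *                        FIELD_PREP(WMI_CHAN_REG_INFO1_MAX_PWR, max_pwr) |
+ *                        FIELD_PREP(WMI_CHAN_REG_INFO1_MAX_REG_PWR, max_reg_pwr) |
+ *                        FIELD_PREP(WMI_CHAN_REG_INFO1_REG_CLS, reg_class);
+ */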
+
+struct wmi_mgmt_params {
+ void *tx_frame;
+ u16 frm_len;
+ u8 vdev_id;
+ u16 chanfreq;
+ void *pdata;
+ u16 desc_id;
+ u8 *macaddr;
+};
+
+enum wmi_sta_ps_mode {
+ WMI_STA_PS_MODE_DISABLED = 0,
+ WMI_STA_PS_MODE_ENABLED = 1,
+};
+
+#define WMI_SMPS_MASK_LOWER_16BITS 0xFF
+#define WMI_SMPS_MASK_UPPER_3BITS 0x7
+#define WMI_SMPS_PARAM_VALUE_SHIFT 29
+
+#define ATH11K_WMI_FW_HANG_ASSERT_TYPE 1
+#define ATH11K_WMI_FW_HANG_DELAY 0
+
+/* type: 0 = unused, 1 = assert, 2 = do not respond to the detect command
+ * delay_time_ms: delay applied by the simulated hang, in milliseconds
+ */
+
+struct wmi_force_fw_hang_cmd {
+ u32 tlv_header;
+ u32 type;
+ u32 delay_time_ms;
+};
+
+struct wmi_vdev_set_param_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 param_id;
+ u32 param_value;
+} __packed;
+
+enum wmi_stats_id {
+ WMI_REQUEST_PEER_STAT = BIT(0),
+ WMI_REQUEST_AP_STAT = BIT(1),
+ WMI_REQUEST_PDEV_STAT = BIT(2),
+ WMI_REQUEST_VDEV_STAT = BIT(3),
+ WMI_REQUEST_BCNFLT_STAT = BIT(4),
+ WMI_REQUEST_VDEV_RATE_STAT = BIT(5),
+ WMI_REQUEST_INST_STAT = BIT(6),
+ WMI_REQUEST_MIB_STAT = BIT(7),
+ WMI_REQUEST_RSSI_PER_CHAIN_STAT = BIT(8),
+ WMI_REQUEST_CONGESTION_STAT = BIT(9),
+ WMI_REQUEST_PEER_EXTD_STAT = BIT(10),
+ WMI_REQUEST_BCN_STAT = BIT(11),
+ WMI_REQUEST_BCN_STAT_RESET = BIT(12),
+ WMI_REQUEST_PEER_EXTD2_STAT = BIT(13),
+};
+
+struct wmi_request_stats_cmd {
+ u32 tlv_header;
+ enum wmi_stats_id stats_id;
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ u32 pdev_id;
+} __packed;
+
+struct wmi_get_pdev_temperature_cmd {
+ u32 tlv_header;
+ u32 param;
+ u32 pdev_id;
+} __packed;
+
+struct wmi_ftm_seg_hdr {
+ u32 len;
+ u32 msgref;
+ u32 segmentinfo;
+ u32 pdev_id;
+} __packed;
+
+struct wmi_ftm_cmd {
+ u32 tlv_header;
+ struct wmi_ftm_seg_hdr seg_hdr;
+ u8 data[];
+} __packed;
+
+struct wmi_ftm_event_msg {
+ struct wmi_ftm_seg_hdr seg_hdr;
+ u8 data[];
+} __packed;
+
+#define WMI_P2P_MAX_NOA_DESCRIPTORS 4
+
+struct wmi_p2p_noa_event {
+ u32 vdev_id;
+} __packed;
+
+struct ath11k_wmi_p2p_noa_descriptor {
+ u32 type_count; /* 255: continuous schedule, 0: reserved */
+ u32 duration; /* Absent period duration in microseconds */
+ u32 interval; /* Absent period interval in microseconds */
+ u32 start_time; /* 32 bit TSF time when it starts */
+} __packed;
+
+#define WMI_P2P_NOA_INFO_CHANGED_FLAG BIT(0)
+#define WMI_P2P_NOA_INFO_INDEX GENMASK(15, 8)
+#define WMI_P2P_NOA_INFO_OPP_PS BIT(16)
+#define WMI_P2P_NOA_INFO_CTWIN_TU GENMASK(23, 17)
+#define WMI_P2P_NOA_INFO_DESC_NUM GENMASK(31, 24)
+
+struct ath11k_wmi_p2p_noa_info {
+ /* Bit 0 - Flag to indicate an update in NOA schedule
+ * Bits 7-1 - Reserved
+ * Bits 15-8 - Index (identifies the instance of NOA sub element)
+ * Bit 16 - Opp PS state of the AP
+ * Bits 23-17 - Ctwindow in TUs
+ * Bits 31-24 - Number of NOA descriptors
+ */
+ u32 noa_attr;
+ struct ath11k_wmi_p2p_noa_descriptor descriptors[WMI_P2P_MAX_NOA_DESCRIPTORS];
+} __packed;
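+
+/* Decoding sketch (illustrative only, assuming FIELD_GET from
+ * <linux/bitfield.h>): extracting fields from a received noa_attr word:
+ *
+ *     if (noa->noa_attr & WMI_P2P_NOA_INFO_CHANGED_FLAG) {
+ *             u8 index = FIELD_GET(WMI_P2P_NOA_INFO_INDEX, noa->noa_attr);
+ *             u8 num_desc = FIELD_GET(WMI_P2P_NOA_INFO_DESC_NUM,
+ *                                     noa->noa_attr);
+ *     }
+ */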
+
+#define WMI_BEACON_TX_BUFFER_SIZE 512
+
+#define WMI_EMA_TMPL_IDX_SHIFT 8
+#define WMI_EMA_FIRST_TMPL_SHIFT 16
+#define WMI_EMA_LAST_TMPL_SHIFT 24
+
+struct wmi_bcn_tmpl_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 tim_ie_offset;
+ u32 buf_len;
+ u32 csa_switch_count_offset;
+ u32 ext_csa_switch_count_offset;
+ u32 csa_event_bitmap;
+ u32 mbssid_ie_offset;
+ u32 esp_ie_offset;
+ u32 csc_switch_count_offset;
+ u32 csc_event_bitmap;
+ u32 mu_edca_ie_offset;
+ u32 feature_enable_bitmap;
+ u32 ema_params;
+} __packed;
+
+struct wmi_p2p_go_set_beacon_ie_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 ie_buf_len;
+ u8 tlv[];
+} __packed;
+
+struct wmi_key_seq_counter {
+ u32 key_seq_counter_l;
+ u32 key_seq_counter_h;
+} __packed;
+
+struct wmi_vdev_install_key_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ u32 key_idx;
+ u32 key_flags;
+ u32 key_cipher;
+ struct wmi_key_seq_counter key_rsc_counter;
+ struct wmi_key_seq_counter key_global_rsc_counter;
+ struct wmi_key_seq_counter key_tsc_counter;
+ u8 wpi_key_rsc_counter[16];
+ u8 wpi_key_tsc_counter[16];
+ u32 key_len;
+ u32 key_txmic_len;
+ u32 key_rxmic_len;
+ u32 is_group_key_id_valid;
+ u32 group_key_id;
+
+ /* Followed by key_data containing key followed by
+ * tx mic and then rx mic
+ */
+} __packed;
+
+struct wmi_vdev_install_key_arg {
+ u32 vdev_id;
+ const u8 *macaddr;
+ u32 key_idx;
+ u32 key_flags;
+ u32 key_cipher;
+ u32 key_len;
+ u32 key_txmic_len;
+ u32 key_rxmic_len;
+ u64 key_rsc_counter;
+ const void *key_data;
+};
+
+#define WMI_MAX_SUPPORTED_RATES 128
+#define WMI_HOST_MAX_HECAP_PHY_SIZE 3
+#define WMI_HOST_MAX_HE_RATE_SET 3
+#define WMI_HECAP_TXRX_MCS_NSS_IDX_80 0
+#define WMI_HECAP_TXRX_MCS_NSS_IDX_160 1
+#define WMI_HECAP_TXRX_MCS_NSS_IDX_80_80 2
+
+struct wmi_rate_set_arg {
+ u32 num_rates;
+ u8 rates[WMI_MAX_SUPPORTED_RATES];
+};
+
+struct peer_assoc_params {
+ struct wmi_mac_addr peer_macaddr;
+ u32 vdev_id;
+ u32 peer_new_assoc;
+ u32 peer_associd;
+ u32 peer_flags;
+ u32 peer_caps;
+ u32 peer_listen_intval;
+ u32 peer_ht_caps;
+ u32 peer_max_mpdu;
+ u32 peer_mpdu_density;
+ u32 peer_rate_caps;
+ u32 peer_nss;
+ u32 peer_vht_caps;
+ u32 peer_phymode;
+ u32 peer_ht_info[2];
+ struct wmi_rate_set_arg peer_legacy_rates;
+ struct wmi_rate_set_arg peer_ht_rates;
+ u32 rx_max_rate;
+ u32 rx_mcs_set;
+ u32 tx_max_rate;
+ u32 tx_mcs_set;
+ u8 vht_capable;
+ u8 min_data_rate;
+ u32 tx_max_mcs_nss;
+ u32 peer_bw_rxnss_override;
+ bool is_pmf_enabled;
+ bool is_wme_set;
+ bool qos_flag;
+ bool apsd_flag;
+ bool ht_flag;
+ bool bw_40;
+ bool bw_80;
+ bool bw_160;
+ bool stbc_flag;
+ bool ldpc_flag;
+ bool static_mimops_flag;
+ bool dynamic_mimops_flag;
+ bool spatial_mux_flag;
+ bool vht_flag;
+ bool vht_ng_flag;
+ bool need_ptk_4_way;
+ bool need_gtk_2_way;
+ bool auth_flag;
+ bool safe_mode_enabled;
+ bool amsdu_disable;
+ /* Use common structure */
+ u8 peer_mac[ETH_ALEN];
+
+ bool he_flag;
+ u32 peer_he_cap_macinfo[2];
+ u32 peer_he_cap_macinfo_internal;
+ u32 peer_he_caps_6ghz;
+ u32 peer_he_ops;
+ u32 peer_he_cap_phyinfo[WMI_HOST_MAX_HECAP_PHY_SIZE];
+ u32 peer_he_mcs_count;
+ u32 peer_he_rx_mcs_set[WMI_HOST_MAX_HE_RATE_SET];
+ u32 peer_he_tx_mcs_set[WMI_HOST_MAX_HE_RATE_SET];
+ bool twt_responder;
+ bool twt_requester;
+ bool is_assoc;
+ struct ath11k_ppe_threshold peer_ppet;
+};
+
+struct wmi_peer_assoc_complete_cmd {
+ u32 tlv_header;
+ struct wmi_mac_addr peer_macaddr;
+ u32 vdev_id;
+ u32 peer_new_assoc;
+ u32 peer_associd;
+ u32 peer_flags;
+ u32 peer_caps;
+ u32 peer_listen_intval;
+ u32 peer_ht_caps;
+ u32 peer_max_mpdu;
+ u32 peer_mpdu_density;
+ u32 peer_rate_caps;
+ u32 peer_nss;
+ u32 peer_vht_caps;
+ u32 peer_phymode;
+ u32 peer_ht_info[2];
+ u32 num_peer_legacy_rates;
+ u32 num_peer_ht_rates;
+ u32 peer_bw_rxnss_override;
+ struct wmi_ppe_threshold peer_ppet;
+ u32 peer_he_cap_info;
+ u32 peer_he_ops;
+ u32 peer_he_cap_phy[WMI_MAX_HECAP_PHY_SIZE];
+ u32 peer_he_mcs;
+ u32 peer_he_cap_info_ext;
+ u32 peer_he_cap_info_internal;
+ u32 min_data_rate;
+ u32 peer_he_caps_6ghz;
+} __packed;
+
+struct wmi_stop_scan_cmd {
+ u32 tlv_header;
+ u32 requestor;
+ u32 scan_id;
+ u32 req_type;
+ u32 vdev_id;
+ u32 pdev_id;
+};
+
+struct scan_chan_list_params {
+ struct list_head list;
+ u32 pdev_id;
+ u16 nallchans;
+ struct channel_param ch_param[];
+};
+
+struct wmi_scan_chan_list_cmd {
+ u32 tlv_header;
+ u32 num_scan_chans;
+ u32 flags;
+ u32 pdev_id;
+} __packed;
+
+struct wmi_scan_prob_req_oui_cmd {
+ u32 tlv_header;
+ u32 prob_req_oui;
+} __packed;
+
+#define WMI_MGMT_SEND_DOWNLD_LEN 64
+
+#define WMI_TX_PARAMS_DWORD0_POWER GENMASK(7, 0)
+#define WMI_TX_PARAMS_DWORD0_MCS_MASK GENMASK(19, 8)
+#define WMI_TX_PARAMS_DWORD0_NSS_MASK GENMASK(27, 20)
+#define WMI_TX_PARAMS_DWORD0_RETRY_LIMIT GENMASK(31, 28)
+
+#define WMI_TX_PARAMS_DWORD1_CHAIN_MASK GENMASK(7, 0)
+#define WMI_TX_PARAMS_DWORD1_BW_MASK GENMASK(14, 8)
+#define WMI_TX_PARAMS_DWORD1_PREAMBLE_TYPE GENMASK(19, 15)
+#define WMI_TX_PARAMS_DWORD1_FRAME_TYPE BIT(20)
+#define WMI_TX_PARAMS_DWORD1_RSVD GENMASK(31, 21)
+
+struct wmi_mgmt_send_params {
+ u32 tlv_header;
+ u32 tx_params_dword0;
+ u32 tx_params_dword1;
+};
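+
+/* Composition sketch (illustrative only, assuming FIELD_PREP from
+ * <linux/bitfield.h>; tx_power, retry_limit and chain_mask are
+ * hypothetical host-side values): filling the TX params words of
+ * struct wmi_mgmt_send_params:
+ *
+ *     params->tx_params_dword0 =
+ *             FIELD_PREP(WMI_TX_PARAMS_DWORD0_POWER, tx_power) |
+ *             FIELD_PREP(WMI_TX_PARAMS_DWORD0_RETRY_LIMIT, retry_limit);
+ *     params->tx_params_dword1 =
+ *             FIELD_PREP(WMI_TX_PARAMS_DWORD1_CHAIN_MASK, chain_mask);
+ */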
+
+struct wmi_mgmt_send_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 desc_id;
+ u32 chanfreq;
+ u32 paddr_lo;
+ u32 paddr_hi;
+ u32 frame_len;
+ u32 buf_len;
+ u32 tx_params_valid;
+
+ /* This TLV is followed by struct wmi_mgmt_frame */
+
+ /* Followed by struct wmi_mgmt_send_params */
+} __packed;
+
+struct wmi_sta_powersave_mode_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 sta_ps_mode;
+};
+
+struct wmi_sta_smps_force_mode_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 forced_mode;
+};
+
+struct wmi_sta_smps_param_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 param;
+ u32 value;
+};
+
+struct wmi_bcn_prb_info {
+ u32 tlv_header;
+ u32 caps;
+ u32 erp;
+} __packed;
+
+enum {
+ WMI_PDEV_SUSPEND,
+ WMI_PDEV_SUSPEND_AND_DISABLE_INTR,
+};
+
+struct green_ap_ps_params {
+ u32 value;
+};
+
+struct wmi_pdev_green_ap_ps_enable_cmd_param {
+ u32 tlv_header;
+ u32 pdev_id;
+ u32 enable;
+};
+
+struct ap_ps_params {
+ u32 vdev_id;
+ u32 param;
+ u32 value;
+};
+
+struct vdev_set_params {
+ u32 if_id;
+ u32 param_id;
+ u32 param_value;
+};
+
+struct stats_request_params {
+ u32 stats_id;
+ u32 vdev_id;
+ u32 pdev_id;
+};
+
+struct wmi_set_current_country_params {
+ u8 alpha2[3];
+};
+
+struct wmi_set_current_country_cmd {
+ u32 tlv_header;
+ u32 pdev_id;
+ u32 new_alpha2;
+} __packed;
+
+enum set_init_cc_type {
+ WMI_COUNTRY_INFO_TYPE_ALPHA,
+ WMI_COUNTRY_INFO_TYPE_COUNTRY_CODE,
+ WMI_COUNTRY_INFO_TYPE_REGDOMAIN,
+};
+
+enum set_init_cc_flags {
+ INVALID_CC,
+ CC_IS_SET,
+ REGDMN_IS_SET,
+ ALPHA_IS_SET,
+};
+
+struct wmi_init_country_params {
+ union {
+ u16 country_code;
+ u16 regdom_id;
+ u8 alpha2[3];
+ } cc_info;
+ enum set_init_cc_flags flags;
+};
+
+struct wmi_init_country_cmd {
+ u32 tlv_header;
+ u32 pdev_id;
+ u32 init_cc_type;
+ union {
+ u32 country_code;
+ u32 regdom_id;
+ u32 alpha2;
+ } cc_info;
+} __packed;
+
+struct wmi_11d_scan_start_params {
+ u32 vdev_id;
+ u32 scan_period_msec;
+ u32 start_interval_msec;
+};
+
+struct wmi_11d_scan_start_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 scan_period_msec;
+ u32 start_interval_msec;
+} __packed;
+
+struct wmi_11d_scan_stop_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+} __packed;
+
+struct wmi_11d_new_cc_ev {
+ u32 new_alpha2;
+} __packed;
+
+#define THERMAL_LEVELS 1
+struct tt_level_config {
+ u32 tmplwm;
+ u32 tmphwm;
+ u32 dcoffpercent;
+ u32 priority;
+};
+
+struct thermal_mitigation_params {
+ u32 pdev_id;
+ u32 enable;
+ u32 dc;
+ u32 dc_per_event;
+ struct tt_level_config levelconf[THERMAL_LEVELS];
+};
+
+struct wmi_therm_throt_config_request_cmd {
+ u32 tlv_header;
+ u32 pdev_id;
+ u32 enable;
+ u32 dc;
+ u32 dc_per_event;
+ u32 therm_throt_levels;
+} __packed;
+
+struct wmi_therm_throt_level_config_info {
+ u32 tlv_header;
+ u32 temp_lwm;
+ u32 temp_hwm;
+ u32 dc_off_percent;
+ u32 prio;
+} __packed;
+
+struct wmi_delba_send_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ u32 tid;
+ u32 initiator;
+ u32 reasoncode;
+} __packed;
+
+struct wmi_addba_setresponse_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ u32 tid;
+ u32 statuscode;
+} __packed;
+
+struct wmi_addba_send_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ u32 tid;
+ u32 buffersize;
+} __packed;
+
+struct wmi_addba_clear_resp_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+} __packed;
+
+struct wmi_pdev_pktlog_filter_info {
+ u32 tlv_header;
+ struct wmi_mac_addr peer_macaddr;
+} __packed;
+
+struct wmi_pdev_pktlog_filter_cmd {
+ u32 tlv_header;
+ u32 pdev_id;
+ u32 enable;
+ u32 filter_type;
+ u32 num_mac;
+} __packed;
+
+enum ath11k_wmi_pktlog_enable {
+ ATH11K_WMI_PKTLOG_ENABLE_AUTO = 0,
+ ATH11K_WMI_PKTLOG_ENABLE_FORCE = 1,
+};
+
+struct wmi_pktlog_enable_cmd {
+ u32 tlv_header;
+ u32 pdev_id;
+ u32 evlist; /* WMI_PKTLOG_EVENT */
+ u32 enable;
+} __packed;
+
+struct wmi_pktlog_disable_cmd {
+ u32 tlv_header;
+ u32 pdev_id;
+} __packed;
+
+#define DFS_PHYERR_UNIT_TEST_CMD 0
+#define DFS_UNIT_TEST_MODULE 0x2b
+#define DFS_UNIT_TEST_TOKEN 0xAA
+
+enum dfs_test_args_idx {
+ DFS_TEST_CMDID = 0,
+ DFS_TEST_PDEV_ID,
+ DFS_TEST_RADAR_PARAM,
+ DFS_MAX_TEST_ARGS,
+};
+
+struct wmi_dfs_unit_test_arg {
+ u32 cmd_id;
+ u32 pdev_id;
+ u32 radar_param;
+};
+
+struct wmi_unit_test_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 module_id;
+ u32 num_args;
+ u32 diag_token;
+ /* Followed by test args */
+} __packed;
+
+#define MAX_SUPPORTED_RATES 128
+
+struct beacon_tmpl_params {
+ u8 vdev_id;
+ u32 tim_ie_offset;
+ u32 tmpl_len;
+ u32 tmpl_len_aligned;
+ u32 csa_switch_count_offset;
+ u32 ext_csa_switch_count_offset;
+ u8 *frm;
+};
+
+struct wmi_rate_set {
+ u32 num_rates;
+ u32 rates[(MAX_SUPPORTED_RATES / 4) + 1];
+};
+
+struct wmi_vht_rate_set {
+ u32 tlv_header;
+ u32 rx_max_rate;
+ /* MCS at which the peer can transmit */
+ u32 rx_mcs_set;
+ u32 tx_max_rate;
+ /* MCS at which the peer can receive */
+ u32 tx_mcs_set;
+ u32 tx_max_mcs_nss;
+} __packed;
+
+struct wmi_he_rate_set {
+ u32 tlv_header;
+
+ /* MCS at which the peer can receive */
+ u32 rx_mcs_set;
+
+ /* MCS at which the peer can transmit */
+ u32 tx_mcs_set;
+} __packed;
+
+#define MAX_REG_RULES 10
+#define REG_ALPHA2_LEN 2
+#define MAX_6GHZ_REG_RULES 5
+
+enum wmi_start_event_param {
+ WMI_VDEV_START_RESP_EVENT = 0,
+ WMI_VDEV_RESTART_RESP_EVENT,
+};
+
+struct wmi_vdev_start_resp_event {
+ u32 vdev_id;
+ u32 requestor_id;
+ enum wmi_start_event_param resp_type;
+ u32 status;
+ u32 chain_mask;
+ u32 smps_mode;
+ union {
+ u32 mac_id;
+ u32 pdev_id;
+ };
+ u32 cfgd_tx_streams;
+ u32 cfgd_rx_streams;
+ s32 max_allowed_tx_power;
+} __packed;
+
+/* VDEV start response status codes */
+enum wmi_vdev_start_resp_status_code {
+ WMI_VDEV_START_RESPONSE_STATUS_SUCCESS = 0,
+ WMI_VDEV_START_RESPONSE_INVALID_VDEVID = 1,
+ WMI_VDEV_START_RESPONSE_NOT_SUPPORTED = 2,
+ WMI_VDEV_START_RESPONSE_DFS_VIOLATION = 3,
+ WMI_VDEV_START_RESPONSE_INVALID_REGDOMAIN = 4,
+};
+
+/* Regulatory rule flags passed by FW */
+#define REGULATORY_CHAN_DISABLED BIT(0)
+#define REGULATORY_CHAN_NO_IR BIT(1)
+#define REGULATORY_CHAN_RADAR BIT(3)
+#define REGULATORY_CHAN_NO_OFDM BIT(6)
+#define REGULATORY_CHAN_INDOOR_ONLY BIT(9)
+
+#define REGULATORY_CHAN_NO_HT40 BIT(4)
+#define REGULATORY_CHAN_NO_80MHZ BIT(7)
+#define REGULATORY_CHAN_NO_160MHZ BIT(8)
+#define REGULATORY_CHAN_NO_20MHZ BIT(11)
+#define REGULATORY_CHAN_NO_10MHZ BIT(12)
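+
+/* Usage sketch (illustrative only): mapping firmware rule flags onto
+ * host channel restrictions while parsing a struct cur_reg_rule:
+ *
+ *     bool dfs_required = reg_rule->flags & REGULATORY_CHAN_RADAR;
+ *     bool no_ht40 = reg_rule->flags & REGULATORY_CHAN_NO_HT40;
+ */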
+
+enum wmi_reg_chan_list_cmd_type {
+ WMI_REG_CHAN_LIST_CC_ID = 0,
+ WMI_REG_CHAN_LIST_CC_EXT_ID = 1,
+};
+
+enum wmi_reg_cc_setting_code {
+ WMI_REG_SET_CC_STATUS_PASS = 0,
+ WMI_REG_CURRENT_ALPHA2_NOT_FOUND = 1,
+ WMI_REG_INIT_ALPHA2_NOT_FOUND = 2,
+ WMI_REG_SET_CC_CHANGE_NOT_ALLOWED = 3,
+ WMI_REG_SET_CC_STATUS_NO_MEMORY = 4,
+ WMI_REG_SET_CC_STATUS_FAIL = 5,
+
+ /* add new setting code above, update in
+ * @enum cc_setting_code as well.
+ * Also handle it in ath11k_wmi_cc_setting_code_to_reg()
+ */
+};
+
+enum cc_setting_code {
+ REG_SET_CC_STATUS_PASS = 0,
+ REG_CURRENT_ALPHA2_NOT_FOUND = 1,
+ REG_INIT_ALPHA2_NOT_FOUND = 2,
+ REG_SET_CC_CHANGE_NOT_ALLOWED = 3,
+ REG_SET_CC_STATUS_NO_MEMORY = 4,
+ REG_SET_CC_STATUS_FAIL = 5,
+
+ /* add new setting code above, update in
+ * @enum wmi_reg_cc_setting_code as well.
+ * Also handle it in ath11k_cc_status_to_str()
+ */
+};
+
+static inline enum cc_setting_code
+ath11k_wmi_cc_setting_code_to_reg(enum wmi_reg_cc_setting_code status_code)
+{
+ switch (status_code) {
+ case WMI_REG_SET_CC_STATUS_PASS:
+ return REG_SET_CC_STATUS_PASS;
+ case WMI_REG_CURRENT_ALPHA2_NOT_FOUND:
+ return REG_CURRENT_ALPHA2_NOT_FOUND;
+ case WMI_REG_INIT_ALPHA2_NOT_FOUND:
+ return REG_INIT_ALPHA2_NOT_FOUND;
+ case WMI_REG_SET_CC_CHANGE_NOT_ALLOWED:
+ return REG_SET_CC_CHANGE_NOT_ALLOWED;
+ case WMI_REG_SET_CC_STATUS_NO_MEMORY:
+ return REG_SET_CC_STATUS_NO_MEMORY;
+ case WMI_REG_SET_CC_STATUS_FAIL:
+ return REG_SET_CC_STATUS_FAIL;
+ }
+
+ return REG_SET_CC_STATUS_FAIL;
+}
+
+static inline const char *ath11k_cc_status_to_str(enum cc_setting_code code)
+{
+ switch (code) {
+ case REG_SET_CC_STATUS_PASS:
+ return "REG_SET_CC_STATUS_PASS";
+ case REG_CURRENT_ALPHA2_NOT_FOUND:
+ return "REG_CURRENT_ALPHA2_NOT_FOUND";
+ case REG_INIT_ALPHA2_NOT_FOUND:
+ return "REG_INIT_ALPHA2_NOT_FOUND";
+ case REG_SET_CC_CHANGE_NOT_ALLOWED:
+ return "REG_SET_CC_CHANGE_NOT_ALLOWED";
+ case REG_SET_CC_STATUS_NO_MEMORY:
+ return "REG_SET_CC_STATUS_NO_MEMORY";
+ case REG_SET_CC_STATUS_FAIL:
+ return "REG_SET_CC_STATUS_FAIL";
+ }
+
+ return "Unknown CC status";
+}
+
+enum wmi_reg_6ghz_ap_type {
+ WMI_REG_INDOOR_AP = 0,
+ WMI_REG_STANDARD_POWER_AP = 1,
+ WMI_REG_VERY_LOW_POWER_AP = 2,
+
+ /* add AP type above, handle in ath11k_6ghz_ap_type_to_str()
+ */
+ WMI_REG_CURRENT_MAX_AP_TYPE,
+ WMI_REG_MAX_AP_TYPE = 7,
+};
+
+static inline const char *
+ath11k_6ghz_ap_type_to_str(enum wmi_reg_6ghz_ap_type type)
+{
+ switch (type) {
+ case WMI_REG_INDOOR_AP:
+ return "INDOOR AP";
+ case WMI_REG_STANDARD_POWER_AP:
+ return "STANDARD POWER AP";
+ case WMI_REG_VERY_LOW_POWER_AP:
+ return "VERY LOW POWER AP";
+ case WMI_REG_CURRENT_MAX_AP_TYPE:
+ return "CURRENT_MAX_AP_TYPE";
+ case WMI_REG_MAX_AP_TYPE:
+ return "MAX_AP_TYPE";
+ }
+
+ return "unknown 6 GHz AP type";
+}
+
+enum wmi_reg_6ghz_client_type {
+ WMI_REG_DEFAULT_CLIENT = 0,
+ WMI_REG_SUBORDINATE_CLIENT = 1,
+ WMI_REG_MAX_CLIENT_TYPE = 2,
+
+ /* add client type above, handle it in
+ * ath11k_6ghz_client_type_to_str()
+ */
+};
+
+static inline const char *
+ath11k_6ghz_client_type_to_str(enum wmi_reg_6ghz_client_type type)
+{
+ switch (type) {
+ case WMI_REG_DEFAULT_CLIENT:
+ return "DEFAULT CLIENT";
+ case WMI_REG_SUBORDINATE_CLIENT:
+ return "SUBORDINATE CLIENT";
+ case WMI_REG_MAX_CLIENT_TYPE:
+ return "MAX_CLIENT_TYPE";
+ }
+
+ return "unknown 6 GHz client type";
+}
+
+enum reg_subdomains_6ghz {
+ EMPTY_6GHZ = 0x0,
+ FCC1_CLIENT_LPI_REGULAR_6GHZ = 0x01,
+ FCC1_CLIENT_SP_6GHZ = 0x02,
+ FCC1_AP_LPI_6GHZ = 0x03,
+ FCC1_CLIENT_LPI_SUBORDINATE = FCC1_AP_LPI_6GHZ,
+ FCC1_AP_SP_6GHZ = 0x04,
+ ETSI1_LPI_6GHZ = 0x10,
+ ETSI1_VLP_6GHZ = 0x11,
+ ETSI2_LPI_6GHZ = 0x12,
+ ETSI2_VLP_6GHZ = 0x13,
+ APL1_LPI_6GHZ = 0x20,
+ APL1_VLP_6GHZ = 0x21,
+
+ /* add sub-domain above, handle it in
+ * ath11k_sub_reg_6ghz_to_str()
+ */
+};
+
+static inline const char *
+ath11k_sub_reg_6ghz_to_str(enum reg_subdomains_6ghz sub_id)
+{
+ switch (sub_id) {
+ case EMPTY_6GHZ:
+ return "N/A";
+ case FCC1_CLIENT_LPI_REGULAR_6GHZ:
+ return "FCC1_CLIENT_LPI_REGULAR_6GHZ";
+ case FCC1_CLIENT_SP_6GHZ:
+ return "FCC1_CLIENT_SP_6GHZ";
+ case FCC1_AP_LPI_6GHZ:
+ return "FCC1_AP_LPI_6GHZ/FCC1_CLIENT_LPI_SUBORDINATE";
+ case FCC1_AP_SP_6GHZ:
+ return "FCC1_AP_SP_6GHZ";
+ case ETSI1_LPI_6GHZ:
+ return "ETSI1_LPI_6GHZ";
+ case ETSI1_VLP_6GHZ:
+ return "ETSI1_VLP_6GHZ";
+ case ETSI2_LPI_6GHZ:
+ return "ETSI2_LPI_6GHZ";
+ case ETSI2_VLP_6GHZ:
+ return "ETSI2_VLP_6GHZ";
+ case APL1_LPI_6GHZ:
+ return "APL1_LPI_6GHZ";
+ case APL1_VLP_6GHZ:
+ return "APL1_VLP_6GHZ";
+ }
+
+ return "unknown sub reg id";
+}
+
+enum reg_super_domain_6ghz {
+ FCC1_6GHZ = 0x01,
+ ETSI1_6GHZ = 0x02,
+ ETSI2_6GHZ = 0x03,
+ APL1_6GHZ = 0x04,
+ FCC1_6GHZ_CL = 0x05,
+
+ /* add super domain above, handle it in
+ * ath11k_super_reg_6ghz_to_str()
+ */
+};
+
+static inline const char *
+ath11k_super_reg_6ghz_to_str(enum reg_super_domain_6ghz domain_id)
+{
+ switch (domain_id) {
+ case FCC1_6GHZ:
+ return "FCC1_6GHZ";
+ case ETSI1_6GHZ:
+ return "ETSI1_6GHZ";
+ case ETSI2_6GHZ:
+ return "ETSI2_6GHZ";
+ case APL1_6GHZ:
+ return "APL1_6GHZ";
+ case FCC1_6GHZ_CL:
+ return "FCC1_6GHZ_CL";
+ }
+
+ return "unknown domain id";
+}
+
+struct cur_reg_rule {
+ u16 start_freq;
+ u16 end_freq;
+ u16 max_bw;
+ u8 reg_power;
+ u8 ant_gain;
+ u16 flags;
+ bool psd_flag;
+ s8 psd_eirp;
+};
+
+struct cur_regulatory_info {
+ enum cc_setting_code status_code;
+ u8 num_phy;
+ u8 phy_id;
+ u16 reg_dmn_pair;
+ u16 ctry_code;
+ u8 alpha2[REG_ALPHA2_LEN + 1];
+ u32 dfs_region;
+ u32 phybitmap;
+ u32 min_bw_2ghz;
+ u32 max_bw_2ghz;
+ u32 min_bw_5ghz;
+ u32 max_bw_5ghz;
+ u32 num_2ghz_reg_rules;
+ u32 num_5ghz_reg_rules;
+ struct cur_reg_rule *reg_rules_2ghz_ptr;
+ struct cur_reg_rule *reg_rules_5ghz_ptr;
+ bool is_ext_reg_event;
+ enum wmi_reg_6ghz_client_type client_type;
+ bool rnr_tpe_usable;
+ bool unspecified_ap_usable;
+ u8 domain_code_6ghz_ap[WMI_REG_CURRENT_MAX_AP_TYPE];
+ u8 domain_code_6ghz_client[WMI_REG_CURRENT_MAX_AP_TYPE][WMI_REG_MAX_CLIENT_TYPE];
+ u32 domain_code_6ghz_super_id;
+ u32 min_bw_6ghz_ap[WMI_REG_CURRENT_MAX_AP_TYPE];
+ u32 max_bw_6ghz_ap[WMI_REG_CURRENT_MAX_AP_TYPE];
+ u32 min_bw_6ghz_client[WMI_REG_CURRENT_MAX_AP_TYPE][WMI_REG_MAX_CLIENT_TYPE];
+ u32 max_bw_6ghz_client[WMI_REG_CURRENT_MAX_AP_TYPE][WMI_REG_MAX_CLIENT_TYPE];
+ u32 num_6ghz_rules_ap[WMI_REG_CURRENT_MAX_AP_TYPE];
+ u32 num_6ghz_rules_client[WMI_REG_CURRENT_MAX_AP_TYPE][WMI_REG_MAX_CLIENT_TYPE];
+ struct cur_reg_rule *reg_rules_6ghz_ap_ptr[WMI_REG_CURRENT_MAX_AP_TYPE];
+ struct cur_reg_rule *reg_rules_6ghz_client_ptr
+ [WMI_REG_CURRENT_MAX_AP_TYPE][WMI_REG_MAX_CLIENT_TYPE];
+};
+
+struct wmi_reg_chan_list_cc_event {
+ u32 status_code;
+ u32 phy_id;
+ u32 alpha2;
+ u32 num_phy;
+ u32 country_id;
+ u32 domain_code;
+ u32 dfs_region;
+ u32 phybitmap;
+ u32 min_bw_2ghz;
+ u32 max_bw_2ghz;
+ u32 min_bw_5ghz;
+ u32 max_bw_5ghz;
+ u32 num_2ghz_reg_rules;
+ u32 num_5ghz_reg_rules;
+} __packed;
+
+struct wmi_regulatory_rule_struct {
+ u32 tlv_header;
+ u32 freq_info;
+ u32 bw_pwr_info;
+ u32 flag_info;
+};
+
+#define WMI_REG_CLIENT_MAX 4
+
+struct wmi_reg_chan_list_cc_ext_event {
+ u32 status_code;
+ u32 phy_id;
+ u32 alpha2;
+ u32 num_phy;
+ u32 country_id;
+ u32 domain_code;
+ u32 dfs_region;
+ u32 phybitmap;
+ u32 min_bw_2ghz;
+ u32 max_bw_2ghz;
+ u32 min_bw_5ghz;
+ u32 max_bw_5ghz;
+ u32 num_2ghz_reg_rules;
+ u32 num_5ghz_reg_rules;
+ u32 client_type;
+ u32 rnr_tpe_usable;
+ u32 unspecified_ap_usable;
+ u32 domain_code_6ghz_ap_lpi;
+ u32 domain_code_6ghz_ap_sp;
+ u32 domain_code_6ghz_ap_vlp;
+ u32 domain_code_6ghz_client_lpi[WMI_REG_CLIENT_MAX];
+ u32 domain_code_6ghz_client_sp[WMI_REG_CLIENT_MAX];
+ u32 domain_code_6ghz_client_vlp[WMI_REG_CLIENT_MAX];
+ u32 domain_code_6ghz_super_id;
+ u32 min_bw_6ghz_ap_sp;
+ u32 max_bw_6ghz_ap_sp;
+ u32 min_bw_6ghz_ap_lpi;
+ u32 max_bw_6ghz_ap_lpi;
+ u32 min_bw_6ghz_ap_vlp;
+ u32 max_bw_6ghz_ap_vlp;
+ u32 min_bw_6ghz_client_sp[WMI_REG_CLIENT_MAX];
+ u32 max_bw_6ghz_client_sp[WMI_REG_CLIENT_MAX];
+ u32 min_bw_6ghz_client_lpi[WMI_REG_CLIENT_MAX];
+ u32 max_bw_6ghz_client_lpi[WMI_REG_CLIENT_MAX];
+ u32 min_bw_6ghz_client_vlp[WMI_REG_CLIENT_MAX];
+ u32 max_bw_6ghz_client_vlp[WMI_REG_CLIENT_MAX];
+ u32 num_6ghz_reg_rules_ap_sp;
+ u32 num_6ghz_reg_rules_ap_lpi;
+ u32 num_6ghz_reg_rules_ap_vlp;
+ u32 num_6ghz_reg_rules_client_sp[WMI_REG_CLIENT_MAX];
+ u32 num_6ghz_reg_rules_client_lpi[WMI_REG_CLIENT_MAX];
+ u32 num_6ghz_reg_rules_client_vlp[WMI_REG_CLIENT_MAX];
+} __packed;
+
+struct wmi_regulatory_ext_rule {
+ u32 tlv_header;
+ u32 freq_info;
+ u32 bw_pwr_info;
+ u32 flag_info;
+ u32 psd_power_info;
+} __packed;
+
+struct wmi_vdev_delete_resp_event {
+ u32 vdev_id;
+} __packed;
+
+struct wmi_peer_delete_resp_event {
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+} __packed;
+
+struct wmi_bcn_tx_status_event {
+ u32 vdev_id;
+ u32 tx_status;
+} __packed;
+
+struct wmi_vdev_stopped_event {
+ u32 vdev_id;
+} __packed;
+
+struct wmi_pdev_bss_chan_info_event {
+ u32 freq; /* Units in MHz */
+ u32 noise_floor; /* units are dBm */
+ /* rx clear - how often the channel was unused */
+ u32 rx_clear_count_low;
+ u32 rx_clear_count_high;
+ /* cycle count - elapsed time during measured period, in clock ticks */
+ u32 cycle_count_low;
+ u32 cycle_count_high;
+ /* tx cycle count - elapsed time spent in tx, in clock ticks */
+ u32 tx_cycle_count_low;
+ u32 tx_cycle_count_high;
+ /* rx cycle count - elapsed time spent in rx, in clock ticks */
+ u32 rx_cycle_count_low;
+ u32 rx_cycle_count_high;
+ /* rx cycle count for my BSS, in 64-bit format */
+ u32 rx_bss_cycle_count_low;
+ u32 rx_bss_cycle_count_high;
+ u32 pdev_id;
+} __packed;
+
+#define WMI_VDEV_INSTALL_KEY_COMPL_STATUS_SUCCESS 0
+
+struct wmi_vdev_install_key_compl_event {
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ u32 key_idx;
+ u32 key_flags;
+ u32 status;
+} __packed;
+
+struct wmi_vdev_install_key_complete_arg {
+ u32 vdev_id;
+ const u8 *macaddr;
+ u32 key_idx;
+ u32 key_flags;
+ u32 status;
+};
+
+struct wmi_peer_assoc_conf_event {
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+} __packed;
+
+struct wmi_peer_assoc_conf_arg {
+ u32 vdev_id;
+ const u8 *macaddr;
+};
+
+struct wmi_fils_discovery_event {
+ u32 vdev_id;
+ u32 fils_tt;
+ u32 tbtt;
+} __packed;
+
+struct wmi_probe_resp_tx_status_event {
+ u32 vdev_id;
+ u32 tx_status;
+} __packed;
+
+/*
+ * PDEV statistics
+ */
+struct wmi_pdev_stats_base {
+ s32 chan_nf;
+ u32 tx_frame_count; /* Cycles spent transmitting frames */
+ u32 rx_frame_count; /* Cycles spent receiving frames */
+ u32 rx_clear_count; /* Total channel busy time, evidently */
+ u32 cycle_count; /* Total on-channel time */
+ u32 phy_err_count;
+ u32 chan_tx_pwr;
+} __packed;
+
+struct wmi_pdev_stats_extra {
+ u32 ack_rx_bad;
+ u32 rts_bad;
+ u32 rts_good;
+ u32 fcs_bad;
+ u32 no_beacons;
+ u32 mib_int_count;
+} __packed;
+
+struct wmi_pdev_stats_tx {
+ /* Num HTT cookies queued to dispatch list */
+ s32 comp_queued;
+
+ /* Num HTT cookies dispatched */
+ s32 comp_delivered;
+
+ /* Num MSDU queued to WAL */
+ s32 msdu_enqued;
+
+ /* Num MPDU queue to WAL */
+ s32 mpdu_enqued;
+
+ /* Num MSDUs dropped by WMM limit */
+ s32 wmm_drop;
+
+ /* Num Local frames queued */
+ s32 local_enqued;
+
+ /* Num Local frames done */
+ s32 local_freed;
+
+ /* Num queued to HW */
+ s32 hw_queued;
+
+ /* Num PPDU reaped from HW */
+ s32 hw_reaped;
+
+ /* Num underruns */
+ s32 underrun;
+
+ /* Num hw paused */
+ u32 hw_paused;
+
+ /* Num PPDUs cleaned up in TX abort */
+ s32 tx_abort;
+
+ /* Num MPDUs requeued by SW */
+ s32 mpdus_requeued;
+
+ /* excessive retries */
+ u32 tx_ko;
+
+ u32 tx_xretry;
+
+ /* data hw rate code */
+ u32 data_rc;
+
+ /* Scheduler self triggers */
+ u32 self_triggers;
+
+ /* frames dropped due to excessive sw retries */
+ u32 sw_retry_failure;
+
+ /* illegal rate phy errors */
+ u32 illgl_rate_phy_err;
+
+ /* wal pdev continuous xretry */
+ u32 pdev_cont_xretry;
+
+ /* wal pdev tx timeouts */
+ u32 pdev_tx_timeout;
+
+ /* wal pdev resets */
+ u32 pdev_resets;
+
+ /* frames dropped due to non-availability of stateless TIDs */
+ u32 stateless_tid_alloc_failure;
+
+ /* PHY/BB underrun */
+ u32 phy_underrun;
+
+ /* MPDU is more than txop limit */
+ u32 txop_ovf;
+
+ /* Num sequences posted */
+ u32 seq_posted;
+
+ /* Num sequences failed in queueing */
+ u32 seq_failed_queueing;
+
+ /* Num sequences completed */
+ u32 seq_completed;
+
+ /* Num sequences restarted */
+ u32 seq_restarted;
+
+ /* Num of MU sequences posted */
+ u32 mu_seq_posted;
+
+ /* Num MPDUs flushed by SW, HWPAUSED, SW TXABORT
+ * (reset, channel change)
+ */
+ s32 mpdus_sw_flush;
+
+ /* Num MPDUs filtered by HW, all filter condition (TTL expired) */
+ s32 mpdus_hw_filter;
+
+ /* Num MPDUs truncated by PDG (TXOP, TBTT,
+ * PPDU_duration based on rate, dyn_bw)
+ */
+ s32 mpdus_truncated;
+
+ /* Num MPDUs that were tried but didn't receive ACK or BA */
+ s32 mpdus_ack_failed;
+
+ /* Num MPDUs that were dropped due to expiry. */
+ s32 mpdus_expired;
+} __packed;
+
+struct wmi_pdev_stats_rx {
+ /* Counts any change in ring routing mid-PPDU */
+ s32 mid_ppdu_route_change;
+
+ /* Total number of statuses processed */
+ s32 status_rcvd;
+
+ /* Extra frags on rings 0-3 */
+ s32 r0_frags;
+ s32 r1_frags;
+ s32 r2_frags;
+ s32 r3_frags;
+
+ /* MSDUs / MPDUs delivered to HTT */
+ s32 htt_msdus;
+ s32 htt_mpdus;
+
+ /* MSDUs / MPDUs delivered to local stack */
+ s32 loc_msdus;
+ s32 loc_mpdus;
+
+ /* AMSDUs that have more MSDUs than the status ring size */
+ s32 oversize_amsdu;
+
+ /* Number of PHY errors */
+ s32 phy_errs;
+
+ /* Number of PHY errors drops */
+ s32 phy_err_drop;
+
+ /* Number of mpdu errors - FCS, MIC, ENC etc. */
+ s32 mpdu_errs;
+
+ /* Num overflow errors */
+ s32 rx_ovfl_errs;
+} __packed;
+
+struct wmi_pdev_stats {
+ struct wmi_pdev_stats_base base;
+ struct wmi_pdev_stats_tx tx;
+ struct wmi_pdev_stats_rx rx;
+} __packed;
+
+#define WLAN_MAX_AC 4
+#define MAX_TX_RATE_VALUES 10
+
+struct wmi_vdev_stats {
+ u32 vdev_id;
+ u32 beacon_snr;
+ u32 data_snr;
+ u32 num_tx_frames[WLAN_MAX_AC];
+ u32 num_rx_frames;
+ u32 num_tx_frames_retries[WLAN_MAX_AC];
+ u32 num_tx_frames_failures[WLAN_MAX_AC];
+ u32 num_rts_fail;
+ u32 num_rts_success;
+ u32 num_rx_err;
+ u32 num_rx_discard;
+ u32 num_tx_not_acked;
+ u32 tx_rate_history[MAX_TX_RATE_VALUES];
+ u32 beacon_rssi_history[MAX_TX_RATE_VALUES];
+} __packed;
+
+struct wmi_bcn_stats {
+ u32 vdev_id;
+ u32 tx_bcn_succ_cnt;
+ u32 tx_bcn_outage_cnt;
+} __packed;
+
+struct wmi_stats_event {
+ u32 stats_id;
+ u32 num_pdev_stats;
+ u32 num_vdev_stats;
+ u32 num_peer_stats;
+ u32 num_bcnflt_stats;
+ u32 num_chan_stats;
+ u32 num_mib_stats;
+ u32 pdev_id;
+ u32 num_bcn_stats;
+ u32 num_peer_extd_stats;
+ u32 num_peer_extd2_stats;
+} __packed;
+
+struct wmi_rssi_stats {
+ u32 vdev_id;
+ u32 rssi_avg_beacon[WMI_MAX_CHAINS];
+ u32 rssi_avg_data[WMI_MAX_CHAINS];
+ struct wmi_mac_addr peer_macaddr;
+} __packed;
+
+struct wmi_per_chain_rssi_stats {
+ u32 num_per_chain_rssi_stats;
+} __packed;
+
+struct wmi_pdev_ctl_failsafe_chk_event {
+ u32 pdev_id;
+ u32 ctl_failsafe_status;
+} __packed;
+
+struct wmi_pdev_csa_switch_ev {
+ u32 pdev_id;
+ u32 current_switch_count;
+ u32 num_vdevs;
+} __packed;
+
+struct wmi_pdev_radar_ev {
+ u32 pdev_id;
+ u32 detection_mode;
+ u32 chan_freq;
+ u32 chan_width;
+ u32 detector_id;
+ u32 segment_id;
+ u32 timestamp;
+ u32 is_chirp;
+ s32 freq_offset;
+ s32 sidx;
+} __packed;
+
+struct wmi_pdev_temperature_event {
+ /* temperature value in Celsius degree */
+ s32 temp;
+ u32 pdev_id;
+} __packed;
+
+#define WMI_RX_STATUS_OK 0x00
+#define WMI_RX_STATUS_ERR_CRC 0x01
+#define WMI_RX_STATUS_ERR_DECRYPT 0x08
+#define WMI_RX_STATUS_ERR_MIC 0x10
+#define WMI_RX_STATUS_ERR_KEY_CACHE_MISS 0x20
+
+#define WLAN_MGMT_TXRX_HOST_MAX_ANTENNA 4
+
+struct mgmt_rx_event_params {
+ u32 chan_freq;
+ u32 channel;
+ u32 snr;
+ u8 rssi_ctl[WLAN_MGMT_TXRX_HOST_MAX_ANTENNA];
+ u32 rate;
+ enum wmi_phy_mode phy_mode;
+ u32 buf_len;
+ int status;
+ u32 flags;
+ int rssi;
+ u32 tsf_delta;
+ u8 pdev_id;
+};
+
+#define ATH_MAX_ANTENNA 4
+
+struct wmi_mgmt_rx_hdr {
+ u32 channel;
+ u32 snr;
+ u32 rate;
+ u32 phy_mode;
+ u32 buf_len;
+ u32 status;
+ u32 rssi_ctl[ATH_MAX_ANTENNA];
+ u32 flags;
+ int rssi;
+ u32 tsf_delta;
+ u32 rx_tsf_l32;
+ u32 rx_tsf_u32;
+ u32 pdev_id;
+ u32 chan_freq;
+} __packed;
+
+#define MAX_ANTENNA_EIGHT 8
+
+struct wmi_rssi_ctl_ext {
+ u32 tlv_header;
+ u32 rssi_ctl_ext[MAX_ANTENNA_EIGHT - ATH_MAX_ANTENNA];
+};
+
+struct wmi_mgmt_tx_compl_event {
+ u32 desc_id;
+ u32 status;
+ u32 pdev_id;
+ u32 ppdu_id;
+ u32 ack_rssi;
+} __packed;
+
+struct wmi_scan_event {
+ u32 event_type; /* %WMI_SCAN_EVENT_ */
+ u32 reason; /* %WMI_SCAN_REASON_ */
+ u32 channel_freq; /* only valid for WMI_SCAN_EVENT_FOREIGN_CHANNEL */
+ u32 scan_req_id;
+ u32 scan_id;
+ u32 vdev_id;
+ /* TSF Timestamp when the scan event (%WMI_SCAN_EVENT_) is completed
+ * In case of AP it is TSF of the AP vdev
+ * In case of STA connected state, this is the TSF of the AP
+ * In case of STA not connected, it will be the free running HW timer
+ */
+ u32 tsf_timestamp;
+} __packed;
+
+struct wmi_peer_sta_kickout_arg {
+ const u8 *mac_addr;
+};
+
+struct wmi_peer_sta_kickout_event {
+ struct wmi_mac_addr peer_macaddr;
+} __packed;
+
+enum wmi_roam_reason {
+ WMI_ROAM_REASON_BETTER_AP = 1,
+ WMI_ROAM_REASON_BEACON_MISS = 2,
+ WMI_ROAM_REASON_LOW_RSSI = 3,
+ WMI_ROAM_REASON_SUITABLE_AP_FOUND = 4,
+ WMI_ROAM_REASON_HO_FAILED = 5,
+
+ /* keep last */
+ WMI_ROAM_REASON_MAX,
+};
+
+struct wmi_roam_event {
+ u32 vdev_id;
+ u32 reason;
+ u32 rssi;
+} __packed;
+
+#define WMI_CHAN_INFO_START_RESP 0
+#define WMI_CHAN_INFO_END_RESP 1
+
+struct wmi_chan_info_event {
+ u32 err_code;
+ u32 freq;
+ u32 cmd_flags;
+ u32 noise_floor;
+ u32 rx_clear_count;
+ u32 cycle_count;
+ u32 chan_tx_pwr_range;
+ u32 chan_tx_pwr_tp;
+ u32 rx_frame_count;
+ u32 my_bss_rx_cycle_count;
+ u32 rx_11b_mode_data_duration;
+ u32 tx_frame_cnt;
+ u32 mac_clk_mhz;
+ u32 vdev_id;
+} __packed;
+
+struct ath11k_targ_cap {
+ u32 phy_capability;
+ u32 max_frag_entry;
+ u32 num_rf_chains;
+ u32 ht_cap_info;
+ u32 vht_cap_info;
+ u32 vht_supp_mcs;
+ u32 hw_min_tx_power;
+ u32 hw_max_tx_power;
+ u32 sys_cap_info;
+ u32 min_pkt_size_enable;
+ u32 max_bcn_ie_size;
+ u32 max_num_scan_channels;
+ u32 max_supported_macs;
+ u32 wmi_fw_sub_feat_caps;
+ u32 txrx_chainmask;
+ u32 default_dbs_hw_mode_index;
+ u32 num_msdu_desc;
+};
+
+enum wmi_vdev_type {
+ WMI_VDEV_TYPE_UNSPEC = 0,
+ WMI_VDEV_TYPE_AP = 1,
+ WMI_VDEV_TYPE_STA = 2,
+ WMI_VDEV_TYPE_IBSS = 3,
+ WMI_VDEV_TYPE_MONITOR = 4,
+};
+
+enum wmi_vdev_subtype {
+ WMI_VDEV_SUBTYPE_NONE,
+ WMI_VDEV_SUBTYPE_P2P_DEVICE,
+ WMI_VDEV_SUBTYPE_P2P_CLIENT,
+ WMI_VDEV_SUBTYPE_P2P_GO,
+ WMI_VDEV_SUBTYPE_PROXY_STA,
+ WMI_VDEV_SUBTYPE_MESH_NON_11S,
+ WMI_VDEV_SUBTYPE_MESH_11S,
+};
+
+enum wmi_sta_powersave_param {
+ WMI_STA_PS_PARAM_RX_WAKE_POLICY = 0,
+ WMI_STA_PS_PARAM_TX_WAKE_THRESHOLD = 1,
+ WMI_STA_PS_PARAM_PSPOLL_COUNT = 2,
+ WMI_STA_PS_PARAM_INACTIVITY_TIME = 3,
+ WMI_STA_PS_PARAM_UAPSD = 4,
+};
+
+#define WMI_UAPSD_AC_TYPE_DELI 0
+#define WMI_UAPSD_AC_TYPE_TRIG 1
+
+#define WMI_UAPSD_AC_BIT_MASK(ac, type) \
+ ((type == WMI_UAPSD_AC_TYPE_DELI) ? \
+ (1 << (ac << 1)) : (1 << ((ac << 1) + 1)))
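+
+/* Expansion sketch (illustrative only): for AC 1, the macro yields bit 2
+ * for delivery and bit 3 for trigger, matching the
+ * WMI_STA_PS_UAPSD_AC1_* values below:
+ *
+ *     WMI_UAPSD_AC_BIT_MASK(1, WMI_UAPSD_AC_TYPE_DELI) == (1 << 2)
+ *     WMI_UAPSD_AC_BIT_MASK(1, WMI_UAPSD_AC_TYPE_TRIG) == (1 << 3)
+ */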
+
+enum wmi_sta_ps_param_uapsd {
+ WMI_STA_PS_UAPSD_AC0_DELIVERY_EN = (1 << 0),
+ WMI_STA_PS_UAPSD_AC0_TRIGGER_EN = (1 << 1),
+ WMI_STA_PS_UAPSD_AC1_DELIVERY_EN = (1 << 2),
+ WMI_STA_PS_UAPSD_AC1_TRIGGER_EN = (1 << 3),
+ WMI_STA_PS_UAPSD_AC2_DELIVERY_EN = (1 << 4),
+ WMI_STA_PS_UAPSD_AC2_TRIGGER_EN = (1 << 5),
+ WMI_STA_PS_UAPSD_AC3_DELIVERY_EN = (1 << 6),
+ WMI_STA_PS_UAPSD_AC3_TRIGGER_EN = (1 << 7),
+};
+
+#define WMI_STA_UAPSD_MAX_INTERVAL_MSEC UINT_MAX
+
+struct wmi_sta_uapsd_auto_trig_param {
+ u32 wmm_ac;
+ u32 user_priority;
+ u32 service_interval;
+ u32 suspend_interval;
+ u32 delay_interval;
+};
+
+struct wmi_sta_uapsd_auto_trig_cmd_fixed_param {
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ u32 num_ac;
+};
+
+struct wmi_sta_uapsd_auto_trig_arg {
+ u32 wmm_ac;
+ u32 user_priority;
+ u32 service_interval;
+ u32 suspend_interval;
+ u32 delay_interval;
+};
+
+enum wmi_sta_ps_param_tx_wake_threshold {
+ WMI_STA_PS_TX_WAKE_THRESHOLD_NEVER = 0,
+ WMI_STA_PS_TX_WAKE_THRESHOLD_ALWAYS = 1,
+
+ /* Values greater than one specify how many TX attempts are made per
+ * beacon interval before the STA will wake up.
+ */
+};
+
+/* The maximum number of PS-Poll frames the FW will send in response to
+ * traffic advertised in TIM before waking up (by sending a null frame with PS
+ * = 0). Value 0 has a special meaning: there is no maximum count and the FW
+ * will send as many PS-Poll frames as necessary to retrieve buffered BUs. This
+ * parameter is used when the RX wake policy is
+ * WMI_STA_PS_RX_WAKE_POLICY_POLL_UAPSD and ignored when the RX wake
+ * policy is WMI_STA_PS_RX_WAKE_POLICY_WAKE.
+ */
+enum wmi_sta_ps_param_pspoll_count {
+ WMI_STA_PS_PSPOLL_COUNT_NO_MAX = 0,
+ /* Values greater than 0 indicate the maximum number of PS-Poll frames
+ * FW will send before waking up.
+ */
+};
+
+/* U-APSD configuration of peer station from (re)assoc request and TSPECs */
+enum wmi_ap_ps_param_uapsd {
+ WMI_AP_PS_UAPSD_AC0_DELIVERY_EN = (1 << 0),
+ WMI_AP_PS_UAPSD_AC0_TRIGGER_EN = (1 << 1),
+ WMI_AP_PS_UAPSD_AC1_DELIVERY_EN = (1 << 2),
+ WMI_AP_PS_UAPSD_AC1_TRIGGER_EN = (1 << 3),
+ WMI_AP_PS_UAPSD_AC2_DELIVERY_EN = (1 << 4),
+ WMI_AP_PS_UAPSD_AC2_TRIGGER_EN = (1 << 5),
+ WMI_AP_PS_UAPSD_AC3_DELIVERY_EN = (1 << 6),
+ WMI_AP_PS_UAPSD_AC3_TRIGGER_EN = (1 << 7),
+};
+
+/* U-APSD maximum service period of peer station */
+enum wmi_ap_ps_peer_param_max_sp {
+ WMI_AP_PS_PEER_PARAM_MAX_SP_UNLIMITED = 0,
+ WMI_AP_PS_PEER_PARAM_MAX_SP_2 = 1,
+ WMI_AP_PS_PEER_PARAM_MAX_SP_4 = 2,
+ WMI_AP_PS_PEER_PARAM_MAX_SP_6 = 3,
+ MAX_WMI_AP_PS_PEER_PARAM_MAX_SP,
+};
+
+enum wmi_ap_ps_peer_param {
+ /** Set uapsd configuration for a given peer.
+ *
+ * This includes the delivery and trigger enabled state for each AC.
+ * The host MLME needs to set this based on AP capability and the
+ * station's request as set in the association request received from
+ * the station.
+ *
+ * Lower 8 bits of the value specify the UAPSD configuration.
+ *
+ * (see enum wmi_ap_ps_param_uapsd)
+ * The default value is 0.
+ */
+ WMI_AP_PS_PEER_PARAM_UAPSD = 0,
+
+ /**
+ * Set the service period for a UAPSD capable station
+ *
+ * The service period from wme ie in the (re)assoc request frame.
+ *
+ * (see enum wmi_ap_ps_peer_param_max_sp)
+ */
+ WMI_AP_PS_PEER_PARAM_MAX_SP = 1,
+
+ /** Time in seconds for aging out buffered frames
+ * for STA in power save
+ */
+ WMI_AP_PS_PEER_PARAM_AGEOUT_TIME = 2,
+
+ /** Specify frame types that are considered SIFS
+ * RESP trigger frame
+ */
+ WMI_AP_PS_PEER_PARAM_SIFS_RESP_FRMTYPE = 3,
+
+ /** Specifies the trigger state of TID.
+ * Valid only for UAPSD frame type
+ */
+ WMI_AP_PS_PEER_PARAM_SIFS_RESP_UAPSD = 4,
+
+ /* Specifies the WNM sleep state of a STA */
+ WMI_AP_PS_PEER_PARAM_WNM_SLEEP = 5,
+};
+
+#define DISABLE_SIFS_RESPONSE_TRIGGER 0
+
+#define WMI_MAX_KEY_INDEX 3
+#define WMI_MAX_KEY_LEN 32
+
+#define WMI_KEY_PAIRWISE 0x00
+#define WMI_KEY_GROUP 0x01
+
+#define WMI_CIPHER_NONE 0x0 /* clear key */
+#define WMI_CIPHER_WEP 0x1
+#define WMI_CIPHER_TKIP 0x2
+#define WMI_CIPHER_AES_OCB 0x3
+#define WMI_CIPHER_AES_CCM 0x4
+#define WMI_CIPHER_WAPI 0x5
+#define WMI_CIPHER_CKIP 0x6
+#define WMI_CIPHER_AES_CMAC 0x7
+#define WMI_CIPHER_ANY 0x8
+#define WMI_CIPHER_AES_GCM 0x9
+#define WMI_CIPHER_AES_GMAC 0xa
+
+/* Value to disable fixed rate setting */
+#define WMI_FIXED_RATE_NONE (0xffff)
+
+#define ATH11K_RC_VERSION_OFFSET 28
+#define ATH11K_RC_PREAMBLE_OFFSET 8
+#define ATH11K_RC_NSS_OFFSET 5
+
+#define ATH11K_HW_RATE_CODE(rate, nss, preamble) \
+ ((1 << ATH11K_RC_VERSION_OFFSET) | \
+ ((nss) << ATH11K_RC_NSS_OFFSET) | \
+ ((preamble) << ATH11K_RC_PREAMBLE_OFFSET) | \
+ (rate))
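+
+/* Usage sketch (illustrative only): building a fixed rate code for HT
+ * MCS 7 with a single spatial stream (the nss argument is zero-based
+ * here, which is an assumption of this sketch):
+ *
+ *     u32 rate_code = ATH11K_HW_RATE_CODE(7, 0, WMI_RATE_PREAMBLE_HT);
+ */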
+
+/* Preamble types to be used with VDEV fixed rate configuration */
+enum wmi_rate_preamble {
+ WMI_RATE_PREAMBLE_OFDM,
+ WMI_RATE_PREAMBLE_CCK,
+ WMI_RATE_PREAMBLE_HT,
+ WMI_RATE_PREAMBLE_VHT,
+ WMI_RATE_PREAMBLE_HE,
+};
+
+/**
+ * enum wmi_rtscts_prot_mode - Enable/Disable RTS/CTS and CTS2Self Protection.
+ * @WMI_RTS_CTS_DISABLED: RTS/CTS protection is disabled.
+ * @WMI_USE_RTS_CTS: RTS/CTS Enabled.
+ * @WMI_USE_CTS2SELF: CTS to self protection Enabled.
+ */
+enum wmi_rtscts_prot_mode {
+ WMI_RTS_CTS_DISABLED = 0,
+ WMI_USE_RTS_CTS = 1,
+ WMI_USE_CTS2SELF = 2,
+};
+
+/**
+ * enum wmi_rtscts_profile - Selection of RTS CTS profile along with enabling
+ * protection mode.
+ * @WMI_RTSCTS_FOR_NO_RATESERIES: Neither of rate-series should use RTS-CTS
+ * @WMI_RTSCTS_FOR_SECOND_RATESERIES: Only second rate-series will use RTS-CTS
+ * @WMI_RTSCTS_ACROSS_SW_RETRIES: Only the second rate-series will use RTS-CTS,
+ * but if there's a sw retry, both the rate
+ * series will use RTS-CTS.
+ * @WMI_RTSCTS_ERP: RTS/CTS used for ERP protection for every PPDU.
+ * @WMI_RTSCTS_FOR_ALL_RATESERIES: Enable RTS-CTS for all rate series.
+ */
+enum wmi_rtscts_profile {
+ WMI_RTSCTS_FOR_NO_RATESERIES = 0,
+ WMI_RTSCTS_FOR_SECOND_RATESERIES = 1,
+ WMI_RTSCTS_ACROSS_SW_RETRIES = 2,
+ WMI_RTSCTS_ERP = 3,
+ WMI_RTSCTS_FOR_ALL_RATESERIES = 4,
+};
+
+struct ath11k_hal_reg_cap {
+ u32 eeprom_rd;
+ u32 eeprom_rd_ext;
+ u32 regcap1;
+ u32 regcap2;
+ u32 wireless_modes;
+ u32 low_2ghz_chan;
+ u32 high_2ghz_chan;
+ u32 low_5ghz_chan;
+ u32 high_5ghz_chan;
+};
+
+struct ath11k_mem_chunk {
+ void *vaddr;
+ dma_addr_t paddr;
+ u32 len;
+ u32 req_id;
+};
+
+#define WMI_SKB_HEADROOM sizeof(struct wmi_cmd_hdr)
+
+enum wmi_sta_ps_param_rx_wake_policy {
+ WMI_STA_PS_RX_WAKE_POLICY_WAKE = 0,
+ WMI_STA_PS_RX_WAKE_POLICY_POLL_UAPSD = 1,
+};
+
+/* Do not change existing values! Used by the ath11k_frame_mode
+ * module parameter.
+ */
+enum ath11k_hw_txrx_mode {
+ ATH11K_HW_TXRX_RAW = 0,
+ ATH11K_HW_TXRX_NATIVE_WIFI = 1,
+ ATH11K_HW_TXRX_ETHERNET = 2,
+};
+
+struct wmi_wmm_params {
+ u32 tlv_header;
+ u32 cwmin;
+ u32 cwmax;
+ u32 aifs;
+ u32 txoplimit;
+ u32 acm;
+ u32 no_ack;
+} __packed;
+
+struct wmi_wmm_params_arg {
+ u8 acm;
+ u8 aifs;
+ u16 cwmin;
+ u16 cwmax;
+ u16 txop;
+ u8 no_ack;
+};
+
+struct wmi_vdev_set_wmm_params_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ struct wmi_wmm_params wmm_params[4];
+ u32 wmm_param_type;
+} __packed;
+
+struct wmi_wmm_params_all_arg {
+ struct wmi_wmm_params_arg ac_be;
+ struct wmi_wmm_params_arg ac_bk;
+ struct wmi_wmm_params_arg ac_vi;
+ struct wmi_wmm_params_arg ac_vo;
+};
+
+#define ATH11K_TWT_DEF_STA_CONG_TIMER_MS 5000
+#define ATH11K_TWT_DEF_DEFAULT_SLOT_SIZE 10
+#define ATH11K_TWT_DEF_CONGESTION_THRESH_SETUP 50
+#define ATH11K_TWT_DEF_CONGESTION_THRESH_TEARDOWN 20
+#define ATH11K_TWT_DEF_CONGESTION_THRESH_CRITICAL 100
+#define ATH11K_TWT_DEF_INTERFERENCE_THRESH_TEARDOWN 80
+#define ATH11K_TWT_DEF_INTERFERENCE_THRESH_SETUP 50
+#define ATH11K_TWT_DEF_MIN_NO_STA_SETUP 10
+#define ATH11K_TWT_DEF_MIN_NO_STA_TEARDOWN 2
+#define ATH11K_TWT_DEF_NO_OF_BCAST_MCAST_SLOTS 2
+#define ATH11K_TWT_DEF_MIN_NO_TWT_SLOTS 2
+#define ATH11K_TWT_DEF_MAX_NO_STA_TWT 500
+#define ATH11K_TWT_DEF_MODE_CHECK_INTERVAL 10000
+#define ATH11K_TWT_DEF_ADD_STA_SLOT_INTERVAL 1000
+#define ATH11K_TWT_DEF_REMOVE_STA_SLOT_INTERVAL 5000
+
+struct wmi_twt_enable_params {
+ u32 sta_cong_timer_ms;
+ u32 mbss_support;
+ u32 default_slot_size;
+ u32 congestion_thresh_setup;
+ u32 congestion_thresh_teardown;
+ u32 congestion_thresh_critical;
+ u32 interference_thresh_teardown;
+ u32 interference_thresh_setup;
+ u32 min_no_sta_setup;
+ u32 min_no_sta_teardown;
+ u32 no_of_bcast_mcast_slots;
+ u32 min_no_twt_slots;
+ u32 max_no_sta_twt;
+ u32 mode_check_interval;
+ u32 add_sta_slot_interval;
+ u32 remove_sta_slot_interval;
+};
+
+struct wmi_twt_enable_params_cmd {
+ u32 tlv_header;
+ u32 pdev_id;
+ u32 sta_cong_timer_ms;
+ u32 mbss_support;
+ u32 default_slot_size;
+ u32 congestion_thresh_setup;
+ u32 congestion_thresh_teardown;
+ u32 congestion_thresh_critical;
+ u32 interference_thresh_teardown;
+ u32 interference_thresh_setup;
+ u32 min_no_sta_setup;
+ u32 min_no_sta_teardown;
+ u32 no_of_bcast_mcast_slots;
+ u32 min_no_twt_slots;
+ u32 max_no_sta_twt;
+ u32 mode_check_interval;
+ u32 add_sta_slot_interval;
+ u32 remove_sta_slot_interval;
+} __packed;
+
+struct wmi_twt_disable_params_cmd {
+ u32 tlv_header;
+ u32 pdev_id;
+} __packed;
+
+enum WMI_HOST_TWT_COMMAND {
+ WMI_HOST_TWT_COMMAND_REQUEST_TWT = 0,
+ WMI_HOST_TWT_COMMAND_SUGGEST_TWT,
+ WMI_HOST_TWT_COMMAND_DEMAND_TWT,
+ WMI_HOST_TWT_COMMAND_TWT_GROUPING,
+ WMI_HOST_TWT_COMMAND_ACCEPT_TWT,
+ WMI_HOST_TWT_COMMAND_ALTERNATE_TWT,
+ WMI_HOST_TWT_COMMAND_DICTATE_TWT,
+ WMI_HOST_TWT_COMMAND_REJECT_TWT,
+};
+
+#define WMI_TWT_ADD_DIALOG_FLAG_BCAST BIT(8)
+#define WMI_TWT_ADD_DIALOG_FLAG_TRIGGER BIT(9)
+#define WMI_TWT_ADD_DIALOG_FLAG_FLOW_TYPE BIT(10)
+#define WMI_TWT_ADD_DIALOG_FLAG_PROTECTION BIT(11)
+
+struct wmi_twt_add_dialog_params_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ u32 dialog_id;
+ u32 wake_intvl_us;
+ u32 wake_intvl_mantis;
+ u32 wake_dura_us;
+ u32 sp_offset_us;
+ u32 flags;
+} __packed;
+
+struct wmi_twt_add_dialog_params {
+ u32 vdev_id;
+ u8 peer_macaddr[ETH_ALEN];
+ u32 dialog_id;
+ u32 wake_intvl_us;
+ u32 wake_intvl_mantis;
+ u32 wake_dura_us;
+ u32 sp_offset_us;
+ u8 twt_cmd;
+ u8 flag_bcast;
+ u8 flag_trigger;
+ u8 flag_flow_type;
+ u8 flag_protection;
+} __packed;
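+
+/* Mapping sketch (illustrative only): translating the host-side flag
+ * bytes of struct wmi_twt_add_dialog_params into the flags word of
+ * struct wmi_twt_add_dialog_params_cmd:
+ *
+ *     if (params->flag_bcast)
+ *             cmd->flags |= WMI_TWT_ADD_DIALOG_FLAG_BCAST;
+ *     if (params->flag_trigger)
+ *             cmd->flags |= WMI_TWT_ADD_DIALOG_FLAG_TRIGGER;
+ */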
+
+enum wmi_twt_add_dialog_status {
+ WMI_ADD_TWT_STATUS_OK,
+ WMI_ADD_TWT_STATUS_TWT_NOT_ENABLED,
+ WMI_ADD_TWT_STATUS_USED_DIALOG_ID,
+ WMI_ADD_TWT_STATUS_INVALID_PARAM,
+ WMI_ADD_TWT_STATUS_NOT_READY,
+ WMI_ADD_TWT_STATUS_NO_RESOURCE,
+ WMI_ADD_TWT_STATUS_NO_ACK,
+ WMI_ADD_TWT_STATUS_NO_RESPONSE,
+ WMI_ADD_TWT_STATUS_DENIED,
+ WMI_ADD_TWT_STATUS_UNKNOWN_ERROR,
+};
+
+struct wmi_twt_add_dialog_event {
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ u32 dialog_id;
+ u32 status;
+} __packed;
+
+struct wmi_twt_del_dialog_params {
+ u32 vdev_id;
+ u8 peer_macaddr[ETH_ALEN];
+ u32 dialog_id;
+} __packed;
+
+struct wmi_twt_del_dialog_params_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ u32 dialog_id;
+} __packed;
+
+struct wmi_twt_pause_dialog_params {
+ u32 vdev_id;
+ u8 peer_macaddr[ETH_ALEN];
+ u32 dialog_id;
+} __packed;
+
+struct wmi_twt_pause_dialog_params_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ u32 dialog_id;
+} __packed;
+
+struct wmi_twt_resume_dialog_params {
+ u32 vdev_id;
+ u8 peer_macaddr[ETH_ALEN];
+ u32 dialog_id;
+ u32 sp_offset_us;
+ u32 next_twt_size;
+} __packed;
+
+struct wmi_twt_resume_dialog_params_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ u32 dialog_id;
+ u32 sp_offset_us;
+ u32 next_twt_size;
+} __packed;
+
+struct wmi_obss_spatial_reuse_params_cmd {
+ u32 tlv_header;
+ u32 pdev_id;
+ u32 enable;
+ s32 obss_min;
+ s32 obss_max;
+ u32 vdev_id;
+} __packed;
+
+struct wmi_pdev_obss_pd_bitmap_cmd {
+ u32 tlv_header;
+ u32 pdev_id;
+ u32 bitmap[2];
+} __packed;
+
+#define ATH11K_BSS_COLOR_COLLISION_SCAN_PERIOD_MS 200
+#define ATH11K_OBSS_COLOR_COLLISION_DETECTION_DISABLE 0
+#define ATH11K_OBSS_COLOR_COLLISION_DETECTION 1
+
+#define ATH11K_BSS_COLOR_COLLISION_DETECTION_STA_PERIOD_MS 10000
+#define ATH11K_BSS_COLOR_COLLISION_DETECTION_AP_PERIOD_MS 5000
+
+enum wmi_bss_color_collision {
+ WMI_BSS_COLOR_COLLISION_DISABLE = 0,
+ WMI_BSS_COLOR_COLLISION_DETECTION,
+ WMI_BSS_COLOR_FREE_SLOT_TIMER_EXPIRY,
+ WMI_BSS_COLOR_FREE_SLOT_AVAILABLE,
+};
+
+struct wmi_obss_color_collision_cfg_params_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 flags;
+ u32 evt_type;
+ u32 current_bss_color;
+ u32 detection_period_ms;
+ u32 scan_period_ms;
+ u32 free_slot_expiry_time_ms;
+} __packed;
+
+struct wmi_bss_color_change_enable_params_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 enable;
+} __packed;
+
+struct wmi_obss_color_collision_event {
+ u32 vdev_id;
+ u32 evt_type;
+ u64 obss_color_bitmap;
+} __packed;
+
+#define ATH11K_IPV4_TH_SEED_SIZE 5
+#define ATH11K_IPV6_TH_SEED_SIZE 11
+
+struct ath11k_wmi_pdev_lro_config_cmd {
+ u32 tlv_header;
+ u32 lro_enable;
+ u32 res;
+ u32 th_4[ATH11K_IPV4_TH_SEED_SIZE];
+ u32 th_6[ATH11K_IPV6_TH_SEED_SIZE];
+ u32 pdev_id;
+} __packed;
+
+#define ATH11K_WMI_SPECTRAL_COUNT_DEFAULT 0
+#define ATH11K_WMI_SPECTRAL_PERIOD_DEFAULT 224
+#define ATH11K_WMI_SPECTRAL_PRIORITY_DEFAULT 1
+#define ATH11K_WMI_SPECTRAL_FFT_SIZE_DEFAULT 7
+#define ATH11K_WMI_SPECTRAL_GC_ENA_DEFAULT 1
+#define ATH11K_WMI_SPECTRAL_RESTART_ENA_DEFAULT 0
+#define ATH11K_WMI_SPECTRAL_NOISE_FLOOR_REF_DEFAULT -96
+#define ATH11K_WMI_SPECTRAL_INIT_DELAY_DEFAULT 80
+#define ATH11K_WMI_SPECTRAL_NB_TONE_THR_DEFAULT 12
+#define ATH11K_WMI_SPECTRAL_STR_BIN_THR_DEFAULT 8
+#define ATH11K_WMI_SPECTRAL_WB_RPT_MODE_DEFAULT 0
+#define ATH11K_WMI_SPECTRAL_RSSI_RPT_MODE_DEFAULT 0
+#define ATH11K_WMI_SPECTRAL_RSSI_THR_DEFAULT 0xf0
+#define ATH11K_WMI_SPECTRAL_PWR_FORMAT_DEFAULT 0
+#define ATH11K_WMI_SPECTRAL_RPT_MODE_DEFAULT 2
+#define ATH11K_WMI_SPECTRAL_BIN_SCALE_DEFAULT 1
+#define ATH11K_WMI_SPECTRAL_DBM_ADJ_DEFAULT 1
+#define ATH11K_WMI_SPECTRAL_CHN_MASK_DEFAULT 1
+
+struct ath11k_wmi_vdev_spectral_conf_param {
+ u32 vdev_id;
+ u32 scan_count;
+ u32 scan_period;
+ u32 scan_priority;
+ u32 scan_fft_size;
+ u32 scan_gc_ena;
+ u32 scan_restart_ena;
+ u32 scan_noise_floor_ref;
+ u32 scan_init_delay;
+ u32 scan_nb_tone_thr;
+ u32 scan_str_bin_thr;
+ u32 scan_wb_rpt_mode;
+ u32 scan_rssi_rpt_mode;
+ u32 scan_rssi_thr;
+ u32 scan_pwr_format;
+ u32 scan_rpt_mode;
+ u32 scan_bin_scale;
+ u32 scan_dbm_adj;
+ u32 scan_chn_mask;
+} __packed;
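+
+/* Initialization sketch (illustrative only): seeding a spectral scan
+ * configuration with the defaults defined above:
+ *
+ *     param.vdev_id = vdev_id;
+ *     param.scan_count = ATH11K_WMI_SPECTRAL_COUNT_DEFAULT;
+ *     param.scan_period = ATH11K_WMI_SPECTRAL_PERIOD_DEFAULT;
+ *     param.scan_priority = ATH11K_WMI_SPECTRAL_PRIORITY_DEFAULT;
+ */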
+
+struct ath11k_wmi_vdev_spectral_conf_cmd {
+ u32 tlv_header;
+ struct ath11k_wmi_vdev_spectral_conf_param param;
+} __packed;
+
+#define ATH11K_WMI_SPECTRAL_TRIGGER_CMD_TRIGGER 1
+#define ATH11K_WMI_SPECTRAL_TRIGGER_CMD_CLEAR 2
+#define ATH11K_WMI_SPECTRAL_ENABLE_CMD_ENABLE 1
+#define ATH11K_WMI_SPECTRAL_ENABLE_CMD_DISABLE 2
+
+struct ath11k_wmi_vdev_spectral_enable_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 trigger_cmd;
+ u32 enable_cmd;
+} __packed;
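+
+/* Illustrative host-side sequence (a sketch, not mandated by any firmware
+ * interface doc): fill ath11k_wmi_vdev_spectral_conf_param with the
+ * ATH11K_WMI_SPECTRAL_*_DEFAULT values above and send it via
+ * ath11k_wmi_vdev_spectral_conf(), then start the scan with
+ * ath11k_wmi_vdev_spectral_enable(ar, vdev_id,
+ * ATH11K_WMI_SPECTRAL_TRIGGER_CMD_TRIGGER,
+ * ATH11K_WMI_SPECTRAL_ENABLE_CMD_ENABLE); both helpers are declared
+ * later in this header.
+ */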
+
+struct ath11k_wmi_pdev_dma_ring_cfg_req_cmd {
+ u32 tlv_header;
+ u32 pdev_id;
+ u32 module_id; /* see enum wmi_direct_buffer_module */
+ u32 base_paddr_lo;
+ u32 base_paddr_hi;
+ u32 head_idx_paddr_lo;
+ u32 head_idx_paddr_hi;
+ u32 tail_idx_paddr_lo;
+ u32 tail_idx_paddr_hi;
+ u32 num_elems; /* Number of elems in the ring */
+ u32 buf_size; /* size of allocated buffer in bytes */
+
+ /* Number of wmi_dma_buf_release_entry packed together */
+ u32 num_resp_per_event;
+
+ /* Target should time out and send whatever resp
+ * it has if this time expires; units are milliseconds
+ */
+ u32 event_timeout_ms;
+} __packed;
+
+struct ath11k_wmi_dma_buf_release_fixed_param {
+ u32 pdev_id;
+ u32 module_id;
+ u32 num_buf_release_entry;
+ u32 num_meta_data_entry;
+} __packed;
+
+struct wmi_dma_buf_release_entry {
+ u32 tlv_header;
+ u32 paddr_lo;
+
+ /* Bits 11:0: address of data
+ * Bits 31:12: host context data
+ */
+ u32 paddr_hi;
+} __packed;
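+
+/* Sketch, assuming the bit split documented above: the full DMA address
+ * would be reconstructed as
+ * paddr_lo | ((u64)(paddr_hi & GENMASK(11, 0)) << 32)
+ * while the remaining bits of paddr_hi carry host cookie data.
+ */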
+
+#define WMI_SPECTRAL_META_INFO1_FREQ1 GENMASK(15, 0)
+#define WMI_SPECTRAL_META_INFO1_FREQ2 GENMASK(31, 16)
+
+#define WMI_SPECTRAL_META_INFO2_CHN_WIDTH GENMASK(7, 0)
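+
+/* GENMASK-packed fields like these are typically unpacked with FIELD_GET()
+ * from <linux/bitfield.h>; e.g. FIELD_GET(WMI_SPECTRAL_META_INFO1_FREQ1, info)
+ * would yield the low 16-bit frequency value (illustrative only).
+ */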
+
+struct wmi_dma_buf_release_meta_data {
+ u32 tlv_header;
+ s32 noise_floor[WMI_MAX_CHAINS];
+ u32 reset_delay;
+ u32 freq1;
+ u32 freq2;
+ u32 ch_width;
+} __packed;
+
+enum wmi_fils_discovery_cmd_type {
+ WMI_FILS_DISCOVERY_CMD,
+ WMI_UNSOL_BCAST_PROBE_RESP,
+};
+
+struct wmi_fils_discovery_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 interval;
+ u32 config; /* enum wmi_fils_discovery_cmd_type */
+} __packed;
+
+struct wmi_fils_discovery_tmpl_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 buf_len;
+} __packed;
+
+struct wmi_probe_tmpl_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 buf_len;
+} __packed;
+
+struct target_resource_config {
+ u32 num_vdevs;
+ u32 num_peers;
+ u32 num_active_peers;
+ u32 num_offload_peers;
+ u32 num_offload_reorder_buffs;
+ u32 num_peer_keys;
+ u32 num_tids;
+ u32 ast_skid_limit;
+ u32 tx_chain_mask;
+ u32 rx_chain_mask;
+ u32 rx_timeout_pri[4];
+ u32 rx_decap_mode;
+ u32 scan_max_pending_req;
+ u32 bmiss_offload_max_vdev;
+ u32 roam_offload_max_vdev;
+ u32 roam_offload_max_ap_profiles;
+ u32 num_mcast_groups;
+ u32 num_mcast_table_elems;
+ u32 mcast2ucast_mode;
+ u32 tx_dbg_log_size;
+ u32 num_wds_entries;
+ u32 dma_burst_size;
+ u32 mac_aggr_delim;
+ u32 rx_skip_defrag_timeout_dup_detection_check;
+ u32 vow_config;
+ u32 gtk_offload_max_vdev;
+ u32 num_msdu_desc;
+ u32 max_frag_entries;
+ u32 max_peer_ext_stats;
+ u32 smart_ant_cap;
+ u32 bk_minfree;
+ u32 be_minfree;
+ u32 vi_minfree;
+ u32 vo_minfree;
+ u32 rx_batchmode;
+ u32 tt_support;
+ u32 flag1;
+ u32 iphdr_pad_config;
+ u32 qwrap_config:16,
+ alloc_frag_desc_for_data_pkt:16;
+ u32 num_tdls_vdevs;
+ u32 num_tdls_conn_table_entries;
+ u32 beacon_tx_offload_max_vdev;
+ u32 num_multicast_filter_entries;
+ u32 num_wow_filters;
+ u32 num_keep_alive_pattern;
+ u32 keep_alive_pattern_size;
+ u32 max_tdls_concurrent_sleep_sta;
+ u32 max_tdls_concurrent_buffer_sta;
+ u32 wmi_send_separate;
+ u32 num_ocb_vdevs;
+ u32 num_ocb_channels;
+ u32 num_ocb_schedules;
+ u32 num_ns_ext_tuples_cfg;
+ u32 bpf_instruction_size;
+ u32 max_bssid_rx_filters;
+ u32 use_pdev_id;
+ u32 peer_map_unmap_v2_support;
+ u32 sched_params;
+ u32 twt_ap_pdev_count;
+ u32 twt_ap_sta_count;
+ u8 is_reg_cc_ext_event_supported;
+ u32 ema_max_vap_cnt;
+ u32 ema_max_profile_period;
+};
+
+enum wmi_debug_log_param {
+ WMI_DEBUG_LOG_PARAM_LOG_LEVEL = 0x1,
+ WMI_DEBUG_LOG_PARAM_VDEV_ENABLE,
+ WMI_DEBUG_LOG_PARAM_VDEV_DISABLE,
+ WMI_DEBUG_LOG_PARAM_VDEV_ENABLE_BITMAP,
+ WMI_DEBUG_LOG_PARAM_MOD_ENABLE_BITMAP,
+ WMI_DEBUG_LOG_PARAM_WOW_MOD_ENABLE_BITMAP,
+};
+
+struct wmi_debug_log_config_cmd_fixed_param {
+ u32 tlv_header;
+ u32 dbg_log_param;
+ u32 value;
+} __packed;
+
+#define MAX_RADIOS 3
+
+#define WMI_SERVICE_READY_TIMEOUT_HZ (5 * HZ)
+#define WMI_SEND_TIMEOUT_HZ (3 * HZ)
+
+enum ath11k_wmi_peer_ps_state {
+ WMI_PEER_PS_STATE_OFF,
+ WMI_PEER_PS_STATE_ON,
+ WMI_PEER_PS_STATE_DISABLED,
+};
+
+enum wmi_peer_ps_supported_bitmap {
+ /* Used to indicate that power save state change is valid */
+ WMI_PEER_PS_VALID = 0x1,
+ WMI_PEER_PS_STATE_TIMESTAMP = 0x2,
+};
+
+struct wmi_peer_sta_ps_state_chg_event {
+ struct wmi_mac_addr peer_macaddr;
+ u32 peer_ps_state;
+ u32 ps_supported_bitmap;
+ u32 peer_ps_valid;
+ u32 peer_ps_timestamp;
+} __packed;
+
+struct ath11k_wmi_base {
+ struct ath11k_base *ab;
+ struct ath11k_pdev_wmi wmi[MAX_RADIOS];
+ enum ath11k_htc_ep_id wmi_endpoint_id[MAX_RADIOS];
+ u32 max_msg_len[MAX_RADIOS];
+
+ struct completion service_ready;
+ struct completion unified_ready;
+ DECLARE_BITMAP(svc_map, WMI_MAX_EXT2_SERVICE);
+ wait_queue_head_t tx_credits_wq;
+ u32 num_mem_chunks;
+ u32 rx_decap_mode;
+ struct wmi_host_mem_chunk mem_chunks[WMI_MAX_MEM_REQS];
+
+ enum wmi_host_hw_mode_config_type preferred_hw_mode;
+ struct target_resource_config wlan_resource_config;
+
+ struct ath11k_targ_cap *targ_cap;
+};
+
+/* Definition of HW data filtering */
+enum hw_data_filter_type {
+ WMI_HW_DATA_FILTER_DROP_NON_ARP_BC = BIT(0),
+ WMI_HW_DATA_FILTER_DROP_NON_ICMPV6_MC = BIT(1),
+};
+
+struct wmi_hw_data_filter_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 enable;
+ u32 hw_filter_bitmap;
+} __packed;
+
+/* WOW structures */
+enum wmi_wow_wakeup_event {
+ WOW_BMISS_EVENT = 0,
+ WOW_BETTER_AP_EVENT,
+ WOW_DEAUTH_RECVD_EVENT,
+ WOW_MAGIC_PKT_RECVD_EVENT,
+ WOW_GTK_ERR_EVENT,
+ WOW_FOURWAY_HSHAKE_EVENT,
+ WOW_EAPOL_RECVD_EVENT,
+ WOW_NLO_DETECTED_EVENT,
+ WOW_DISASSOC_RECVD_EVENT,
+ WOW_PATTERN_MATCH_EVENT,
+ WOW_CSA_IE_EVENT,
+ WOW_PROBE_REQ_WPS_IE_EVENT,
+ WOW_AUTH_REQ_EVENT,
+ WOW_ASSOC_REQ_EVENT,
+ WOW_HTT_EVENT,
+ WOW_RA_MATCH_EVENT,
+ WOW_HOST_AUTO_SHUTDOWN_EVENT,
+ WOW_IOAC_MAGIC_EVENT,
+ WOW_IOAC_SHORT_EVENT,
+ WOW_IOAC_EXTEND_EVENT,
+ WOW_IOAC_TIMER_EVENT,
+ WOW_DFS_PHYERR_RADAR_EVENT,
+ WOW_BEACON_EVENT,
+ WOW_CLIENT_KICKOUT_EVENT,
+ WOW_EVENT_MAX,
+};
+
+enum wmi_wow_interface_cfg {
+ WOW_IFACE_PAUSE_ENABLED,
+ WOW_IFACE_PAUSE_DISABLED
+};
+
+#define C2S(x) case x: return #x
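+/* expands each entry into a case that returns its own name, e.g.
+ * C2S(WOW_BMISS_EVENT) becomes: case WOW_BMISS_EVENT: return "WOW_BMISS_EVENT";
+ */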
+
+static inline const char *wow_wakeup_event(enum wmi_wow_wakeup_event ev)
+{
+ switch (ev) {
+ C2S(WOW_BMISS_EVENT);
+ C2S(WOW_BETTER_AP_EVENT);
+ C2S(WOW_DEAUTH_RECVD_EVENT);
+ C2S(WOW_MAGIC_PKT_RECVD_EVENT);
+ C2S(WOW_GTK_ERR_EVENT);
+ C2S(WOW_FOURWAY_HSHAKE_EVENT);
+ C2S(WOW_EAPOL_RECVD_EVENT);
+ C2S(WOW_NLO_DETECTED_EVENT);
+ C2S(WOW_DISASSOC_RECVD_EVENT);
+ C2S(WOW_PATTERN_MATCH_EVENT);
+ C2S(WOW_CSA_IE_EVENT);
+ C2S(WOW_PROBE_REQ_WPS_IE_EVENT);
+ C2S(WOW_AUTH_REQ_EVENT);
+ C2S(WOW_ASSOC_REQ_EVENT);
+ C2S(WOW_HTT_EVENT);
+ C2S(WOW_RA_MATCH_EVENT);
+ C2S(WOW_HOST_AUTO_SHUTDOWN_EVENT);
+ C2S(WOW_IOAC_MAGIC_EVENT);
+ C2S(WOW_IOAC_SHORT_EVENT);
+ C2S(WOW_IOAC_EXTEND_EVENT);
+ C2S(WOW_IOAC_TIMER_EVENT);
+ C2S(WOW_DFS_PHYERR_RADAR_EVENT);
+ C2S(WOW_BEACON_EVENT);
+ C2S(WOW_CLIENT_KICKOUT_EVENT);
+ C2S(WOW_EVENT_MAX);
+ default:
+ return NULL;
+ }
+}
+
+enum wmi_wow_wake_reason {
+ WOW_REASON_UNSPECIFIED = -1,
+ WOW_REASON_NLOD = 0,
+ WOW_REASON_AP_ASSOC_LOST,
+ WOW_REASON_LOW_RSSI,
+ WOW_REASON_DEAUTH_RECVD,
+ WOW_REASON_DISASSOC_RECVD,
+ WOW_REASON_GTK_HS_ERR,
+ WOW_REASON_EAP_REQ,
+ WOW_REASON_FOURWAY_HS_RECV,
+ WOW_REASON_TIMER_INTR_RECV,
+ WOW_REASON_PATTERN_MATCH_FOUND,
+ WOW_REASON_RECV_MAGIC_PATTERN,
+ WOW_REASON_P2P_DISC,
+ WOW_REASON_WLAN_HB,
+ WOW_REASON_CSA_EVENT,
+ WOW_REASON_PROBE_REQ_WPS_IE_RECV,
+ WOW_REASON_AUTH_REQ_RECV,
+ WOW_REASON_ASSOC_REQ_RECV,
+ WOW_REASON_HTT_EVENT,
+ WOW_REASON_RA_MATCH,
+ WOW_REASON_HOST_AUTO_SHUTDOWN,
+ WOW_REASON_IOAC_MAGIC_EVENT,
+ WOW_REASON_IOAC_SHORT_EVENT,
+ WOW_REASON_IOAC_EXTEND_EVENT,
+ WOW_REASON_IOAC_TIMER_EVENT,
+ WOW_REASON_ROAM_HO,
+ WOW_REASON_DFS_PHYERR_RADADR_EVENT,
+ WOW_REASON_BEACON_RECV,
+ WOW_REASON_CLIENT_KICKOUT_EVENT,
+ WOW_REASON_PAGE_FAULT = 0x3a,
+ WOW_REASON_DEBUG_TEST = 0xFF,
+};
+
+static inline const char *wow_reason(enum wmi_wow_wake_reason reason)
+{
+ switch (reason) {
+ C2S(WOW_REASON_UNSPECIFIED);
+ C2S(WOW_REASON_NLOD);
+ C2S(WOW_REASON_AP_ASSOC_LOST);
+ C2S(WOW_REASON_LOW_RSSI);
+ C2S(WOW_REASON_DEAUTH_RECVD);
+ C2S(WOW_REASON_DISASSOC_RECVD);
+ C2S(WOW_REASON_GTK_HS_ERR);
+ C2S(WOW_REASON_EAP_REQ);
+ C2S(WOW_REASON_FOURWAY_HS_RECV);
+ C2S(WOW_REASON_TIMER_INTR_RECV);
+ C2S(WOW_REASON_PATTERN_MATCH_FOUND);
+ C2S(WOW_REASON_RECV_MAGIC_PATTERN);
+ C2S(WOW_REASON_P2P_DISC);
+ C2S(WOW_REASON_WLAN_HB);
+ C2S(WOW_REASON_CSA_EVENT);
+ C2S(WOW_REASON_PROBE_REQ_WPS_IE_RECV);
+ C2S(WOW_REASON_AUTH_REQ_RECV);
+ C2S(WOW_REASON_ASSOC_REQ_RECV);
+ C2S(WOW_REASON_HTT_EVENT);
+ C2S(WOW_REASON_RA_MATCH);
+ C2S(WOW_REASON_HOST_AUTO_SHUTDOWN);
+ C2S(WOW_REASON_IOAC_MAGIC_EVENT);
+ C2S(WOW_REASON_IOAC_SHORT_EVENT);
+ C2S(WOW_REASON_IOAC_EXTEND_EVENT);
+ C2S(WOW_REASON_IOAC_TIMER_EVENT);
+ C2S(WOW_REASON_ROAM_HO);
+ C2S(WOW_REASON_DFS_PHYERR_RADADR_EVENT);
+ C2S(WOW_REASON_BEACON_RECV);
+ C2S(WOW_REASON_CLIENT_KICKOUT_EVENT);
+ C2S(WOW_REASON_PAGE_FAULT);
+ C2S(WOW_REASON_DEBUG_TEST);
+ default:
+ return NULL;
+ }
+}
+
+#undef C2S
+
+struct wmi_wow_ev_arg {
+ u32 vdev_id;
+ u32 flag;
+ enum wmi_wow_wake_reason wake_reason;
+ u32 data_len;
+};
+
+enum wmi_tlv_pattern_type {
+ WOW_PATTERN_MIN = 0,
+ WOW_BITMAP_PATTERN = WOW_PATTERN_MIN,
+ WOW_IPV4_SYNC_PATTERN,
+ WOW_IPV6_SYNC_PATTERN,
+ WOW_WILD_CARD_PATTERN,
+ WOW_TIMER_PATTERN,
+ WOW_MAGIC_PATTERN,
+ WOW_IPV6_RA_PATTERN,
+ WOW_IOAC_PKT_PATTERN,
+ WOW_IOAC_TMR_PATTERN,
+ WOW_PATTERN_MAX
+};
+
+#define WOW_DEFAULT_BITMAP_PATTERN_SIZE 148
+#define WOW_DEFAULT_BITMASK_SIZE 148
+
+#define WOW_MIN_PATTERN_SIZE 1
+#define WOW_MAX_PATTERN_SIZE 148
+#define WOW_MAX_PKT_OFFSET 128
+#define WOW_HDR_LEN (sizeof(struct ieee80211_hdr_3addr) + \
+ sizeof(struct rfc1042_hdr))
+#define WOW_MAX_REDUCE (WOW_HDR_LEN - sizeof(struct ethhdr) - \
+ offsetof(struct ieee80211_hdr_3addr, addr1))
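+
+/* Worked out on these struct layouts: the 802.11 3-addr header is 24 bytes
+ * and the RFC 1042 header is 8 bytes, so WOW_HDR_LEN is 32 and
+ * WOW_MAX_REDUCE = 32 - sizeof(struct ethhdr) (14)
+ * - offsetof(struct ieee80211_hdr_3addr, addr1) (4) = 14 bytes.
+ */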
+
+struct wmi_wow_add_del_event_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 is_add;
+ u32 event_bitmap;
+} __packed;
+
+struct wmi_wow_enable_cmd {
+ u32 tlv_header;
+ u32 enable;
+ u32 pause_iface_config;
+ u32 flags;
+} __packed;
+
+struct wmi_wow_host_wakeup_ind {
+ u32 tlv_header;
+ u32 reserved;
+} __packed;
+
+struct wmi_tlv_wow_event_info {
+ u32 vdev_id;
+ u32 flag;
+ u32 wake_reason;
+ u32 data_len;
+} __packed;
+
+struct wmi_wow_bitmap_pattern {
+ u32 tlv_header;
+ u8 patternbuf[WOW_DEFAULT_BITMAP_PATTERN_SIZE];
+ u8 bitmaskbuf[WOW_DEFAULT_BITMASK_SIZE];
+ u32 pattern_offset;
+ u32 pattern_len;
+ u32 bitmask_len;
+ u32 pattern_id;
+} __packed;
+
+struct wmi_wow_add_pattern_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 pattern_id;
+ u32 pattern_type;
+} __packed;
+
+struct wmi_wow_del_pattern_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 pattern_id;
+ u32 pattern_type;
+} __packed;
+
+#define WMI_PNO_MAX_SCHED_SCAN_PLANS 2
+#define WMI_PNO_MAX_SCHED_SCAN_PLAN_INT 7200
+#define WMI_PNO_MAX_SCHED_SCAN_PLAN_ITRNS 100
+#define WMI_PNO_MAX_NETW_CHANNELS 26
+#define WMI_PNO_MAX_NETW_CHANNELS_EX 60
+#define WMI_PNO_MAX_SUPP_NETWORKS WLAN_SCAN_PARAMS_MAX_SSID
+#define WMI_PNO_MAX_IE_LENGTH WLAN_SCAN_PARAMS_MAX_IE_LEN
+
+/* size based on the dot11 declaration, without extra IEs, as we will not carry those for PNO */
+#define WMI_PNO_MAX_PB_REQ_SIZE 450
+
+#define WMI_PNO_24G_DEFAULT_CH 1
+#define WMI_PNO_5G_DEFAULT_CH 36
+
+#define WMI_ACTIVE_MAX_CHANNEL_TIME 40
+#define WMI_PASSIVE_MAX_CHANNEL_TIME 110
+
+/* SSID broadcast type */
+enum wmi_ssid_bcast_type {
+ BCAST_UNKNOWN = 0,
+ BCAST_NORMAL = 1,
+ BCAST_HIDDEN = 2,
+};
+
+#define WMI_NLO_MAX_SSIDS 16
+#define WMI_NLO_MAX_CHAN 48
+
+#define WMI_NLO_CONFIG_STOP BIT(0)
+#define WMI_NLO_CONFIG_START BIT(1)
+#define WMI_NLO_CONFIG_RESET BIT(2)
+#define WMI_NLO_CONFIG_SLOW_SCAN BIT(4)
+#define WMI_NLO_CONFIG_FAST_SCAN BIT(5)
+#define WMI_NLO_CONFIG_SSID_HIDE_EN BIT(6)
+
+/* This bit is used to indicate if EPNO or supplicant PNO is enabled.
+ * Only one of them can be enabled at a given time
+ */
+#define WMI_NLO_CONFIG_ENLO BIT(7)
+#define WMI_NLO_CONFIG_SCAN_PASSIVE BIT(8)
+#define WMI_NLO_CONFIG_ENLO_RESET BIT(9)
+#define WMI_NLO_CONFIG_SPOOFED_MAC_IN_PROBE_REQ BIT(10)
+#define WMI_NLO_CONFIG_RANDOM_SEQ_NO_IN_PROBE_REQ BIT(11)
+#define WMI_NLO_CONFIG_ENABLE_IE_WHITELIST_IN_PROBE_REQ BIT(12)
+#define WMI_NLO_CONFIG_ENABLE_CNLO_RSSI_CONFIG BIT(13)
+
+struct wmi_nlo_ssid_param {
+ u32 valid;
+ struct wmi_ssid ssid;
+} __packed;
+
+struct wmi_nlo_enc_param {
+ u32 valid;
+ u32 enc_type;
+} __packed;
+
+struct wmi_nlo_auth_param {
+ u32 valid;
+ u32 auth_type;
+} __packed;
+
+struct wmi_nlo_bcast_nw_param {
+ u32 valid;
+ u32 bcast_nw_type;
+} __packed;
+
+struct wmi_nlo_rssi_param {
+ u32 valid;
+ s32 rssi;
+} __packed;
+
+struct nlo_configured_parameters {
+ /* TLV tag and len */
+ u32 tlv_header;
+ struct wmi_nlo_ssid_param ssid;
+ struct wmi_nlo_enc_param enc_type;
+ struct wmi_nlo_auth_param auth_type;
+ struct wmi_nlo_rssi_param rssi_cond;
+
+ /* indicates if the SSID is hidden or not */
+ struct wmi_nlo_bcast_nw_param bcast_nw_type;
+} __packed;
+
+struct wmi_network_type {
+ struct wmi_ssid ssid;
+ u32 authentication;
+ u32 encryption;
+ u32 bcast_nw_type;
+ u8 channel_count;
+ u16 channels[WMI_PNO_MAX_NETW_CHANNELS_EX];
+ s32 rssi_threshold;
+};
+
+struct wmi_pno_scan_req {
+ u8 enable;
+ u8 vdev_id;
+ u8 uc_networks_count;
+ struct wmi_network_type a_networks[WMI_PNO_MAX_SUPP_NETWORKS];
+ u32 fast_scan_period;
+ u32 slow_scan_period;
+ u8 fast_scan_max_cycles;
+
+ bool do_passive_scan;
+
+ u32 delay_start_time;
+ u32 active_min_time;
+ u32 active_max_time;
+ u32 passive_min_time;
+ u32 passive_max_time;
+
+ /* mac address randomization attributes */
+ u32 enable_pno_scan_randomization;
+ u8 mac_addr[ETH_ALEN];
+ u8 mac_addr_mask[ETH_ALEN];
+};
+
+struct wmi_wow_nlo_config_cmd {
+ u32 tlv_header;
+ u32 flags;
+ u32 vdev_id;
+ u32 fast_scan_max_cycles;
+ u32 active_dwell_time;
+ u32 passive_dwell_time;
+ u32 probe_bundle_size;
+
+ /* ART = IRT */
+ u32 rest_time;
+
+ /* Max value that can be reached after SBM */
+ u32 max_rest_time;
+
+ /* SBM */
+ u32 scan_backoff_multiplier;
+
+ /* SCBM */
+ u32 fast_scan_period;
+
+ /* specific to windows */
+ u32 slow_scan_period;
+
+ u32 no_of_ssids;
+
+ u32 num_of_channels;
+
+ /* NLO scan start delay time in milliseconds */
+ u32 delay_start_time;
+
+ /* MAC Address to use in Probe Req as SA */
+ struct wmi_mac_addr mac_addr;
+
+ /* Mask on which MAC has to be randomized */
+ struct wmi_mac_addr mac_mask;
+
+ /* IE bitmap to use in Probe Req */
+ u32 ie_bitmap[8];
+
+ /* Number of vendor OUIs. In the TLV vendor_oui[] */
+ u32 num_vendor_oui;
+
+ /* Number of connected NLO band preferences */
+ u32 num_cnlo_band_pref;
+
+ /* The TLVs will follow.
+ * nlo_configured_parameters nlo_list[];
+ * u32 channel_list[num_of_channels];
+ */
+} __packed;
+
+#define WMI_MAX_NS_OFFLOADS 2
+#define WMI_MAX_ARP_OFFLOADS 2
+
+#define WMI_ARPOL_FLAGS_VALID BIT(0)
+#define WMI_ARPOL_FLAGS_MAC_VALID BIT(1)
+#define WMI_ARPOL_FLAGS_REMOTE_IP_VALID BIT(2)
+
+struct wmi_arp_offload_tuple {
+ u32 tlv_header;
+ u32 flags;
+ u8 target_ipaddr[4];
+ u8 remote_ipaddr[4];
+ struct wmi_mac_addr target_mac;
+} __packed;
+
+#define WMI_NSOL_FLAGS_VALID BIT(0)
+#define WMI_NSOL_FLAGS_MAC_VALID BIT(1)
+#define WMI_NSOL_FLAGS_REMOTE_IP_VALID BIT(2)
+#define WMI_NSOL_FLAGS_IS_IPV6_ANYCAST BIT(3)
+
+#define WMI_NSOL_MAX_TARGET_IPS 2
+
+struct wmi_ns_offload_tuple {
+ u32 tlv_header;
+ u32 flags;
+ u8 target_ipaddr[WMI_NSOL_MAX_TARGET_IPS][16];
+ u8 solicitation_ipaddr[16];
+ u8 remote_ipaddr[16];
+ struct wmi_mac_addr target_mac;
+} __packed;
+
+struct wmi_set_arp_ns_offload_cmd {
+ u32 tlv_header;
+ u32 flags;
+ u32 vdev_id;
+ u32 num_ns_ext_tuples;
+ /* The TLVs follow:
+ * wmi_ns_offload_tuple ns_tuples[WMI_MAX_NS_OFFLOADS];
+ * wmi_arp_offload_tuple arp_tuples[WMI_MAX_ARP_OFFLOADS];
+ * wmi_ns_offload_tuple ns_ext_tuples[num_ns_ext_tuples];
+ */
+} __packed;
+
+#define GTK_OFFLOAD_OPCODE_MASK 0xFF000000
+#define GTK_OFFLOAD_ENABLE_OPCODE 0x01000000
+#define GTK_OFFLOAD_DISABLE_OPCODE 0x02000000
+#define GTK_OFFLOAD_REQUEST_STATUS_OPCODE 0x04000000
+
+#define GTK_OFFLOAD_KEK_BYTES 16
+#define GTK_OFFLOAD_KCK_BYTES 16
+#define GTK_REPLAY_COUNTER_BYTES 8
+#define WMI_MAX_KEY_LEN 32
+#define IGTK_PN_SIZE 6
+
+struct wmi_replayc_cnt {
+ union {
+ u8 counter[GTK_REPLAY_COUNTER_BYTES];
+ struct {
+ u32 word0;
+ u32 word1;
+ } __packed;
+ } __packed;
+} __packed;
+
+struct wmi_gtk_offload_status_event {
+ u32 vdev_id;
+ u32 flags;
+ u32 refresh_cnt;
+ struct wmi_replayc_cnt replay_ctr;
+ u8 igtk_key_index;
+ u8 igtk_key_length;
+ u8 igtk_key_rsc[IGTK_PN_SIZE];
+ u8 igtk_key[WMI_MAX_KEY_LEN];
+ u8 gtk_key_index;
+ u8 gtk_key_length;
+ u8 gtk_key_rsc[GTK_REPLAY_COUNTER_BYTES];
+ u8 gtk_key[WMI_MAX_KEY_LEN];
+} __packed;
+
+struct wmi_gtk_rekey_offload_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 flags;
+ u8 kek[GTK_OFFLOAD_KEK_BYTES];
+ u8 kck[GTK_OFFLOAD_KCK_BYTES];
+ u8 replay_ctr[GTK_REPLAY_COUNTER_BYTES];
+} __packed;
+
+#define BIOS_SAR_TABLE_LEN (22)
+#define BIOS_SAR_RSVD1_LEN (6)
+#define BIOS_SAR_RSVD2_LEN (18)
+
+struct wmi_pdev_set_sar_table_cmd {
+ u32 tlv_header;
+ u32 pdev_id;
+ u32 sar_len;
+ u32 rsvd_len;
+} __packed;
+
+struct wmi_pdev_set_geo_table_cmd {
+ u32 tlv_header;
+ u32 pdev_id;
+ u32 rsvd_len;
+} __packed;
+
+struct wmi_sta_keepalive_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 enabled;
+
+ /* WMI_STA_KEEPALIVE_METHOD_ */
+ u32 method;
+
+ /* in seconds */
+ u32 interval;
+
+ /* following this structure is the TLV for struct
+ * wmi_sta_keepalive_arp_resp
+ */
+} __packed;
+
+struct wmi_sta_keepalive_arp_resp {
+ u32 tlv_header;
+ u32 src_ip4_addr;
+ u32 dest_ip4_addr;
+ struct wmi_mac_addr dest_mac_addr;
+} __packed;
+
+struct wmi_sta_keepalive_arg {
+ u32 vdev_id;
+ u32 enabled;
+ u32 method;
+ u32 interval;
+ u32 src_ip4_addr;
+ u32 dest_ip4_addr;
+ const u8 dest_mac_addr[ETH_ALEN];
+};
+
+enum wmi_sta_keepalive_method {
+ WMI_STA_KEEPALIVE_METHOD_NULL_FRAME = 1,
+ WMI_STA_KEEPALIVE_METHOD_UNSOLICITED_ARP_RESPONSE = 2,
+ WMI_STA_KEEPALIVE_METHOD_ETHERNET_LOOPBACK = 3,
+ WMI_STA_KEEPALIVE_METHOD_GRATUITOUS_ARP_REQUEST = 4,
+ WMI_STA_KEEPALIVE_METHOD_MGMT_VENDOR_ACTION = 5,
+};
+
+#define WMI_STA_KEEPALIVE_INTERVAL_DEFAULT 30
+#define WMI_STA_KEEPALIVE_INTERVAL_DISABLE 0
+
+enum wmi_wmm_params_type {
+ WMI_WMM_PARAM_TYPE_LEGACY = 0,
+ WMI_WMM_PARAM_TYPE_11AX_MU_EDCA = 1,
+};
+
+const void **ath11k_wmi_tlv_parse_alloc(struct ath11k_base *ab,
+ struct sk_buff *skb, gfp_t gfp);
+int ath11k_wmi_cmd_send(struct ath11k_pdev_wmi *wmi, struct sk_buff *skb,
+ u32 cmd_id);
+struct sk_buff *ath11k_wmi_alloc_skb(struct ath11k_wmi_base *wmi_sc, u32 len);
+int ath11k_wmi_mgmt_send(struct ath11k *ar, u32 vdev_id, u32 buf_id,
+ struct sk_buff *frame);
+int ath11k_wmi_p2p_go_bcn_ie(struct ath11k *ar, u32 vdev_id,
+ const u8 *p2p_ie);
+int ath11k_wmi_bcn_tmpl(struct ath11k *ar, u32 vdev_id,
+ struct ieee80211_mutable_offsets *offs,
+ struct sk_buff *bcn, u32 ema_param);
+int ath11k_wmi_vdev_down(struct ath11k *ar, u8 vdev_id);
+int ath11k_wmi_vdev_up(struct ath11k *ar, u32 vdev_id, u32 aid,
+ const u8 *bssid, u8 *tx_bssid, u32 nontx_profile_idx,
+ u32 nontx_profile_cnt);
+int ath11k_wmi_vdev_stop(struct ath11k *ar, u8 vdev_id);
+int ath11k_wmi_vdev_start(struct ath11k *ar, struct wmi_vdev_start_req_arg *arg,
+ bool restart);
+int ath11k_wmi_set_peer_param(struct ath11k *ar, const u8 *peer_addr,
+ u32 vdev_id, u32 param_id, u32 param_val);
+int ath11k_wmi_pdev_set_param(struct ath11k *ar, u32 param_id,
+ u32 param_value, u8 pdev_id);
+int ath11k_wmi_pdev_set_ps_mode(struct ath11k *ar, int vdev_id,
+ enum wmi_sta_ps_mode psmode);
+int ath11k_wmi_wait_for_unified_ready(struct ath11k_base *ab);
+int ath11k_wmi_cmd_init(struct ath11k_base *ab);
+int ath11k_wmi_wait_for_service_ready(struct ath11k_base *ab);
+int ath11k_wmi_connect(struct ath11k_base *ab);
+int ath11k_wmi_pdev_attach(struct ath11k_base *ab,
+ u8 pdev_id);
+int ath11k_wmi_attach(struct ath11k_base *ab);
+void ath11k_wmi_detach(struct ath11k_base *ab);
+int ath11k_wmi_vdev_create(struct ath11k *ar, u8 *macaddr,
+ struct vdev_create_params *param);
+int ath11k_wmi_peer_rx_reorder_queue_setup(struct ath11k *ar, int vdev_id,
+ const u8 *addr, dma_addr_t paddr,
+ u8 tid, u8 ba_window_size_valid,
+ u32 ba_window_size);
+int ath11k_wmi_send_peer_create_cmd(struct ath11k *ar,
+ struct peer_create_params *param);
+int ath11k_wmi_vdev_set_param_cmd(struct ath11k *ar, u32 vdev_id,
+ u32 param_id, u32 param_value);
+
+int ath11k_wmi_set_sta_ps_param(struct ath11k *ar, u32 vdev_id,
+ u32 param, u32 param_value);
+int ath11k_wmi_force_fw_hang_cmd(struct ath11k *ar, u32 type, u32 delay_time_ms);
+int ath11k_wmi_send_peer_delete_cmd(struct ath11k *ar,
+ const u8 *peer_addr, u8 vdev_id);
+int ath11k_wmi_vdev_delete(struct ath11k *ar, u8 vdev_id);
+void ath11k_wmi_start_scan_init(struct ath11k *ar, struct scan_req_params *arg);
+int ath11k_wmi_send_scan_start_cmd(struct ath11k *ar,
+ struct scan_req_params *params);
+int ath11k_wmi_send_scan_stop_cmd(struct ath11k *ar,
+ struct scan_cancel_param *param);
+int ath11k_wmi_send_wmm_update_cmd_tlv(struct ath11k *ar, u32 vdev_id,
+ struct wmi_wmm_params_all_arg *param,
+ enum wmi_wmm_params_type wmm_param_type);
+int ath11k_wmi_pdev_suspend(struct ath11k *ar, u32 suspend_opt,
+ u32 pdev_id);
+int ath11k_wmi_pdev_resume(struct ath11k *ar, u32 pdev_id);
+
+int ath11k_wmi_send_peer_assoc_cmd(struct ath11k *ar,
+ struct peer_assoc_params *param);
+int ath11k_wmi_vdev_install_key(struct ath11k *ar,
+ struct wmi_vdev_install_key_arg *arg);
+int ath11k_wmi_pdev_bss_chan_info_request(struct ath11k *ar,
+ enum wmi_bss_chan_info_req_type type);
+int ath11k_wmi_send_stats_request_cmd(struct ath11k *ar,
+ struct stats_request_params *param);
+int ath11k_wmi_send_pdev_temperature_cmd(struct ath11k *ar);
+int ath11k_wmi_send_peer_flush_tids_cmd(struct ath11k *ar,
+ u8 peer_addr[ETH_ALEN],
+ struct peer_flush_params *param);
+int ath11k_wmi_send_set_ap_ps_param_cmd(struct ath11k *ar, u8 *peer_addr,
+ struct ap_ps_params *param);
+int ath11k_wmi_send_scan_chan_list_cmd(struct ath11k *ar,
+ struct scan_chan_list_params *chan_list);
+int ath11k_wmi_send_dfs_phyerr_offload_enable_cmd(struct ath11k *ar,
+ u32 pdev_id);
+int ath11k_wmi_addba_clear_resp(struct ath11k *ar, u32 vdev_id, const u8 *mac);
+int ath11k_wmi_addba_send(struct ath11k *ar, u32 vdev_id, const u8 *mac,
+ u32 tid, u32 buf_size);
+int ath11k_wmi_addba_set_resp(struct ath11k *ar, u32 vdev_id, const u8 *mac,
+ u32 tid, u32 status);
+int ath11k_wmi_delba_send(struct ath11k *ar, u32 vdev_id, const u8 *mac,
+ u32 tid, u32 initiator, u32 reason);
+int ath11k_wmi_send_bcn_offload_control_cmd(struct ath11k *ar,
+ u32 vdev_id, u32 bcn_ctrl_op);
+int ath11k_wmi_send_set_current_country_cmd(struct ath11k *ar,
+ struct wmi_set_current_country_params *param);
+int
+ath11k_wmi_send_init_country_cmd(struct ath11k *ar,
+ struct wmi_init_country_params init_cc_param);
+
+int ath11k_wmi_send_11d_scan_start_cmd(struct ath11k *ar,
+ struct wmi_11d_scan_start_params *param);
+int ath11k_wmi_send_11d_scan_stop_cmd(struct ath11k *ar, u32 vdev_id);
+
+int
+ath11k_wmi_send_thermal_mitigation_param_cmd(struct ath11k *ar,
+ struct thermal_mitigation_params *param);
+int ath11k_wmi_pdev_pktlog_enable(struct ath11k *ar, u32 pktlog_filter);
+int ath11k_wmi_pdev_pktlog_disable(struct ath11k *ar);
+int ath11k_wmi_pdev_peer_pktlog_filter(struct ath11k *ar, u8 *addr, u8 enable);
+int
+ath11k_wmi_rx_reord_queue_remove(struct ath11k *ar,
+ struct rx_reorder_queue_remove_params *param);
+int ath11k_wmi_send_pdev_set_regdomain(struct ath11k *ar,
+ struct pdev_set_regdomain_params *param);
+int ath11k_wmi_pull_fw_stats(struct ath11k_base *ab, struct sk_buff *skb,
+ struct ath11k_fw_stats *stats);
+void ath11k_wmi_fw_stats_fill(struct ath11k *ar,
+ struct ath11k_fw_stats *fw_stats, u32 stats_id,
+ char *buf);
+int ath11k_wmi_simulate_radar(struct ath11k *ar);
+void ath11k_wmi_fill_default_twt_params(struct wmi_twt_enable_params *twt_params);
+int ath11k_wmi_send_twt_enable_cmd(struct ath11k *ar, u32 pdev_id,
+ struct wmi_twt_enable_params *params);
+int ath11k_wmi_send_twt_disable_cmd(struct ath11k *ar, u32 pdev_id);
+int ath11k_wmi_send_twt_add_dialog_cmd(struct ath11k *ar,
+ struct wmi_twt_add_dialog_params *params);
+int ath11k_wmi_send_twt_del_dialog_cmd(struct ath11k *ar,
+ struct wmi_twt_del_dialog_params *params);
+int ath11k_wmi_send_twt_pause_dialog_cmd(struct ath11k *ar,
+ struct wmi_twt_pause_dialog_params *params);
+int ath11k_wmi_send_twt_resume_dialog_cmd(struct ath11k *ar,
+ struct wmi_twt_resume_dialog_params *params);
+int ath11k_wmi_send_obss_spr_cmd(struct ath11k *ar, u32 vdev_id,
+ struct ieee80211_he_obss_pd *he_obss_pd);
+int ath11k_wmi_pdev_set_srg_bss_color_bitmap(struct ath11k *ar, u32 *bitmap);
+int ath11k_wmi_pdev_set_srg_patial_bssid_bitmap(struct ath11k *ar, u32 *bitmap);
+int ath11k_wmi_pdev_srg_obss_color_enable_bitmap(struct ath11k *ar,
+ u32 *bitmap);
+int ath11k_wmi_pdev_srg_obss_bssid_enable_bitmap(struct ath11k *ar,
+ u32 *bitmap);
+int ath11k_wmi_pdev_non_srg_obss_color_enable_bitmap(struct ath11k *ar,
+ u32 *bitmap);
+int ath11k_wmi_pdev_non_srg_obss_bssid_enable_bitmap(struct ath11k *ar,
+ u32 *bitmap);
+int ath11k_wmi_send_obss_color_collision_cfg_cmd(struct ath11k *ar, u32 vdev_id,
+ u8 bss_color, u32 period,
+ bool enable);
+int ath11k_wmi_send_bss_color_change_enable_cmd(struct ath11k *ar, u32 vdev_id,
+ bool enable);
+int ath11k_wmi_pdev_lro_cfg(struct ath11k *ar, int pdev_id);
+int ath11k_wmi_pdev_dma_ring_cfg(struct ath11k *ar,
+ struct ath11k_wmi_pdev_dma_ring_cfg_req_cmd *param);
+int ath11k_wmi_vdev_spectral_enable(struct ath11k *ar, u32 vdev_id,
+ u32 trigger, u32 enable);
+int ath11k_wmi_vdev_spectral_conf(struct ath11k *ar,
+ struct ath11k_wmi_vdev_spectral_conf_param *param);
+int ath11k_wmi_fils_discovery_tmpl(struct ath11k *ar, u32 vdev_id,
+ struct sk_buff *tmpl);
+int ath11k_wmi_fils_discovery(struct ath11k *ar, u32 vdev_id, u32 interval,
+ bool unsol_bcast_probe_resp_enabled);
+int ath11k_wmi_probe_resp_tmpl(struct ath11k *ar, u32 vdev_id,
+ struct sk_buff *tmpl);
+int ath11k_wmi_set_hw_mode(struct ath11k_base *ab,
+ enum wmi_host_hw_mode_config_type mode);
+int ath11k_wmi_wow_host_wakeup_ind(struct ath11k *ar);
+int ath11k_wmi_wow_enable(struct ath11k *ar);
+int ath11k_wmi_scan_prob_req_oui(struct ath11k *ar,
+ const u8 mac_addr[ETH_ALEN]);
+int ath11k_wmi_fw_dbglog_cfg(struct ath11k *ar, u32 *module_id_bitmap,
+ struct ath11k_fw_dbglog *dbglog);
+int ath11k_wmi_wow_config_pno(struct ath11k *ar, u32 vdev_id,
+ struct wmi_pno_scan_req *pno_scan);
+int ath11k_wmi_wow_del_pattern(struct ath11k *ar, u32 vdev_id, u32 pattern_id);
+int ath11k_wmi_wow_add_pattern(struct ath11k *ar, u32 vdev_id, u32 pattern_id,
+ const u8 *pattern, const u8 *mask,
+ int pattern_len, int pattern_offset);
+int ath11k_wmi_wow_add_wakeup_event(struct ath11k *ar, u32 vdev_id,
+ enum wmi_wow_wakeup_event event,
+ u32 enable);
+int ath11k_wmi_hw_data_filter_cmd(struct ath11k *ar, u32 vdev_id,
+ u32 filter_bitmap, bool enable);
+int ath11k_wmi_arp_ns_offload(struct ath11k *ar,
+ struct ath11k_vif *arvif, bool enable);
+int ath11k_wmi_gtk_rekey_offload(struct ath11k *ar,
+ struct ath11k_vif *arvif, bool enable);
+int ath11k_wmi_gtk_rekey_getinfo(struct ath11k *ar,
+ struct ath11k_vif *arvif);
+int ath11k_wmi_pdev_set_bios_sar_table_param(struct ath11k *ar, const u8 *sar_val);
+int ath11k_wmi_pdev_set_bios_geo_table_param(struct ath11k *ar);
+int ath11k_wmi_sta_keepalive(struct ath11k *ar,
+ const struct wmi_sta_keepalive_arg *arg);
+bool ath11k_wmi_supports_6ghz_cc_ext(struct ath11k *ar);
+int ath11k_wmi_send_vdev_set_tpc_power(struct ath11k *ar,
+ u32 vdev_id,
+ struct ath11k_reg_tpc_power_info *param);
+
+#endif
diff --git a/drivers/net/wireless/ath/ath11k/wow.c b/drivers/net/wireless/ath/ath11k/wow.c
new file mode 100644
index 000000000000..b6f08755129f
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/wow.c
@@ -0,0 +1,883 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2020 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/delay.h>
+
+#include "mac.h"
+
+#include <net/mac80211.h>
+#include "core.h"
+#include "hif.h"
+#include "debug.h"
+#include "wmi.h"
+#include "wow.h"
+#include "dp_rx.h"
+
+static const struct wiphy_wowlan_support ath11k_wowlan_support = {
+ .flags = WIPHY_WOWLAN_DISCONNECT |
+ WIPHY_WOWLAN_MAGIC_PKT |
+ WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
+ WIPHY_WOWLAN_GTK_REKEY_FAILURE,
+ .pattern_min_len = WOW_MIN_PATTERN_SIZE,
+ .pattern_max_len = WOW_MAX_PATTERN_SIZE,
+ .max_pkt_offset = WOW_MAX_PKT_OFFSET,
+};
+
+int ath11k_wow_enable(struct ath11k_base *ab)
+{
+ struct ath11k *ar = ath11k_ab_to_ar(ab, 0);
+ int i, ret;
+
+ clear_bit(ATH11K_FLAG_HTC_SUSPEND_COMPLETE, &ab->dev_flags);
+
+ for (i = 0; i < ATH11K_WOW_RETRY_NUM; i++) {
+ reinit_completion(&ab->htc_suspend);
+
+ ret = ath11k_wmi_wow_enable(ar);
+ if (ret) {
+ ath11k_warn(ab, "failed to issue wow enable: %d\n", ret);
+ return ret;
+ }
+
+ ret = wait_for_completion_timeout(&ab->htc_suspend, 3 * HZ);
+ if (ret == 0) {
+ ath11k_warn(ab,
+ "timed out while waiting for htc suspend completion\n");
+ return -ETIMEDOUT;
+ }
+
+ if (test_bit(ATH11K_FLAG_HTC_SUSPEND_COMPLETE, &ab->dev_flags))
+ /* success, suspend complete received */
+ return 0;
+
+ ath11k_warn(ab, "htc suspend not complete, retrying (try %d)\n",
+ i);
+ msleep(ATH11K_WOW_RETRY_WAIT_MS);
+ }
+
+ ath11k_warn(ab, "htc suspend not complete, failing after %d tries\n", i);
+
+ return -ETIMEDOUT;
+}
+
+int ath11k_wow_wakeup(struct ath11k_base *ab)
+{
+ struct ath11k *ar = ath11k_ab_to_ar(ab, 0);
+ int ret;
+
+ /* In the case of WCN6750, WoW wakeup is done
+ * by sending an SMP2P power save exit message
+ * to the target processor.
+ */
+ if (ab->hw_params.smp2p_wow_exit)
+ return 0;
+
+ reinit_completion(&ab->wow.wakeup_completed);
+
+ ret = ath11k_wmi_wow_host_wakeup_ind(ar);
+ if (ret) {
+ ath11k_warn(ab, "failed to send wow wakeup indication: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = wait_for_completion_timeout(&ab->wow.wakeup_completed, 3 * HZ);
+ if (ret == 0) {
+ ath11k_warn(ab, "timed out while waiting for wow wakeup completion\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int ath11k_wow_vif_cleanup(struct ath11k_vif *arvif)
+{
+ struct ath11k *ar = arvif->ar;
+ int i, ret;
+
+ for (i = 0; i < WOW_EVENT_MAX; i++) {
+ ret = ath11k_wmi_wow_add_wakeup_event(ar, arvif->vdev_id, i, 0);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to issue wow wakeup for event %s on vdev %i: %d\n",
+ wow_wakeup_event(i), arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ for (i = 0; i < ar->wow.max_num_patterns; i++) {
+ ret = ath11k_wmi_wow_del_pattern(ar, arvif->vdev_id, i);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to delete wow pattern %d for vdev %i: %d\n",
+ i, arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ath11k_wow_cleanup(struct ath11k *ar)
+{
+ struct ath11k_vif *arvif;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ ret = ath11k_wow_vif_cleanup(arvif);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to clean wow wakeups on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/* Convert an 802.3 format to an 802.11 format.
+ * +------------+-----------+--------+----------------+
+ * 802.3: |dest mac(6B)|src mac(6B)|type(2B)| body... |
+ * +------------+-----------+--------+----------------+
+ * |__ |_______ |____________ |________
+ * | | | |
+ * +--+------------+----+-----------+---------------+-----------+
+ * 802.11: |4B|dest mac(6B)| 6B |src mac(6B)| 8B |type(2B)| body... |
+ * +--+------------+----+-----------+---------------+-----------+
+ */
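+/* Worked example of the offset remapping below: a pattern starting at the
+ * destination MAC (pkt_offset 0) moves to offset 4, since addr1 follows the
+ * 4 bytes of frame control + duration; one starting at h_proto
+ * (pkt_offset 12) moves to 12 + 24 + 8 - 14 = 30, the snap_type field of
+ * the RFC 1042 header.
+ */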
+/* clang stack usage explodes if this is inlined */
+static noinline_for_stack
+void ath11k_wow_convert_8023_to_80211(struct cfg80211_pkt_pattern *new,
+ const struct cfg80211_pkt_pattern *old)
+{
+ u8 hdr_8023_pattern[ETH_HLEN] = {};
+ u8 hdr_8023_bit_mask[ETH_HLEN] = {};
+ u8 hdr_80211_pattern[WOW_HDR_LEN] = {};
+ u8 hdr_80211_bit_mask[WOW_HDR_LEN] = {};
+ u8 bytemask[WOW_MAX_PATTERN_SIZE] = {};
+
+ int total_len = old->pkt_offset + old->pattern_len;
+ int hdr_80211_end_offset;
+
+ struct ieee80211_hdr_3addr *new_hdr_pattern =
+ (struct ieee80211_hdr_3addr *)hdr_80211_pattern;
+ struct ieee80211_hdr_3addr *new_hdr_mask =
+ (struct ieee80211_hdr_3addr *)hdr_80211_bit_mask;
+ struct ethhdr *old_hdr_pattern = (struct ethhdr *)hdr_8023_pattern;
+ struct ethhdr *old_hdr_mask = (struct ethhdr *)hdr_8023_bit_mask;
+ int hdr_len = sizeof(*new_hdr_pattern);
+
+ struct rfc1042_hdr *new_rfc_pattern =
+ (struct rfc1042_hdr *)(hdr_80211_pattern + hdr_len);
+ struct rfc1042_hdr *new_rfc_mask =
+ (struct rfc1042_hdr *)(hdr_80211_bit_mask + hdr_len);
+ int rfc_len = sizeof(*new_rfc_pattern);
+ int i;
+
+ /* convert bitmask to bytemask */
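+ /* (cfg80211 packs one bit per pattern byte: mask[0] = 0x05 marks
+ * pattern bytes 0 and 2 as significant, i.e.
+ * bytemask[0] = bytemask[2] = 0xff)
+ */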
+ for (i = 0; i < old->pattern_len; i++)
+ if (old->mask[i / 8] & BIT(i % 8))
+ bytemask[i] = 0xff;
+
+ memcpy(hdr_8023_pattern + old->pkt_offset,
+ old->pattern, ETH_HLEN - old->pkt_offset);
+ memcpy(hdr_8023_bit_mask + old->pkt_offset,
+ bytemask, ETH_HLEN - old->pkt_offset);
+
+ /* Copy destination address */
+ memcpy(new_hdr_pattern->addr1, old_hdr_pattern->h_dest, ETH_ALEN);
+ memcpy(new_hdr_mask->addr1, old_hdr_mask->h_dest, ETH_ALEN);
+
+ /* Copy source address */
+ memcpy(new_hdr_pattern->addr3, old_hdr_pattern->h_source, ETH_ALEN);
+ memcpy(new_hdr_mask->addr3, old_hdr_mask->h_source, ETH_ALEN);
+
+ /* Copy logic link type */
+ memcpy(&new_rfc_pattern->snap_type,
+ &old_hdr_pattern->h_proto,
+ sizeof(old_hdr_pattern->h_proto));
+ memcpy(&new_rfc_mask->snap_type,
+ &old_hdr_mask->h_proto,
+ sizeof(old_hdr_mask->h_proto));
+
+ /* Compute new pkt_offset */
+ if (old->pkt_offset < ETH_ALEN)
+ new->pkt_offset = old->pkt_offset +
+ offsetof(struct ieee80211_hdr_3addr, addr1);
+ else if (old->pkt_offset < offsetof(struct ethhdr, h_proto))
+ new->pkt_offset = old->pkt_offset +
+ offsetof(struct ieee80211_hdr_3addr, addr3) -
+ offsetof(struct ethhdr, h_source);
+ else
+ new->pkt_offset = old->pkt_offset + hdr_len + rfc_len - ETH_HLEN;
+
+ /* Compute new hdr end offset */
+ if (total_len > ETH_HLEN)
+ hdr_80211_end_offset = hdr_len + rfc_len;
+ else if (total_len > offsetof(struct ethhdr, h_proto))
+ hdr_80211_end_offset = hdr_len + rfc_len + total_len - ETH_HLEN;
+ else if (total_len > ETH_ALEN)
+ hdr_80211_end_offset = total_len - ETH_ALEN +
+ offsetof(struct ieee80211_hdr_3addr, addr3);
+ else
+ hdr_80211_end_offset = total_len +
+ offsetof(struct ieee80211_hdr_3addr, addr1);
+
+ new->pattern_len = hdr_80211_end_offset - new->pkt_offset;
+
+ memcpy((u8 *)new->pattern,
+ hdr_80211_pattern + new->pkt_offset,
+ new->pattern_len);
+ memcpy((u8 *)new->mask,
+ hdr_80211_bit_mask + new->pkt_offset,
+ new->pattern_len);
+
+ if (total_len > ETH_HLEN) {
+ /* Copy frame body */
+ memcpy((u8 *)new->pattern + new->pattern_len,
+ (void *)old->pattern + ETH_HLEN - old->pkt_offset,
+ total_len - ETH_HLEN);
+ memcpy((u8 *)new->mask + new->pattern_len,
+ bytemask + ETH_HLEN - old->pkt_offset,
+ total_len - ETH_HLEN);
+
+ new->pattern_len += total_len - ETH_HLEN;
+ }
+}
+
+static int ath11k_wmi_pno_check_and_convert(struct ath11k *ar, u32 vdev_id,
+ struct cfg80211_sched_scan_request *nd_config,
+ struct wmi_pno_scan_req *pno)
+{
+ int i, j;
+ u8 ssid_len;
+
+ pno->enable = 1;
+ pno->vdev_id = vdev_id;
+ pno->uc_networks_count = nd_config->n_match_sets;
+
+ if (!pno->uc_networks_count ||
+ pno->uc_networks_count > WMI_PNO_MAX_SUPP_NETWORKS)
+ return -EINVAL;
+
+ if (nd_config->n_channels > WMI_PNO_MAX_NETW_CHANNELS_EX)
+ return -EINVAL;
+
+ /* Fill in the per-profile params */
+ for (i = 0; i < pno->uc_networks_count; i++) {
+ ssid_len = nd_config->match_sets[i].ssid.ssid_len;
+
+ if (ssid_len == 0 || ssid_len > 32)
+ return -EINVAL;
+
+ pno->a_networks[i].ssid.ssid_len = ssid_len;
+
+ memcpy(pno->a_networks[i].ssid.ssid,
+ nd_config->match_sets[i].ssid.ssid,
+ nd_config->match_sets[i].ssid.ssid_len);
+ pno->a_networks[i].authentication = 0;
+ pno->a_networks[i].encryption = 0;
+ pno->a_networks[i].bcast_nw_type = 0;
+
+ /* Copy the list of valid channels into the request */
+ pno->a_networks[i].channel_count = nd_config->n_channels;
+ pno->a_networks[i].rssi_threshold = nd_config->match_sets[i].rssi_thold;
+
+ for (j = 0; j < nd_config->n_channels; j++) {
+ pno->a_networks[i].channels[j] =
+ nd_config->channels[j]->center_freq;
+ }
+ }
+
+ /* set scan to passive if no SSIDs are specified in the request */
+ if (nd_config->n_ssids == 0)
+ pno->do_passive_scan = true;
+ else
+ pno->do_passive_scan = false;
+
+ for (i = 0; i < nd_config->n_ssids; i++) {
+ j = 0;
+ while (j < pno->uc_networks_count) {
+ if (pno->a_networks[j].ssid.ssid_len ==
+ nd_config->ssids[i].ssid_len &&
+ (memcmp(pno->a_networks[j].ssid.ssid,
+ nd_config->ssids[i].ssid,
+ pno->a_networks[j].ssid.ssid_len) == 0)) {
+ pno->a_networks[j].bcast_nw_type = BCAST_HIDDEN;
+ break;
+ }
+ j++;
+ }
+ }
+
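+ /* map cfg80211 scan plans onto fast/slow PNO periods; e.g. plans
+ * {interval 10 s, 3 iterations} + {interval 60 s} become
+ * fast_scan_period 10000 ms, fast_scan_max_cycles 3 and
+ * slow_scan_period 60000 ms
+ */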
+ if (nd_config->n_scan_plans == 2) {
+ pno->fast_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC;
+ pno->fast_scan_max_cycles = nd_config->scan_plans[0].iterations;
+ pno->slow_scan_period =
+ nd_config->scan_plans[1].interval * MSEC_PER_SEC;
+ } else if (nd_config->n_scan_plans == 1) {
+ pno->fast_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC;
+ pno->fast_scan_max_cycles = 1;
+ pno->slow_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC;
+ } else {
+ ath11k_warn(ar->ab, "Invalid number of scan plans %d !!",
+ nd_config->n_scan_plans);
+ }
+
+ if (nd_config->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
+ /* enable mac randomization */
+ pno->enable_pno_scan_randomization = 1;
+ memcpy(pno->mac_addr, nd_config->mac_addr, ETH_ALEN);
+ memcpy(pno->mac_addr_mask, nd_config->mac_addr_mask, ETH_ALEN);
+ }
+
+ pno->delay_start_time = nd_config->delay;
+
+ /* Current FW does not support min-max range for dwell time */
+ pno->active_max_time = WMI_ACTIVE_MAX_CHANNEL_TIME;
+ pno->passive_max_time = WMI_PASSIVE_MAX_CHANNEL_TIME;
+
+ return 0;
+}
+
+static int ath11k_vif_wow_set_wakeups(struct ath11k_vif *arvif,
+ struct cfg80211_wowlan *wowlan)
+{
+ int ret, i;
+ unsigned long wow_mask = 0;
+ struct ath11k *ar = arvif->ar;
+ const struct cfg80211_pkt_pattern *patterns = wowlan->patterns;
+ int pattern_id = 0;
+
+ /* Setup requested WOW features */
+ switch (arvif->vdev_type) {
+ case WMI_VDEV_TYPE_IBSS:
+ __set_bit(WOW_BEACON_EVENT, &wow_mask);
+ fallthrough;
+ case WMI_VDEV_TYPE_AP:
+ __set_bit(WOW_DEAUTH_RECVD_EVENT, &wow_mask);
+ __set_bit(WOW_DISASSOC_RECVD_EVENT, &wow_mask);
+ __set_bit(WOW_PROBE_REQ_WPS_IE_EVENT, &wow_mask);
+ __set_bit(WOW_AUTH_REQ_EVENT, &wow_mask);
+ __set_bit(WOW_ASSOC_REQ_EVENT, &wow_mask);
+ __set_bit(WOW_HTT_EVENT, &wow_mask);
+ __set_bit(WOW_RA_MATCH_EVENT, &wow_mask);
+ break;
+ case WMI_VDEV_TYPE_STA:
+ if (wowlan->disconnect) {
+ __set_bit(WOW_DEAUTH_RECVD_EVENT, &wow_mask);
+ __set_bit(WOW_DISASSOC_RECVD_EVENT, &wow_mask);
+ __set_bit(WOW_BMISS_EVENT, &wow_mask);
+ __set_bit(WOW_CSA_IE_EVENT, &wow_mask);
+ }
+
+ if (wowlan->magic_pkt)
+ __set_bit(WOW_MAGIC_PKT_RECVD_EVENT, &wow_mask);
+
+ if (wowlan->nd_config) {
+ struct wmi_pno_scan_req *pno;
+ int ret;
+
+ pno = kzalloc(sizeof(*pno), GFP_KERNEL);
+ if (!pno)
+ return -ENOMEM;
+
+ ar->nlo_enabled = true;
+
+ ret = ath11k_wmi_pno_check_and_convert(ar, arvif->vdev_id,
+ wowlan->nd_config, pno);
+ if (!ret) {
+ ath11k_wmi_wow_config_pno(ar, arvif->vdev_id, pno);
+ __set_bit(WOW_NLO_DETECTED_EVENT, &wow_mask);
+ }
+
+ kfree(pno);
+ }
+ break;
+ default:
+ break;
+ }
+
+ for (i = 0; i < wowlan->n_patterns; i++) {
+ u8 ath_pattern[WOW_MAX_PATTERN_SIZE] = {};
+ u8 ath_bitmask[WOW_MAX_PATTERN_SIZE] = {};
+ struct cfg80211_pkt_pattern new_pattern = {};
+
+ new_pattern.pattern = ath_pattern;
+ new_pattern.mask = ath_bitmask;
+ if (patterns[i].pattern_len > WOW_MAX_PATTERN_SIZE)
+ continue;
+
+ if (ar->wmi->wmi_ab->wlan_resource_config.rx_decap_mode ==
+ ATH11K_HW_TXRX_NATIVE_WIFI) {
+ if (patterns[i].pkt_offset < ETH_HLEN) {
+ ath11k_wow_convert_8023_to_80211(&new_pattern,
+ &patterns[i]);
+ } else {
+ int j;
+
+ new_pattern = patterns[i];
+ new_pattern.mask = ath_bitmask;
+
+ /* convert bitmask to bytemask */
+ for (j = 0; j < patterns[i].pattern_len; j++)
+ if (patterns[i].mask[j / 8] & BIT(j % 8))
+ ath_bitmask[j] = 0xff;
+
+ new_pattern.pkt_offset += WOW_HDR_LEN - ETH_HLEN;
+ }
+ }
+
+ if (WARN_ON(new_pattern.pattern_len > WOW_MAX_PATTERN_SIZE))
+ return -EINVAL;
+
+ ret = ath11k_wmi_wow_add_pattern(ar, arvif->vdev_id,
+ pattern_id,
+ new_pattern.pattern,
+ new_pattern.mask,
+ new_pattern.pattern_len,
+ new_pattern.pkt_offset);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to add pattern %i to vdev %i: %d\n",
+ pattern_id,
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ pattern_id++;
+ __set_bit(WOW_PATTERN_MATCH_EVENT, &wow_mask);
+ }
+
+ for (i = 0; i < WOW_EVENT_MAX; i++) {
+ if (!test_bit(i, &wow_mask))
+ continue;
+ ret = ath11k_wmi_wow_add_wakeup_event(ar, arvif->vdev_id, i, 1);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to enable wakeup event %s on vdev %i: %d\n",
+ wow_wakeup_event(i), arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ath11k_wow_set_wakeups(struct ath11k *ar,
+ struct cfg80211_wowlan *wowlan)
+{
+ struct ath11k_vif *arvif;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ ret = ath11k_vif_wow_set_wakeups(arvif, wowlan);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set wow wakeups on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ath11k_vif_wow_clean_nlo(struct ath11k_vif *arvif)
+{
+ int ret = 0;
+ struct ath11k *ar = arvif->ar;
+
+ switch (arvif->vdev_type) {
+ case WMI_VDEV_TYPE_STA:
+ if (ar->nlo_enabled) {
+ struct wmi_pno_scan_req *pno;
+
+ pno = kzalloc(sizeof(*pno), GFP_KERNEL);
+ if (!pno)
+ return -ENOMEM;
+
+ pno->enable = 0;
+ ar->nlo_enabled = false;
+ ret = ath11k_wmi_wow_config_pno(ar, arvif->vdev_id, pno);
+ kfree(pno);
+ }
+ break;
+ default:
+ break;
+ }
+ return ret;
+}
+
+static int ath11k_wow_nlo_cleanup(struct ath11k *ar)
+{
+ struct ath11k_vif *arvif;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ ret = ath11k_vif_wow_clean_nlo(arvif);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to clean nlo settings on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ath11k_wow_set_hw_filter(struct ath11k *ar)
+{
+ struct ath11k_vif *arvif;
+ u32 bitmap;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ bitmap = WMI_HW_DATA_FILTER_DROP_NON_ICMPV6_MC |
+ WMI_HW_DATA_FILTER_DROP_NON_ARP_BC;
+ ret = ath11k_wmi_hw_data_filter_cmd(ar, arvif->vdev_id,
+ bitmap,
+ true);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set hw data filter on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ath11k_wow_clear_hw_filter(struct ath11k *ar)
+{
+ struct ath11k_vif *arvif;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ ret = ath11k_wmi_hw_data_filter_cmd(ar, arvif->vdev_id, 0, false);
+
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to clear hw data filter on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ath11k_wow_arp_ns_offload(struct ath11k *ar, bool enable)
+{
+ struct ath11k_vif *arvif;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ if (arvif->vdev_type != WMI_VDEV_TYPE_STA)
+ continue;
+
+ ret = ath11k_wmi_arp_ns_offload(ar, arvif, enable);
+
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set arp ns offload vdev %i: enable %d, ret %d\n",
+ arvif->vdev_id, enable, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ath11k_gtk_rekey_offload(struct ath11k *ar, bool enable)
+{
+ struct ath11k_vif *arvif;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ if (arvif->vdev_type != WMI_VDEV_TYPE_STA ||
+ !arvif->is_up ||
+ !arvif->rekey_data.enable_offload)
+ continue;
+
+ /* get rekey info before disabling rekey offload */
+ if (!enable) {
+ ret = ath11k_wmi_gtk_rekey_getinfo(ar, arvif);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to request rekey info vdev %i, ret %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ ret = ath11k_wmi_gtk_rekey_offload(ar, arvif, enable);
+
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to offload gtk reky vdev %i: enable %d, ret %d\n",
+ arvif->vdev_id, enable, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ath11k_wow_protocol_offload(struct ath11k *ar, bool enable)
+{
+ int ret;
+
+ ret = ath11k_wow_arp_ns_offload(ar, enable);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to offload ARP and NS %d %d\n",
+ enable, ret);
+ return ret;
+ }
+
+ ret = ath11k_gtk_rekey_offload(ar, enable);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to offload gtk rekey %d %d\n",
+ enable, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath11k_wow_set_keepalive(struct ath11k *ar,
+ enum wmi_sta_keepalive_method method,
+ u32 interval)
+{
+ struct ath11k_vif *arvif;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ ret = ath11k_mac_vif_set_keepalive(arvif, method, interval);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+int ath11k_wow_op_suspend(struct ieee80211_hw *hw,
+ struct cfg80211_wowlan *wowlan)
+{
+ struct ath11k *ar = hw->priv;
+ int ret;
+
+ ret = ath11k_mac_wait_tx_complete(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to wait tx complete: %d\n", ret);
+ return ret;
+ }
+
+ mutex_lock(&ar->conf_mutex);
+
+ ret = ath11k_dp_rx_pktlog_stop(ar->ab, true);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to stop dp rx (and timer) pktlog during wow suspend: %d\n",
+ ret);
+ goto exit;
+ }
+
+ ret = ath11k_wow_cleanup(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to clear wow wakeup events: %d\n",
+ ret);
+ goto exit;
+ }
+
+ ret = ath11k_wow_set_wakeups(ar, wowlan);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set wow wakeup events: %d\n",
+ ret);
+ goto cleanup;
+ }
+
+ ret = ath11k_wow_protocol_offload(ar, true);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set wow protocol offload events: %d\n",
+ ret);
+ goto cleanup;
+ }
+
+ ret = ath11k_wow_set_hw_filter(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set hw filter: %d\n",
+ ret);
+ goto cleanup;
+ }
+
+ ret = ath11k_wow_set_keepalive(ar,
+ WMI_STA_KEEPALIVE_METHOD_NULL_FRAME,
+ WMI_STA_KEEPALIVE_INTERVAL_DEFAULT);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to enable wow keepalive: %d\n", ret);
+ goto cleanup;
+ }
+
+ ret = ath11k_wow_enable(ar->ab);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to start wow: %d\n", ret);
+ goto cleanup;
+ }
+
+ ret = ath11k_dp_rx_pktlog_stop(ar->ab, false);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to stop dp rx pktlog during wow suspend: %d\n",
+ ret);
+ goto cleanup;
+ }
+
+ ath11k_ce_stop_shadow_timers(ar->ab);
+ ath11k_dp_stop_shadow_timers(ar->ab);
+
+ ath11k_hif_irq_disable(ar->ab);
+ ath11k_hif_ce_irq_disable(ar->ab);
+
+ ret = ath11k_hif_suspend(ar->ab);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to suspend hif: %d\n", ret);
+ goto wakeup;
+ }
+
+ goto exit;
+
+wakeup:
+ ath11k_wow_wakeup(ar->ab);
+
+cleanup:
+ ath11k_wow_cleanup(ar);
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret ? 1 : 0;
+}
+
+void ath11k_wow_op_set_wakeup(struct ieee80211_hw *hw, bool enabled)
+{
+ struct ath11k *ar = hw->priv;
+
+ mutex_lock(&ar->conf_mutex);
+ device_set_wakeup_enable(ar->ab->dev, enabled);
+ mutex_unlock(&ar->conf_mutex);
+}
+
+int ath11k_wow_op_resume(struct ieee80211_hw *hw)
+{
+ struct ath11k *ar = hw->priv;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ ret = ath11k_hif_resume(ar->ab);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to resume hif: %d\n", ret);
+ goto exit;
+ }
+
+ ath11k_hif_ce_irq_enable(ar->ab);
+ ath11k_hif_irq_enable(ar->ab);
+
+ ret = ath11k_dp_rx_pktlog_start(ar->ab);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to start rx pktlog from wow: %d\n", ret);
+ goto exit;
+ }
+
+ ret = ath11k_wow_wakeup(ar->ab);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to wakeup from wow: %d\n", ret);
+ goto exit;
+ }
+
+ ret = ath11k_wow_nlo_cleanup(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to cleanup nlo: %d\n", ret);
+ goto exit;
+ }
+
+ ret = ath11k_wow_clear_hw_filter(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to clear hw filter: %d\n", ret);
+ goto exit;
+ }
+
+ ret = ath11k_wow_protocol_offload(ar, false);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to clear wow protocol offload events: %d\n",
+ ret);
+ goto exit;
+ }
+
+ ret = ath11k_wow_set_keepalive(ar,
+ WMI_STA_KEEPALIVE_METHOD_NULL_FRAME,
+ WMI_STA_KEEPALIVE_INTERVAL_DISABLE);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to disable wow keepalive: %d\n", ret);
+ goto exit;
+ }
+
+exit:
+ if (ret) {
+ switch (ar->state) {
+ case ATH11K_STATE_ON:
+ ar->state = ATH11K_STATE_RESTARTING;
+ ret = 1;
+ break;
+ case ATH11K_STATE_OFF:
+ case ATH11K_STATE_RESTARTING:
+ case ATH11K_STATE_RESTARTED:
+ case ATH11K_STATE_WEDGED:
+ case ATH11K_STATE_FTM:
+ ath11k_warn(ar->ab, "encountered unexpected device state %d on resume, cannot recover\n",
+ ar->state);
+ ret = -EIO;
+ break;
+ }
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+int ath11k_wow_init(struct ath11k *ar)
+{
+ if (!test_bit(WMI_TLV_SERVICE_WOW, ar->wmi->wmi_ab->svc_map))
+ return 0;
+
+ ar->wow.wowlan_support = ath11k_wowlan_support;
+
+ if (ar->wmi->wmi_ab->wlan_resource_config.rx_decap_mode ==
+ ATH11K_HW_TXRX_NATIVE_WIFI) {
+ ar->wow.wowlan_support.pattern_max_len -= WOW_MAX_REDUCE;
+ ar->wow.wowlan_support.max_pkt_offset -= WOW_MAX_REDUCE;
+ }
+
+ if (test_bit(WMI_TLV_SERVICE_NLO, ar->wmi->wmi_ab->svc_map)) {
+ ar->wow.wowlan_support.flags |= WIPHY_WOWLAN_NET_DETECT;
+ ar->wow.wowlan_support.max_nd_match_sets = WMI_PNO_MAX_SUPP_NETWORKS;
+ }
+
+ ar->wow.max_num_patterns = ATH11K_WOW_PATTERNS;
+ ar->wow.wowlan_support.n_patterns = ar->wow.max_num_patterns;
+ ar->hw->wiphy->wowlan = &ar->wow.wowlan_support;
+
+ device_set_wakeup_capable(ar->ab->dev, true);
+
+ return 0;
+}
diff --git a/drivers/net/wireless/ath/ath11k/wow.h b/drivers/net/wireless/ath/ath11k/wow.h
new file mode 100644
index 000000000000..c85811e3f42b
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/wow.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2020 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _WOW_H_
+#define _WOW_H_
+
+struct ath11k_wow {
+ u32 max_num_patterns;
+ struct completion wakeup_completed;
+ struct wiphy_wowlan_support wowlan_support;
+};
+
+struct rfc1042_hdr {
+ u8 llc_dsap;
+ u8 llc_ssap;
+ u8 llc_ctrl;
+ u8 snap_oui[3];
+ __be16 snap_type;
+} __packed;
+
+#define ATH11K_WOW_RETRY_NUM 3
+#define ATH11K_WOW_RETRY_WAIT_MS 200
+#define ATH11K_WOW_PATTERNS 22
+
+#ifdef CONFIG_PM
+
+int ath11k_wow_init(struct ath11k *ar);
+int ath11k_wow_op_suspend(struct ieee80211_hw *hw,
+ struct cfg80211_wowlan *wowlan);
+int ath11k_wow_op_resume(struct ieee80211_hw *hw);
+void ath11k_wow_op_set_wakeup(struct ieee80211_hw *hw, bool enabled);
+int ath11k_wow_enable(struct ath11k_base *ab);
+int ath11k_wow_wakeup(struct ath11k_base *ab);
+
+#else
+
+static inline int ath11k_wow_init(struct ath11k *ar)
+{
+ return 0;
+}
+
+static inline int ath11k_wow_enable(struct ath11k_base *ab)
+{
+ return 0;
+}
+
+static inline int ath11k_wow_wakeup(struct ath11k_base *ab)
+{
+ return 0;
+}
+
+#endif /* CONFIG_PM */
+#endif /* _WOW_H_ */
diff --git a/drivers/net/wireless/ath/ath12k/Kconfig b/drivers/net/wireless/ath/ath12k/Kconfig
new file mode 100644
index 000000000000..1ea1af1b8f6c
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/Kconfig
@@ -0,0 +1,62 @@
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+config ATH12K
+ tristate "Qualcomm Technologies Wi-Fi 7 support (ath12k)"
+ depends on MAC80211 && HAS_DMA && PCI
+ select CRYPTO_MICHAEL_MIC
+ select QCOM_QMI_HELPERS
+ select MHI_BUS
+ select QRTR
+ select QRTR_MHI
+ select PCI_PWRCTRL_PWRSEQ if HAVE_PWRCTRL
+ help
+ Enable support for Qualcomm Technologies Wi-Fi 7 (IEEE
+ 802.11be) family of chipsets, for example WCN7850 and
+ QCN9274.
+
+ If you choose to build a module, it'll be called ath12k.
+
+config ATH12K_AHB
+ bool "QTI ath12k AHB support"
+ depends on ATH12K && REMOTEPROC
+ select QCOM_MDT_LOADER
+ select QCOM_SCM
+ help
+ Enable support for ath12k AHB bus chipsets, for example IPQ5332.
+
+config ATH12K_DEBUG
+ bool "ath12k debugging"
+ depends on ATH12K
+ help
+ Enable debug support, for example debug messages which must
+ be enabled separately using the debug_mask module parameter.
+
+ If unsure, say Y to make it easier to debug problems. But if
+ you want optimal performance choose N.
+
+config ATH12K_DEBUGFS
+ bool "QTI ath12k debugfs support"
+ depends on ATH12K && MAC80211_DEBUGFS
+ help
+ Enable ath12k debugfs support
+
+ If unsure, say Y to make it easier to debug problems. But if
+ you want optimal performance choose N.
+
+config ATH12K_TRACING
+ bool "ath12k tracing support"
+ depends on ATH12K && EVENT_TRACING
+ help
+ Enable ath12k tracing infrastructure.
+
+ If unsure, say Y to make it easier to debug problems. But if
+ you want optimal performance choose N.
+
+config ATH12K_COREDUMP
+ bool "ath12k coredump"
+ depends on ATH12K
+ select WANT_DEV_COREDUMP
+ help
+ Enable ath12k coredump collection
+
+ If unsure, say Y to make it easier to debug problems. But if
+ dump collection is not required choose N.
diff --git a/drivers/net/wireless/ath/ath12k/Makefile b/drivers/net/wireless/ath/ath12k/Makefile
new file mode 100644
index 000000000000..d95ee525a6cd
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/Makefile
@@ -0,0 +1,35 @@
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+obj-$(CONFIG_ATH12K) += ath12k.o
+ath12k-y += core.o \
+ hal.o \
+ hal_tx.o \
+ hal_rx.o \
+ wmi.o \
+ mac.o \
+ reg.o \
+ htc.o \
+ qmi.o \
+ dp.o \
+ dp_tx.o \
+ dp_rx.o \
+ debug.o \
+ ce.o \
+ peer.o \
+ dbring.o \
+ hw.o \
+ mhi.o \
+ pci.o \
+ dp_mon.o \
+ fw.o \
+ p2p.o
+
+ath12k-$(CONFIG_ATH12K_AHB) += ahb.o
+ath12k-$(CONFIG_ATH12K_DEBUGFS) += debugfs.o debugfs_htt_stats.o debugfs_sta.o
+ath12k-$(CONFIG_ACPI) += acpi.o
+ath12k-$(CONFIG_ATH12K_TRACING) += trace.o
+ath12k-$(CONFIG_PM) += wow.o
+ath12k-$(CONFIG_ATH12K_COREDUMP) += coredump.o
+ath12k-$(CONFIG_NL80211_TESTMODE) += testmode.o
+
+# for tracing framework to find trace.h
+CFLAGS_trace.o := -I$(src)
diff --git a/drivers/net/wireless/ath/ath12k/acpi.c b/drivers/net/wireless/ath/ath12k/acpi.c
new file mode 100644
index 000000000000..d81367ce6929
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/acpi.c
@@ -0,0 +1,510 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include "core.h"
+#include "acpi.h"
+#include "debug.h"
+
+static int ath12k_acpi_dsm_get_data(struct ath12k_base *ab, int func)
+{
+ union acpi_object *obj;
+ acpi_handle root_handle;
+ int ret, i;
+
+ root_handle = ACPI_HANDLE(ab->dev);
+ if (!root_handle) {
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "invalid acpi handler\n");
+ return -EOPNOTSUPP;
+ }
+
+ obj = acpi_evaluate_dsm(root_handle, ab->hw_params->acpi_guid, 0, func,
+ NULL);
+
+ if (!obj) {
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "acpi_evaluate_dsm() failed\n");
+ return -ENOENT;
+ }
+
+ if (obj->type == ACPI_TYPE_INTEGER) {
+ switch (func) {
+ case ATH12K_ACPI_DSM_FUNC_SUPPORT_FUNCS:
+ ab->acpi.func_bit = obj->integer.value;
+ break;
+ case ATH12K_ACPI_DSM_FUNC_DISABLE_FLAG:
+ ab->acpi.bit_flag = obj->integer.value;
+ break;
+ }
+ } else if (obj->type == ACPI_TYPE_STRING) {
+ switch (func) {
+ case ATH12K_ACPI_DSM_FUNC_BDF_EXT:
+ if (obj->string.length <= ATH12K_ACPI_BDF_ANCHOR_STRING_LEN ||
+ obj->string.length > ATH12K_ACPI_BDF_MAX_LEN ||
+ memcmp(obj->string.pointer, ATH12K_ACPI_BDF_ANCHOR_STRING,
+ ATH12K_ACPI_BDF_ANCHOR_STRING_LEN)) {
+ ath12k_warn(ab, "invalid ACPI DSM BDF size: %d\n",
+ obj->string.length);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ memcpy(ab->acpi.bdf_string, obj->string.pointer,
+ obj->string.length);
+
+ break;
+ }
+ } else if (obj->type == ACPI_TYPE_BUFFER) {
+ switch (func) {
+ case ATH12K_ACPI_DSM_FUNC_SUPPORT_FUNCS:
+ if (obj->buffer.length < ATH12K_ACPI_DSM_FUNC_MIN_BITMAP_SIZE ||
+ obj->buffer.length > ATH12K_ACPI_DSM_FUNC_MAX_BITMAP_SIZE) {
+ ath12k_warn(ab, "invalid ACPI DSM func size: %d\n",
+ obj->buffer.length);
+ ret = -EINVAL;
+ goto out;
+ }
+
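+ /* Function 0 returns the supported-function bitmap; assemble it
+ * little-endian, byte by byte.
+ */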
+ ab->acpi.func_bit = 0;
+ for (i = 0; i < obj->buffer.length; i++)
+ ab->acpi.func_bit += obj->buffer.pointer[i] << (i * 8);
+
+ break;
+ case ATH12K_ACPI_DSM_FUNC_TAS_CFG:
+ if (obj->buffer.length != ATH12K_ACPI_DSM_TAS_CFG_SIZE) {
+ ath12k_warn(ab, "invalid ACPI DSM TAS config size: %d\n",
+ obj->buffer.length);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ memcpy(&ab->acpi.tas_cfg, obj->buffer.pointer,
+ obj->buffer.length);
+
+ break;
+ case ATH12K_ACPI_DSM_FUNC_TAS_DATA:
+ if (obj->buffer.length != ATH12K_ACPI_DSM_TAS_DATA_SIZE) {
+ ath12k_warn(ab, "invalid ACPI DSM TAS data size: %d\n",
+ obj->buffer.length);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ memcpy(&ab->acpi.tas_sar_power_table, obj->buffer.pointer,
+ obj->buffer.length);
+
+ break;
+ case ATH12K_ACPI_DSM_FUNC_BIOS_SAR:
+ if (obj->buffer.length != ATH12K_ACPI_DSM_BIOS_SAR_DATA_SIZE) {
+ ath12k_warn(ab, "invalid ACPI BIOS SAR data size: %d\n",
+ obj->buffer.length);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ memcpy(&ab->acpi.bios_sar_data, obj->buffer.pointer,
+ obj->buffer.length);
+
+ break;
+ case ATH12K_ACPI_DSM_FUNC_GEO_OFFSET:
+ if (obj->buffer.length != ATH12K_ACPI_DSM_GEO_OFFSET_DATA_SIZE) {
+ ath12k_warn(ab, "invalid ACPI GEO OFFSET data size: %d\n",
+ obj->buffer.length);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ memcpy(&ab->acpi.geo_offset_data, obj->buffer.pointer,
+ obj->buffer.length);
+
+ break;
+ case ATH12K_ACPI_DSM_FUNC_INDEX_CCA:
+ if (obj->buffer.length != ATH12K_ACPI_DSM_CCA_DATA_SIZE) {
+ ath12k_warn(ab, "invalid ACPI DSM CCA data size: %d\n",
+ obj->buffer.length);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ memcpy(&ab->acpi.cca_data, obj->buffer.pointer,
+ obj->buffer.length);
+
+ break;
+ case ATH12K_ACPI_DSM_FUNC_INDEX_BAND_EDGE:
+ if (obj->buffer.length != ATH12K_ACPI_DSM_BAND_EDGE_DATA_SIZE) {
+ ath12k_warn(ab, "invalid ACPI DSM band edge data size: %d\n",
+ obj->buffer.length);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ memcpy(&ab->acpi.band_edge_power, obj->buffer.pointer,
+ obj->buffer.length);
+
+ break;
+ }
+ } else {
+ ath12k_warn(ab, "ACPI DSM method returned an unsupported object type: %d\n",
+ obj->type);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = 0;
+
+out:
+ ACPI_FREE(obj);
+ return ret;
+}
+
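+/* Push the cached TAS table to firmware. The first two bytes of the table
+ * carry a version and an enable flag, which are validated before the WMI
+ * call.
+ */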
+static int ath12k_acpi_set_power_limit(struct ath12k_base *ab)
+{
+ const u8 *tas_sar_power_table = ab->acpi.tas_sar_power_table;
+ int ret;
+
+ if (tas_sar_power_table[0] != ATH12K_ACPI_TAS_DATA_VERSION ||
+ tas_sar_power_table[1] != ATH12K_ACPI_TAS_DATA_ENABLE) {
+ ath12k_warn(ab, "latest ACPI TAS data is invalid\n");
+ return -EINVAL;
+ }
+
+ ret = ath12k_wmi_set_bios_cmd(ab, WMI_BIOS_PARAM_TAS_DATA_TYPE,
+ tas_sar_power_table,
+ ATH12K_ACPI_DSM_TAS_DATA_SIZE);
+ if (ret) {
+ ath12k_warn(ab, "failed to send ACPI TAS data table: %d\n", ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+static int ath12k_acpi_set_bios_sar_power(struct ath12k_base *ab)
+{
+ int ret;
+
+ if (ab->acpi.bios_sar_data[0] != ATH12K_ACPI_POWER_LIMIT_VERSION ||
+ ab->acpi.bios_sar_data[1] != ATH12K_ACPI_POWER_LIMIT_ENABLE_FLAG) {
+ ath12k_warn(ab, "invalid latest ACPI BIOS SAR data\n");
+ return -EINVAL;
+ }
+
+ ret = ath12k_wmi_set_bios_sar_cmd(ab, ab->acpi.bios_sar_data);
+ if (ret) {
+ ath12k_warn(ab, "failed to set ACPI BIOS SAR table: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
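+/* ACPI notify handler: on ATH12K_ACPI_NOTIFY_EVENT the TAS (and, when
+ * enabled, BIOS SAR) tables are re-read via DSM and re-applied, so
+ * runtime updates from the platform take effect.
+ */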
+static void ath12k_acpi_dsm_notify(acpi_handle handle, u32 event, void *data)
+{
+ int ret;
+ struct ath12k_base *ab = data;
+
+ if (event != ATH12K_ACPI_NOTIFY_EVENT) {
+ ath12k_warn(ab, "unknown acpi notify %u\n", event);
+ return;
+ }
+
+ if (!ab->acpi.acpi_tas_enable) {
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "acpi_tas_enable is false\n");
+ return;
+ }
+
+ ret = ath12k_acpi_dsm_get_data(ab, ATH12K_ACPI_DSM_FUNC_TAS_DATA);
+ if (ret) {
+ ath12k_warn(ab, "failed to update ACPI TAS data table: %d\n", ret);
+ return;
+ }
+
+ ret = ath12k_acpi_set_power_limit(ab);
+ if (ret) {
+ ath12k_warn(ab, "failed to set ACPI TAS power limit data: %d", ret);
+ return;
+ }
+
+ if (!ab->acpi.acpi_bios_sar_enable)
+ return;
+
+ ret = ath12k_acpi_dsm_get_data(ab, ATH12K_ACPI_DSM_FUNC_BIOS_SAR);
+ if (ret) {
+ ath12k_warn(ab, "failed to update BIOS SAR: %d\n", ret);
+ return;
+ }
+
+ ret = ath12k_acpi_set_bios_sar_power(ab);
+ if (ret) {
+ ath12k_warn(ab, "failed to set BIOS SAR power limit: %d\n", ret);
+ return;
+ }
+}
+
+static int ath12k_acpi_set_bios_sar_params(struct ath12k_base *ab)
+{
+ int ret;
+
+ ret = ath12k_wmi_set_bios_sar_cmd(ab, ab->acpi.bios_sar_data);
+ if (ret) {
+ ath12k_warn(ab, "failed to set ACPI BIOS SAR table: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath12k_wmi_set_bios_geo_cmd(ab, ab->acpi.geo_offset_data);
+ if (ret) {
+ ath12k_warn(ab, "failed to set ACPI BIOS GEO table: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath12k_acpi_set_tas_params(struct ath12k_base *ab)
+{
+ int ret;
+
+ ret = ath12k_wmi_set_bios_cmd(ab, WMI_BIOS_PARAM_TAS_CONFIG_TYPE,
+ ab->acpi.tas_cfg,
+ ATH12K_ACPI_DSM_TAS_CFG_SIZE);
+ if (ret) {
+ ath12k_warn(ab, "failed to send ACPI TAS config table parameter: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = ath12k_wmi_set_bios_cmd(ab, WMI_BIOS_PARAM_TAS_DATA_TYPE,
+ ab->acpi.tas_sar_power_table,
+ ATH12K_ACPI_DSM_TAS_DATA_SIZE);
+ if (ret) {
+ ath12k_warn(ab, "failed to send ACPI TAS data table parameter: %d\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+bool ath12k_acpi_get_disable_rfkill(struct ath12k_base *ab)
+{
+ return ab->acpi.acpi_disable_rfkill;
+}
+
+bool ath12k_acpi_get_disable_11be(struct ath12k_base *ab)
+{
+ return ab->acpi.acpi_disable_11be;
+}
+
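+/* Send every BIOS table that ath12k_acpi_start() found valid (TAS, BIOS
+ * SAR, CCA threshold, band edge power) down to the firmware.
+ */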
+void ath12k_acpi_set_dsm_func(struct ath12k_base *ab)
+{
+ int ret;
+ u8 *buf;
+
+ if (!ab->hw_params->acpi_guid)
+ /* not supported with this hardware */
+ return;
+
+ if (ab->acpi.acpi_tas_enable) {
+ ret = ath12k_acpi_set_tas_params(ab);
+ if (ret) {
+ ath12k_warn(ab, "failed to send ACPI TAS parameters: %d\n", ret);
+ return;
+ }
+ }
+
+ if (ab->acpi.acpi_bios_sar_enable) {
+ ret = ath12k_acpi_set_bios_sar_params(ab);
+ if (ret) {
+ ath12k_warn(ab, "failed to send ACPI BIOS SAR: %d\n", ret);
+ return;
+ }
+ }
+
+ if (ab->acpi.acpi_cca_enable) {
+ buf = ab->acpi.cca_data + ATH12K_ACPI_CCA_THR_OFFSET_DATA_OFFSET;
+ ret = ath12k_wmi_set_bios_cmd(ab,
+ WMI_BIOS_PARAM_CCA_THRESHOLD_TYPE,
+ buf,
+ ATH12K_ACPI_CCA_THR_OFFSET_LEN);
+ if (ret) {
+ ath12k_warn(ab, "failed to set ACPI DSM CCA threshold: %d\n",
+ ret);
+ return;
+ }
+ }
+
+ if (ab->acpi.acpi_band_edge_enable) {
+ ret = ath12k_wmi_set_bios_cmd(ab,
+ WMI_BIOS_PARAM_TYPE_BANDEDGE,
+ ab->acpi.band_edge_power,
+ sizeof(ab->acpi.band_edge_power));
+ if (ret) {
+ ath12k_warn(ab,
+ "failed to set ACPI DSM band edge channel power: %d\n",
+ ret);
+ return;
+ }
+ }
+}
+
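+/* Query DSM function 0 for the supported-function bitmap, fetch and
+ * validate each advertised item, and install the notify handler used
+ * for runtime table updates.
+ */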
+int ath12k_acpi_start(struct ath12k_base *ab)
+{
+ acpi_status status;
+ int ret;
+
+ ab->acpi.acpi_tas_enable = false;
+ ab->acpi.acpi_disable_11be = false;
+ ab->acpi.acpi_disable_rfkill = false;
+ ab->acpi.acpi_bios_sar_enable = false;
+ ab->acpi.acpi_cca_enable = false;
+ ab->acpi.acpi_band_edge_enable = false;
+ ab->acpi.acpi_enable_bdf = false;
+ ab->acpi.bdf_string[0] = '\0';
+
+ if (!ab->hw_params->acpi_guid)
+ /* not supported with this hardware */
+ return 0;
+
+ ret = ath12k_acpi_dsm_get_data(ab, ATH12K_ACPI_DSM_FUNC_SUPPORT_FUNCS);
+ if (ret) {
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "failed to get ACPI DSM data: %d\n", ret);
+ return ret;
+ }
+
+ if (ATH12K_ACPI_FUNC_BIT_VALID(ab->acpi, ATH12K_ACPI_FUNC_BIT_DISABLE_FLAG)) {
+ ret = ath12k_acpi_dsm_get_data(ab, ATH12K_ACPI_DSM_FUNC_DISABLE_FLAG);
+ if (ret) {
+ ath12k_warn(ab, "failed to get ACPI DISABLE FLAG: %d\n", ret);
+ return ret;
+ }
+
+ if (ATH12K_ACPI_CHEK_BIT_VALID(ab->acpi,
+ ATH12K_ACPI_DSM_DISABLE_11BE_BIT))
+ ab->acpi.acpi_disable_11be = true;
+
+ if (!ATH12K_ACPI_CHEK_BIT_VALID(ab->acpi,
+ ATH12K_ACPI_DSM_DISABLE_RFKILL_BIT))
+ ab->acpi.acpi_disable_rfkill = true;
+ }
+
+ if (ATH12K_ACPI_FUNC_BIT_VALID(ab->acpi, ATH12K_ACPI_FUNC_BIT_BDF_EXT)) {
+ ret = ath12k_acpi_dsm_get_data(ab, ATH12K_ACPI_DSM_FUNC_BDF_EXT);
+ if (ret || ab->acpi.bdf_string[0] == '\0') {
+ ath12k_warn(ab, "failed to get ACPI BDF EXT: %d\n", ret);
+ return ret;
+ }
+
+ ab->acpi.acpi_enable_bdf = true;
+ }
+
+ if (ATH12K_ACPI_FUNC_BIT_VALID(ab->acpi, ATH12K_ACPI_FUNC_BIT_TAS_CFG)) {
+ ret = ath12k_acpi_dsm_get_data(ab, ATH12K_ACPI_DSM_FUNC_TAS_CFG);
+ if (ret) {
+ ath12k_warn(ab, "failed to get ACPI TAS config table: %d\n", ret);
+ return ret;
+ }
+ }
+
+ if (ATH12K_ACPI_FUNC_BIT_VALID(ab->acpi, ATH12K_ACPI_FUNC_BIT_TAS_DATA)) {
+ ret = ath12k_acpi_dsm_get_data(ab, ATH12K_ACPI_DSM_FUNC_TAS_DATA);
+ if (ret) {
+ ath12k_warn(ab, "failed to get ACPI TAS data table: %d\n", ret);
+ return ret;
+ }
+
+ if (ATH12K_ACPI_FUNC_BIT_VALID(ab->acpi, ATH12K_ACPI_FUNC_BIT_TAS_CFG) &&
+ ab->acpi.tas_sar_power_table[0] == ATH12K_ACPI_TAS_DATA_VERSION &&
+ ab->acpi.tas_sar_power_table[1] == ATH12K_ACPI_TAS_DATA_ENABLE)
+ ab->acpi.acpi_tas_enable = true;
+ }
+
+ if (ATH12K_ACPI_FUNC_BIT_VALID(ab->acpi, ATH12K_ACPI_FUNC_BIT_BIOS_SAR)) {
+ ret = ath12k_acpi_dsm_get_data(ab, ATH12K_ACPI_DSM_FUNC_BIOS_SAR);
+ if (ret) {
+ ath12k_warn(ab, "failed to get ACPI bios sar data: %d\n", ret);
+ return ret;
+ }
+ }
+
+ if (ATH12K_ACPI_FUNC_BIT_VALID(ab->acpi, ATH12K_ACPI_FUNC_BIT_GEO_OFFSET)) {
+ ret = ath12k_acpi_dsm_get_data(ab, ATH12K_ACPI_DSM_FUNC_GEO_OFFSET);
+ if (ret) {
+ ath12k_warn(ab, "failed to get ACPI geo offset data: %d\n", ret);
+ return ret;
+ }
+
+ if (ATH12K_ACPI_FUNC_BIT_VALID(ab->acpi, ATH12K_ACPI_FUNC_BIT_BIOS_SAR) &&
+ ab->acpi.bios_sar_data[0] == ATH12K_ACPI_POWER_LIMIT_VERSION &&
+ ab->acpi.bios_sar_data[1] == ATH12K_ACPI_POWER_LIMIT_ENABLE_FLAG &&
+ !ab->acpi.acpi_tas_enable)
+ ab->acpi.acpi_bios_sar_enable = true;
+ }
+
+ if (ATH12K_ACPI_FUNC_BIT_VALID(ab->acpi, ATH12K_ACPI_FUNC_BIT_CCA)) {
+ ret = ath12k_acpi_dsm_get_data(ab, ATH12K_ACPI_DSM_FUNC_INDEX_CCA);
+ if (ret) {
+ ath12k_warn(ab, "failed to get ACPI DSM CCA threshold configuration: %d\n",
+ ret);
+ return ret;
+ }
+
+ if (ab->acpi.cca_data[0] == ATH12K_ACPI_CCA_THR_VERSION &&
+ ab->acpi.cca_data[ATH12K_ACPI_CCA_THR_OFFSET_DATA_OFFSET] ==
+ ATH12K_ACPI_CCA_THR_ENABLE_FLAG)
+ ab->acpi.acpi_cca_enable = true;
+ }
+
+ if (ATH12K_ACPI_FUNC_BIT_VALID(ab->acpi,
+ ATH12K_ACPI_FUNC_BIT_BAND_EDGE_CHAN_POWER)) {
+ ret = ath12k_acpi_dsm_get_data(ab, ATH12K_ACPI_DSM_FUNC_INDEX_BAND_EDGE);
+ if (ret) {
+ ath12k_warn(ab, "failed to get ACPI DSM band edge channel power: %d\n",
+ ret);
+ return ret;
+ }
+
+ if (ab->acpi.band_edge_power[0] == ATH12K_ACPI_BAND_EDGE_VERSION &&
+ ab->acpi.band_edge_power[1] == ATH12K_ACPI_BAND_EDGE_ENABLE_FLAG)
+ ab->acpi.acpi_band_edge_enable = true;
+ }
+
+ status = acpi_install_notify_handler(ACPI_HANDLE(ab->dev),
+ ACPI_DEVICE_NOTIFY,
+ ath12k_acpi_dsm_notify, ab);
+ if (ACPI_FAILURE(status)) {
+ ath12k_warn(ab, "failed to install DSM notify callback: %d\n", status);
+ return -EIO;
+ }
+
+ ab->acpi.started = true;
+
+ return 0;
+}
+
+int ath12k_acpi_check_bdf_variant_name(struct ath12k_base *ab)
+{
+ size_t max_len = sizeof(ab->qmi.target.bdf_ext);
+
+ if (!ab->acpi.acpi_enable_bdf)
+ return -ENODATA;
+
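+ /* Skip past the 4-byte "BDF" anchor prefix (3-byte anchor string plus
+ * what is presumably a separator) to reach the variant name.
+ */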
+ if (strscpy(ab->qmi.target.bdf_ext, ab->acpi.bdf_string + 4, max_len) < 0)
+ ath12k_dbg(ab, ATH12K_DBG_BOOT,
+ "acpi bdf variant longer than the buffer (variant: %s)\n",
+ ab->acpi.bdf_string);
+
+ return 0;
+}
+
+void ath12k_acpi_stop(struct ath12k_base *ab)
+{
+ if (!ab->acpi.started)
+ return;
+
+ acpi_remove_notify_handler(ACPI_HANDLE(ab->dev),
+ ACPI_DEVICE_NOTIFY,
+ ath12k_acpi_dsm_notify);
+
+ memset(&ab->acpi, 0, sizeof(ab->acpi));
+}
diff --git a/drivers/net/wireless/ath/ath12k/acpi.h b/drivers/net/wireless/ath/ath12k/acpi.h
new file mode 100644
index 000000000000..3a26fea6af1a
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/acpi.h
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+#ifndef ATH12K_ACPI_H
+#define ATH12K_ACPI_H
+
+#include <linux/acpi.h>
+
+#define ATH12K_ACPI_DSM_FUNC_SUPPORT_FUNCS 0
+#define ATH12K_ACPI_DSM_FUNC_DISABLE_FLAG 2
+#define ATH12K_ACPI_DSM_FUNC_BDF_EXT 3
+#define ATH12K_ACPI_DSM_FUNC_BIOS_SAR 4
+#define ATH12K_ACPI_DSM_FUNC_GEO_OFFSET 5
+#define ATH12K_ACPI_DSM_FUNC_INDEX_CCA 6
+#define ATH12K_ACPI_DSM_FUNC_TAS_CFG 8
+#define ATH12K_ACPI_DSM_FUNC_TAS_DATA 9
+#define ATH12K_ACPI_DSM_FUNC_INDEX_BAND_EDGE 10
+
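+/* Bits in the supported-function bitmap returned by DSM function 0; the
+ * bit positions are offset by one from the function indices above.
+ */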
+#define ATH12K_ACPI_FUNC_BIT_DISABLE_FLAG BIT(1)
+#define ATH12K_ACPI_FUNC_BIT_BDF_EXT BIT(2)
+#define ATH12K_ACPI_FUNC_BIT_BIOS_SAR BIT(3)
+#define ATH12K_ACPI_FUNC_BIT_GEO_OFFSET BIT(4)
+#define ATH12K_ACPI_FUNC_BIT_CCA BIT(5)
+#define ATH12K_ACPI_FUNC_BIT_TAS_CFG BIT(7)
+#define ATH12K_ACPI_FUNC_BIT_TAS_DATA BIT(8)
+#define ATH12K_ACPI_FUNC_BIT_BAND_EDGE_CHAN_POWER BIT(9)
+
+#define ATH12K_ACPI_NOTIFY_EVENT 0x86
+#define ATH12K_ACPI_FUNC_BIT_VALID(_acdata, _func) (((_acdata).func_bit) & (_func))
+#define ATH12K_ACPI_CHEK_BIT_VALID(_acdata, _func) (((_acdata).bit_flag) & (_func))
+
+#define ATH12K_ACPI_TAS_DATA_VERSION 0x1
+#define ATH12K_ACPI_TAS_DATA_ENABLE 0x1
+#define ATH12K_ACPI_POWER_LIMIT_VERSION 0x1
+#define ATH12K_ACPI_POWER_LIMIT_ENABLE_FLAG 0x1
+#define ATH12K_ACPI_CCA_THR_VERSION 0x1
+#define ATH12K_ACPI_CCA_THR_ENABLE_FLAG 0x1
+#define ATH12K_ACPI_BAND_EDGE_VERSION 0x1
+#define ATH12K_ACPI_BAND_EDGE_ENABLE_FLAG 0x1
+
+#define ATH12K_ACPI_GEO_OFFSET_DATA_OFFSET 1
+#define ATH12K_ACPI_DBS_BACKOFF_DATA_OFFSET 2
+#define ATH12K_ACPI_CCA_THR_OFFSET_DATA_OFFSET 5
+#define ATH12K_ACPI_BIOS_SAR_DBS_BACKOFF_LEN 10
+#define ATH12K_ACPI_POWER_LIMIT_DATA_OFFSET 12
+#define ATH12K_ACPI_BIOS_SAR_GEO_OFFSET_LEN 18
+#define ATH12K_ACPI_BIOS_SAR_TABLE_LEN 22
+#define ATH12K_ACPI_CCA_THR_OFFSET_LEN 36
+
+#define ATH12K_ACPI_DSM_TAS_DATA_SIZE 69
+#define ATH12K_ACPI_DSM_BAND_EDGE_DATA_SIZE 100
+#define ATH12K_ACPI_DSM_TAS_CFG_SIZE 108
+
+#define ATH12K_ACPI_DSM_FUNC_MIN_BITMAP_SIZE 1
+#define ATH12K_ACPI_DSM_FUNC_MAX_BITMAP_SIZE 4
+
+#define ATH12K_ACPI_DSM_DISABLE_11BE_BIT BIT(0)
+#define ATH12K_ACPI_DSM_DISABLE_RFKILL_BIT BIT(2)
+
+#define ATH12K_ACPI_BDF_ANCHOR_STRING_LEN 3
+#define ATH12K_ACPI_BDF_ANCHOR_STRING "BDF"
+#define ATH12K_ACPI_BDF_MAX_LEN 100
+
+#define ATH12K_ACPI_DSM_GEO_OFFSET_DATA_SIZE (ATH12K_ACPI_GEO_OFFSET_DATA_OFFSET + \
+ ATH12K_ACPI_BIOS_SAR_GEO_OFFSET_LEN)
+#define ATH12K_ACPI_DSM_BIOS_SAR_DATA_SIZE (ATH12K_ACPI_POWER_LIMIT_DATA_OFFSET + \
+ ATH12K_ACPI_BIOS_SAR_TABLE_LEN)
+#define ATH12K_ACPI_DSM_CCA_DATA_SIZE (ATH12K_ACPI_CCA_THR_OFFSET_DATA_OFFSET + \
+ ATH12K_ACPI_CCA_THR_OFFSET_LEN)
+
+#ifdef CONFIG_ACPI
+
+int ath12k_acpi_start(struct ath12k_base *ab);
+void ath12k_acpi_stop(struct ath12k_base *ab);
+bool ath12k_acpi_get_disable_rfkill(struct ath12k_base *ab);
+bool ath12k_acpi_get_disable_11be(struct ath12k_base *ab);
+void ath12k_acpi_set_dsm_func(struct ath12k_base *ab);
+int ath12k_acpi_check_bdf_variant_name(struct ath12k_base *ab);
+
+#else
+
+static inline int ath12k_acpi_start(struct ath12k_base *ab)
+{
+ return 0;
+}
+
+static inline void ath12k_acpi_stop(struct ath12k_base *ab)
+{
+}
+
+static inline bool ath12k_acpi_get_disable_rfkill(struct ath12k_base *ab)
+{
+ return false;
+}
+
+static inline bool ath12k_acpi_get_disable_11be(struct ath12k_base *ab)
+{
+ return false;
+}
+
+static inline void ath12k_acpi_set_dsm_func(struct ath12k_base *ab)
+{
+}
+
+static inline int ath12k_acpi_check_bdf_variant_name(struct ath12k_base *ab)
+{
+ return 0;
+}
+
+#endif /* CONFIG_ACPI */
+
+#endif /* ATH12K_ACPI_H */
diff --git a/drivers/net/wireless/ath/ath12k/ahb.c b/drivers/net/wireless/ath/ath12k/ahb.c
new file mode 100644
index 000000000000..b30527c402f6
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/ahb.c
@@ -0,0 +1,1173 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/firmware/qcom/qcom_scm.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/remoteproc.h>
+#include <linux/soc/qcom/mdt_loader.h>
+#include <linux/soc/qcom/smem_state.h>
+#include "ahb.h"
+#include "debug.h"
+#include "hif.h"
+
+static const struct of_device_id ath12k_ahb_of_match[] = {
+ { .compatible = "qcom,ipq5332-wifi",
+ .data = (void *)ATH12K_HW_IPQ5332_HW10,
+ },
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, ath12k_ahb_of_match);
+
+#define ATH12K_IRQ_CE0_OFFSET 4
+#define ATH12K_MAX_UPDS 1
+#define ATH12K_UPD_IRQ_WRD_LEN 18
+static const char ath12k_userpd_irq[][9] = {"spawn",
+ "ready",
+ "stop-ack"};
+
+static const char *irq_name[ATH12K_IRQ_NUM_MAX] = {
+ "misc-pulse1",
+ "misc-latch",
+ "sw-exception",
+ "watchdog",
+ "ce0",
+ "ce1",
+ "ce2",
+ "ce3",
+ "ce4",
+ "ce5",
+ "ce6",
+ "ce7",
+ "ce8",
+ "ce9",
+ "ce10",
+ "ce11",
+ "host2wbm-desc-feed",
+ "host2reo-re-injection",
+ "host2reo-command",
+ "host2rxdma-monitor-ring3",
+ "host2rxdma-monitor-ring2",
+ "host2rxdma-monitor-ring1",
+ "reo2ost-exception",
+ "wbm2host-rx-release",
+ "reo2host-status",
+ "reo2host-destination-ring4",
+ "reo2host-destination-ring3",
+ "reo2host-destination-ring2",
+ "reo2host-destination-ring1",
+ "rxdma2host-monitor-destination-mac3",
+ "rxdma2host-monitor-destination-mac2",
+ "rxdma2host-monitor-destination-mac1",
+ "ppdu-end-interrupts-mac3",
+ "ppdu-end-interrupts-mac2",
+ "ppdu-end-interrupts-mac1",
+ "rxdma2host-monitor-status-ring-mac3",
+ "rxdma2host-monitor-status-ring-mac2",
+ "rxdma2host-monitor-status-ring-mac1",
+ "host2rxdma-host-buf-ring-mac3",
+ "host2rxdma-host-buf-ring-mac2",
+ "host2rxdma-host-buf-ring-mac1",
+ "rxdma2host-destination-ring-mac3",
+ "rxdma2host-destination-ring-mac2",
+ "rxdma2host-destination-ring-mac1",
+ "host2tcl-input-ring4",
+ "host2tcl-input-ring3",
+ "host2tcl-input-ring2",
+ "host2tcl-input-ring1",
+ "wbm2host-tx-completions-ring4",
+ "wbm2host-tx-completions-ring3",
+ "wbm2host-tx-completions-ring2",
+ "wbm2host-tx-completions-ring1",
+ "tcl2host-status-ring",
+};
+
+enum ext_irq_num {
+ host2wbm_desc_feed = 16,
+ host2reo_re_injection,
+ host2reo_command,
+ host2rxdma_monitor_ring3,
+ host2rxdma_monitor_ring2,
+ host2rxdma_monitor_ring1,
+ reo2host_exception,
+ wbm2host_rx_release,
+ reo2host_status,
+ reo2host_destination_ring4,
+ reo2host_destination_ring3,
+ reo2host_destination_ring2,
+ reo2host_destination_ring1,
+ rxdma2host_monitor_destination_mac3,
+ rxdma2host_monitor_destination_mac2,
+ rxdma2host_monitor_destination_mac1,
+ ppdu_end_interrupts_mac3,
+ ppdu_end_interrupts_mac2,
+ ppdu_end_interrupts_mac1,
+ rxdma2host_monitor_status_ring_mac3,
+ rxdma2host_monitor_status_ring_mac2,
+ rxdma2host_monitor_status_ring_mac1,
+ host2rxdma_host_buf_ring_mac3,
+ host2rxdma_host_buf_ring_mac2,
+ host2rxdma_host_buf_ring_mac1,
+ rxdma2host_destination_ring_mac3,
+ rxdma2host_destination_ring_mac2,
+ rxdma2host_destination_ring_mac1,
+ host2tcl_input_ring4,
+ host2tcl_input_ring3,
+ host2tcl_input_ring2,
+ host2tcl_input_ring1,
+ wbm2host_tx_completions_ring4,
+ wbm2host_tx_completions_ring3,
+ wbm2host_tx_completions_ring2,
+ wbm2host_tx_completions_ring1,
+ tcl2host_status_ring,
+};
+
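+/* On SoCs with ce_remap, low CE register offsets live in a separately
+ * mapped window (ab->mem_ce); everything else goes through the main WCSS
+ * mapping.
+ */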
+static u32 ath12k_ahb_read32(struct ath12k_base *ab, u32 offset)
+{
+ if (ab->ce_remap && offset < HAL_SEQ_WCSS_CMEM_OFFSET)
+ return ioread32(ab->mem_ce + offset);
+ return ioread32(ab->mem + offset);
+}
+
+static void ath12k_ahb_write32(struct ath12k_base *ab, u32 offset,
+ u32 value)
+{
+ if (ab->ce_remap && offset < HAL_SEQ_WCSS_CMEM_OFFSET)
+ iowrite32(value, ab->mem_ce + offset);
+ else
+ iowrite32(value, ab->mem + offset);
+}
+
+static void ath12k_ahb_cancel_workqueue(struct ath12k_base *ab)
+{
+ int i;
+
+ for (i = 0; i < ab->hw_params->ce_count; i++) {
+ struct ath12k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i];
+
+ if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
+ continue;
+
+ cancel_work_sync(&ce_pipe->intr_wq);
+ }
+}
+
+static void ath12k_ahb_ext_grp_disable(struct ath12k_ext_irq_grp *irq_grp)
+{
+ int i;
+
+ for (i = 0; i < irq_grp->num_irq; i++)
+ disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
+}
+
+static void __ath12k_ahb_ext_irq_disable(struct ath12k_base *ab)
+{
+ int i;
+
+ for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
+ struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
+
+ ath12k_ahb_ext_grp_disable(irq_grp);
+ if (irq_grp->napi_enabled) {
+ napi_synchronize(&irq_grp->napi);
+ napi_disable(&irq_grp->napi);
+ irq_grp->napi_enabled = false;
+ }
+ }
+}
+
+static void ath12k_ahb_ext_grp_enable(struct ath12k_ext_irq_grp *irq_grp)
+{
+ int i;
+
+ for (i = 0; i < irq_grp->num_irq; i++)
+ enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
+}
+
+static void ath12k_ahb_setbit32(struct ath12k_base *ab, u8 bit, u32 offset)
+{
+ u32 val;
+
+ val = ath12k_ahb_read32(ab, offset);
+ ath12k_ahb_write32(ab, offset, val | BIT(bit));
+}
+
+static void ath12k_ahb_clearbit32(struct ath12k_base *ab, u8 bit, u32 offset)
+{
+ u32 val;
+
+ val = ath12k_ahb_read32(ab, offset);
+ ath12k_ahb_write32(ab, offset, val & ~BIT(bit));
+}
+
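+/* Per-CE interrupt enables: IE1 covers source rings, while IE2 and IE3
+ * (the latter indexed with CE_HOST_IE_3_SHIFT) cover destination rings.
+ */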
+static void ath12k_ahb_ce_irq_enable(struct ath12k_base *ab, u16 ce_id)
+{
+ const struct ce_attr *ce_attr;
+ const struct ce_ie_addr *ce_ie_addr = ab->hw_params->ce_ie_addr;
+ u32 ie1_reg_addr, ie2_reg_addr, ie3_reg_addr;
+
+ ie1_reg_addr = ce_ie_addr->ie1_reg_addr;
+ ie2_reg_addr = ce_ie_addr->ie2_reg_addr;
+ ie3_reg_addr = ce_ie_addr->ie3_reg_addr;
+
+ ce_attr = &ab->hw_params->host_ce_config[ce_id];
+ if (ce_attr->src_nentries)
+ ath12k_ahb_setbit32(ab, ce_id, ie1_reg_addr);
+
+ if (ce_attr->dest_nentries) {
+ ath12k_ahb_setbit32(ab, ce_id, ie2_reg_addr);
+ ath12k_ahb_setbit32(ab, ce_id + CE_HOST_IE_3_SHIFT,
+ ie3_reg_addr);
+ }
+}
+
+static void ath12k_ahb_ce_irq_disable(struct ath12k_base *ab, u16 ce_id)
+{
+ const struct ce_attr *ce_attr;
+ const struct ce_ie_addr *ce_ie_addr = ab->hw_params->ce_ie_addr;
+ u32 ie1_reg_addr, ie2_reg_addr, ie3_reg_addr;
+
+ ie1_reg_addr = ce_ie_addr->ie1_reg_addr;
+ ie2_reg_addr = ce_ie_addr->ie2_reg_addr;
+ ie3_reg_addr = ce_ie_addr->ie3_reg_addr;
+
+ ce_attr = &ab->hw_params->host_ce_config[ce_id];
+ if (ce_attr->src_nentries)
+ ath12k_ahb_clearbit32(ab, ce_id, ie1_reg_addr);
+
+ if (ce_attr->dest_nentries) {
+ ath12k_ahb_clearbit32(ab, ce_id, ie2_reg_addr);
+ ath12k_ahb_clearbit32(ab, ce_id + CE_HOST_IE_3_SHIFT,
+ ie3_reg_addr);
+ }
+}
+
+static void ath12k_ahb_sync_ce_irqs(struct ath12k_base *ab)
+{
+ int i;
+ int irq_idx;
+
+ for (i = 0; i < ab->hw_params->ce_count; i++) {
+ if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
+ continue;
+
+ irq_idx = ATH12K_IRQ_CE0_OFFSET + i;
+ synchronize_irq(ab->irq_num[irq_idx]);
+ }
+}
+
+static void ath12k_ahb_sync_ext_irqs(struct ath12k_base *ab)
+{
+ int i, j;
+ int irq_idx;
+
+ for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
+ struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
+
+ for (j = 0; j < irq_grp->num_irq; j++) {
+ irq_idx = irq_grp->irqs[j];
+ synchronize_irq(ab->irq_num[irq_idx]);
+ }
+ }
+}
+
+static void ath12k_ahb_ce_irqs_enable(struct ath12k_base *ab)
+{
+ int i;
+
+ for (i = 0; i < ab->hw_params->ce_count; i++) {
+ if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
+ continue;
+ ath12k_ahb_ce_irq_enable(ab, i);
+ }
+}
+
+static void ath12k_ahb_ce_irqs_disable(struct ath12k_base *ab)
+{
+ int i;
+
+ for (i = 0; i < ab->hw_params->ce_count; i++) {
+ if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
+ continue;
+ ath12k_ahb_ce_irq_disable(ab, i);
+ }
+}
+
+static int ath12k_ahb_start(struct ath12k_base *ab)
+{
+ ath12k_ahb_ce_irqs_enable(ab);
+ ath12k_ce_rx_post_buf(ab);
+
+ return 0;
+}
+
+static void ath12k_ahb_ext_irq_enable(struct ath12k_base *ab)
+{
+ struct ath12k_ext_irq_grp *irq_grp;
+ int i;
+
+ for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
+ irq_grp = &ab->ext_irq_grp[i];
+ if (!irq_grp->napi_enabled) {
+ napi_enable(&irq_grp->napi);
+ irq_grp->napi_enabled = true;
+ }
+ ath12k_ahb_ext_grp_enable(irq_grp);
+ }
+}
+
+static void ath12k_ahb_ext_irq_disable(struct ath12k_base *ab)
+{
+ __ath12k_ahb_ext_irq_disable(ab);
+ ath12k_ahb_sync_ext_irqs(ab);
+}
+
+static void ath12k_ahb_stop(struct ath12k_base *ab)
+{
+ if (!test_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags))
+ ath12k_ahb_ce_irqs_disable(ab);
+ ath12k_ahb_sync_ce_irqs(ab);
+ ath12k_ahb_cancel_workqueue(ab);
+ timer_delete_sync(&ab->rx_replenish_retry);
+ ath12k_ce_cleanup_pipes(ab);
+}
+
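+/* Bring the userPD up: map the reserved memory region, load both firmware
+ * images via the MDT loader, authenticate them through SCM, then ask Q6
+ * over SMEM to spawn the userPD and wait for the spawn and ready IRQs.
+ */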
+static int ath12k_ahb_power_up(struct ath12k_base *ab)
+{
+ struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab);
+ char fw_name[ATH12K_USERPD_FW_NAME_LEN];
+ char fw2_name[ATH12K_USERPD_FW_NAME_LEN];
+ struct device *dev = ab->dev;
+ const struct firmware *fw, *fw2;
+ struct reserved_mem *rmem = NULL;
+ unsigned long time_left;
+ phys_addr_t mem_phys;
+ void *mem_region;
+ size_t mem_size;
+ u32 pasid;
+ int ret;
+
+ rmem = ath12k_core_get_reserved_mem(ab, 0);
+ if (!rmem)
+ return -ENODEV;
+
+ mem_phys = rmem->base;
+ mem_size = rmem->size;
+ mem_region = devm_memremap(dev, mem_phys, mem_size, MEMREMAP_WC);
+ if (IS_ERR(mem_region)) {
+ ath12k_err(ab, "unable to map memory region: %pa+%pa\n",
+ &rmem->base, &rmem->size);
+ return PTR_ERR(mem_region);
+ }
+
+ snprintf(fw_name, sizeof(fw_name), "%s/%s/%s%d%s", ATH12K_FW_DIR,
+ ab->hw_params->fw.dir, ATH12K_AHB_FW_PREFIX, ab_ahb->userpd_id,
+ ATH12K_AHB_FW_SUFFIX);
+
+ ret = request_firmware(&fw, fw_name, dev);
+ if (ret < 0) {
+ ath12k_err(ab, "request_firmware failed\n");
+ return ret;
+ }
+
+ ath12k_dbg(ab, ATH12K_DBG_AHB, "Booting fw image %s, size %zd\n", fw_name,
+ fw->size);
+
+ if (!fw->size) {
+ ath12k_err(ab, "Invalid firmware size\n");
+ ret = -EINVAL;
+ goto err_fw;
+ }
+
+ pasid = (u32_encode_bits(ab_ahb->userpd_id, ATH12K_USERPD_ID_MASK)) |
+ ATH12K_AHB_UPD_SWID;
+
+ /* Load FW image to a reserved memory location */
+ ret = qcom_mdt_load(dev, fw, fw_name, pasid, mem_region, mem_phys, mem_size,
+ &mem_phys);
+ if (ret) {
+ ath12k_err(ab, "Failed to load MDT segments: %d\n", ret);
+ goto err_fw;
+ }
+
+ snprintf(fw2_name, sizeof(fw2_name), "%s/%s/%s", ATH12K_FW_DIR,
+ ab->hw_params->fw.dir, ATH12K_AHB_FW2);
+
+ ret = request_firmware(&fw2, fw2_name, dev);
+ if (ret < 0) {
+ ath12k_err(ab, "request_firmware failed\n");
+ goto err_fw;
+ }
+
+ ath12k_dbg(ab, ATH12K_DBG_AHB, "Booting fw image %s, size %zd\n", fw2_name,
+ fw2->size);
+
+ if (!fw2->size) {
+ ath12k_err(ab, "Invalid firmware size\n");
+ ret = -EINVAL;
+ goto err_fw2;
+ }
+
+ ret = qcom_mdt_load_no_init(dev, fw2, fw2_name, mem_region, mem_phys,
+ mem_size, &mem_phys);
+ if (ret) {
+ ath12k_err(ab, "Failed to load MDT segments: %d\n", ret);
+ goto err_fw2;
+ }
+
+ /* Authenticate FW image using peripheral ID */
+ ret = qcom_scm_pas_auth_and_reset(pasid);
+ if (ret) {
+ ath12k_err(ab, "failed to boot the remote processor %d\n", ret);
+ goto err_fw2;
+ }
+
+ /* Instruct Q6 to spawn userPD thread */
+ ret = qcom_smem_state_update_bits(ab_ahb->spawn_state, BIT(ab_ahb->spawn_bit),
+ BIT(ab_ahb->spawn_bit));
+ if (ret) {
+ ath12k_err(ab, "Failed to update spawn state %d\n", ret);
+ goto err_fw2;
+ }
+
+ time_left = wait_for_completion_timeout(&ab_ahb->userpd_spawned,
+ ATH12K_USERPD_SPAWN_TIMEOUT);
+ if (!time_left) {
+ ath12k_err(ab, "UserPD spawn wait timed out\n");
+ ret = -ETIMEDOUT;
+ goto err_fw2;
+ }
+
+ time_left = wait_for_completion_timeout(&ab_ahb->userpd_ready,
+ ATH12K_USERPD_READY_TIMEOUT);
+ if (!time_left) {
+ ath12k_err(ab, "UserPD ready wait timed out\n");
+ ret = -ETIMEDOUT;
+ goto err_fw2;
+ }
+
+ qcom_smem_state_update_bits(ab_ahb->spawn_state, BIT(ab_ahb->spawn_bit), 0);
+
+ ath12k_dbg(ab, ATH12K_DBG_AHB, "UserPD%d is now UP\n", ab_ahb->userpd_id);
+
+err_fw2:
+ release_firmware(fw2);
+err_fw:
+ release_firmware(fw);
+ return ret;
+}
+
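+/* Tear the userPD down: signal the stop bit over SMEM, wait for the
+ * stop-ack IRQ, then release the firmware with an SCM PAS shutdown.
+ */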
+static void ath12k_ahb_power_down(struct ath12k_base *ab, bool is_suspend)
+{
+ struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab);
+ unsigned long time_left;
+ u32 pasid;
+ int ret;
+
+ qcom_smem_state_update_bits(ab_ahb->stop_state, BIT(ab_ahb->stop_bit),
+ BIT(ab_ahb->stop_bit));
+
+ time_left = wait_for_completion_timeout(&ab_ahb->userpd_stopped,
+ ATH12K_USERPD_STOP_TIMEOUT);
+ if (!time_left) {
+ ath12k_err(ab, "UserPD stop wait timed out\n");
+ return;
+ }
+
+ qcom_smem_state_update_bits(ab_ahb->stop_state, BIT(ab_ahb->stop_bit), 0);
+
+ pasid = (u32_encode_bits(ab_ahb->userpd_id, ATH12K_USERPD_ID_MASK)) |
+ ATH12K_AHB_UPD_SWID;
+ /* Release the firmware */
+ ret = qcom_scm_pas_shutdown(pasid);
+ if (ret)
+ ath12k_err(ab, "scm pas shutdown failed for userPD%d: %d\n",
+ ab_ahb->userpd_id, ret);
+}
+
+static void ath12k_ahb_init_qmi_ce_config(struct ath12k_base *ab)
+{
+ struct ath12k_qmi_ce_cfg *cfg = &ab->qmi.ce_cfg;
+
+ cfg->tgt_ce_len = ab->hw_params->target_ce_count;
+ cfg->tgt_ce = ab->hw_params->target_ce_config;
+ cfg->svc_to_ce_map_len = ab->hw_params->svc_to_ce_map_len;
+ cfg->svc_to_ce_map = ab->hw_params->svc_to_ce_map;
+ ab->qmi.service_ins_id = ab->hw_params->qmi_service_ins_id;
+}
+
+static void ath12k_ahb_ce_workqueue(struct work_struct *work)
+{
+ struct ath12k_ce_pipe *ce_pipe = from_work(ce_pipe, work, intr_wq);
+
+ ath12k_ce_per_engine_service(ce_pipe->ab, ce_pipe->pipe_num);
+
+ ath12k_ahb_ce_irq_enable(ce_pipe->ab, ce_pipe->pipe_num);
+}
+
+static irqreturn_t ath12k_ahb_ce_interrupt_handler(int irq, void *arg)
+{
+ struct ath12k_ce_pipe *ce_pipe = arg;
+
+ /* last interrupt received for this CE */
+ ce_pipe->timestamp = jiffies;
+
+ ath12k_ahb_ce_irq_disable(ce_pipe->ab, ce_pipe->pipe_num);
+
+ queue_work(system_bh_wq, &ce_pipe->intr_wq);
+
+ return IRQ_HANDLED;
+}
+
+static int ath12k_ahb_ext_grp_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct ath12k_ext_irq_grp *irq_grp = container_of(napi,
+ struct ath12k_ext_irq_grp,
+ napi);
+ struct ath12k_base *ab = irq_grp->ab;
+ int work_done;
+
+ work_done = ath12k_dp_service_srng(ab, irq_grp, budget);
+ if (work_done < budget) {
+ napi_complete_done(napi, work_done);
+ ath12k_ahb_ext_grp_enable(irq_grp);
+ }
+
+ if (work_done > budget)
+ work_done = budget;
+
+ return work_done;
+}
+
+static irqreturn_t ath12k_ahb_ext_interrupt_handler(int irq, void *arg)
+{
+ struct ath12k_ext_irq_grp *irq_grp = arg;
+
+ /* last interrupt received for this group */
+ irq_grp->timestamp = jiffies;
+
+ ath12k_ahb_ext_grp_disable(irq_grp);
+
+ napi_schedule(&irq_grp->napi);
+
+ return IRQ_HANDLED;
+}
+
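+/* Walk the hw ring masks for each external IRQ group, translate each set
+ * bit into its interrupt line and register the group's NAPI handler.
+ */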
+static int ath12k_ahb_config_ext_irq(struct ath12k_base *ab)
+{
+ const struct ath12k_hw_ring_mask *ring_mask;
+ struct ath12k_ext_irq_grp *irq_grp;
+ const struct hal_ops *hal_ops;
+ int i, j, irq, irq_idx, ret;
+ u32 num_irq;
+
+ ring_mask = ab->hw_params->ring_mask;
+ hal_ops = ab->hw_params->hal_ops;
+ for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
+ irq_grp = &ab->ext_irq_grp[i];
+ num_irq = 0;
+
+ irq_grp->ab = ab;
+ irq_grp->grp_id = i;
+
+ irq_grp->napi_ndev = alloc_netdev_dummy(0);
+ if (!irq_grp->napi_ndev)
+ return -ENOMEM;
+
+ netif_napi_add(irq_grp->napi_ndev, &irq_grp->napi,
+ ath12k_ahb_ext_grp_napi_poll);
+
+ for (j = 0; j < ATH12K_EXT_IRQ_NUM_MAX; j++) {
+ /* For TX ring, ensure that the ring mask and the
+ * tcl_to_wbm_rbm_map point to the same ring number.
+ */
+ if (ring_mask->tx[i] &
+ BIT(hal_ops->tcl_to_wbm_rbm_map[j].wbm_ring_num)) {
+ irq_grp->irqs[num_irq++] =
+ wbm2host_tx_completions_ring1 - j;
+ }
+
+ if (ring_mask->rx[i] & BIT(j)) {
+ irq_grp->irqs[num_irq++] =
+ reo2host_destination_ring1 - j;
+ }
+
+ if (ring_mask->rx_err[i] & BIT(j))
+ irq_grp->irqs[num_irq++] = reo2host_exception;
+
+ if (ring_mask->rx_wbm_rel[i] & BIT(j))
+ irq_grp->irqs[num_irq++] = wbm2host_rx_release;
+
+ if (ring_mask->reo_status[i] & BIT(j))
+ irq_grp->irqs[num_irq++] = reo2host_status;
+
+ if (ring_mask->rx_mon_dest[i] & BIT(j))
+ irq_grp->irqs[num_irq++] =
+ rxdma2host_monitor_destination_mac1;
+ }
+
+ irq_grp->num_irq = num_irq;
+
+ for (j = 0; j < irq_grp->num_irq; j++) {
+ irq_idx = irq_grp->irqs[j];
+
+ irq = platform_get_irq_byname(ab->pdev,
+ irq_name[irq_idx]);
+ ab->irq_num[irq_idx] = irq;
+ irq_set_status_flags(irq, IRQ_NOAUTOEN | IRQ_DISABLE_UNLAZY);
+ ret = devm_request_irq(ab->dev, irq,
+ ath12k_ahb_ext_interrupt_handler,
+ IRQF_TRIGGER_RISING,
+ irq_name[irq_idx], irq_grp);
+ if (ret)
+ ath12k_warn(ab, "failed request_irq for %d\n", irq);
+ }
+ }
+
+ return 0;
+}
+
+static int ath12k_ahb_config_irq(struct ath12k_base *ab)
+{
+ int irq, irq_idx, i;
+ int ret;
+
+ /* Configure CE irqs */
+ for (i = 0; i < ab->hw_params->ce_count; i++) {
+ struct ath12k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i];
+
+ if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
+ continue;
+
+ irq_idx = ATH12K_IRQ_CE0_OFFSET + i;
+
+ INIT_WORK(&ce_pipe->intr_wq, ath12k_ahb_ce_workqueue);
+ irq = platform_get_irq_byname(ab->pdev, irq_name[irq_idx]);
+ ret = devm_request_irq(ab->dev, irq, ath12k_ahb_ce_interrupt_handler,
+ IRQF_TRIGGER_RISING, irq_name[irq_idx],
+ ce_pipe);
+ if (ret)
+ return ret;
+
+ ab->irq_num[irq_idx] = irq;
+ }
+
+ /* Configure external interrupts */
+ ret = ath12k_ahb_config_ext_irq(ab);
+
+ return ret;
+}
+
+static int ath12k_ahb_map_service_to_pipe(struct ath12k_base *ab, u16 service_id,
+ u8 *ul_pipe, u8 *dl_pipe)
+{
+ const struct service_to_pipe *entry;
+ bool ul_set = false, dl_set = false;
+ u32 pipedir;
+ int i;
+
+ for (i = 0; i < ab->hw_params->svc_to_ce_map_len; i++) {
+ entry = &ab->hw_params->svc_to_ce_map[i];
+
+ if (__le32_to_cpu(entry->service_id) != service_id)
+ continue;
+
+ pipedir = __le32_to_cpu(entry->pipedir);
+ if (pipedir == PIPEDIR_IN || pipedir == PIPEDIR_INOUT) {
+ WARN_ON(dl_set);
+ *dl_pipe = __le32_to_cpu(entry->pipenum);
+ dl_set = true;
+ }
+
+ if (pipedir == PIPEDIR_OUT || pipedir == PIPEDIR_INOUT) {
+ WARN_ON(ul_set);
+ *ul_pipe = __le32_to_cpu(entry->pipenum);
+ ul_set = true;
+ }
+ }
+
+ if (WARN_ON(!ul_set || !dl_set))
+ return -ENOENT;
+
+ return 0;
+}
+
+static const struct ath12k_hif_ops ath12k_ahb_hif_ops_ipq5332 = {
+ .start = ath12k_ahb_start,
+ .stop = ath12k_ahb_stop,
+ .read32 = ath12k_ahb_read32,
+ .write32 = ath12k_ahb_write32,
+ .irq_enable = ath12k_ahb_ext_irq_enable,
+ .irq_disable = ath12k_ahb_ext_irq_disable,
+ .map_service_to_pipe = ath12k_ahb_map_service_to_pipe,
+ .power_up = ath12k_ahb_power_up,
+ .power_down = ath12k_ahb_power_down,
+};
+
+static irqreturn_t ath12k_userpd_irq_handler(int irq, void *data)
+{
+ struct ath12k_base *ab = data;
+ struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab);
+
+ if (irq == ab_ahb->userpd_irq_num[ATH12K_USERPD_SPAWN_IRQ]) {
+ complete(&ab_ahb->userpd_spawned);
+ } else if (irq == ab_ahb->userpd_irq_num[ATH12K_USERPD_READY_IRQ]) {
+ complete(&ab_ahb->userpd_ready);
+ } else if (irq == ab_ahb->userpd_irq_num[ATH12K_USERPD_STOP_ACK_IRQ]) {
+ complete(&ab_ahb->userpd_stopped);
+ } else {
+ ath12k_err(ab, "Invalid userpd interrupt\n");
+ return IRQ_NONE;
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int ath12k_ahb_config_rproc_irq(struct ath12k_base *ab)
+{
+ struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab);
+ int i, ret;
+ char *upd_irq_name;
+
+ for (i = 0; i < ATH12K_USERPD_MAX_IRQ; i++) {
+ ab_ahb->userpd_irq_num[i] = platform_get_irq_byname(ab->pdev,
+ ath12k_userpd_irq[i]);
+ if (ab_ahb->userpd_irq_num[i] < 0)
+ return ab_ahb->userpd_irq_num[i];
+
+ upd_irq_name = devm_kzalloc(&ab->pdev->dev, ATH12K_UPD_IRQ_WRD_LEN,
+ GFP_KERNEL);
+ if (!upd_irq_name)
+ return -ENOMEM;
+
+ scnprintf(upd_irq_name, ATH12K_UPD_IRQ_WRD_LEN, "UserPD%u-%s",
+ ab_ahb->userpd_id, ath12k_userpd_irq[i]);
+ ret = devm_request_threaded_irq(&ab->pdev->dev, ab_ahb->userpd_irq_num[i],
+ NULL, ath12k_userpd_irq_handler,
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+ upd_irq_name, ab);
+ if (ret)
+ return dev_err_probe(&ab->pdev->dev, ret,
+ "Request %s irq failed: %d\n",
+ ath12k_userpd_irq[i], ret);
+ }
+
+ ab_ahb->spawn_state = devm_qcom_smem_state_get(&ab->pdev->dev, "spawn",
+ &ab_ahb->spawn_bit);
+ if (IS_ERR(ab_ahb->spawn_state))
+ return dev_err_probe(&ab->pdev->dev, PTR_ERR(ab_ahb->spawn_state),
+ "Failed to acquire spawn state\n");
+
+ ab_ahb->stop_state = devm_qcom_smem_state_get(&ab->pdev->dev, "stop",
+ &ab_ahb->stop_bit);
+ if (IS_ERR(ab_ahb->stop_state))
+ return dev_err_probe(&ab->pdev->dev, PTR_ERR(ab_ahb->stop_state),
+ "Failed to acquire stop state\n");
+
+ init_completion(&ab_ahb->userpd_spawned);
+ init_completion(&ab_ahb->userpd_ready);
+ init_completion(&ab_ahb->userpd_stopped);
+ return 0;
+}
+
+static int ath12k_ahb_root_pd_state_notifier(struct notifier_block *nb,
+ const unsigned long event, void *data)
+{
+ struct ath12k_ahb *ab_ahb = container_of(nb, struct ath12k_ahb, root_pd_nb);
+ struct ath12k_base *ab = ab_ahb->ab;
+
+ if (event == ATH12K_RPROC_AFTER_POWERUP) {
+ ath12k_dbg(ab, ATH12K_DBG_AHB, "Root PD is UP\n");
+ complete(&ab_ahb->rootpd_ready);
+ }
+
+ return 0;
+}
+
+static int ath12k_ahb_register_rproc_notifier(struct ath12k_base *ab)
+{
+ struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab);
+
+ ab_ahb->root_pd_nb.notifier_call = ath12k_ahb_root_pd_state_notifier;
+ init_completion(&ab_ahb->rootpd_ready);
+
+ ab_ahb->root_pd_notifier = qcom_register_ssr_notifier(ab_ahb->tgt_rproc->name,
+ &ab_ahb->root_pd_nb);
+ if (IS_ERR(ab_ahb->root_pd_notifier))
+ return PTR_ERR(ab_ahb->root_pd_notifier);
+
+ return 0;
+}
+
+static void ath12k_ahb_unregister_rproc_notifier(struct ath12k_base *ab)
+{
+ struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab);
+
+ if (!ab_ahb->root_pd_notifier) {
+ ath12k_err(ab, "Rproc notifier not registered\n");
+ return;
+ }
+
+ qcom_unregister_ssr_notifier(ab_ahb->root_pd_notifier,
+ &ab_ahb->root_pd_nb);
+ ab_ahb->root_pd_notifier = NULL;
+}
+
+static int ath12k_ahb_get_rproc(struct ath12k_base *ab)
+{
+ struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab);
+ struct device *dev = ab->dev;
+ struct device_node *np;
+ struct rproc *prproc;
+
+ np = of_parse_phandle(dev->of_node, "qcom,rproc", 0);
+ if (!np) {
+ ath12k_err(ab, "failed to get q6_rproc handle\n");
+ return -ENOENT;
+ }
+
+ prproc = rproc_get_by_phandle(np->phandle);
+ of_node_put(np);
+ if (!prproc)
+ return dev_err_probe(&ab->pdev->dev, -EPROBE_DEFER,
+ "failed to get rproc\n");
+
+ ab_ahb->tgt_rproc = prproc;
+
+ return 0;
+}
+
+static int ath12k_ahb_boot_root_pd(struct ath12k_base *ab)
+{
+ struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab);
+ unsigned long time_left;
+ int ret;
+
+ ret = rproc_boot(ab_ahb->tgt_rproc);
+ if (ret < 0) {
+ ath12k_err(ab, "RootPD boot failed\n");
+ return ret;
+ }
+
+ time_left = wait_for_completion_timeout(&ab_ahb->rootpd_ready,
+ ATH12K_ROOTPD_READY_TIMEOUT);
+ if (!time_left) {
+ ath12k_err(ab, "RootPD ready wait timed out\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int ath12k_ahb_configure_rproc(struct ath12k_base *ab)
+{
+ struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab);
+ int ret;
+
+ ret = ath12k_ahb_get_rproc(ab);
+ if (ret < 0)
+ return ret;
+
+ ret = ath12k_ahb_register_rproc_notifier(ab);
+ if (ret < 0) {
+ ret = dev_err_probe(&ab->pdev->dev, ret,
+ "failed to register rproc notifier\n");
+ goto err_put_rproc;
+ }
+
+ if (ab_ahb->tgt_rproc->state != RPROC_RUNNING) {
+ ret = ath12k_ahb_boot_root_pd(ab);
+ if (ret < 0) {
+ ath12k_err(ab, "failed to boot the remote processor Q6\n");
+ goto err_unreg_notifier;
+ }
+ }
+
+ return ath12k_ahb_config_rproc_irq(ab);
+
+err_unreg_notifier:
+ ath12k_ahb_unregister_rproc_notifier(ab);
+
+err_put_rproc:
+ rproc_put(ab_ahb->tgt_rproc);
+ return ret;
+}
+
+static void ath12k_ahb_deconfigure_rproc(struct ath12k_base *ab)
+{
+ struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab);
+
+ ath12k_ahb_unregister_rproc_notifier(ab);
+ rproc_put(ab_ahb->tgt_rproc);
+}
+
+static int ath12k_ahb_resource_init(struct ath12k_base *ab)
+{
+ struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab);
+ struct platform_device *pdev = ab->pdev;
+ struct resource *mem_res;
+ int ret;
+
+ ab->mem = devm_platform_get_and_ioremap_resource(pdev, 0, &mem_res);
+ if (IS_ERR(ab->mem)) {
+ ret = dev_err_probe(&pdev->dev, PTR_ERR(ab->mem), "ioremap error\n");
+ goto out;
+ }
+
+ ab->mem_len = resource_size(mem_res);
+
+ if (ab->hw_params->ce_remap) {
+ const struct ce_remap *ce_remap = ab->hw_params->ce_remap;
+ /* The CE register space is moved out of WCSS and is not
+ * contiguous with it, so remap the CE registers into a
+ * separate window for access.
+ */
+ ab->mem_ce = ioremap(ce_remap->base, ce_remap->size);
+ if (!ab->mem_ce) {
+ dev_err(&pdev->dev, "ce ioremap error\n");
+ ret = -ENOMEM;
+ goto err_mem_unmap;
+ }
+ ab->ce_remap = true;
+ ab->ce_remap_base_addr = HAL_IPQ5332_CE_WFSS_REG_BASE;
+ }
+
+ ab_ahb->xo_clk = devm_clk_get(ab->dev, "xo");
+ if (IS_ERR(ab_ahb->xo_clk)) {
+ ret = dev_err_probe(&pdev->dev, PTR_ERR(ab_ahb->xo_clk),
+ "failed to get xo clock\n");
+ goto err_mem_ce_unmap;
+ }
+
+ ret = clk_prepare_enable(ab_ahb->xo_clk);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to enable gcc_xo_clk: %d\n", ret);
+ goto err_clock_deinit;
+ }
+
+ return 0;
+
+err_clock_deinit:
+ devm_clk_put(ab->dev, ab_ahb->xo_clk);
+
+err_mem_ce_unmap:
+ ab_ahb->xo_clk = NULL;
+ if (ab->hw_params->ce_remap)
+ iounmap(ab->mem_ce);
+
+err_mem_unmap:
+ ab->mem_ce = NULL;
+ devm_iounmap(ab->dev, ab->mem);
+
+out:
+ ab->mem = NULL;
+ return ret;
+}
+
+static void ath12k_ahb_resource_deinit(struct ath12k_base *ab)
+{
+ struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab);
+
+ if (ab->mem)
+ devm_iounmap(ab->dev, ab->mem);
+
+ if (ab->mem_ce)
+ iounmap(ab->mem_ce);
+
+ ab->mem = NULL;
+ ab->mem_ce = NULL;
+
+ clk_disable_unprepare(ab_ahb->xo_clk);
+ devm_clk_put(ab->dev, ab_ahb->xo_clk);
+ ab_ahb->xo_clk = NULL;
+}
+
+static int ath12k_ahb_probe(struct platform_device *pdev)
+{
+ struct ath12k_base *ab;
+ const struct ath12k_hif_ops *hif_ops;
+ struct ath12k_ahb *ab_ahb;
+ enum ath12k_hw_rev hw_rev;
+ u32 addr, userpd_id;
+ int ret;
+
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to set 32-bit coherent dma\n");
+ return ret;
+ }
+
+ ab = ath12k_core_alloc(&pdev->dev, sizeof(struct ath12k_ahb),
+ ATH12K_BUS_AHB);
+ if (!ab)
+ return -ENOMEM;
+
+ hw_rev = (enum ath12k_hw_rev)(kernel_ulong_t)of_device_get_match_data(&pdev->dev);
+ switch (hw_rev) {
+ case ATH12K_HW_IPQ5332_HW10:
+ hif_ops = &ath12k_ahb_hif_ops_ipq5332;
+ userpd_id = ATH12K_IPQ5332_USERPD_ID;
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ goto err_core_free;
+ }
+
+ ab->hif.ops = hif_ops;
+ ab->pdev = pdev;
+ ab->hw_rev = hw_rev;
+ ab->target_mem_mode = ATH12K_QMI_MEMORY_MODE_DEFAULT;
+ platform_set_drvdata(pdev, ab);
+ ab_ahb = ath12k_ab_to_ahb(ab);
+ ab_ahb->ab = ab;
+ ab_ahb->userpd_id = userpd_id;
+
+ /* Set the fixed memory region flag for platforms that support fixed
+ * memory reservation from DT. If memory is reserved from DT for the FW,
+ * the ath12k driver does not need to allocate it.
+ */
+ if (!of_property_read_u32(ab->dev->of_node, "memory-region", &addr))
+ set_bit(ATH12K_FLAG_FIXED_MEM_REGION, &ab->dev_flags);
+
+ ret = ath12k_core_pre_init(ab);
+ if (ret)
+ goto err_core_free;
+
+ ret = ath12k_ahb_resource_init(ab);
+ if (ret)
+ goto err_core_free;